|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Smart Contracts Audit dataset.""" |
|
|
|
|
|
import hashlib
import os
import re

import datasets
import pandas as pd
from pyparsing import col  # NOTE(review): unused — likely an accidental auto-import; safe to remove
|
|
|
|
|
_CITATION = """\ |
|
@misc{storhaug2022smartcontractsaudit, |
|
title = {Smart Contracts Audit Dataset}, |
|
author={André Storhaug}, |
|
year={2022} |
|
} |
|
""" |
|
|
|
_DESCRIPTION = """\ |
|
Smart Contracts Audit Dataset. |
|
This is a dataset of audited verified (Etherscan.io) Smart Contracts \ |
|
that are deployed to the Ethereum blockchain. |
|
""" |
|
|
|
|
|
_HOMEPAGE = "https://andstor.github.io/verified-smart-contracts-audit" |
|
|
|
|
|
_LICENSE = "" |
|
|
|
|
|
|
|
|
|
_URLS = { |
|
"flattened": { |
|
"dev": [f"data/flattened/validation/part.{part}.parquet" for part in range(2)], |
|
"test": [f"data/flattened/test/part.{part}.parquet" for part in range(2)], |
|
"train": [f"data/flattened/train/part.{part}.parquet" for part in range(11)] |
|
}, |
|
"inflated": { |
|
"dev": [f"data/inflated/validation/part.{part}.parquet" for part in range(1)], |
|
"test": [f"data/inflated/test/part.{part}.parquet" for part in range(1)], |
|
"train": [f"data/inflated/train/part.{part}.parquet" for part in range(5)] |
|
}, |
|
"metadata": "data/metadata.parquet" |
|
} |
|
|
|
|
|
_TOOLS = { |
|
'flattened': { |
|
'all': ["solidetector", "slither", "oyente", "smartcheck"], |
|
'solidetector': ["solidetector"], |
|
'slither': ['slither'], |
|
'oyente': ['oyente'], |
|
'smartcheck': ['smartcheck'], |
|
}, |
|
'inflated': { |
|
'all': ["solidetector"], |
|
'solidetector': ["solidetector"], |
|
} |
|
} |
|
|
|
_TOOLS_AUDIT_DESC = { |
|
'solidetector': { |
|
'level_col': 'severity', |
|
'levels': {'High': 3, 'Medium': 2, 'Low': 1 }, |
|
}, |
|
'slither': { |
|
'level_col': 'impact', |
|
'levels': {'High': 3, 'Medium': 2, 'Low': 1, 'Informational': -1, 'Optimization': -2}, |
|
}, |
|
'oyente': { |
|
'level_col': 'level', |
|
'levels': {'Warning': 3}, |
|
}, |
|
'smartcheck': { |
|
'level_col': 'severity', |
|
'levels': {3: 3, 2: 2, 1: 1}, |
|
} |
|
} |
|
|
|
|
|
_LEVELS = { |
|
'High': 3, |
|
'Warning': 3, |
|
3: 3, |
|
'Medium': 2, |
|
2: 2, |
|
'Low': 1, |
|
1: 1, |
|
'Informational': -1, |
|
'Optimization': -2, |
|
} |
|
|
|
_EMBEDDED_LEVEL = "High" |
|
|
|
def _check_strings(search_list, input_string): |
|
return [s in input_string for s in search_list] |
|
|
|
|
|
class SmartContractsAudit(datasets.GeneratorBasedBuilder): |
|
"""Smart Contracts Audit Dataset.""" |
|
|
|
VERSION = datasets.Version("1.0.0") |
|
|
|
|
|
|
|
|
|
BUILDER_CONFIGS = [ |
|
datasets.BuilderConfig(name="flattened_all", version=VERSION, description="Flattened data labeled with all tools"), |
|
datasets.BuilderConfig(name="flattened_all_extended", version=VERSION, description="Flattened data with metadata, labeled with all tools"), |
|
datasets.BuilderConfig(name="flattened_all_embedded", version=VERSION, description="Flattened data with embedded labeled with all tools"), |
|
|
|
|
|
datasets.BuilderConfig(name="flattened_slither", version=VERSION, description="Flattened data with metadata, labeled with SoliDetector"), |
|
datasets.BuilderConfig(name="flattened_slither_extended", version=VERSION, description="Flattened data labeled with SoliDetector"), |
|
datasets.BuilderConfig(name="flattened_slither_embedded", version=VERSION, description="Flattened data with embedded labeled with SoliDetector"), |
|
|
|
|
|
datasets.BuilderConfig(name="flattened_solidetector", version=VERSION, description="Flattened data with metadata, labeled with SoliDetector"), |
|
datasets.BuilderConfig(name="flattened_solidetector_extended", version=VERSION, description="Flattened data labeled with SoliDetector"), |
|
datasets.BuilderConfig(name="flattened_solidetector_embedded", version=VERSION, description="Flattened data with embedded labeled with SoliDetector"), |
|
|
|
|
|
datasets.BuilderConfig(name="flattened_oyente", version=VERSION, description="Flattened data with metadata, labeled with Oyente"), |
|
datasets.BuilderConfig(name="flattened_oyente_extended", version=VERSION, description="Flattened data labeled with Oyente"), |
|
datasets.BuilderConfig(name="flattened_oyente_embedded", version=VERSION, description="Flattened data with embedded labeled with Oyente"), |
|
|
|
|
|
datasets.BuilderConfig(name="flattened_smartcheck", version=VERSION, description="Flattened data with metadata, labeled with SmartCheck"), |
|
datasets.BuilderConfig(name="flattened_smartcheck_extended", version=VERSION, description="Flattened data labeled with SmartCheck"), |
|
datasets.BuilderConfig(name="flattened_smartcheck_embedded", version=VERSION, description="Flattened data with embedded labeled with SmartCheck"), |
|
|
|
|
|
datasets.BuilderConfig(name="inflated_all", version=VERSION, description="Inflated data labeled with all tools"), |
|
datasets.BuilderConfig(name="inflated_all_embedded", version=VERSION, description="Inflated data with embedded labeled with all tools"), |
|
|
|
|
|
datasets.BuilderConfig(name="inflated_solidetector", version=VERSION, description="Inflated data labeled with SoliDetector"), |
|
datasets.BuilderConfig(name="inflated_solidetector_embedded", version=VERSION, description="Inflated data with embedded labeled with SoliDetector"), |
|
|
|
|
|
|
|
|
|
] |
|
|
|
DEFAULT_CONFIG_NAME = "inflated_all" |
|
|
|
def _info(self): |
|
|
|
data_split = self.config.name.split("_")[0] |
|
tool = self.config.name.split("_")[1] |
|
|
|
if "embedded" in self.config.name: |
|
features = datasets.Features( |
|
{ |
|
"text": datasets.Value("string"), |
|
"language": datasets.Value("string") |
|
} |
|
) |
|
elif "flattened" in self.config.name: |
|
features = datasets.Features( |
|
{ |
|
'contract_name': datasets.Value("string"), |
|
'contract_address': datasets.Value("string"), |
|
'language': datasets.Value("string"), |
|
'source_code': datasets.Value("string"), |
|
**{ t: datasets.Value("string") for t in _TOOLS[data_split][tool] }, |
|
'abi': datasets.Value("string"), |
|
'compiler_version': datasets.Value("string"), |
|
'optimization_used': datasets.Value("bool"), |
|
'runs': datasets.Value("int64"), |
|
'constructor_arguments': datasets.Value("string"), |
|
'evm_version': datasets.Value("string"), |
|
'library': datasets.Value("string"), |
|
'license_type': datasets.Value("string"), |
|
'proxy': datasets.Value("bool"), |
|
'implementation': datasets.Value("string"), |
|
'swarm_source': datasets.Value("string") |
|
} |
|
) |
|
elif "inflated" in self.config.name: |
|
features = datasets.Features( |
|
{ |
|
'contract_name': datasets.Value("string"), |
|
'file_path': datasets.Value("string"), |
|
'contract_address': datasets.Value("string"), |
|
'language': datasets.Value("string"), |
|
'source_code': datasets.Value("string"), |
|
**{ t: datasets.Value("string") for t in _TOOLS[data_split][tool] }, |
|
'compiler_version': datasets.Value("string"), |
|
'license_type': datasets.Value("string"), |
|
'swarm_source': datasets.Value("string") |
|
} |
|
) |
|
|
|
if "extended" in self.config.name: |
|
features["tx_count"] = datasets.Value("int64") |
|
features["balance"] = datasets.Value("string") |
|
|
|
return datasets.DatasetInfo( |
|
|
|
description=_DESCRIPTION, |
|
|
|
features=features, |
|
|
|
|
|
|
|
|
|
homepage=_HOMEPAGE, |
|
|
|
license=_LICENSE, |
|
|
|
citation=_CITATION, |
|
) |
|
|
|
def _split_generators(self, dl_manager): |
|
|
|
|
|
|
|
|
|
|
|
|
|
urls = _URLS[self.config.name.split("_")[0]] |
|
downloaded_files = dl_manager.download_and_extract(urls) |
|
|
|
metadata = None |
|
if "extended" in self.config.name: |
|
metadata = dl_manager.download_and_extract(_URLS["metadata"]) |
|
|
|
if "flattened" in self.config.name or "inflated" in self.config.name: |
|
return [ |
|
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": downloaded_files["train"], "metadata": metadata}), |
|
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"files": downloaded_files["dev"], "metadata": metadata}), |
|
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"files": downloaded_files["test"], "metadata": metadata}), |
|
] |
|
else: |
|
return [ |
|
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": downloaded_files["train"], "metadata": metadata}), |
|
] |
|
|
|
|
|
def _generate_examples(self, files, metadata): |
|
"""Yields examples.""" |
|
|
|
|
|
|
|
data_split = self.config.name.split("_")[0] |
|
tool = self.config.name.split("_")[1] |
|
|
|
lvl = _EMBEDDED_LEVEL |
|
|
|
|
|
|
|
|
|
|
|
if metadata is not None: |
|
meta = pd.read_parquet(metadata) |
|
|
|
for path in files: |
|
if "embedded" in self.config.name: |
|
columns = ['contract_address', 'source_code', 'language'] |
|
columns.extend(["file_path"] if "inflated" in self.config.name else []) |
|
columns.extend(_TOOLS[data_split][tool]) |
|
data = pd.read_parquet(path, columns=columns) |
|
elif "flattened" in self.config.name: |
|
data = pd.read_parquet(path) |
|
data['runs'].fillna(0, inplace=True) |
|
else: |
|
data = pd.read_parquet(path) |
|
|
|
|
|
if metadata is not None: |
|
data = pd.merge(data, meta, how="left", on="contract_address") |
|
|
|
for index, row in data.iterrows(): |
|
|
|
if "flattened" in self.config.name: |
|
|
|
key = row['contract_address'] |
|
if "embedded" in self.config.name: |
|
is_vulnerable = False |
|
is_secure = False |
|
for t in _TOOLS[data_split][tool]: |
|
if pd.isnull(row[t]): |
|
continue |
|
if row[t] == "[]": |
|
is_secure = True |
|
continue |
|
|
|
vuln_levels = [_TOOLS_AUDIT_DESC[t]["level_col"] + '": "' + k for k,v in _LEVELS.items() if v >= _LEVELS[lvl]] |
|
if any(_check_strings(vuln_levels, row[t])): |
|
is_vulnerable = True |
|
break |
|
else: |
|
is_secure = True |
|
continue |
|
|
|
label = "" |
|
if is_vulnerable: |
|
label = "// VULNERABLE\n" |
|
elif is_secure: |
|
label = "// SECURE\n" |
|
else: |
|
label = "// UNKNOWN\n" |
|
|
|
yield key, { |
|
'text': label + row['source_code'], |
|
'language': row['language'], |
|
} |
|
else: |
|
yield key, { |
|
'contract_name': row['contract_name'], |
|
'contract_address': row['contract_address'], |
|
'language': row['language'], |
|
'source_code': row['source_code'], |
|
**{ t: row[t] for t in _TOOLS[data_split][tool] }, |
|
'abi': row['abi'], |
|
'compiler_version': row['compiler_version'], |
|
'optimization_used': row['optimization_used'], |
|
'runs': row['runs'], |
|
'constructor_arguments': row['constructor_arguments'], |
|
'evm_version': row['evm_version'], |
|
'library': row['library'], |
|
'license_type': row['license_type'], |
|
'proxy': row['proxy'], |
|
'implementation': row['implementation'], |
|
'swarm_source': row['swarm_source'], |
|
**({'tx_count': row["tx_count"]} if metadata is not None else {}), |
|
**({'balance': row["balance"]} if metadata is not None else {}) |
|
} |
|
|
|
elif "inflated" in self.config.name: |
|
|
|
key = row['contract_address'] + ":" + row['file_path'] + ":" + str(hash(row['source_code'])) |
|
if "embedded" in self.config.name: |
|
is_vulnerable = False |
|
is_secure = False |
|
for t in _TOOLS[data_split][tool]: |
|
if pd.isnull(row[t]): |
|
continue |
|
if row[t] == "[]": |
|
is_secure = True |
|
continue |
|
|
|
vuln_levels = [_TOOLS_AUDIT_DESC[t]["level_col"] + '": "' + k for k,v in _LEVELS.items() if v >= _LEVELS[lvl]] |
|
if any(_check_strings(vuln_levels, row[t])): |
|
is_vulnerable = True |
|
break |
|
else: |
|
is_secure = True |
|
continue |
|
|
|
label = "" |
|
if is_vulnerable: |
|
label = "// VULNERABLE\n" |
|
elif is_secure: |
|
label = "// SECURE\n" |
|
else: |
|
label = "// UNKNOWN\n" |
|
|
|
yield key, { |
|
'text': label + row['source_code'], |
|
'language': row['language'], |
|
} |
|
else: |
|
yield key, { |
|
'contract_name': row['contract_name'], |
|
'file_path': row['file_path'], |
|
'contract_address': row['contract_address'], |
|
'language': row['language'], |
|
'source_code': row['source_code'], |
|
**{ t: row[t] for t in _TOOLS[data_split][tool] }, |
|
'compiler_version': row['compiler_version'], |
|
'license_type': row['license_type'], |
|
'swarm_source': row['swarm_source'] |
|
} |
|
|