# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Smart Contracts Audit dataset."""
import pandas as pd
import datasets
_CITATION = """\
@misc{storhaug2022smartcontractsaudit,
  title = {Smart Contracts Audit Dataset},
  author = {André Storhaug},
  year = {2022}
}
"""
_DESCRIPTION = """\
Smart Contracts Audit Dataset.
This is a dataset of audited, verified (Etherscan.io) smart contracts \
deployed to the Ethereum blockchain.
"""
# Official homepage for the dataset
_HOMEPAGE = "https://andstor.github.io/verified-smart-contracts-audit"
# TODO: Add the license for the dataset here if you can find it
_LICENSE = ""
# Add link to the official dataset URLs here
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_URLS = {
"flattened": {
"dev": [f"data/flattened/validation/part.{part}.parquet" for part in range(2)],
"test": [f"data/flattened/test/part.{part}.parquet" for part in range(2)],
"train": [f"data/flattened/train/part.{part}.parquet" for part in range(11)]
},
"inflated": {
"dev": [f"data/inflated/validation/part.{part}.parquet" for part in range(1)],
"test": [f"data/inflated/test/part.{part}.parquet" for part in range(1)],
"train": [f"data/inflated/train/part.{part}.parquet" for part in range(5)]
},
"metadata": "data/metadata.parquet"
}
# Supported tools and columns config
_TOOLS = {
'flattened': {
'all': ["solidetector", "slither", "oyente", "smartcheck"],
'solidetector': ["solidetector"],
'slither': ['slither'],
'oyente': ['oyente'],
'smartcheck': ['smartcheck'],
},
'inflated': {
'all': ["solidetector"],
'solidetector': ["solidetector"],
}
}
_TOOLS_AUDIT_DESC = {
'solidetector': {
'level_col': 'severity',
'levels': {'High': 3, 'Medium': 2, 'Low': 1 },
},
'slither': {
'level_col': 'impact',
'levels': {'High': 3, 'Medium': 2, 'Low': 1, 'Informational': -1, 'Optimization': -2},
},
'oyente': {
'level_col': 'level',
'levels': {'Warning': 3},
},
'smartcheck': {
'level_col': 'severity',
'levels': {3: 3, 2: 2, 1: 1},
}
}
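# Unified severity scale across all tools. Embedded configs mark a contract as
# VULNERABLE when any reported finding is at or above _EMBEDDED_LEVEL, as SECURE
# when every available report is empty or only contains lower-level findings,
# and as UNKNOWN when no tool report is available for the contract.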
_LEVELS = {
'High': 3,
'Warning': 3,
3: 3,
'Medium': 2,
2: 2,
'Low': 1,
1: 1,
'Informational': -1,
'Optimization': -2,
}
_EMBEDDED_LEVEL = "High"
def _check_strings(search_list, input_string):
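    """Return a list of booleans, one per entry in search_list, telling whether that string occurs in input_string."""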
return [s in input_string for s in search_list]
# Name of the dataset usually matches the script name with CamelCase instead of snake_case
class SmartContractsAudit(datasets.GeneratorBasedBuilder):
"""Smart Contracts Audit Dataset."""
VERSION = datasets.Version("1.0.0")
    # You will be able to load one or the other configuration in the following list with
    # data = datasets.load_dataset('my_dataset', 'flattened_all')
    # data = datasets.load_dataset('my_dataset', 'inflated_solidetector')
BUILDER_CONFIGS = [
datasets.BuilderConfig(name="flattened_all", version=VERSION, description="Flattened data labeled with all tools"),
datasets.BuilderConfig(name="flattened_all_extended", version=VERSION, description="Flattened data with metadata, labeled with all tools"),
datasets.BuilderConfig(name="flattened_all_embedded", version=VERSION, description="Flattened data with embedded labeled with all tools"),
#*(lambda VERSION=VERSION: [ datasets.BuilderConfig(name="flattened_all_embedded_" + lvl, version=VERSION) for lvl in ["high", "medium", "low", "informational", "optimization"]])(),
datasets.BuilderConfig(name="flattened_slither", version=VERSION, description="Flattened data with metadata, labeled with SoliDetector"),
datasets.BuilderConfig(name="flattened_slither_extended", version=VERSION, description="Flattened data labeled with SoliDetector"),
datasets.BuilderConfig(name="flattened_slither_embedded", version=VERSION, description="Flattened data with embedded labeled with SoliDetector"),
#*(lambda VERSION=VERSION: [ datasets.BuilderConfig(name="flattened_slither_embedded_" + lvl, version=VERSION) for lvl in ["high", "medium", "low", "informational", "optimization"]])(),
datasets.BuilderConfig(name="flattened_solidetector", version=VERSION, description="Flattened data with metadata, labeled with SoliDetector"),
datasets.BuilderConfig(name="flattened_solidetector_extended", version=VERSION, description="Flattened data labeled with SoliDetector"),
datasets.BuilderConfig(name="flattened_solidetector_embedded", version=VERSION, description="Flattened data with embedded labeled with SoliDetector"),
#*(lambda VERSION=VERSION: [ datasets.BuilderConfig(name="flattened_solidetector_embedded_" + lvl, version=VERSION) for lvl in ["high", "medium", "low"]])(),
datasets.BuilderConfig(name="flattened_oyente", version=VERSION, description="Flattened data with metadata, labeled with Oyente"),
datasets.BuilderConfig(name="flattened_oyente_extended", version=VERSION, description="Flattened data labeled with Oyente"),
datasets.BuilderConfig(name="flattened_oyente_embedded", version=VERSION, description="Flattened data with embedded labeled with Oyente"),
#*(lambda VERSION=VERSION: [ datasets.BuilderConfig(name="flattened_oyente_embedded_" + lvl, version=VERSION) for lvl in ["high", "medium", "low"]])(),
datasets.BuilderConfig(name="flattened_smartcheck", version=VERSION, description="Flattened data with metadata, labeled with SmartCheck"),
datasets.BuilderConfig(name="flattened_smartcheck_extended", version=VERSION, description="Flattened data labeled with SmartCheck"),
datasets.BuilderConfig(name="flattened_smartcheck_embedded", version=VERSION, description="Flattened data with embedded labeled with SmartCheck"),
#*(lambda VERSION=VERSION: [ datasets.BuilderConfig(name="flattened_smartcheck_embedded_" + lvl, version=VERSION) for lvl in ["high", "medium", "low"]])(),
datasets.BuilderConfig(name="inflated_all", version=VERSION, description="Inflated data labeled with all tools"),
datasets.BuilderConfig(name="inflated_all_embedded", version=VERSION, description="Inflated data with embedded labeled with all tools"),
#*(lambda VERSION=VERSION: [ datasets.BuilderConfig(name="inflated_all_embedded_" + lvl, version=VERSION) for lvl in ["high", "medium", "low"]])(),
datasets.BuilderConfig(name="inflated_solidetector", version=VERSION, description="Inflated data labeled with SoliDetector"),
datasets.BuilderConfig(name="inflated_solidetector_embedded", version=VERSION, description="Inflated data with embedded labeled with SoliDetector"),
#*(lambda VERSION=VERSION: [ datasets.BuilderConfig(name="inflated_solidetector_embedded_" + lvl, version=VERSION) for lvl in ["high", "medium", "low"]])(),
#datasets.BuilderConfig(name="solidetector", version=VERSION, description="Labeling with SoliDetector"),
#datasets.BuilderConfig(name="solidetector_plain_text", version=VERSION, description="Labeling with SoliDetector plain text version"),
]
    DEFAULT_CONFIG_NAME = "inflated_all"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
def _info(self):
        # This method specifies the datasets.DatasetInfo object which contains information and feature types for the dataset
data_split = self.config.name.split("_")[0]
tool = self.config.name.split("_")[1]
if "embedded" in self.config.name: # This is an example to show how to have different features for "first_domain" and "second_domain"
features = datasets.Features(
{
"text": datasets.Value("string"),
"language": datasets.Value("string")
}
)
elif "flattened" in self.config.name: # This is the name of the configuration selected in BUILDER_CONFIGS above
features = datasets.Features(
{
'contract_name': datasets.Value("string"),
'contract_address': datasets.Value("string"),
'language': datasets.Value("string"),
'source_code': datasets.Value("string"),
**{ t: datasets.Value("string") for t in _TOOLS[data_split][tool] },
'abi': datasets.Value("string"), # JSON string
'compiler_version': datasets.Value("string"),
'optimization_used': datasets.Value("bool"),
'runs': datasets.Value("int64"),
'constructor_arguments': datasets.Value("string"),
'evm_version': datasets.Value("string"),
'library': datasets.Value("string"),
'license_type': datasets.Value("string"),
'proxy': datasets.Value("bool"),
'implementation': datasets.Value("string"),
'swarm_source': datasets.Value("string")
}
)
elif "inflated" in self.config.name: # This is an example to show how to have different features for "first_domain" and "second_domain"
features = datasets.Features(
{
'contract_name': datasets.Value("string"),
'file_path': datasets.Value("string"),
'contract_address': datasets.Value("string"),
'language': datasets.Value("string"),
'source_code': datasets.Value("string"),
**{ t: datasets.Value("string") for t in _TOOLS[data_split][tool] },
'compiler_version': datasets.Value("string"),
'license_type': datasets.Value("string"),
'swarm_source': datasets.Value("string")
}
)
if "extended" in self.config.name:
features["tx_count"] = datasets.Value("int64")
features["balance"] = datasets.Value("string")
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# This defines the different columns of the dataset and their types
features=features, # Here we define them above because they are different between the two configurations
# If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
# specify them. They'll be used if as_supervised=True in builder.as_dataset.
# supervised_keys=("sentence", "label"),
# Homepage of the dataset for documentation
homepage=_HOMEPAGE,
# License for the dataset if available
license=_LICENSE,
# Citation for the dataset
citation=_CITATION,
)
def _split_generators(self, dl_manager):
# This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
# If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
# dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
        # It can accept any type or nested list/dict and will give back the same structure with the URLs replaced by paths to local files.
# By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
urls = _URLS[self.config.name.split("_")[0]]
downloaded_files = dl_manager.download_and_extract(urls)
metadata = None
if "extended" in self.config.name:
metadata = dl_manager.download_and_extract(_URLS["metadata"])
if "flattened" in self.config.name or "inflated" in self.config.name:
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": downloaded_files["train"], "metadata": metadata}),
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"files": downloaded_files["dev"], "metadata": metadata}),
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"files": downloaded_files["test"], "metadata": metadata}),
]
else:
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": downloaded_files["train"], "metadata": metadata}),
]
# method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
def _generate_examples(self, files, metadata):
"""Yields examples."""
# This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
# The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
#data = pd.read_parquet(filepath)
data_split = self.config.name.split("_")[0]
tool = self.config.name.split("_")[1]
lvl = _EMBEDDED_LEVEL
#lvl = self.config.name.split("_")[-1].capitalize()
#if lvl not in _LEVELS:
# lvl = min(_LEVELS, key=_LEVELS.get)
# Load metadata
if metadata is not None:
meta = pd.read_parquet(metadata)
for path in files:
if "embedded" in self.config.name:
columns = ['contract_address', 'source_code', 'language']
columns.extend(["file_path"] if "inflated" in self.config.name else [])
columns.extend(_TOOLS[data_split][tool])
data = pd.read_parquet(path, columns=columns)
elif "flattened" in self.config.name:
data = pd.read_parquet(path)
                data['runs'] = data['runs'].fillna(0)
else:
data = pd.read_parquet(path)
# Add metadata
if metadata is not None:
data = pd.merge(data, meta, how="left", on="contract_address")
for index, row in data.iterrows():
if "flattened" in self.config.name:
# Yields examples as (key, example) tuples
key = row['contract_address']
if "embedded" in self.config.name:
is_vulnerable = False
is_secure = False
for t in _TOOLS[data_split][tool]:
if pd.isnull(row[t]):
continue
if row[t] == "[]":
is_secure = True
continue
                            # Build substrings like 'severity": "High' and look for them in the raw JSON report;
                            # only levels at or above the threshold count as vulnerabilities.
                            vuln_levels = [_TOOLS_AUDIT_DESC[t]["level_col"] + '": "' + str(k) for k, v in _LEVELS.items() if v >= _LEVELS[lvl]]
if any(_check_strings(vuln_levels, row[t])):
is_vulnerable = True
break
else:
is_secure = True
continue
label = ""
if is_vulnerable:
label = "// VULNERABLE\n"
elif is_secure:
label = "// SECURE\n"
else:
label = "// UNKNOWN\n"
yield key, {
'text': label + row['source_code'],
'language': row['language'],
}
else:
yield key, {
'contract_name': row['contract_name'],
'contract_address': row['contract_address'],
'language': row['language'],
'source_code': row['source_code'],
**{ t: row[t] for t in _TOOLS[data_split][tool] },
'abi': row['abi'],
'compiler_version': row['compiler_version'],
'optimization_used': row['optimization_used'],
'runs': row['runs'],
'constructor_arguments': row['constructor_arguments'],
'evm_version': row['evm_version'],
'library': row['library'],
'license_type': row['license_type'],
'proxy': row['proxy'],
'implementation': row['implementation'],
'swarm_source': row['swarm_source'],
**({'tx_count': row["tx_count"]} if metadata is not None else {}),
**({'balance': row["balance"]} if metadata is not None else {})
}
elif "inflated" in self.config.name:
# Yields examples as (key, example) tuples
key = row['contract_address'] + ":" + row['file_path'] + ":" + str(hash(row['source_code']))
if "embedded" in self.config.name:
is_vulnerable = False
is_secure = False
for t in _TOOLS[data_split][tool]:
if pd.isnull(row[t]):
continue
if row[t] == "[]":
is_secure = True
continue
                            # Same severity-threshold heuristic as for the flattened data.
                            vuln_levels = [_TOOLS_AUDIT_DESC[t]["level_col"] + '": "' + str(k) for k, v in _LEVELS.items() if v >= _LEVELS[lvl]]
if any(_check_strings(vuln_levels, row[t])):
is_vulnerable = True
break
else:
is_secure = True
continue
label = ""
if is_vulnerable:
label = "// VULNERABLE\n"
elif is_secure:
label = "// SECURE\n"
else:
label = "// UNKNOWN\n"
yield key, {
'text': label + row['source_code'],
'language': row['language'],
}
else:
yield key, {
'contract_name': row['contract_name'],
'file_path': row['file_path'],
'contract_address': row['contract_address'],
'language': row['language'],
'source_code': row['source_code'],
**{ t: row[t] for t in _TOOLS[data_split][tool] },
'compiler_version': row['compiler_version'],
'license_type': row['license_type'],
'swarm_source': row['swarm_source']
}