# the-stack-smol-xs / the-stack-smol-xs.py
# (Hugging Face hub page residue: author "root", commit "add dataset script",
#  revision b8c07f5, file size 3.01 kB — kept here as a comment so the
#  script remains valid Python.)
"""Odex dataset."""
import json
import datasets
_CITATION = """\
"""
_DESCRIPTION = """\
"""
_HOMEPAGE = " "
LIST_LANG = ['ada', 'agda', 'alloy', 'antlr', 'applescript', 'assembly', 'augeas', 'awk', 'batchfile', 'bison',
'bluespec', 'c', 'c++', 'c-sharp', 'clojure', 'cmake', 'coffeescript', 'common-lisp', 'css', 'cuda', 'dart', 'dockerfile', 'elixir',
'elm', 'emacs-lisp','erlang', 'f-sharp', 'fortran', 'glsl', 'go', 'groovy', 'haskell','html', 'idris', 'isabelle', 'java',
'java-server-pages', 'javascript', 'julia', 'kotlin', 'lean', 'literate-agda', 'literate-coffeescript', 'literate-haskell',
'lua', 'makefile', 'maple', 'markdown', 'mathematica', 'matlab', 'ocaml', 'pascal', 'perl', 'php', 'powershell', 'prolog',
'protocol-buffer', 'python', 'r', 'racket', 'restructuredtext', 'rmarkdown', 'ruby', 'rust', 'sas', 'scala', 'scheme',
'shell', 'smalltalk', 'solidity', 'sparql', 'sql', 'stan', 'standard-ml', 'stata', 'systemverilog', 'tcl', 'tcsh', 'tex',
'thrift', 'typescript', 'verilog', 'vhdl', 'visual-basic', 'xslt', 'yacc', 'zig']
_URLs = {lang: f"data/{lang}/data.json" for lang in LIST_LANG}
class Stack(datasets.GeneratorBasedBuilder):
    """Stack Code dataset: one builder config per programming language."""

    VERSION = datasets.Version("1.0.0")

    # One config per language; each reads data/<lang>/data.json.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=lang,
            version=datasets.Version("1.0.0"),
            description=_DESCRIPTION,
        )
        for lang in _URLs
    ]

    DEFAULT_CONFIG_NAME = "python"

    def _info(self):
        """Return dataset metadata describing one source-file record."""
        features = datasets.Features(
            {
                "content": datasets.Value("string"),
                "lang": datasets.Value("string"),
                "size": datasets.Value("int64"),
                "ext": datasets.Value("string"),
                "max_stars_count": datasets.Value("int64"),
                "avg_line_length": datasets.Value("float64"),
                "max_line_length": datasets.Value("int64"),
                "alphanum_fraction": datasets.Value("float64"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            citation=_CITATION,
            homepage=_HOMEPAGE,
        )

    def _split_generators(self, dl_manager):
        """Download this config's data file and expose a single TRAIN split."""
        config_urls = _URLs[self.config.name]
        data_dir = dl_manager.download_and_extract(config_urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": data_dir, "split": "train"},
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield ``(key, example)`` pairs from a JSON-lines file.

        Fixes over the original: the file handle was opened without ever
        being closed (resource leak) and a manual ``key`` counter was kept
        by hand — use a ``with`` block and ``enumerate`` instead.
        """
        with open(filepath, encoding="utf-8") as f:
            for key, line in enumerate(f):
                yield key, json.loads(line)