|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""The OpenWebText2 Corpus""" |
|
|
|
import io |
|
import json |
|
|
|
import zstandard |
|
|
|
import datasets |
|
from datasets.exceptions import DefunctDatasetError |
|
|
|
|
|
_CITATION = """\ |
|
@article{pile, |
|
title={The {P}ile: An 800GB Dataset of Diverse Text for Language Modeling}, |
|
author={Gao, Leo and Biderman, Stella and Black, Sid and Golding, Laurence and Hoppe, Travis and Foster, Charles and Phang, Jason and He, Horace and Thite, Anish and Nabeshima, Noa and Presser, Shawn and Leahy, Connor}, |
|
journal={arXiv preprint arXiv:2101.00027}, |
|
year={2020} |
|
} |
|
""" |
|
|
|
_DESCRIPTION = """\ |
|
OpenWebText2 is part of EleutherAi/The Pile dataset and is an enhanced version of the original OpenWebTextCorpus \ |
|
covering all Reddit submissions from 2005 up until April 2020, \ |
|
with further months becoming available after the corresponding PushShift dump files are released. |
|
""" |
|
|
|
_HOST_URL = "https://the-eye.eu" |
|
_URL = f"{_HOST_URL}/public/AI/pile_preliminary_components/openwebtext2.jsonl.zst.tar" |
|
|
|
|
|
class Openwebtext2(datasets.GeneratorBasedBuilder):
    """Builder for the OpenWebText2 component of EleutherAI's The Pile.

    NOTE: the dataset is defunct — ``_info`` raises unconditionally, so the
    builder can no longer be used to download or generate examples.
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="plain_text",
            description="Plain text",
            version=datasets.Version("1.0.0"),
        )
    ]

    def _info(self):
        # Source data is no longer hosted; fail fast before any download.
        raise DefunctDatasetError(
            "Dataset 'the_pile_openwebtext2' is defunct and no longer accessible due to unavailability of the source data"
        )
        # Unreachable — retained as a record of the original feature schema.
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "title": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "reddit_scores": datasets.Sequence(datasets.Value("int32")),
                }
            ),
            homepage="https://openwebtext2.readthedocs.io/en/latest/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the tar archive and expose a single TRAIN split."""
        archive = dl_manager.download(_URL)
        train_split = datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            gen_kwargs={"files": dl_manager.iter_archive(archive)},
        )
        return [train_split]

    def _generate_examples(self, files):
        """Yields examples."""
        example_id = 0
        for path, file in files:
            # Only the zstd-compressed JSONL members of the tar hold documents.
            if path.endswith(".jsonl.zst"):
                for document, metadata in Reader().read_jsonl(file, get_meta=True):
                    example = {
                        "title": metadata["title"],
                        "text": document,
                        "reddit_scores": metadata["reddit_scores"],
                    }
                    yield example_id, example
                    example_id += 1
|
|
|
|
|
|
|
class Reader:
    """Streaming reader for zstd-compressed JSONL files (Pile format)."""

    def __init__(self):
        pass

    def read_jsonl(self, fh, get_meta=False, autojoin_paragraphs=True, para_joiner="\n\n"):
        """Yield documents from a zstd-compressed JSONL file handle.

        Args:
            fh: Binary file-like object containing zstd-compressed JSONL.
            get_meta: If True, yield ``(text, meta_dict)`` tuples instead of
                bare text strings.
            autojoin_paragraphs: If True, join list-valued ``text`` fields
                (one entry per paragraph) into a single string.
            para_joiner: Separator used when joining paragraphs.

        Yields:
            str, or (str, dict) when ``get_meta`` is True.
        """
        # NOTE(review): self.fh is never read back anywhere in this file —
        # kept only in case external callers inspect it; confirm and drop.
        self.fh = fh
        cctx = zstandard.ZstdDecompressor()
        # BufferedReader gives line-by-line iteration over the decompressed stream.
        reader = io.BufferedReader(cctx.stream_reader(fh))
        for line in reader:
            ob = json.loads(line)

            # Some dumps store each record as a bare string (no metadata).
            if isinstance(ob, str):
                assert not get_meta
                yield ob
                continue

            text = ob["text"]

            if autojoin_paragraphs and isinstance(text, list):
                text = para_joiner.join(text)

            if get_meta:
                # Idiomatic single-lookup default instead of LBYL membership test.
                yield text, ob.get("meta", {})
            else:
                yield text
|
|