"""Reddit dataset using tldr as summaries.""" |
|
|
|
import json
import os

import datasets


_CITATION = """
@inproceedings{volske-etal-2017-tl,
    title = {TL;DR: Mining {R}eddit to Learn Automatic Summarization},
    author = {V{\"o}lske, Michael and Potthast, Martin and Syed, Shahbaz and Stein, Benno},
    booktitle = {Proceedings of the Workshop on New Frontiers in Summarization},
    month = {sep},
    year = {2017},
    address = {Copenhagen, Denmark},
    publisher = {Association for Computational Linguistics},
    url = {https://www.aclweb.org/anthology/W17-4508},
    doi = {10.18653/v1/W17-4508},
    pages = {59--63},
    abstract = {Recent advances in automatic text summarization have used deep neural networks to generate high-quality abstractive summaries, but the performance of these models strongly depends on large amounts of suitable training data. We propose a new method for mining social media for author-provided summaries, taking advantage of the common practice of appending a {``}TL;DR{''} to long posts. A case study using a large Reddit crawl yields the Webis-TLDR-17 dataset, complementing existing corpora primarily from the news genre. Our technique is likely applicable to other social media sites and general web crawls.},
}
"""

_DESCRIPTION = """
This corpus contains preprocessed posts from the Reddit dataset.
The dataset consists of 3,848,330 posts with an average length of 270 words for content
and 28 words for the summary.

Features include the strings: author, body, normalizedBody, content, summary, subreddit, subreddit_id.
Content is used as the document and summary is used as the summary.
"""

# Archive bundled with the dataset repository; it extracts to a single
# JSON-lines file with one Reddit post per line.
_URL = "data/corpus-webis-tldr-17.zip"

# JSON keys used as the document/summary pair, plus additional string features.
_DOCUMENT = "content"
_SUMMARY = "summary"
_ADDITIONAL_FEATURES = ["author", "body", "normalizedBody", "subreddit", "subreddit_id", "id"]
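
# Illustrative sketch of one record in the extracted JSON-lines corpus (field values
# are made up; only the keys referenced above are shown, and real records may carry
# additional keys):
#
#     {"author": "someuser", "subreddit": "AskReddit", "subreddit_id": "t5_xxxxx",
#      "id": "abc123", "body": "full post text ... TL;DR: short recap",
#      "normalizedBody": "cleaned post text ...",
#      "content": "post text used as the document",
#      "summary": "short recap used as the summary"}
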
class Reddit(datasets.GeneratorBasedBuilder):
    """Reddit Dataset."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {k: datasets.Value("string") for k in _ADDITIONAL_FEATURES + [_DOCUMENT, _SUMMARY]}
            ),
            supervised_keys=None,
            homepage="https://github.com/webis-de/webis-tldr-17-corpus",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        dl_path = dl_manager.download_and_extract(_URL)
        # The corpus ships as a single file, exposed as one TRAIN split.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"path": os.path.join(dl_path, "corpus-webis-tldr-17.json")},
            )
        ]

    def _generate_examples(self, path=None):
        """Yields examples."""
        with open(path, "rb") as f:
            for i, line in enumerate(f):
                # Each line of the extracted file is one JSON object describing a single post.
                d = json.loads(line)
                # Keep only posts that provide both the document and the summary;
                # any other requested field that is missing defaults to an empty string.
                if _SUMMARY in d and _DOCUMENT in d:
                    yield i, {k: d.get(k, "") for k in _ADDITIONAL_FEATURES + [_DOCUMENT, _SUMMARY]}
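

# A minimal usage sketch (an assumption, not part of this script): if this loader is
# available under the dataset name "reddit", or by passing the local path of this file
# to `datasets.load_dataset`, the document/summary pair can be read as:
#
#     import datasets
#
#     ds = datasets.load_dataset("reddit", split="train")
#     example = ds[0]
#     print(example["content"])   # post body used as the document
#     print(example["summary"])   # author-written TL;DR used as the summary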