import json

import datasets


logger = datasets.logging.get_logger(__name__)


_CITATION = """\
@inproceedings{DBLP:conf/iclr/KaushikHL20,
  author    = {Divyansh Kaushik and
               Eduard H. Hovy and
               Zachary Chase Lipton},
  title     = {Learning The Difference That Makes {A} Difference With Counterfactually-Augmented
               Data},
  booktitle = {8th International Conference on Learning Representations, {ICLR} 2020,
               Addis Ababa, Ethiopia, April 26-30, 2020},
  publisher = {OpenReview.net},
  year      = {2020},
  url       = {https://openreview.net/forum?id=Sklgs0NFvr},
  timestamp = {Thu, 07 May 2020 17:11:48 +0200},
  biburl    = {https://dblp.org/rec/conf/iclr/KaushikHL20.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""

_DESCRIPTION = """\ |
|
The SNLI corpus (version 1.0) is a collection of 570k human-written English sentence pairs manually labeled for balanced classification with the labels entailment, contradiction, and neutral, supporting the task of natural language inference (NLI), also known as recognizing textual entailment (RTE). In the ICLR 2020 paper [Learning the Difference that Makes a Difference with Counterfactually-Augmented Data](https://openreview.net/forum?id=Sklgs0NFvr), Kaushik et. al. provided a dataset with counterfactual perturbations on the SNLI and IMDB data. This repository contains the original and counterfactual perturbations for the SNLI data, which was generated after processing the original data from [here](https://github.com/acmi-lab/counterfactually-augmented-data).""" |
|
|
|
_URL = "https://huggingface.co/datasets/sagnikrayc/snli-cf-kaushik/resolve/main" |
|
_URLS = { |
|
"train": f"{_URL}/train.jsonl", |
|
"validation": f"{_URL}/validation.jsonl", |
|
"test": f"{_URL}/test.jsonl", |
|
} |
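
# Usage sketch (illustrative only, hence kept as comments so this script's behavior is unchanged):
# the repository id below is taken from _URL, and the splits mirror _URLS.
#
#   from datasets import load_dataset
#   ds = load_dataset("sagnikrayc/snli-cf-kaushik")
#   ds["train"][0]  # dict with "idx", "premise", "hypothesis", "label", and "type" fields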


class SnliCFConfig(datasets.BuilderConfig):
    """BuilderConfig for SNLI-CF."""

    def __init__(self, **kwargs):
        """BuilderConfig for SNLI-CF.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(SnliCFConfig, self).__init__(**kwargs)


class SnliCF(datasets.GeneratorBasedBuilder):
    """SNLI-CF: the SNLI data with counterfactually augmented examples from Kaushik et al. (ICLR 2020). Version 1.0."""

    BUILDER_CONFIGS = [
        SnliCFConfig(
            name="plain_text",
            version=datasets.Version("1.0.0", ""),
            description="Plain text",
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "idx": datasets.Value("string"),
                    "premise": datasets.Value("string"),
                    "hypothesis": datasets.Value("string"),
                    "label": datasets.Value("string"),
                    "type": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/acmi-lab/counterfactually-augmented-data",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        downloaded_files = dl_manager.download_and_extract(_URLS)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["validation"]}
            ),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        """Yields the examples in the raw (text) form."""
        logger.info("generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as rf:
            for idx, line in enumerate(rf):
                # Each non-empty line of the JSONL file is a single JSON-encoded example.
                if line:
                    _line = json.loads(line)
                    yield idx, {
                        "premise": _line["premise"],
                        "hypothesis": _line["hypothesis"],
                        "idx": _line["idx"],
                        "type": _line["type"],
                        "label": _line["label"],
                    }
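
# Expected input format (a sketch; the field values below are illustrative, but the keys match what
# _generate_examples reads): each line of {train,validation,test}.jsonl is one JSON object, e.g.
#   {"idx": "1", "premise": "...", "hypothesis": "...", "label": "entailment", "type": "original"}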