Commit 791c5c3 · 1 parent: 53bd002

Support streaming xtreme dataset for PAWS-X config (#4132)

* Support streaming xtreme dataset for PAWS-X config
* Align tasks in dataset card

Commit from https://github.com/huggingface/datasets/commit/8caed0c1e7b9658f08c10c8b90eb203b2cedc8e4
README.md CHANGED

```diff
@@ -434,6 +434,7 @@ licenses:
 multilinguality:
 - multilingual
 - translation
+pretty_name: XTREME
 size_categories:
 - n<1K
 - 1K<n<10K
@@ -449,21 +450,21 @@ source_datasets:
 - extended|tatoeba
 - extended|squad
 task_categories:
+- multiple-choice
 - question-answering
-- structure-prediction
 - text-classification
 - text-retrieval
+- token-classification
 task_ids:
-- open-domain-qa
 - multiple-choice-qa
 - extractive-qa
+- open-domain-qa
-- named-entity-recognition
-- part-of-speech-tagging
 - natural-language-inference
 - text-classification-other-paraphrase-identification
 - text-retrieval-other-parallel-sentence-retrieval
+- named-entity-recognition
+- part-of-speech-tagging
 paperswithcode_id: xtreme
-pretty_name: XTREME
 ---
 
 # Dataset Card for "xtreme"
```
xtreme.py CHANGED

```diff
@@ -461,7 +461,6 @@ class Xtreme(datasets.GeneratorBasedBuilder):
     ]
 
     def _info(self):
-        # TODO(xtreme): Specifies the datasets.DatasetInfo object
         features = {text_feature: datasets.Value("string") for text_feature in self.config.text_features.keys()}
         if "answers" in features.keys():
             features["answers"] = datasets.features.Sequence(
@@ -471,7 +470,7 @@ class Xtreme(datasets.GeneratorBasedBuilder):
                 }
             )
         if self.config.name.startswith("PAWS-X"):
-            features["label"] = datasets.Value("string")
+            features = PawsxParser.features
         if self.config.name == "XNLI":
             features["gold_label"] = datasets.Value("string")
 
```
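The `_info` change above takes the PAWS-X feature schema from `PawsxParser.features`, declared once in the final hunk of this file, instead of assembling it field by field. A minimal sketch of that schema in isolation; the example row is made up purely for illustration:

```python
import datasets

# The PAWS-X schema declared on PawsxParser: three plain string columns.
features = datasets.Features(
    {
        "sentence1": datasets.Value("string"),
        "sentence2": datasets.Value("string"),
        "label": datasets.Value("string"),
    }
)

# Encode a made-up example against the schema (values are illustrative only).
example = {"sentence1": "A first sentence.", "sentence2": "A paraphrase of it.", "label": "1"}
print(features.encode_example(example))
```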
```diff
@@ -517,10 +516,6 @@ class Xtreme(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
-        # TODO(xtreme): Downloads the data and defines the splits
-        # dl_manager is a datasets.download.DownloadManager that can be used to
-        # download and extract URLs
-
         if self.config.name == "tydiqa":
             train_url = "v1.1/tydiqa-goldp-v1.1-train.json"
             dev_url = "v1.1/tydiqa-goldp-v1.1-dev.json"
@@ -593,30 +588,7 @@ class Xtreme(datasets.GeneratorBasedBuilder):
             ),
         ]
         if self.config.name.startswith("PAWS-X"):
-            lang = self.config.name.split(".")[1]
-            paws_x_dir = dl_manager.download_and_extract(self.config.data_url)
-            data_dir = os.path.join(paws_x_dir, "x-final", lang)
-            return [
-                datasets.SplitGenerator(
-                    name=datasets.Split.VALIDATION,
-                    # These kwargs will be passed to _generate_examples
-                    gen_kwargs={"filepath": os.path.join(data_dir, "dev_2k.tsv")},
-                ),
-                datasets.SplitGenerator(
-                    name=datasets.Split.TEST,
-                    # These kwargs will be passed to _generate_examples
-                    gen_kwargs={"filepath": os.path.join(data_dir, "test_2k.tsv")},
-                ),
-                datasets.SplitGenerator(
-                    name=datasets.Split.TRAIN,
-                    # These kwargs will be passed to _generate_examples
-                    gen_kwargs={
-                        "filepath": os.path.join(data_dir, "translated_train.tsv")
-                        if lang != "en"
-                        else os.path.join(data_dir, "train.tsv")
-                    },
-                ),
-            ]
+            return PawsxParser.split_generators(dl_manager=dl_manager, config=self.config)
         elif self.config.name.startswith("tatoeba"):
             lang = self.config.name.split(".")[1]
 
```
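This is the core of the streaming fix: the old branch called `dl_manager.download_and_extract` and joined paths on the extracted directory, which forces a full download and extraction and cannot run in streaming mode. The new `PawsxParser.split_generators` (added in the last hunk below) only calls `dl_manager.download` and passes `dl_manager.iter_archive(archive)` through `gen_kwargs`, so `_generate_examples` reads `(path, file-object)` pairs straight from the archive. The sketch below imitates that iteration with the standard `tarfile` module; the local filename `x-final.tar.gz` is an assumption for illustration, while the `x-final/<lang>/<split>.tsv` layout follows the paths used in the old code.

```python
import csv
import tarfile

# Minimal imitation of dl_manager.iter_archive(): walk a tar archive and
# yield (member_path, file_object) pairs without extracting to disk.
def iter_archive(archive_path):
    with tarfile.open(archive_path) as tar:
        for member in tar:
            if member.isfile():
                yield member.name, tar.extractfile(member)

# Assumed local copy of the PAWS-X archive (illustrative path only).
lang, filename = "de", "dev_2k.tsv"
for path, fileobj in iter_archive("x-final.tar.gz"):
    if f"/{lang}/" in path and path.endswith(filename):
        lines = (line.decode("utf-8") for line in fileobj)
        rows = csv.reader(lines, delimiter="\t")
        next(rows)  # skip the header row
        for row in rows:
            if len(row) == 4:  # id, sentence1, sentence2, label
                print({"sentence1": row[1], "sentence2": row[2], "label": row[3]})
        break
```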
```diff
@@ -733,16 +705,7 @@ class Xtreme(datasets.GeneratorBasedBuilder):
                         "gold_label": row["gold_label"],
                     }
         if self.config.name.startswith("PAWS-X"):
-            with open(filepath, encoding="utf-8") as f:
-                data = csv.reader(f, delimiter="\t")
-                next(data)  # skip header
-                for id_, row in enumerate(data):
-                    if len(row) == 4:
-                        yield id_, {
-                            "sentence1": row[1],
-                            "sentence2": row[2],
-                            "label": row[3],
-                        }
+            yield from PawsxParser.generate_examples(config=self.config, filepath=filepath, **kwargs)
         if self.config.name.startswith("XQuAD"):
             with open(filepath, encoding="utf-8") as f:
                 xquad = json.load(f)
```
```diff
@@ -857,6 +820,50 @@ class Xtreme(datasets.GeneratorBasedBuilder):
                         }
 
 
+class PawsxParser:
+
+    features = datasets.Features(
+        {
+            "sentence1": datasets.Value("string"),
+            "sentence2": datasets.Value("string"),
+            "label": datasets.Value("string"),
+        }
+    )
+
+    @staticmethod
+    def split_generators(dl_manager=None, config=None):
+        lang = config.name.split(".")[1]
+        archive = dl_manager.download(config.data_url)
+        split_filenames = {
+            datasets.Split.TRAIN: "translated_train.tsv" if lang != "en" else "train.tsv",
+            datasets.Split.VALIDATION: "dev_2k.tsv",
+            datasets.Split.TEST: "test_2k.tsv",
+        }
+        return [
+            datasets.SplitGenerator(
+                name=split,
+                gen_kwargs={"filepath": dl_manager.iter_archive(archive), "filename": split_filenames[split]},
+            )
+            for split in split_filenames
+        ]
+
+    @staticmethod
+    def generate_examples(config=None, filepath=None, filename=None):
+        lang = config.name.split(".")[1]
+        for path, file in filepath:
+            if f"/{lang}/" in path and path.endswith(filename):
+                lines = (line.decode("utf-8") for line in file)
+                data = csv.reader(lines, delimiter="\t")
+                next(data)  # skip header
+                for id_, row in enumerate(data):
+                    if len(row) == 4:
+                        yield id_, {
+                            "sentence1": row[1],
+                            "sentence2": row[2],
+                            "label": row[3],
+                        }
+
+
 class UdposParser:
 
     features = datasets.Features(
```
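With the PAWS-X branches routed through `PawsxParser`, the config can be consumed lazily without extracting the archive up front. A minimal usage sketch against the `datasets` library as of this commit; the config name `PAWS-X.de` follows the `PAWS-X.<lang>` naming pattern that `config.name.split(".")[1]` assumes above, so adjust the language code as needed:

```python
from datasets import load_dataset

# Stream the German PAWS-X config of xtreme: examples are read directly
# from the downloaded archive instead of an extracted directory.
dataset = load_dataset("xtreme", "PAWS-X.de", split="validation", streaming=True)

# Inspect the first few examples without materializing the whole split.
for example in dataset.take(3):
    print(example["sentence1"], example["sentence2"], example["label"])
```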