nikita-savelyev-intel committed on
Commit
50d8e46
·
verified ·
1 Parent(s): 3aa7b20

Delete loading script

Browse files
Files changed (1) hide show
  1. squadshifts.py +0 -180
squadshifts.py DELETED
@@ -1,180 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- # Lint as: python3
17
- """SQUAD: The Stanford Question Answering Dataset."""
18
-
19
-
20
import json

import datasets


# Module-level logger, namespaced to this module per the `datasets` convention.
logger = datasets.logging.get_logger(__name__)
28
- _DESCRIPTION = r"""\
29
- SquadShifts consists of four new test sets for the Stanford Question Answering \
30
- Dataset (SQuAD) from four different domains: Wikipedia articles, New York \
31
- Times articles, Reddit comments, and Amazon product reviews. Each dataset \
32
- was generated using the same data generating pipeline, Amazon Mechanical \
33
- Turk interface, and data cleaning code as the original SQuAD v1.1 dataset. \
34
- The "new-wikipedia" dataset measures overfitting on the original SQuAD v1.1 \
35
- dataset. The "new-york-times", "reddit", and "amazon" datasets measure \
36
- robustness to natural distribution shifts. We encourage SQuAD model developers \
37
- to also evaluate their methods on these new datasets! \
38
- """
39
-
40
- _LICENSE = "CC-BY-4.0"
41
-
42
- _CITATION = """\
43
- @InProceedings{pmlr-v119-miller20a,
44
- title = {The Effect of Natural Distribution Shift on Question Answering Models},
45
- author = {Miller, John and Krauth, Karl and Recht, Benjamin and Schmidt, Ludwig},
46
- booktitle = {Proceedings of the 37th International Conference on Machine Learning},
47
- pages = {6905--6916},
48
- year = {2020},
49
- editor = {III, Hal Daumé and Singh, Aarti},
50
- volume = {119},
51
- series = {Proceedings of Machine Learning Research},
52
- month = {13--18 Jul},
53
- publisher = {PMLR},
54
- pdf = {http://proceedings.mlr.press/v119/miller20a/miller20a.pdf},
55
- url = {https://proceedings.mlr.press/v119/miller20a.html},
56
- }
57
- """
58
-
59
- _URL = "https://raw.githubusercontent.com/modestyachts/squadshifts-website/master/datasets/"
60
- _URLS = {
61
- "new_wiki": _URL + "new_wiki_v1.0.json",
62
- "nyt": _URL + "nyt_v1.0.json",
63
- "reddit": _URL + "reddit_v1.0.json",
64
- "amazon": _URL + "amazon_reviews_v1.0.json",
65
- }
66
-
67
-
68
class SquadShiftsConfig(datasets.BuilderConfig):
    """BuilderConfig for SquadShifts."""

    def __init__(self, **kwargs):
        """BuilderConfig for SquadShifts.

        Args:
          **kwargs: keyword arguments forwarded to super
            (e.g. ``name``, ``version``, ``description``).
        """
        # Zero-argument super() — equivalent to the legacy
        # super(SquadShiftsConfig, self) form, per modern Python 3 style.
        super().__init__(**kwargs)
78
-
79
-
80
class SquadShifts(datasets.GeneratorBasedBuilder):
    """SquadShifts consists of four new test sets for the SQUAD dataset."""

    BUILDER_CONFIGS = [
        SquadShiftsConfig(
            name="new_wiki",
            version=datasets.Version("1.0.0", ""),
            description="SQuADShifts New Wikipedia article dataset",
        ),
        SquadShiftsConfig(
            name="nyt",
            version=datasets.Version("1.0.0", ""),
            description="SQuADShifts New York Times article dataset.",
        ),
        SquadShiftsConfig(
            name="reddit",
            version=datasets.Version("1.0.0", ""),
            description="SQuADShifts Reddit comment dataset.",
        ),
        SquadShiftsConfig(
            name="amazon",
            version=datasets.Version("1.0.0", ""),
            description="SQuADShifts Amazon product review dataset.",
        ),
    ]

    def _info(self):
        """Return DatasetInfo: SQuAD-style features plus card metadata."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "context": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    # SQuAD allows several gold answers per question, hence a
                    # Sequence of parallel text/answer_start values.
                    "answers": datasets.features.Sequence(
                        {
                            "text": datasets.Value("string"),
                            "answer_start": datasets.Value("int32"),
                        }
                    ),
                }
            ),
            homepage="https://modestyachts.github.io/squadshifts-website/index.html",
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the selected config's JSON file and expose it as a TEST split.

        The original four branches were identical except for the `_URLS` key, so
        they are collapsed into one lookup. The historical "default" config name
        remains an alias for "new_wiki", and an unknown name still raises
        ValueError with the same message.
        """
        downloaded_files = dl_manager.download_and_extract(_URLS)

        config_name = "new_wiki" if self.config.name == "default" else self.config.name
        if config_name not in downloaded_files:
            raise ValueError(f"SQuADShifts dataset name {self.config.name} not found!")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files[config_name]}
            ),
        ]

    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        logger.info("generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            squad = json.load(f)
            for article in squad["data"]:
                title = article.get("title", "").strip()
                for paragraph in article["paragraphs"]:
                    context = paragraph["context"].strip()
                    for qa in paragraph["qas"]:
                        question = qa["question"].strip()
                        id_ = qa["id"]

                        # Gold answers as two parallel lists, matching the
                        # Sequence feature declared in _info().
                        answer_starts = [answer["answer_start"] for answer in qa["answers"]]
                        answers = [answer["text"].strip() for answer in qa["answers"]]

                        # Features currently used are "context", "question", and "answers".
                        # Others are extracted here for the ease of future expansions.
                        yield id_, {
                            "title": title,
                            "context": context,
                            "question": question,
                            "id": id_,
                            "answers": {
                                "answer_start": answer_starts,
                                "text": answers,
                            },
                        }