Theoreticallyhugo committed on
Commit 9b65a04 · 1 Parent(s): 503bd8f

first commit

Files changed (1)
  1. essays_SuG_dataset.py +435 -0
essays_SuG_dataset.py ADDED
@@ -0,0 +1,435 @@
# provide tokens with labels for the complete text

# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: Address all TODOs and remove all explanatory comments
"""TODO: Add a description here."""


import json
from pathlib import Path

import datasets


# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@InProceedings{huggingface:dataset,
    title = {a fancy dataset},
    author = {Hugo Meinhof, Elisa Luebbers},
    year = {2024}
}
"""

# TODO: Add description of the dataset here
# You can copy an official description
_DESCRIPTION = """\
This dataset contains 402 argumentative essays from non-native speakers of English."""

# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = ""

# TODO: Add the license for the dataset here if you can find it
_LICENSE = ""

# TODO: Add link to the official dataset URLs here
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in the `_split_generators` method)
# _URLS = {
#     "first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip",
#     "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
# }


class Fancy(datasets.GeneratorBasedBuilder):
    """
    TODO: Short description of my dataset.
    """

    VERSION = datasets.Version("1.1.0")

    # This is an example of a dataset with multiple configurations.
    # If you don't want/need to define several sub-sets in your dataset,
    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.

    # If you need to make complex sub-parts in the dataset with configurable options,
    # you can create your own builder configuration class to store attributes,
    # inheriting from datasets.BuilderConfig.
    # BUILDER_CONFIG_CLASS = MyBuilderConfig

    # You will be able to load one or the other configuration in the following list with
    # data = datasets.load_dataset('my_dataset', 'first_domain')
    # data = datasets.load_dataset('my_dataset', 'second_domain')

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="full_labels",
            version=VERSION,
            description="get all the data conveyed by the labels: O, B-Claim, I-Claim, etc.",
        ),
        datasets.BuilderConfig(
            name="spans",
            version=VERSION,
            description="get the spans: O, B-Span, I-Span.",
        ),
        datasets.BuilderConfig(
            name="simple",
            version=VERSION,
            description="get the labels without B/I: O, MajorClaim, Claim, Premise",
        ),
        datasets.BuilderConfig(
            name="sep_tok",
            version=VERSION,
            description="get the labels without B/I, meaning O, Claim, Premise,"
            " etc.; insert separator tokens <s> ... </s>",
        ),
        datasets.BuilderConfig(
            name="sep_tok_full_labels",
            version=VERSION,
            description="get the labels with B/I, meaning O, I-Claim, I-Premise,"
            " etc.; insert separator tokens <s> ... </s>",
        ),
    ]

    DEFAULT_CONFIG_NAME = "full_labels"
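
    # Usage sketch (hypothetical local path, not part of the builder): pick one
    # of the configurations above by name, e.g.
    #   data = datasets.load_dataset("essays_SuG_dataset.py", "sep_tok")
    # or omit the name to fall back to DEFAULT_CONFIG_NAME ("full_labels").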

    def _info(self):
        # This method specifies the datasets.DatasetInfo object, which holds
        # the information and feature types of the dataset.
        if self.config.name == "full_labels":
            # this is the name of the configuration selected in BUILDER_CONFIGS above
            features = datasets.Features(
                {
                    "id": datasets.Value("int16"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(
                        datasets.ClassLabel(
                            names=[
                                "O",
                                "B-MajorClaim",
                                "I-MajorClaim",
                                "B-Claim",
                                "I-Claim",
                                "B-Premise",
                                "I-Premise",
                            ]
                        )
                    ),
                    "text": datasets.Value("string"),
                }
            )
        elif self.config.name == "spans":
            features = datasets.Features(
                {
                    "id": datasets.Value("int16"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(
                        datasets.ClassLabel(
                            names=[
                                "O",
                                "B",
                                "I",
                            ]
                        )
                    ),
                    "text": datasets.Value("string"),
                }
            )
        elif self.config.name == "simple":
            features = datasets.Features(
                {
                    "id": datasets.Value("int16"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(
                        datasets.ClassLabel(
                            names=[
                                "O",
                                "X_placeholder_X",
                                "MajorClaim",
                                "Claim",
                                "Premise",
                            ]
                        )
                    ),
                    "text": datasets.Value("string"),
                }
            )
        elif self.config.name == "sep_tok":
            features = datasets.Features(
                {
                    "id": datasets.Value("int16"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(
                        datasets.ClassLabel(
                            names=[
                                "O",
                                "X_placeholder_X",
                                "MajorClaim",
                                "Claim",
                                "Premise",
                            ]
                        )
                    ),
                    "text": datasets.Value("string"),
                }
            )
        elif self.config.name == "sep_tok_full_labels":
            features = datasets.Features(
                {
                    "id": datasets.Value("int16"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(
                        datasets.ClassLabel(
                            names=[
                                "O",
                                "B-MajorClaim",
                                "I-MajorClaim",
                                "B-Claim",
                                "I-Claim",
                                "B-Premise",
                                "I-Premise",
                            ]
                        )
                    ),
                    "text": datasets.Value("string"),
                }
            )
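
        # Note: datasets.ClassLabel maps each name to an integer id in list
        # order (e.g. ClassLabel(names=["O", "B", "I"]).str2int("B") == 1).
        # The "X_placeholder_X" entry in the "simple" and "sep_tok"
        # configurations only occupies an id slot; it is never emitted by the
        # label matching in _match_tokens below.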

        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types;
            # the features are defined above because they differ between configurations.
            features=features,
            # If there's a common (input, target) tuple from the features, uncomment the
            # supervised_keys line below and specify them. They'll be used if
            # as_supervised=True in builder.as_dataset.
            # supervised_keys=("sentence", "label"),
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _range_generator(self, train=0.8, test=0.2):
        """
        Return three range objects for indexing into the list of 402 essays:
        the train, test, and validation ranges, where the size of the
        validation range is whatever the other two ranges leave over.
        """
        return (
            range(0, int(402 * train)),  # train
            range(int(402 * train), int(402 * (train + test))),  # test
            range(int(402 * (train + test)), 402),  # validate
        )
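
        # Worked example with the defaults: int(402 * 0.8) == 321 and
        # int(402 * 1.0) == 402, so this returns range(0, 321) for train,
        # range(321, 402) for test, and the empty range(402, 402) for
        # validation, in which case _split_generators below emits no
        # validation split.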

    @staticmethod
    def _find_data():
        """
        Try to find the "fancy_dataset" folder and return the path to it;
        raise FileNotFoundError if it cannot be found.

        returns:
            path to the data folder
        """

        # start from the current working directory and look for the data
        # folder there and in up to two parent directories (a bounded walk,
        # so we never climb the directory tree forever)
        cwd = Path.cwd()
        for _ in range(3):
            if Path.is_dir(cwd / "fancy_dataset"):
                return cwd / "fancy_dataset"
            cwd = cwd.parent
        raise FileNotFoundError("data directory has not been found")

    def _get_essay_list(self):
        """
        Read essay.json and return a list of dicts, where each dict is one essay.
        """

        # the file is in JSON Lines format: one JSON object per line
        path = self._find_data() / "essay.json"
        with open(path, "r", encoding="utf-8") as file:
            return [json.loads(line) for line in file]
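
    # Shape of one essay record (hypothetical illustration, showing only the
    # fields this script actually reads):
    #   {"docID": "...", "text": "...",
    #    "sentences": [{"tokens": [{"surface": "...", "gid": 0}, ...]}, ...],
    #    "argumentation": {"units": [{"attributes": {"role": "Claim"},
    #                                 "tokens": [7, 8, 9]}, ...]}}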

    def _split_generators(self, dl_manager):
        # This method defines the splits, depending on the configuration.
        # The data ships alongside the script, so the dl_manager (normally a
        # datasets.download.DownloadManager used to download and extract URLs)
        # is not needed here.
        # If several configurations are possible (listed in BUILDER_CONFIGS),
        # the configuration selected by the user is in self.config.name.

        train, test, validate = self._range_generator()
        essays = self._get_essay_list()

        # only emit a validation split if the validation range is non-empty
        if len(validate) > 0:
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    # these kwargs will be passed to _generate_examples
                    gen_kwargs={
                        "data": essays,
                        "id_range": train,
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "data": essays,
                        "id_range": validate,
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "data": essays,
                        "id_range": test,
                    },
                ),
            ]
        else:
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "data": essays,
                        "id_range": train,
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "data": essays,
                        "id_range": test,
                    },
                ),
            ]
336
+ def _get_id(self, essay):
337
+ return int(essay["docID"].split("_")[-1])
338
+
339
+ def _get_tokens(self, essay):
340
+ tokens = []
341
+ for sentence in essay["sentences"]:
342
+ for token in sentence["tokens"]:
343
+ tokens.append((token["surface"], token["gid"]))
344
+ return tokens
345
+
346
+ def _get_label_dict(self, essay):
347
+ label_dict = {}
348
+ for unit in essay["argumentation"]["units"]:
349
+ if self.config.name == "spans":
350
+ label = "Span"
351
+ else:
352
+ label = unit["attributes"]["role"]
353
+ for i, gid in enumerate(unit["tokens"]):
354
+ if i == 0:
355
+ location = "B-"
356
+ else:
357
+ location = "I-"
358
+ label_dict[gid] = location + label
359
+ return label_dict
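
        # Illustration (hypothetical unit): a unit with role "Claim" covering
        # gids [7, 8, 9] yields {7: "B-Claim", 8: "I-Claim", 9: "I-Claim"};
        # under the "spans" config the same unit yields
        # {7: "B-Span", 8: "I-Span", 9: "I-Span"}.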

    def _match_tokens(self, tokens, label_dict):
        text = []
        labels = []
        for surface, gid in tokens:
            # for each token, unpack it into its surface and gid,
            # then match the gid to its label and pack them back together

            # if the config requires separator tokens
            if (
                self.config.name == "sep_tok"
                or self.config.name == "sep_tok_full_labels"
            ):
                if label_dict.get(gid, "O")[0] == "B":
                    # at the beginning of a span, insert the
                    # beginning-of-sequence token (BOS) with an "O" label
                    text.append("<s>")
                    labels.append("O")
                elif (
                    label_dict.get(gid, "O") == "O"
                    and len(labels) != 0
                    and labels[-1][0] != "O"
                ):
                    # we are not in a span, but the previous label belonged
                    # to one: insert the end-of-sequence token (EOS) with an
                    # "O" label
                    text.append("</s>")
                    labels.append("O")

            # always append the surface form
            text.append(surface)

            # append the correct type of label, depending on the config
            if self.config.name == "full_labels":
                labels.append(label_dict.get(gid, "O"))

            elif self.config.name == "spans":
                labels.append(label_dict.get(gid, "O")[0])

            elif self.config.name == "simple":
                labels.append(label_dict.get(gid, "__O")[2:])

            elif self.config.name == "sep_tok":
                labels.append(label_dict.get(gid, "__O")[2:])

            elif self.config.name == "sep_tok_full_labels":
                labels.append(label_dict.get(gid, "O"))

            else:
                raise KeyError(f"unknown config name: {self.config.name}")
        return text, labels
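
    # Illustration under "sep_tok" (hypothetical tokens): given gid labels
    # B-Claim, I-Claim, O for the tokens "Cars", "pollute", ".", this returns
    #   text   == ["<s>", "Cars", "pollute", "</s>", "."]
    #   labels == ["O", "Claim", "Claim", "O", "O"]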

    def _get_text(self, essay):
        return essay["text"]

    def _process_essay(self, essay):
        essay_id = self._get_id(essay)
        tokens = self._get_tokens(essay)
        label_dict = self._get_label_dict(essay)
        tokens, labels = self._match_tokens(tokens, label_dict)
        text = self._get_text(essay)
        return {"id": essay_id, "tokens": tokens, "ner_tags": labels, "text": text}

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, data, id_range):
        # This method handles input defined in _split_generators to yield
        # (key, example) tuples from the dataset.
        # The `key` is for legacy reasons (tfds) and is not important in
        # itself, but must be unique for each example.

        for idx in id_range:
            yield idx, self._process_essay(data[idx])
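

# Minimal smoke test: a sketch, not part of the builder. It assumes the
# "fancy_dataset" folder with essay.json is reachable from the working
# directory and that your `datasets` version still runs local loading
# scripts (hence trust_remote_code).
if __name__ == "__main__":
    dataset = datasets.load_dataset(__file__, "full_labels", trust_remote_code=True)
    sample = dataset["train"][0]
    print(sample["id"], sample["tokens"][:10], sample["ner_tags"][:10])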