Commit 28c621e by Theoreticallyhugo
Parent(s): 6e86234

added information on where spans begin and end in the untouched text string

Files changed (1):
  1. essays_SuG.py (+63 -24)
essays_SuG.py CHANGED
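In practical terms, this commit gives every config two extra parallel columns, span_begins and span_ends, holding character offsets into the untouched text field. A minimal sketch of reading them back after the change (the local script path, config name, and split are illustrative assumptions, not part of this diff):

from datasets import load_dataset

# assumption: the loading script sits locally as essays_SuG.py
ds = load_dataset("essays_SuG.py", name="sep_tok", trust_remote_code=True)

example = ds["train"][0]
# the two parallel int16 sequences added by this commit
print(example["span_begins"])  # character offsets where spans start in `text`
print(example["span_ends"])    # character offsets where spans end in `text`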
@@ -126,6 +126,8 @@ class Fancy(datasets.GeneratorBasedBuilder):
                         )
                     ),
                     "text": datasets.Value("string"),
+                    "span_begins": datasets.Sequence(datasets.Value("int16")),
+                    "span_ends": datasets.Sequence(datasets.Value("int16")),
                 }
             )
         elif (
@@ -145,6 +147,8 @@ class Fancy(datasets.GeneratorBasedBuilder):
                         )
                     ),
                     "text": datasets.Value("string"),
+                    "span_begins": datasets.Sequence(datasets.Value("int16")),
+                    "span_ends": datasets.Sequence(datasets.Value("int16")),
                 }
             )
         elif (
@@ -166,6 +170,8 @@ class Fancy(datasets.GeneratorBasedBuilder):
                         )
                     ),
                     "text": datasets.Value("string"),
+                    "span_begins": datasets.Sequence(datasets.Value("int16")),
+                    "span_ends": datasets.Sequence(datasets.Value("int16")),
                 }
             )
         elif self.config.name == "sep_tok":
@@ -185,6 +191,8 @@ class Fancy(datasets.GeneratorBasedBuilder):
                         )
                     ),
                     "text": datasets.Value("string"),
+                    "span_begins": datasets.Sequence(datasets.Value("int16")),
+                    "span_ends": datasets.Sequence(datasets.Value("int16")),
                 }
             )
         elif self.config.name == "sep_tok_full_labels":
@@ -206,6 +214,8 @@ class Fancy(datasets.GeneratorBasedBuilder):
                         )
                     ),
                     "text": datasets.Value("string"),
+                    "span_begins": datasets.Sequence(datasets.Value("int16")),
+                    "span_ends": datasets.Sequence(datasets.Value("int16")),
                 }
             )
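Each of the five feature branches gains the same pair of declarations. As a reminder of what they mean, datasets.Sequence(datasets.Value("int16")) describes a variable-length list of 16-bit integers per example; a reduced, self-contained sketch of the pattern (the surrounding keys are trimmed to text only for brevity):

import datasets

# reduced sketch of the pattern added in each config branch
features = datasets.Features(
    {
        "text": datasets.Value("string"),
        "span_begins": datasets.Sequence(datasets.Value("int16")),
        "span_ends": datasets.Sequence(datasets.Value("int16")),
    }
)

# caveat: int16 caps at 32767, so character offsets in any essay longer than
# that would overflow; datasets.Value("int32") would avoid the limit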
 
@@ -252,11 +262,15 @@ class Fancy(datasets.GeneratorBasedBuilder):
         # check for whether the data folder is in cwd.
         # if it isnt, change cwd to its parent directory
         # do this three times only (dont want infinite recursion)
-        for _ in range(3):
-            if Path.is_dir(cwd / "fancy_dataset"):
-                # print(f"found 'data' folder at {cwd}")
+        for _ in range(5):
+            if Path.is_dir(cwd / "essays_SuG"):
+                print(f"found 'essays_SuG' folder at {cwd}")
+                # input(f"returning {cwd / 'essays_SuG'}")
+                return cwd / "essays_SuG"
+            if Path.is_dir(cwd / "data"):
+                print(f"found 'data' folder at {cwd}")
                 # input(f"returning {cwd / 'data'}")
-                return cwd / "fancy_dataset"
+                return cwd / "data"
             cwd = cwd.parent
         raise FileNotFoundError("data directory has not been found")
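One small inconsistency the diff leaves behind: the comment above the loop still says "three times only" although the bound is now five. The new lookup itself reduces to a short search upward from the working directory; a standalone sketch of the equivalent logic (the free function name is illustrative, the script keeps this inside the builder class):

from pathlib import Path

def find_data_dir(start: Path | None = None) -> Path:
    """Walk up to five parent directories looking for the dataset folder."""
    cwd = start or Path.cwd()
    for _ in range(5):
        if (cwd / "essays_SuG").is_dir():
            return cwd / "essays_SuG"
        if (cwd / "data").is_dir():
            return cwd / "data"
        cwd = cwd.parent
    raise FileNotFoundError("data directory has not been found")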
 
@@ -340,7 +354,14 @@ class Fancy(datasets.GeneratorBasedBuilder):
         tokens = []
         for sentence in essay["sentences"]:
             for token in sentence["tokens"]:
-                tokens.append((token["surface"], token["gid"]))
+                tokens.append(
+                    (
+                        token["surface"],
+                        token["gid"],
+                        token["characterOffsetBegin"],
+                        token["characterOffsetEnd"],
+                    )
+                )
         return tokens

     def _get_label_dict(self, essay):
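_get_tokens now threads the per-token character offsets through alongside each surface form and gid. An illustrative sketch of the tuples it returns (the concrete values below are made up):

# each entry is (surface, gid, characterOffsetBegin, characterOffsetEnd);
# the values are invented for illustration only
tokens = [
    ("Cloning", "T17", 0, 7),
    ("is", "T18", 8, 10),
]

# the offsets index into the raw essay text, so for an untouched string
essay_text = "Cloning is ..."
assert essay_text[0:7] == "Cloning"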
@@ -361,33 +382,44 @@ class Fancy(datasets.GeneratorBasedBuilder):
     def _match_tokens(self, tokens, label_dict):
         text = []
         labels = []
-        for surface, gid in tokens:
+        begins = []
+        ends = []
+        last_end = 0
+        for surface, gid, begin, end in tokens:
             # for each token, unpack it into its surface and gid
             # then match the gid to the label and pack them back together

-            # if the config requires separator tokens
-            if (
-                self.config.name == "sep_tok"
-                or self.config.name == "sep_tok_full_labels"
-            ):
-                if label_dict.get(gid, "O")[0] == "B":
-                    # if we are at the beginning of a span
-                    # insert begin of sequence token (BOS) and "O" label
+            if label_dict.get(gid, "O")[0] == "B":
+                # if we are at the beginning of a span
+                # insert begin of sequence token (BOS) and "O" label
+                if (
+                    self.config.name == "sep_tok"
+                    or self.config.name == "sep_tok_full_labels"
+                ):
+                    # if the config requires separator tokens
                     text.append("<s>")
                     labels.append("O")
-                elif (
-                    label_dict.get(gid, "O") == "O"
-                    and len(labels) != 0
-                    and labels[-1][0] != "O"
+                begins.append(begin)
+            elif (
+                label_dict.get(gid, "O") == "O"
+                and len(labels) != 0
+                and labels[-1][0] != "O"
+            ):
+                # if we are not in a span, and the previous label was
+                # of a span
+                # intert end of sequence token (EOS) and "O" label
+                if (
+                    self.config.name == "sep_tok"
+                    or self.config.name == "sep_tok_full_labels"
                 ):
-                    # if we are not in a span, and the previous label was
-                    # of a span
-                    # intert end of sequence token (EOS) and "O" label
+                    # if the config requires separator tokens
                     text.append("</s>")
                     labels.append("O")
+                ends.append(last_end)

             # always append the surface form
             text.append(surface)
+            last_end = end

             # append the correct type of label, depending on the config
             if self.config.name == "full_labels":
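The bookkeeping in _match_tokens records a begin offset whenever a "B" label opens a span, and an end offset (the previous token's characterOffsetEnd) when the first "O" token after a span is reached, so begins and ends form parallel lists with one entry per span. One hedged caveat: a span that runs to the very end of an essay, or that is immediately followed by another span, never reaches that "O" branch, so whether the two lists always stay the same length depends on the corpus. A small sketch of consuming the pair:

def recover_spans(text: str, begins: list[int], ends: list[int]) -> list[str]:
    """Slice the annotated spans back out of the untouched essay text."""
    # assumes begins[i] and ends[i] describe the same span, as produced above
    return [text[b:e] for b, e in zip(begins, ends)]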
@@ -407,7 +439,7 @@ class Fancy(datasets.GeneratorBasedBuilder):

             else:
                 raise KeyError()
-        return text, labels
+        return text, labels, begins, ends

     def _get_text(self, essay):
         return essay["text"]
@@ -419,11 +451,18 @@ class Fancy(datasets.GeneratorBasedBuilder):
         # input(tokens)
         label_dict = self._get_label_dict(essay)
         # input(label_dict)
-        tokens, labels = self._match_tokens(tokens, label_dict)
+        tokens, labels, begins, ends = self._match_tokens(tokens, label_dict)
         # input(tokens)
         # input(labels)
         text = self._get_text(essay)
-        return {"id": id, "tokens": tokens, "ner_tags": labels, "text": text}
+        return {
+            "id": id,
+            "tokens": tokens,
+            "ner_tags": labels,
+            "text": text,
+            "span_begins": begins,
+            "span_ends": ends,
+        }

     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
     def _generate_examples(self, data, id_range):
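With the two extra keys in the returned dict, every generated example can be checked against its own raw text. A rough sanity check along those lines (how you obtain example depends on how the dataset is loaded; the field names come from this diff):

def check_span_offsets(example: dict) -> None:
    # every recorded begin/end pair should cut a non-empty slice
    # out of the untouched text string
    for b, e in zip(example["span_begins"], example["span_ends"]):
        assert 0 <= b < e <= len(example["text"])
        print(repr(example["text"][b:e]))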
 