|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""TODO: Add a description here.""" |
|
|
|
|
|
import json |
|
from pathlib import Path |
|
|
|
import datasets |
|
|
|
|
|
|
|
_CITATION = """\ |
|
@InProceedings{huggingface:dataset, |
|
title = {a fancy dataset}, |
|
author={Hugo Meinhof, Elisa Luebbers}, |
|
year={2024} |
|
} |
|
""" |
|
|
|
|
|
|
|
_DESCRIPTION = """\ |
|
This dataset contains 402 argumentative essays from non-native """ |
|
|
|
|
|
_HOMEPAGE = "" |
|
|
|
|
|
_LICENSE = "" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class Fancy(datasets.GeneratorBasedBuilder):
    """Builder for 402 argumentative essays with token-level argumentation labels.

    Five configs control the tag set emitted in ``ner_tags``:

    * ``full_labels``         -- BIO tags with roles (B-Claim, I-Premise, ...)
    * ``spans``               -- role-free BIO tags (B, I, O)
    * ``simple``              -- roles without B/I (MajorClaim, Claim, Premise)
    * ``sep_tok``             -- like ``simple``, plus <s> ... </s> separator tokens
    * ``sep_tok_full_labels`` -- like ``full_labels``, plus separator tokens
    """

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="full_labels",
            version=VERSION,
            description="get all the data conveyed by the labels, O, B-Claim, I-Claim, etc.",
        ),
        datasets.BuilderConfig(
            name="spans",
            version=VERSION,
            description="get the spans, O, B-Span, I-Span.",
        ),
        datasets.BuilderConfig(
            name="simple",
            version=VERSION,
            description="get the labels without B/I, O, MajorClaim, Claim, Premise",
        ),
        datasets.BuilderConfig(
            name="sep_tok",
            version=VERSION,
            description="get the labels without B/I, meaning O, Claim, Premise"
            + ", etc.\n insert separator tokens <s> ... </s>",
        ),
        datasets.BuilderConfig(
            name="sep_tok_full_labels",
            version=VERSION,
            description="get the labels with B/I, meaning O, I-Claim, I-Premise"
            + ", etc.\n insert separator tokens <s> ... </s>",
        ),
    ]

    DEFAULT_CONFIG_NAME = "full_labels"

    # ClassLabel name inventory per config.  List order fixes the integer
    # class ids, so existing entries must never be reordered.
    _LABEL_NAMES = {
        "full_labels": [
            "O",
            "B-MajorClaim",
            "I-MajorClaim",
            "B-Claim",
            "I-Claim",
            "B-Premise",
            "I-Premise",
        ],
        "spans": ["O", "B", "I"],
        "simple": ["O", "X_placeholder_X", "MajorClaim", "Claim", "Premise"],
        "sep_tok": ["O", "X_placeholder_X", "MajorClaim", "Claim", "Premise"],
        "sep_tok_full_labels": [
            "O",
            "B-MajorClaim",
            "I-MajorClaim",
            "B-Claim",
            "I-Claim",
            "B-Premise",
            "I-Premise",
        ],
    }

    def _info(self):
        """Build the DatasetInfo for the active config.

        All configs share the same schema; only the ``ner_tags`` label
        inventory differs, so the former five near-identical Features
        blocks collapse into one lookup.

        Raises:
            KeyError: if the active config name is unknown.
        """
        features = datasets.Features(
            {
                "id": datasets.Value("int16"),
                "tokens": datasets.Sequence(datasets.Value("string")),
                "ner_tags": datasets.Sequence(
                    datasets.ClassLabel(names=self._LABEL_NAMES[self.config.name])
                ),
                "text": datasets.Value("string"),
                "span_begins": datasets.Sequence(datasets.Value("int16")),
                "span_ends": datasets.Sequence(datasets.Value("int16")),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _range_generator(self, train=0.8, test=0.2):
        """Return (train, test, validation) index ranges over the 402 essays.

        The validation range is whatever remains after the train and test
        fractions have been taken.
        """
        n_essays = 402  # fixed corpus size
        return (
            range(0, int(n_essays * train)),
            range(int(n_essays * train), int(n_essays * (train + test))),
            range(int(n_essays * (train + test)), n_essays),
        )

    @staticmethod
    def _find_data():
        """Walk up from the cwd (at most 5 levels) looking for the data folder.

        Returns:
            Path to the "essays_SuG" or "data" directory, whichever is
            found first ("essays_SuG" is preferred at each level).

        Raises:
            FileNotFoundError: if neither directory is found.
        """
        cwd = Path.cwd()
        for _ in range(5):
            for candidate in ("essays_SuG", "data"):
                if (cwd / candidate).is_dir():
                    print(f"found '{candidate}' folder at {cwd}")
                    return cwd / candidate
            cwd = cwd.parent
        raise FileNotFoundError("data directory has not been found")

    def _get_essay_list(self):
        """Read essay.json (JSON-lines format) and return one dict per essay."""
        path = self._find_data() / "essay.json"
        with open(path, "r", encoding="utf-8") as handle:
            return [json.loads(line) for line in handle]

    def _split_generators(self, dl_manager):
        """Create the split generators; the validation split is emitted
        only when its range is non-empty.

        NOTE(review): the ranges are hard-coded to train=1, test=0, so
        currently every essay lands in the train split — confirm intent.
        """
        train, test, validate = self._range_generator(1, 0)
        essays = self._get_essay_list()

        def _split(name, id_range):
            # every split shares the full essay list; id_range selects rows
            return datasets.SplitGenerator(
                name=name,
                gen_kwargs={"data": essays, "id_range": id_range},
            )

        splits = [_split(datasets.Split.TRAIN, train)]
        if len(validate) > 0:
            splits.append(_split(datasets.Split.VALIDATION, validate))
        splits.append(_split(datasets.Split.TEST, test))
        return splits

    def _get_id(self, essay):
        """Return the trailing integer of the essay's "docID" (e.g. "x_42" -> 42)."""
        return int(essay["docID"].split("_")[-1])

    def _get_tokens(self, essay):
        """Flatten all sentences into (surface, gid, begin, end) token tuples."""
        return [
            (
                token["surface"],
                token["gid"],
                token["characterOffsetBegin"],
                token["characterOffsetEnd"],
            )
            for sentence in essay["sentences"]
            for token in sentence["tokens"]
        ]

    def _get_label_dict(self, essay):
        """Map token gid -> BIO label ("B-<role>" / "I-<role>").

        Tokens outside any argumentative unit are simply absent from the
        dict.  For the "spans" config every role collapses to "Span".
        """
        label_dict = {}
        for unit in essay["argumentation"]["units"]:
            if self.config.name == "spans":
                label = "Span"
            else:
                label = unit["attributes"]["role"]
            for i, gid in enumerate(unit["tokens"]):
                # the first token of a unit opens the span
                prefix = "B-" if i == 0 else "I-"
                label_dict[gid] = prefix + label
        return label_dict

    def _match_tokens(self, tokens, label_dict):
        """Align tokens with config-specific labels and record span offsets.

        Returns:
            (text, labels, begins, ends): token surfaces (plus <s>/</s>
            markers for the sep_tok* configs), label strings, and the
            character offsets where argumentative spans begin and end.

        Raises:
            KeyError: if the active config name is unknown.
        """
        sep_configs = ("sep_tok", "sep_tok_full_labels")
        text, labels, begins, ends = [], [], [], []
        last_end = 0  # character offset where the previous token ended
        for surface, gid, begin, end in tokens:
            tag = label_dict.get(gid, "O")

            if tag[0] == "B":
                # a span opens at this token
                if self.config.name in sep_configs:
                    text.append("<s>")
                    labels.append("O")
                begins.append(begin)
            elif tag == "O" and labels and labels[-1][0] != "O":
                # first O after a span: close it at the previous token's end.
                # NOTE(review): a span that runs to the very last token, or
                # two back-to-back spans, never reach this branch, so that
                # end offset (and closing </s>) is dropped — behavior
                # preserved as-is, confirm whether this is intended.
                if self.config.name in sep_configs:
                    text.append("</s>")
                    labels.append("O")
                ends.append(last_end)

            text.append(surface)
            last_end = end

            if self.config.name == "full_labels":
                labels.append(tag)
            elif self.config.name == "spans":
                labels.append(tag[0])  # keep only B / I / O
            elif self.config.name in ("simple", "sep_tok"):
                # strip the "B-"/"I-" prefix; the "__O" default keeps the
                # [2:] slice yielding "O" for unlabeled tokens
                labels.append(label_dict.get(gid, "__O")[2:])
            elif self.config.name == "sep_tok_full_labels":
                labels.append(tag)
            else:
                raise KeyError(f"unknown config name: {self.config.name}")
        return text, labels, begins, ends

    def _get_text(self, essay):
        """Return the essay's raw text."""
        return essay["text"]

    def _process_essay(self, essay):
        """Convert one raw essay dict into a dataset example dict."""
        raw_tokens = self._get_tokens(essay)
        label_dict = self._get_label_dict(essay)
        tokens, labels, begins, ends = self._match_tokens(raw_tokens, label_dict)
        return {
            "id": self._get_id(essay),
            "tokens": tokens,
            "ner_tags": labels,
            "text": self._get_text(essay),
            "span_begins": begins,
            "span_ends": ends,
        }

    def _generate_examples(self, data, id_range):
        """Yield (key, example) pairs for the essays selected by id_range."""
        for essay_id in id_range:
            yield essay_id, self._process_essay(data[essay_id])
|
|