ygorg committed
Commit 89480ab · verified · 1 Parent(s): bf4e708

Convert indentation to spaces

Files changed (1)
  1. MANTRAGSC.py +223 -223
MANTRAGSC.py CHANGED
@@ -29,34 +29,34 @@ from filelock import FileLock
 
 _CITATION = """\
 @article{10.1093/jamia/ocv037,
-    author = {Kors, Jan A and Clematide, Simon and Akhondi,
-    Saber A and van Mulligen, Erik M and Rebholz-Schuhmann, Dietrich},
-    title = "{A multilingual gold-standard corpus for biomedical concept recognition: the Mantra GSC}",
-    journal = {Journal of the American Medical Informatics Association},
-    volume = {22},
-    number = {5},
-    pages = {948-956},
-    year = {2015},
-    month = {05},
-    abstract = "{Objective To create a multilingual gold-standard corpus for biomedical concept recognition.Materials
-    and methods We selected text units from different parallel corpora (Medline abstract titles, drug labels,
-    biomedical patent claims) in English, French, German, Spanish, and Dutch. Three annotators per language
-    independently annotated the biomedical concepts, based on a subset of the Unified Medical Language System and
-    covering a wide range of semantic groups. To reduce the annotation workload, automatically generated
-    preannotations were provided. Individual annotations were automatically harmonized and then adjudicated, and
-    cross-language consistency checks were carried out to arrive at the final annotations.Results The number of final
-    annotations was 5530. Inter-annotator agreement scores indicate good agreement (median F-score 0.79), and are
-    similar to those between individual annotators and the gold standard. The automatically generated harmonized
-    annotation set for each language performed equally well as the best annotator for that language.Discussion The use
-    of automatic preannotations, harmonized annotations, and parallel corpora helped to keep the manual annotation
-    efforts manageable. The inter-annotator agreement scores provide a reference standard for gauging the performance
-    of automatic annotation techniques.Conclusion To our knowledge, this is the first gold-standard corpus for
-    biomedical concept recognition in languages other than English. Other distinguishing features are the wide variety
-    of semantic groups that are being covered, and the diversity of text genres that were annotated.}",
-    issn = {1067-5027},
-    doi = {10.1093/jamia/ocv037},
-    url = {https://doi.org/10.1093/jamia/ocv037},
-    eprint = {https://academic.oup.com/jamia/article-pdf/22/5/948/34146393/ocv037.pdf},
+    author = {Kors, Jan A and Clematide, Simon and Akhondi,
+    Saber A and van Mulligen, Erik M and Rebholz-Schuhmann, Dietrich},
+    title = "{A multilingual gold-standard corpus for biomedical concept recognition: the Mantra GSC}",
+    journal = {Journal of the American Medical Informatics Association},
+    volume = {22},
+    number = {5},
+    pages = {948-956},
+    year = {2015},
+    month = {05},
+    abstract = "{Objective To create a multilingual gold-standard corpus for biomedical concept recognition.Materials
+    and methods We selected text units from different parallel corpora (Medline abstract titles, drug labels,
+    biomedical patent claims) in English, French, German, Spanish, and Dutch. Three annotators per language
+    independently annotated the biomedical concepts, based on a subset of the Unified Medical Language System and
+    covering a wide range of semantic groups. To reduce the annotation workload, automatically generated
+    preannotations were provided. Individual annotations were automatically harmonized and then adjudicated, and
+    cross-language consistency checks were carried out to arrive at the final annotations.Results The number of final
+    annotations was 5530. Inter-annotator agreement scores indicate good agreement (median F-score 0.79), and are
+    similar to those between individual annotators and the gold standard. The automatically generated harmonized
+    annotation set for each language performed equally well as the best annotator for that language.Discussion The use
+    of automatic preannotations, harmonized annotations, and parallel corpora helped to keep the manual annotation
+    efforts manageable. The inter-annotator agreement scores provide a reference standard for gauging the performance
+    of automatic annotation techniques.Conclusion To our knowledge, this is the first gold-standard corpus for
+    biomedical concept recognition in languages other than English. Other distinguishing features are the wide variety
+    of semantic groups that are being covered, and the diversity of text genres that were annotated.}",
+    issn = {1067-5027},
+    doi = {10.1093/jamia/ocv037},
+    url = {https://doi.org/10.1093/jamia/ocv037},
+    eprint = {https://academic.oup.com/jamia/article-pdf/22/5/948/34146393/ocv037.pdf},
 }
 """
 
@@ -73,216 +73,216 @@ _LICENSE = "CC_BY_4p0"
 _URL = "https://files.ifi.uzh.ch/cl/mantra/gsc/GSC-v1.1.zip"
 
 _LANGUAGES_2 = {
-    "es": "Spanish",
-    "fr": "French",
-    "de": "German",
-    "nl": "Dutch",
-    "en": "English",
+    "es": "Spanish",
+    "fr": "French",
+    "de": "German",
+    "nl": "Dutch",
+    "en": "English",
 }
 
 _DATASET_TYPES = {
-    "emea": "EMEA",
-    "medline": "Medline",
-    "patents": "Patent",
+    "emea": "EMEA",
+    "medline": "Medline",
+    "patents": "Patent",
 }
 
 @dataclass
 class DrBenchmarkConfig(datasets.BuilderConfig):
-    name: str = None
-    version: datasets.Version = None
-    description: str = None
-    schema: str = None
-    subset_id: str = None
+    name: str = None
+    version: datasets.Version = None
+    description: str = None
+    schema: str = None
+    subset_id: str = None
 
 class MANTRAGSC(datasets.GeneratorBasedBuilder):
 
-    SOURCE_VERSION = datasets.Version("1.0.0")
-
-    BUILDER_CONFIGS = []
-
-    for language, dataset_type in product(_LANGUAGES_2, _DATASET_TYPES):
-
-        if dataset_type == "patents" and language in ["nl", "es"]:
-            continue
-
-        BUILDER_CONFIGS.append(
-            DrBenchmarkConfig(
-                name=f"{language}_{dataset_type}",
-                version=SOURCE_VERSION,
-                description=f"Mantra GSC {_LANGUAGES_2[language]} {_DATASET_TYPES[dataset_type]} source schema",
-                schema="source",
-                subset_id=f"{language}_{_DATASET_TYPES[dataset_type]}",
-            )
-        )
-
-    DEFAULT_CONFIG_NAME = "fr_medline"
-
-    def _info(self):
-
-        if self.config.name.find("emea") != -1:
-            names = ['B-ANAT', 'I-ANAT', 'I-PHEN', 'B-PROC', 'I-CHEM', 'I-PHYS', 'B-DEVI', 'O', 'B-PHYS', 'I-DEVI', 'B-OBJC', 'I-DISO', 'B-PHEN', 'I-LIVB', 'B-DISO', 'B-LIVB', 'B-CHEM', 'I-PROC']
-        elif self.config.name.find("medline") != -1:
-            names = ['B-ANAT', 'I-ANAT', 'B-PROC', 'I-CHEM', 'I-PHYS', 'B-GEOG', 'B-DEVI', 'O', 'B-PHYS', 'I-LIVB', 'B-OBJC', 'I-DISO', 'I-DEVI', 'B-PHEN', 'B-DISO', 'B-LIVB', 'B-CHEM', 'I-PROC']
-        elif self.config.name.find("patents") != -1:
-            names = ['B-ANAT', 'I-ANAT', 'B-PROC', 'I-CHEM', 'I-PHYS', 'B-DEVI', 'O', 'I-LIVB', 'B-OBJC', 'I-DISO', 'B-PHEN', 'I-PROC', 'B-DISO', 'I-DEVI', 'B-LIVB', 'B-CHEM', 'B-PHYS']
-
-        features = datasets.Features(
-            {
-                "id": datasets.Value("string"),
-                "tokens": [datasets.Value("string")],
-                "ner_tags": datasets.Sequence(
-                    datasets.features.ClassLabel(
-                        names = names,
-                    )
-                ),
-            }
-        )
-
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,
-            homepage=_HOMEPAGE,
-            license=str(_LICENSE),
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-
-        language, dataset_type = self.config.name.split("_")
+    SOURCE_VERSION = datasets.Version("1.0.0")
+
+    BUILDER_CONFIGS = []
+
+    for language, dataset_type in product(_LANGUAGES_2, _DATASET_TYPES):
+
+        if dataset_type == "patents" and language in ["nl", "es"]:
+            continue
+
+        BUILDER_CONFIGS.append(
+            DrBenchmarkConfig(
+                name=f"{language}_{dataset_type}",
+                version=SOURCE_VERSION,
+                description=f"Mantra GSC {_LANGUAGES_2[language]} {_DATASET_TYPES[dataset_type]} source schema",
+                schema="source",
+                subset_id=f"{language}_{_DATASET_TYPES[dataset_type]}",
+            )
+        )
+
+    DEFAULT_CONFIG_NAME = "fr_medline"
+
+    def _info(self):
+
+        if self.config.name.find("emea") != -1:
+            names = ['B-ANAT', 'I-ANAT', 'I-PHEN', 'B-PROC', 'I-CHEM', 'I-PHYS', 'B-DEVI', 'O', 'B-PHYS', 'I-DEVI', 'B-OBJC', 'I-DISO', 'B-PHEN', 'I-LIVB', 'B-DISO', 'B-LIVB', 'B-CHEM', 'I-PROC']
+        elif self.config.name.find("medline") != -1:
+            names = ['B-ANAT', 'I-ANAT', 'B-PROC', 'I-CHEM', 'I-PHYS', 'B-GEOG', 'B-DEVI', 'O', 'B-PHYS', 'I-LIVB', 'B-OBJC', 'I-DISO', 'I-DEVI', 'B-PHEN', 'B-DISO', 'B-LIVB', 'B-CHEM', 'I-PROC']
+        elif self.config.name.find("patents") != -1:
+            names = ['B-ANAT', 'I-ANAT', 'B-PROC', 'I-CHEM', 'I-PHYS', 'B-DEVI', 'O', 'I-LIVB', 'B-OBJC', 'I-DISO', 'B-PHEN', 'I-PROC', 'B-DISO', 'I-DEVI', 'B-LIVB', 'B-CHEM', 'B-PHYS']
+
+        features = datasets.Features(
+            {
+                "id": datasets.Value("string"),
+                "tokens": [datasets.Value("string")],
+                "ner_tags": datasets.Sequence(
+                    datasets.features.ClassLabel(
+                        names = names,
+                    )
+                ),
+            }
+        )
+
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            homepage=_HOMEPAGE,
+            license=str(_LICENSE),
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+
+        language, dataset_type = self.config.name.split("_")
 
         # Fixes concurrency issues when extracting files after download has ended
         # cf. https://github.com/huggingface/datasets/issues/4661#issuecomment-2792885416
         with FileLock(Path(datasets.config.HF_CACHE_HOME) / "tmp_MANTRAGSC.lock"):
-            data_dir = dl_manager.download_and_extract(_URL)
-            data_dir = Path(data_dir) / "GSC-v1.1" / f"{_DATASET_TYPES[dataset_type]}_GSC_{language}_man.xml"
-
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "data_dir": data_dir,
-                    "split": "train",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={
-                    "data_dir": data_dir,
-                    "split": "validation",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={
-                    "data_dir": data_dir,
-                    "split": "test",
-                },
-            ),
-        ]
-
-    def _generate_examples(self, data_dir, split):
-
-        with open(data_dir) as fd:
-            doc = xmltodict.parse(fd.read())
-
-        all_res = []
-
-        for d in doc["Corpus"]["document"]:
-
-            if type(d["unit"]) != type(list()):
-                d["unit"] = [d["unit"]]
-
-            for u in d["unit"]:
-
-                text = u["text"]
-
-                if "e" in u.keys():
-
-                    if type(u["e"]) != type(list()):
-                        u["e"] = [u["e"]]
-
-                    tags = [{
-                        "label": current["@grp"].upper(),
-                        "offset_start": int(current["@offset"]),
-                        "offset_end": int(current["@offset"]) + int(current["@len"]),
-                    } for current in u["e"]]
-
-                else:
-                    tags = []
-
-                _tokens = text.split(" ")
-                tokens = []
-                for i, t in enumerate(_tokens):
-
-                    concat = " ".join(_tokens[0:i+1])
-
-                    offset_start = len(concat) - len(t)
-                    offset_end = len(concat)
-
-                    tokens.append({
-                        "token": t,
-                        "offset_start": offset_start,
-                        "offset_end": offset_end,
-                    })
-
-                ner_tags = [["O", 0] for o in tokens]
-
-                for tag in tags:
-
-                    cpt = 0
-
-                    for idx, token in enumerate(tokens):
-
-                        rtok = range(token["offset_start"], token["offset_end"]+1)
-                        rtag = range(tag["offset_start"], tag["offset_end"]+1)
-
-                        # Check if the ranges are overlapping
-                        if bool(set(rtok) & set(rtag)):
-
-                            # if ner_tags[idx] != "O" and ner_tags[idx] != tag['label']:
-                            #     print(f"{token} - currently: {ner_tags[idx]} - after: {tag['label']}")
-
-                            if ner_tags[idx][0] == "O":
-                                cpt += 1
-                            ner_tags[idx][0] = tag["label"]
-                            ner_tags[idx][1] = cpt
-
-                for i in range(len(ner_tags)):
-
-                    tag = ner_tags[i][0]
-
-                    if tag == "O":
-                        continue
-                    elif tag != "O" and ner_tags[i][1] == 1:
-                        ner_tags[i][0] = "B-" + tag
-                    elif tag != "O" and ner_tags[i][1] != 1:
-                        ner_tags[i][0] = "I-" + tag
-
-                obj = {
-                    "id": u["@id"],
-                    "tokens": [t["token"] for t in tokens],
-                    "ner_tags": [n[0] for n in ner_tags],
-                }
-
-                all_res.append(obj)
-
-        ids = [r["id"] for r in all_res]
+            data_dir = dl_manager.download_and_extract(_URL)
+            data_dir = Path(data_dir) / "GSC-v1.1" / f"{_DATASET_TYPES[dataset_type]}_GSC_{language}_man.xml"
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "data_dir": data_dir,
+                    "split": "train",
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={
+                    "data_dir": data_dir,
+                    "split": "validation",
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={
+                    "data_dir": data_dir,
+                    "split": "test",
+                },
+            ),
+        ]
+
+    def _generate_examples(self, data_dir, split):
+
+        with open(data_dir) as fd:
+            doc = xmltodict.parse(fd.read())
+
+        all_res = []
+
+        for d in doc["Corpus"]["document"]:
+
+            if type(d["unit"]) != type(list()):
+                d["unit"] = [d["unit"]]
+
+            for u in d["unit"]:
+
+                text = u["text"]
+
+                if "e" in u.keys():
+
+                    if type(u["e"]) != type(list()):
+                        u["e"] = [u["e"]]
+
+                    tags = [{
+                        "label": current["@grp"].upper(),
+                        "offset_start": int(current["@offset"]),
+                        "offset_end": int(current["@offset"]) + int(current["@len"]),
+                    } for current in u["e"]]
+
+                else:
+                    tags = []
+
+                _tokens = text.split(" ")
+                tokens = []
+                for i, t in enumerate(_tokens):
+
+                    concat = " ".join(_tokens[0:i+1])
+
+                    offset_start = len(concat) - len(t)
+                    offset_end = len(concat)
+
+                    tokens.append({
+                        "token": t,
+                        "offset_start": offset_start,
+                        "offset_end": offset_end,
+                    })
+
+                ner_tags = [["O", 0] for o in tokens]
+
+                for tag in tags:
+
+                    cpt = 0
+
+                    for idx, token in enumerate(tokens):
+
+                        rtok = range(token["offset_start"], token["offset_end"]+1)
+                        rtag = range(tag["offset_start"], tag["offset_end"]+1)
+
+                        # Check if the ranges are overlapping
+                        if bool(set(rtok) & set(rtag)):
+
+                            # if ner_tags[idx] != "O" and ner_tags[idx] != tag['label']:
+                            #     print(f"{token} - currently: {ner_tags[idx]} - after: {tag['label']}")
+
+                            if ner_tags[idx][0] == "O":
+                                cpt += 1
+                            ner_tags[idx][0] = tag["label"]
+                            ner_tags[idx][1] = cpt
+
+                for i in range(len(ner_tags)):
+
+                    tag = ner_tags[i][0]
+
+                    if tag == "O":
+                        continue
+                    elif tag != "O" and ner_tags[i][1] == 1:
+                        ner_tags[i][0] = "B-" + tag
+                    elif tag != "O" and ner_tags[i][1] != 1:
+                        ner_tags[i][0] = "I-" + tag
+
+                obj = {
+                    "id": u["@id"],
+                    "tokens": [t["token"] for t in tokens],
+                    "ner_tags": [n[0] for n in ner_tags],
+                }
+
+                all_res.append(obj)
+
+        ids = [r["id"] for r in all_res]
 
-        random.seed(4)
-        random.shuffle(ids)
-        random.shuffle(ids)
-        random.shuffle(ids)
+        random.seed(4)
+        random.shuffle(ids)
+        random.shuffle(ids)
+        random.shuffle(ids)
 
-        train, validation, test = np.split(ids, [int(len(ids)*0.70), int(len(ids)*0.80)])
+        train, validation, test = np.split(ids, [int(len(ids)*0.70), int(len(ids)*0.80)])
 
-        if split == "train":
-            allowed_ids = list(train)
-        elif split == "validation":
-            allowed_ids = list(validation)
-        elif split == "test":
-            allowed_ids = list(test)
+        if split == "train":
+            allowed_ids = list(train)
+        elif split == "validation":
+            allowed_ids = list(validation)
+        elif split == "test":
+            allowed_ids = list(test)
 
-        for r in all_res:
-            identifier = r["id"]
-            if identifier in allowed_ids:
-                yield identifier, r
+        for r in all_res:
+            identifier = r["id"]
+            if identifier in allowed_ids:
+                yield identifier, r
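
Usage note (not part of the commit): the change is whitespace-only, so the script behaves as before. It exposes one configuration per language/corpus pair and derives its own train/validation/test splits (70/10/20, via np.split over the unit ids shuffled with random.seed(4)). A minimal loading sketch follows, assuming the script is hosted in a Hub dataset repository; the repo id "DrBenchmark/MANTRAGSC" is an assumption, substitute the actual path.

# Hypothetical loading example; the repo id below is assumed.
from datasets import load_dataset

# "fr_medline" is the script's DEFAULT_CONFIG_NAME; any "{language}_{corpus}"
# config built in BUILDER_CONFIGS works (patents has no "nl"/"es" variants).
ds = load_dataset("DrBenchmark/MANTRAGSC", name="fr_medline", trust_remote_code=True)

sample = ds["train"][0]
# "ner_tags" come back as ClassLabel ids; map them to BIO strings for display.
label_names = ds["train"].features["ner_tags"].feature.names
print(sample["tokens"])
print([label_names[i] for i in sample["ner_tags"]])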