tznurmin committed on
Commit
1683dc6
·
verified ·
1 Parent(s): 8585e08

Convert dataset to Parquet (#1)

Browse files

- Convert dataset to Parquet (c06146153a29c5c8d3a4cd27878ea7a2919193a2)
- Add 'strains' config data files (1db987caa6bc9a8806375afd5ec5fbe27311bd4d)
- Delete loading script (1ee40cc7d7e54eb60574afff4f9e0559ff802c48)
- Delete data file (725df46de952c2efd5b64e3c712dbd703dbb18aa)

.gitignore DELETED
@@ -1,2 +0,0 @@
1
- flake.nix
2
- flake.lock
 
 
 
README.md CHANGED
@@ -37,7 +37,7 @@ dataset_info:
37
  - name: train
38
  num_bytes: 50995544
39
  num_examples: 505
40
- download_size: 21777460
41
  dataset_size: 50995544
42
  - config_name: strains
43
  features:
@@ -70,6 +70,16 @@ dataset_info:
70
  - name: train
71
  num_bytes: 41153821
72
  num_examples: 400
73
- download_size: 21777460
74
  dataset_size: 41153821
 
 
 
 
 
 
 
 
 
 
75
  ---
 
37
  - name: train
38
  num_bytes: 50995544
39
  num_examples: 505
40
+ download_size: 11318610
41
  dataset_size: 50995544
42
  - config_name: strains
43
  features:
 
70
  - name: train
71
  num_bytes: 41153821
72
  num_examples: 400
73
+ download_size: 8836267
74
  dataset_size: 41153821
75
+ configs:
76
+ - config_name: pathogens
77
+ data_files:
78
+ - split: train
79
+ path: pathogens/train-*
80
+ default: true
81
+ - config_name: strains
82
+ data_files:
83
+ - split: train
84
+ path: strains/train-*
85
  ---
pathogens/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c3d6e32db9118f41876ac2144fa5fc951c9985da44eaef244ea5fdb81e8d7a88
3
+ size 11318610
strains/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fd15182d90d212137f5c72604a1021969e7ac266d395312bd9db692ed02693c1
3
+ size 8836267
tea_curated.py DELETED
@@ -1,238 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- """
3
- TEA: Taxonomic Entity Augmentation (curated raw corpus)
4
-
5
- Usage
6
- -----
7
- >>> from datasets import load_dataset
8
- >>> ds = load_dataset("tznurmin/tea_curated",
9
- ... "pathogens",
10
- ... trust_remote_code=True,
11
- ... split="train")
12
- """
13
-
14
- import json
15
- import logging
16
- import re
17
- from pathlib import Path
18
-
19
- import datasets
20
-
21
- logger = logging.getLogger(__name__)
22
-
23
- # ---------------------------------------------------------------------------
24
- # Hub metadata
25
- # ---------------------------------------------------------------------------
26
- _HOMEPAGE = "https://github.com/tznurmin/TEA_curated_data"
27
-
28
- _LICENSE = (
29
- "MIT (curation JSON annotations) and mixed Creative-Commons licences "
30
- "(CC-BY / CC-BY-SA / CC-BY-NC / CC-BY-NC-SA) for article texts"
31
- )
32
-
33
- _CITATION = r"""
34
- @misc{nurminen2025tea,
35
- author = {Nurminen, Toni and Kim, John and Si, Tong and Chang, Matthew Wook},
36
- title = {TEA: Taxonomic Entity Augmentation for Biological Literature},
37
- year = {2025},
38
- howpublished = {\url{https://github.com/tznurmin/TEA_curated_data}},
39
- note = {Dataset available at \url{https://github.com/tznurmin/TEA_curated_data}}
40
- }
41
- """
42
-
43
- _DESCRIPTION = (
44
- "Taxonomic Entity Augmentation (TEA) curated dataset. It contains the full "
45
- "text of biomedical articles (under their original Creative-Commons licences) "
46
- "and curation JSON files that mark pathogen / strain / species entities."
47
- "Two configs are provided:\n\n"
48
- "1: **pathogens** – commensals, probiotics, opportunistics, negatives, strains, pathogens\n"
49
- "2: **strains** – strains, species, negatives\n\n"
50
- "Each token is annotated in **IOB2** format (B-XXX, I-XXX, O). Per-record "
51
- "licence information and attribution text are included."
52
- )
53
-
54
- _URL = "https://github.com/tznurmin/TEA_curated_data/archive/refs/tags/v1.1.tar.gz"
55
-
56
- # ---------------------------------------------------------------------------
57
- # IOB2 label sets
58
- # ---------------------------------------------------------------------------
59
- _LABELS = {
60
- "pathogens": [
61
- "O",
62
- "B-COMM", "I-COMM",
63
- "B-PROB", "I-PROB",
64
- "B-OPPO", "I-OPPO",
65
- "B-NEGA", "I-NEGA",
66
- "B-STRA", "I-STRA",
67
- "B-PATH", "I-PATH",
68
- ],
69
- "strains": [
70
- "O",
71
- "B-STRA", "I-STRA",
72
- "B-SPEC", "I-SPEC",
73
- "B-NEGA", "I-NEGA",
74
- ],
75
- }
76
-
77
- # Map raw JSON tags (colour suffix stripped) to 4-letter codes
78
- _CODE4 = {
79
- "commensals": "COMM",
80
- "probiotics": "PROB",
81
- "opportunistics": "OPPO",
82
- "negatives": "NEGA",
83
- "strains": "STRA",
84
- "pathogens": "PATH",
85
- "species": "SPEC",
86
- }
87
-
88
- # ---------------------------------------------------------------------------
89
- # DatasetBuilder
90
- # ---------------------------------------------------------------------------
91
- class TeaCurated(datasets.GeneratorBasedBuilder):
92
- VERSION = datasets.Version("1.0.0")
93
-
94
- BUILDER_CONFIGS = [
95
- datasets.BuilderConfig(
96
- name="pathogens",
97
- version=VERSION,
98
- description="Pathogen curation data",
99
- ),
100
- datasets.BuilderConfig(
101
- name="strains",
102
- version=VERSION,
103
- description="Strain curation data",
104
- ),
105
- ]
106
- DEFAULT_CONFIG_NAME = "pathogens"
107
-
108
-
109
- def _info(self):
110
- return datasets.DatasetInfo(
111
- description=_DESCRIPTION,
112
- citation=_CITATION,
113
- homepage=_HOMEPAGE,
114
- license=_LICENSE,
115
- features=datasets.Features(
116
- {
117
- "hash": datasets.Value("string"),
118
- "tokens": datasets.Sequence(datasets.Value("string")),
119
- "ner_tags": datasets.Sequence(
120
- datasets.ClassLabel(names=_LABELS[self.config.name])
121
- ),
122
- "locations": datasets.Value("string"),
123
- "license": datasets.Value("string"),
124
- "license_version": datasets.Value("string"),
125
- "license_url": datasets.Value("string"),
126
- "attribution_text": datasets.Value("string"),
127
- }
128
- ),
129
- supervised_keys=None,
130
- )
131
-
132
-
133
- def _split_generators(self, dl_manager):
134
- extracted = Path(dl_manager.download_and_extract(_URL))
135
- base_dir = extracted / "TEA_curated_data-1.1"
136
-
137
- curation_json = (
138
- base_dir / "curation_data" / self.config.name / f"{self.config.name}.json"
139
- )
140
- articles_dir = base_dir / "source_articles"
141
-
142
- return [
143
- datasets.SplitGenerator(
144
- name=datasets.Split.TRAIN,
145
- gen_kwargs={
146
- "curation_path": curation_json,
147
- "articles_dir": articles_dir,
148
- },
149
- )
150
- ]
151
-
152
-
153
- def _generate_examples(self, curation_path: Path, articles_dir: Path):
154
- with open(curation_path, encoding="utf-8") as f:
155
- curation = json.load(f)
156
-
157
- attribution = _parse_attribution_file(articles_dir.parent / "attribution.txt")
158
-
159
- for art_hash, annotation in curation.items():
160
- if art_hash not in attribution:
161
- raise ValueError(f"Attribution missing for article hash {art_hash!r}")
162
-
163
- txt_path = articles_dir / art_hash / f"{art_hash}.txt"
164
- text = txt_path.read_text(encoding="utf-8")
165
- text = re.sub(r"\$/i\$", "", text)
166
- text = re.sub(r"\$i\$", "", text)
167
- text = re.sub(r"\s+", " ", text)
168
-
169
- tokens = text.split()
170
- ner_tags = ["O"] * len(tokens)
171
-
172
- for raw_tag, locs in annotation.items():
173
- code = _CODE4[raw_tag.split("/")[0]]
174
- for loc in locs: # e.g. "353+2"
175
- start, length = map(int, loc.split("+"))
176
- for i in range(start, start + length):
177
- if i < len(tokens):
178
- ner_tags[i] = ("B-" if i == start else "I-") + code
179
-
180
- meta = attribution[art_hash]
181
-
182
- yield art_hash, {
183
- "hash": art_hash,
184
- "tokens": tokens,
185
- "ner_tags": ner_tags,
186
- "locations": json.dumps(annotation),
187
- "license": meta["license"],
188
- "license_version": meta["license_version"],
189
- "license_url": meta["license_url"],
190
- "attribution_text": meta["text"],
191
- }
192
-
193
- # ---------------------------------------------------------------------------
194
- # Attribution parser
195
- # ---------------------------------------------------------------------------
196
- def _parse_attribution_file(file_path: Path) -> dict[str, dict[str, str]]:
197
- """
198
- Parse attribution.txt into
199
- {checksum: {license, license_version, license_url, text}}.
200
- """
201
- out = {}
202
- csum = lic = lic_url = lic_ver = None
203
- temp_lines = []
204
-
205
- with open(file_path, encoding="utf-8") as fp:
206
- for line in fp:
207
- if "Checksum: " in line:
208
- csum = line.split("Checksum: ")[1].strip()
209
- elif "is obtained by the following licence: " in line:
210
- lic_url = line.split("is obtained by the following licence: ")[1].strip()
211
- lic_parts = (
212
- line.split(
213
- "is obtained by the following licence: http://creativecommons.org/licenses/"
214
- )[1]
215
- .strip()
216
- .split("/")
217
- )
218
- lic_ver = lic_parts[1]
219
- lic = f"CC-{lic_parts[0].upper()}"
220
- out[csum] = {
221
- "license": lic,
222
- "license_version": lic_ver,
223
- "license_url": lic_url,
224
- }
225
- else:
226
- if len(line.strip()) > 0:
227
- temp_lines.append(line.strip())
228
- else:
229
- # blank line marks end of a record
230
- out[csum]["text"] = " ".join(temp_lines).strip()
231
- csum = lic = lic_url = lic_ver = None
232
- temp_lines = []
233
-
234
- # flush last record if file does not end with newline
235
- if csum and "text" not in out[csum]:
236
- out[csum]["text"] = " ".join(temp_lines).strip()
237
-
238
- return out