Datasets: solving URL and local conflicts
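Background for the change: on the Hugging Face Hub, /blob/main/ URLs address the web file viewer, while /resolve/main/ URLs serve the raw file that dl_manager.download_and_extract() needs. The sketch below only illustrates the expected URL shape via huggingface_hub; the loading script itself builds its URLs from the _BASE_URL constant added in this commit.

from huggingface_hub import hf_hub_url

# Canonical raw-file URL for a file in a dataset repo; note the /resolve/main/
# path, which matches the _BASE_URL introduced in this commit.
url = hf_hub_url(repo_id="EMBO/sd-nlp", filename="sd_panels.zip", repo_type="dataset")
print(url)  # https://huggingface.co/datasets/EMBO/sd-nlp/resolve/main/sd_panels.zip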
sd-nlp.py CHANGED

@@ -23,7 +23,10 @@ from __future__ import absolute_import, division, print_function
 import json
 import pdb
 import datasets
+import os
+import logger
 
+_BASE_URL = "https://huggingface.co/datasets/EMBO/sd-nlp/resolve/main/"
 
 class SourceDataNLP(datasets.GeneratorBasedBuilder):
     """SourceDataNLP provides datasets to train NLP tasks in cell and molecular biology."""
@@ -67,15 +70,14 @@ class SourceDataNLP(datasets.GeneratorBasedBuilder):
 
     _LICENSE = "CC-BY 4.0"
 
-    _URLS = {
-        "NER": "https://huggingface.co/datasets/EMBO/sd-nlp/blob/main/sd_panels.zip",
-        "ROLES": "https://huggingface.co/datasets/EMBO/sd-nlp/blob/main/sd_panels.zip",
-        "BORING": "https://huggingface.co/datasets/EMBO/sd-nlp/blob/main/sd_panels.zip",
-        "PANELIZATION": "https://huggingface.co/datasets/EMBO/sd-nlp/blob/main/sd_figs.zip",
-    }
-
     VERSION = datasets.Version("0.0.1")
 
+    _URLS = {
+        "NER": f"{_BASE_URL}sd_panels.zip",
+        "ROLES": f"{_BASE_URL}sd_panels.zip",
+        "BORING": f"{_BASE_URL}sd_panels.zip",
+        "PANELIZATION": f"{_BASE_URL}sd_figs.zip",
+    }
     BUILDER_CONFIGS = [
         datasets.BuilderConfig(name="NER", version="0.0.1", description="Dataset for entity recognition"),
         datasets.BuilderConfig(name="GENEPROD_ROLES", version="0.0.1", description="Dataset for semantic roles."),
@@ -157,47 +159,41 @@ class SourceDataNLP(datasets.GeneratorBasedBuilder):
     def _split_generators(self, dl_manager: datasets.DownloadManager):
         """Returns SplitGenerators.
         Uses local files if a data_dir is specified. Otherwise downloads the files from their official url."""
-        if self.config.data_dir:
-            data_dir = self.config.data_dir
+        url = self._URLS[self.config.name]
+        data_dir = dl_manager.download_and_extract(url)
+        if self.config.name in ["NER", "GENEPROD_ROLES", "SMALL_MOL_ROLES", "BORING"]:
+            data_dir += "/220304_sd_panels"
+        elif self.config.name == "PANELIZATION":
+            data_dir += "/sd_figs"
         else:
-            url = self._URLS[self.config.name]
-            data_dir = dl_manager.download_and_extract(url)
-            if self.config.name in ["NER", "GENEPROD_ROLES", "SMALL_MOL_ROLES", "BORING"]:
-                data_dir += "/sd_panels"
-            elif self.config.name == "PANELIZATION":
-                data_dir += "/sd_figs"
-            else:
-                raise ValueError(f"unkonwn config name: {self.config.name}")
+            raise ValueError(f"unkonwn config name: {self.config.name}")
+        print(data_dir)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": data_dir + "/train.jsonl",
-                    "split": "train",
-                },
+                    "filepath": data_dir + "/train.jsonl"},
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
-                    "filepath": data_dir + "/test.jsonl",
-                    "split": "test"},
+                    "filepath": data_dir + "/test.jsonl"},
            ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 gen_kwargs={
-                    "filepath": data_dir + "/eval.jsonl",
-                    "split": "eval",
-                },
+                    "filepath": data_dir + "/eval.jsonl"},
             ),
         ]
 
-    def _generate_examples(self, filepath, split):
+    def _generate_examples(self, filepath):
         """Yields examples. This method will receive as arguments the `gen_kwargs` defined in the previous `_split_generators` method.
         It is in charge of opening the given file and yielding (key, example) tuples from the dataset
         The key is not important, it's more here for legacy reason (legacy from tfds)"""
 
         with open(filepath, encoding="utf-8") as f:
+            # logger.info("⏳ Generating examples from = %s", filepath)
             for id_, row in enumerate(f):
                 data = json.loads(row)
                 if self.config.name == "NER":