Languages: Indonesian
License: cc-by-4.0

holylovenia committed (verified)
Commit: a8906c4 · Parent(s): cda1c92

Upload ind_proner.py with huggingface_hub

Files changed (1):
  1. ind_proner.py +191 -0
ind_proner.py ADDED
@@ -0,0 +1,191 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from pathlib import Path
from typing import Dict, List, Tuple

import datasets

from seacrowd.utils import schemas
from seacrowd.utils.common_parser import load_conll_data
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """\
@INPROCEEDINGS{9212879,
  author={Akmal, Muhammad and Romadhony, Ade},
  booktitle={2020 International Conference on Data Science and Its Applications (ICoDSA)},
  title={Corpus Development for Indonesian Product Named Entity Recognition Using Semi-supervised Approach},
  year={2020},
  volume={},
  number={},
  pages={1-5},
  keywords={Feature extraction;Labeling;Buildings;Semisupervised learning;Training data;Text recognition;Manuals;proner;semi-supervised learning;crf},
  doi={10.1109/ICoDSA50139.2020.9212879}
}
"""

_DATASETNAME = "ind_proner"

_DESCRIPTION = """\
Indonesian PRONER is a corpus for Indonesian product named entity recognition. It contains data that was labeled
manually and data that was labeled automatically through a semi-supervised learning approach using conditional
random fields (CRF).
"""

_HOMEPAGE = "https://github.com/dziem/proner-labeled-text"

_LANGUAGES = {"ind": "id"}

_LANGUAGE_CODES = list(_LANGUAGES.values())

_LICENSE = Licenses.CC_BY_4_0.value

_LOCAL = False

_URLS = {
    "automatic": "https://raw.githubusercontent.com/dziem/proner-labeled-text/master/automatically_labeled.tsv",
    "manual": "https://raw.githubusercontent.com/dziem/proner-labeled-text/master/manually_labeled.tsv",
}

_ANNOTATION_TYPES = list(_URLS.keys())
_ANNOTATION_IDXS = {"l1": 0, "l2": 1}
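# Tags in the source TSVs may join two annotation levels with "|" (illustrative
# example: "B-PRO|B-BRA"); _ANNOTATION_IDXS maps the "l1"/"l2" subsets to the
# index selected in _extract_label below.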

_SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"

logger = datasets.logging.get_logger(__name__)


class IndPRONERDataset(datasets.GeneratorBasedBuilder):
    """
    Indonesian PRONER is a product named entity recognition dataset from https://github.com/dziem/proner-labeled-text.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = (
        [
            SEACrowdConfig(
                name=f"{_DATASETNAME}_{annotation_type}_source",
                version=datasets.Version(_SOURCE_VERSION),
                description=f"{_DATASETNAME}_{annotation_type} source schema",
                schema="source",
                subset_id=f"{_DATASETNAME}_{annotation_type}",
            )
            for annotation_type in _ANNOTATION_TYPES
        ]
        + [
            SEACrowdConfig(
                name=f"{_DATASETNAME}_{annotation_type}_l1_seacrowd_seq_label",
                version=datasets.Version(_SEACROWD_VERSION),
                description=f"{_DATASETNAME}_{annotation_type}_l1 SEACrowd schema",
                schema="seacrowd_seq_label",
                subset_id=f"{_DATASETNAME}_{annotation_type}_l1",
            )
            for annotation_type in _ANNOTATION_TYPES
        ]
        + [
            SEACrowdConfig(
                name=f"{_DATASETNAME}_{annotation_type}_l2_seacrowd_seq_label",
                version=datasets.Version(_SEACROWD_VERSION),
                description=f"{_DATASETNAME}_{annotation_type}_l2 SEACrowd schema",
                schema="seacrowd_seq_label",
                subset_id=f"{_DATASETNAME}_{annotation_type}_l2",
            )
            for annotation_type in _ANNOTATION_TYPES
        ]
    )
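    # With _ANNOTATION_TYPES = ["automatic", "manual"], the configs above expand to:
    # ind_proner_{automatic,manual}_source, ind_proner_{automatic,manual}_l1_seacrowd_seq_label,
    # and ind_proner_{automatic,manual}_l2_seacrowd_seq_label.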

    label_classes = [
        "B-PRO",
        "B-BRA",
        "B-TYP",
        "I-PRO",
        "I-BRA",
        "I-TYP",
        "O",
    ]

    def _extract_label(self, text: str, idx: int) -> str:
        split = text.split("|")
        if len(split) > 1 and idx != -1:
            return split[idx]
        else:
            return text
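    # Illustrative behavior: _extract_label("B-PRO|B-BRA", 0) == "B-PRO",
    # _extract_label("B-PRO|B-BRA", 1) == "B-BRA"; single-level tags such as "O",
    # and all tags when idx == -1, are returned unchanged.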

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(datasets.Value("string")),
                }
            )
        elif self.config.schema == "seacrowd_seq_label":
            features = schemas.seq_label_features(label_names=self.label_classes)

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """
        Returns SplitGenerators.
        """
        # subset_id has the form "ind_proner_<annotation_type>[_<level>]",
        # so index 2 is the annotation type ("automatic" or "manual").
        annotation_type = self.config.subset_id.split("_")[2]
        path = dl_manager.download_and_extract(_URLS[annotation_type])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": path,
                    "split": "train",
                },
            )
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """
        Yields examples as (key, example) tuples.
        """
        # Determine which annotation level ("l1" or "l2") to extract, if any;
        # -1 means the raw tag is kept as-is (source schema).
        label_idx = -1
        subset_id = self.config.subset_id.split("_")
        if len(subset_id) > 3:
            if subset_id[3] in _ANNOTATION_IDXS:
                label_idx = _ANNOTATION_IDXS[subset_id[3]]

        idx = 0
        conll_dataset = load_conll_data(filepath)
        if self.config.schema == "source":
            for _, row in enumerate(conll_dataset):
                x = {
                    "id": str(idx),
                    "tokens": row["sentence"],
                    "ner_tags": [self._extract_label(label, label_idx) for label in row["label"]],
                }
                yield idx, x
                idx += 1
        elif self.config.schema == "seacrowd_seq_label":
            for _, row in enumerate(conll_dataset):
                x = {
                    "id": str(idx),
                    "tokens": row["sentence"],
                    "labels": [self._extract_label(label, label_idx) for label in row["label"]],
                }
                yield idx, x
                idx += 1
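
A minimal usage sketch, not part of the committed script: it assumes the seacrowd utilities are installed, the script is saved locally as ind_proner.py, and a datasets version that still supports script-based loading. Config names come from BUILDER_CONFIGS above.

import datasets

# Load the manually labeled subset in the source schema.
ds = datasets.load_dataset(
    "ind_proner.py",
    name="ind_proner_manual_source",
    trust_remote_code=True,
)
print(ds["train"][0])  # e.g. {"id": "0", "tokens": [...], "ner_tags": [...]}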