README added
- README.md +13 -0
- create_splits.py +175 -0
README.md
ADDED
@@ -0,0 +1,13 @@
# NER for Icelandic - MIM-GOLD-NER splits

## MIM-GOLD-NER

The original MIM-GOLD-NER data is found at http://hdl.handle.net/20.500.12537/42

This repository packages the data for use with the Datasets library from Hugging Face.
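Once published on the Hugging Face Hub, the splits can be loaded in the usual way. A minimal sketch, assuming a placeholder repository id (substitute the actual Hub path of this dataset):

```python
from datasets import load_dataset

# "username/mim-gold-ner" is a placeholder, not the published repository id.
dataset = load_dataset("username/mim-gold-ner")
print(dataset["train"][0])
```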

## Splits

The original data does not ship with train, dev and test splits, so they had to be created. Sentences are shuffled and assigned greedily, per source file, so that each split contains roughly its target share of every entity class. See `create_splits.py` for how that was done.
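For reference, the corpus files (and the emitted `train.txt`, `dev.txt` and `test.txt`) use a CoNLL-style two-column format: one token and one tag per line, with blank lines separating sentences. The tags below are illustrative only:

```
Jón B-Person
fór O
til O
Reykjavíkur B-Location
. O
```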
create_splits.py
ADDED
@@ -0,0 +1,175 @@
#
# Create train/dev/test splits for MIM-GOLD-NER.
# - we want data from all documents
# - we want data from all classes
#

import pprint
import random
from collections import defaultdict

random.seed(1)

file_names = [
    "adjudications.txt",
    "blog.txt",
    "books.txt",
    "emails.txt",
    "fbl.txt",
    "laws.txt",
    "mbl.txt",
    "radio_tv_news.txt",
    "school_essays.txt",
    "scienceweb.txt",
    "webmedia.txt",
    "websites.txt",
    "written-to-be-spoken.txt",
]


def read_file(file_name):
    """Read a two-column (token, tag) file; blank lines separate sentences."""
    data = []
    sentence = []
    with open(file_name) as fh:
        for line in fh:
            if not line.strip():
                if sentence:
                    data.append(sentence)
                    sentence = []
                continue
            w, t = line.strip().split()
            sentence.append((w, t))
    if sentence:  # in case the file does not end with a blank line
        data.append(sentence)
    return data


def calc_stats(data):
    """Count sentences and the token frequency of every tag."""
    stats = defaultdict(int)
    for sent in data:
        stats["n_sentences"] += 1
        for token, label in sent:
            stats[label] += 1
    return stats


def get_total_stats():
    """Aggregate the tag statistics over all corpus files."""
    total_stats = defaultdict(int)
    for file_name in file_names:
        stats = calc_stats(read_file("data/" + file_name))
        for k, v in stats.items():
            total_stats[k] += v
    return total_stats


def check_if_not_done(split_stats, file_stats, target):
    """True while any count in split_stats is still below its target share."""
    for k, v in file_stats.items():
        if v * target > split_stats[k]:
            return True
    return False


def wants_sentence(split_stats, file_stats, target, sent):
    """Greedy criterion: take the sentence if it carries an entity tag that is
    still under the split's target share (with a slack of 5 tokens), or if the
    split is still short of its target sentence count."""
    for w, tag in sent:
        if tag == "O":
            continue
        if split_stats[tag] < target * file_stats[tag] - 5:
            return True
    return split_stats["n_sentences"] < target * file_stats["n_sentences"] - 5


def add_sentence(split, split_stats, sent):
    """Append a sentence to a split and update the split's statistics."""
    split.append(sent)
    split_stats["n_sentences"] += 1
    for w, t in sent:
        split_stats[t] += 1


def create_splits(train=0.8, test=0.1, dev=0.1):
    train_data = []
    test_data = []
    dev_data = []

    for file_name in file_names:
        train_stats = defaultdict(int)
        test_stats = defaultdict(int)
        dev_stats = defaultdict(int)

        d = read_file("data/" + file_name)
        stats = calc_stats(d)
        random.shuffle(d)

        file_train = []
        file_test = []
        file_dev = []

        # Fill test first, then dev; everything else goes to train.
        for sent in d:
            if check_if_not_done(test_stats, stats, test):
                if wants_sentence(test_stats, stats, test, sent):
                    add_sentence(file_test, test_stats, sent)
                else:
                    # Fallback to train; without it, sentences considered for
                    # test but not selected would be dropped silently and the
                    # assertion below would fail.
                    add_sentence(file_train, train_stats, sent)
            elif check_if_not_done(dev_stats, stats, dev):
                if wants_sentence(dev_stats, stats, dev, sent):
                    add_sentence(file_dev, dev_stats, sent)
                else:
                    add_sentence(file_train, train_stats, sent)
            else:
                add_sentence(file_train, train_stats, sent)

        # Every sentence must end up in exactly one split.
        assert len(d) == len(file_train) + len(file_dev) + len(file_test)

        train_data += file_train
        test_data += file_test
        dev_data += file_dev

    return train_data, test_data, dev_data


def write_split(path, sentences):
    """Write a split back out in the same two-column format."""
    with open(path, "w") as outf:
        for sent in sentences:
            for w, t in sent:
                outf.write(f"{w} {t}\n")
            outf.write("\n")


train, test, dev = create_splits()

print("---- total -----")
pprint.pprint(get_total_stats())
print("----- test ----")
pprint.pprint(calc_stats(test))
print("----- dev ----")
pprint.pprint(calc_stats(dev))
print("----- train ----")
pprint.pprint(calc_stats(train))

write_split("train.txt", train)
write_split("test.txt", test)
write_split("dev.txt", dev)