Commit a3d7896
Parent(s): c1be960
initial release

Files changed:
- README.md +29 -0
- config.json +0 -0
- maker.py +62 -0
- merges.txt +0 -0
- pytorch_model.bin +3 -0
- special_tokens_map.json +51 -0
- tokenizer.json +0 -0
- tokenizer_config.json +58 -0
- ud.py +81 -0
- vocab.json +0 -0
README.md
ADDED
@@ -0,0 +1,29 @@
+---
+language:
+- "uk"
+tags:
+- "ukrainian"
+- "token-classification"
+- "pos"
+- "dependency-parsing"
+base_model: benjamin/roberta-large-wechsel-ukrainian
+datasets:
+- "universal_dependencies"
+license: "mit"
+pipeline_tag: "token-classification"
+---
+
+# roberta-large-wechsel-ukrainian-ud-goeswith
+
+## Model Description
+
+This is a RoBERTa model for POS-tagging and dependency-parsing (using `goeswith` for subwords), derived from [roberta-large-wechsel-ukrainian](https://huggingface.co/benjamin/roberta-large-wechsel-ukrainian).
+
+## How to Use
+
+```py
+from transformers import pipeline
+nlp=pipeline("universal-dependencies","KoichiYasuoka/roberta-large-wechsel-ukrainian-ud-goeswith",trust_remote_code=True,aggregation_strategy="simple")
+print(nlp("Біжать алеї звуків, саджених у гами."))
+```
+
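Since `trust_remote_code=True` loads the custom `UniversalDependenciesPipeline` from `ud.py` (added below), the call in the README example returns a CoNLL-U string rather than the usual list of token dictionaries. As a minimal sketch (not part of the committed README, reusing the `nlp` object from above), the output can be unpacked into its ten tab-separated columns:

```py
# Sketch only: split the CoNLL-U string returned by the pipeline above.
conllu = nlp("Біжать алеї звуків, саджених у гами.")
for row in conllu.splitlines():
    if row and not row.startswith("#"):  # skip the "# text = ..." comment line
        cols = row.split("\t")           # ID FORM LEMMA UPOS XPOS FEATS HEAD DEPREL DEPS MISC
        print(cols[0], cols[1], cols[3], cols[6], cols[7])
```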
config.json
ADDED
The diff for this file is too large to render.
See raw diff
maker.py
ADDED
@@ -0,0 +1,62 @@
+#! /usr/bin/python3
+src="benjamin/roberta-large-wechsel-ukrainian"
+tgt="KoichiYasuoka/roberta-large-wechsel-ukrainian-ud-goeswith"
+url="https://github.com/UniversalDependencies/UD_Ukrainian-"
+import os
+for e in ["IU","ParlaMint"]:
+  u=url+e
+  d=os.path.basename(u)
+  os.system("test -d "+d+" || git clone --depth=1 "+u)
+os.system("for F in train dev test ; do cat UD_Ukrainian-*/*-$F.conllu > $F.conllu ; done")
+class UDgoeswithDataset(object):
+  def __init__(self,conllu,tokenizer):
+    self.ids,self.tags,label=[],[],set()
+    with open(conllu,"r",encoding="utf-8") as r:
+      cls,sep,msk=tokenizer.cls_token_id,tokenizer.sep_token_id,tokenizer.mask_token_id
+      dep,c,m="-|_|dep",[],False
+      for s in r:
+        t=s.split("\t")
+        if len(t)==10:
+          if t[0].isdecimal():
+            i=int(t[0])
+            if m:
+              t[1]=" "+t[1]
+            c.append(t)
+            m=t[9].find("SpaceAfter=No")<0
+        elif c!=[]:
+          v=tokenizer([t[1] for t in c],add_special_tokens=False)["input_ids"]
+          for i in range(len(v)-1,-1,-1):
+            for j in range(1,len(v[i])):
+              c.insert(i+1,[c[i][0],"_","_","X","_","_",c[i][0],"goeswith","_","_"])
+          y=["0"]+[t[0] for t in c]
+          h=[i if t[6]=="0" else y.index(t[6]) for i,t in enumerate(c,1)]
+          p,v=[t[3]+"|"+t[5]+"|"+t[7] for t in c],sum(v,[])
+          if len(v)<tokenizer.model_max_length-3:
+            self.ids.append([cls]+v+[sep])
+            self.tags.append([dep]+p+[dep])
+            label=set(sum([self.tags[-1],list(label)],[]))
+            for i,k in enumerate(v):
+              self.ids.append([cls]+v[0:i]+[msk]+v[i+1:]+[sep,k])
+              self.tags.append([dep]+[t if h[j]==i+1 else dep for j,t in enumerate(p)]+[dep,dep])
+          c,m=[],False
+    self.label2id={l:i for i,l in enumerate(sorted(label))}
+  def __call__(*args):
+    label=set(sum([list(t.label2id) for t in args],[]))
+    lid={l:i for i,l in enumerate(sorted(label))}
+    for t in args:
+      t.label2id=lid
+    return lid
+  __len__=lambda self:len(self.ids)
+  __getitem__=lambda self,i:{"input_ids":self.ids[i],"labels":[self.label2id[t] for t in self.tags[i]]}
+from transformers import AutoTokenizer,AutoConfig,AutoModelForTokenClassification,DataCollatorForTokenClassification,TrainingArguments,Trainer
+tkz=AutoTokenizer.from_pretrained(src)
+trainDS=UDgoeswithDataset("train.conllu",tkz)
+devDS=UDgoeswithDataset("dev.conllu",tkz)
+testDS=UDgoeswithDataset("test.conllu",tkz)
+lid=trainDS(devDS,testDS)
+cfg=AutoConfig.from_pretrained(src,num_labels=len(lid),label2id=lid,id2label={i:l for l,i in lid.items()})
+arg=TrainingArguments(num_train_epochs=3,per_device_train_batch_size=8,output_dir=tgt,overwrite_output_dir=True,save_total_limit=2,eval_strategy="epoch",learning_rate=5e-05,warmup_ratio=0.1,save_safetensors=False)
+trn=Trainer(args=arg,data_collator=DataCollatorForTokenClassification(tkz),model=AutoModelForTokenClassification.from_pretrained(src,config=cfg),train_dataset=trainDS,eval_dataset=devDS)
+trn.train()
+trn.save_model(tgt)
+tkz.save_pretrained(tgt)
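The key idea in `UDgoeswithDataset` is that each sentence yields one plain example, whose per-token labels are `UPOS|FEATS|DEPREL` triples, plus one extra example per token position: that position is replaced by the mask token, its original subword id is appended after `[SEP]`, and the labels then mark exactly the tokens whose head is the masked position. A toy sketch (hypothetical token ids, mirroring the loop in `__init__`) of how the masked variants are built:

```py
# Sketch only: masked-variant construction for a 3-subword sentence.
cls, sep, msk = 0, 2, 4          # hypothetical special-token ids
v = [10, 11, 12]                 # hypothetical subword ids
for i, k in enumerate(v):
    print([cls] + v[0:i] + [msk] + v[i+1:] + [sep, k])
# [0, 4, 11, 12, 2, 10]
# [0, 10, 4, 12, 2, 11]
# [0, 10, 11, 4, 2, 12]
```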
merges.txt
ADDED
The diff for this file is too large to render.
See raw diff
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4e39cc1d902591b19678e5b60142883e49d6d74c4bc229eb7d144bb725cd81e1
+size 1440696742
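The weights file is stored as a Git LFS pointer: the three lines record the LFS spec version, the SHA-256 object id, and the size in bytes. A hedged sketch (assuming the file has already been downloaded locally) for verifying a copy against the pointer:

```py
# Sketch only: check a downloaded pytorch_model.bin against the LFS pointer above.
import hashlib, os
path = "pytorch_model.bin"                  # assumed local path
assert os.path.getsize(path) == 1440696742  # size from the pointer
h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
print(h.hexdigest() == "4e39cc1d902591b19678e5b60142883e49d6d74c4bc229eb7d144bb725cd81e1")
```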
special_tokens_map.json
ADDED
@@ -0,0 +1,51 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "cls_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "mask_token": {
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<pad>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "sep_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
tokenizer_config.json
ADDED
@@ -0,0 +1,58 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<pad>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "3": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "4": {
+      "content": "<mask>",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": false,
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "errors": "replace",
+  "extra_special_tokens": {},
+  "mask_token": "<mask>",
+  "model_max_length": 512,
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "tokenizer_class": "RobertaTokenizer",
+  "trim_offsets": true,
+  "unk_token": "<unk>"
+}
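Once published, the tokenizer files above can be loaded back and the configured limits inspected; a minimal sketch (repo id as used in this commit):

```py
# Sketch only: reload the tokenizer saved by maker.py and inspect its config.
from transformers import AutoTokenizer
tkz = AutoTokenizer.from_pretrained("KoichiYasuoka/roberta-large-wechsel-ukrainian-ud-goeswith")
print(type(tkz).__name__, tkz.model_max_length, tkz.mask_token)  # expects 512 and <mask>
```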
ud.py
ADDED
@@ -0,0 +1,81 @@
+import numpy
+from transformers import TokenClassificationPipeline
+
+class UniversalDependenciesPipeline(TokenClassificationPipeline):
+  def _forward(self,model_inputs):
+    import torch
+    v=model_inputs["input_ids"][0].tolist()
+    with torch.no_grad():
+      e=self.model(input_ids=torch.tensor([v[0:i]+[self.tokenizer.mask_token_id]+v[i+1:]+[j] for i,j in enumerate(v[1:-1],1)],device=self.device))
+    return {"logits":e.logits[:,1:-2,:],**model_inputs}
+  def check_model_type(self,supported_models):
+    pass
+  def postprocess(self,model_outputs,**kwargs):
+    if "logits" not in model_outputs:
+      return "".join(self.postprocess(x,**kwargs) for x in model_outputs)
+    e=model_outputs["logits"].numpy()
+    r=[1 if i==0 else -1 if j.endswith("|root") else 0 for i,j in sorted(self.model.config.id2label.items())]
+    e+=numpy.where(numpy.add.outer(numpy.identity(e.shape[0]),r)==0,0,-numpy.inf)
+    g=self.model.config.label2id["X|_|goeswith"]
+    r=numpy.tri(e.shape[0])
+    for i in range(e.shape[0]):
+      for j in range(i+2,e.shape[1]):
+        r[i,j]=r[i,j-1] if numpy.argmax(e[i,j-1])==g else 1
+    e[:,:,g]+=numpy.where(r==0,0,-numpy.inf)
+    m,p=numpy.max(e,axis=2),numpy.argmax(e,axis=2)
+    h=self.chu_liu_edmonds(m)
+    z=[i for i,j in enumerate(h) if i==j]
+    if len(z)>1:
+      k,h=z[numpy.argmax(m[z,z])],numpy.min(m)-numpy.max(m)
+      m[:,z]+=[[0 if j in z and (i!=j or i==k) else h for i in z] for j in range(m.shape[0])]
+      h=self.chu_liu_edmonds(m)
+    v=[(s,e) for s,e in model_outputs["offset_mapping"][0].tolist() if s<e]
+    q=[self.model.config.id2label[p[j,i]].split("|") for i,j in enumerate(h)]
+    if "aggregation_strategy" in kwargs and kwargs["aggregation_strategy"]!="none":
+      for i,j in reversed(list(enumerate(q[1:],1))):
+        if j[-1]=="goeswith" and set([t[-1] for t in q[h[i]+1:i+1]])=={"goeswith"}:
+          h=[b if i>b else b-1 for a,b in enumerate(h) if i!=a]
+          v[i-1]=(v[i-1][0],v.pop(i)[1])
+          q.pop(i)
+        elif v[i-1][1]>v[i][0]:
+          h=[b if i>b else b-1 for a,b in enumerate(h) if i!=a]
+          v[i-1]=(v[i-1][0],v.pop(i)[1])
+          q.pop(i)
+    t=model_outputs["sentence"].replace("\n"," ")
+    for i,(s,e) in reversed(list(enumerate(v))):
+      w=t[s:e]
+      if w.startswith(" "):
+        j=len(w)-len(w.lstrip())
+        w=w.lstrip()
+        v[i]=(v[i][0]+j,v[i][1])
+      if w.endswith(" "):
+        j=len(w)-len(w.rstrip())
+        w=w.rstrip()
+        v[i]=(v[i][0],v[i][1]-j)
+      if w.strip()=="":
+        h=[b if i>b else b-1 for a,b in enumerate(h) if i!=a]
+        v.pop(i)
+        q.pop(i)
+    u="# text = "+t+"\n"
+    for i,(s,e) in enumerate(v):
+      u+="\t".join([str(i+1),t[s:e],"_",q[i][0],"_","|".join(q[i][1:-1]),str(0 if h[i]==i else h[i]+1),q[i][-1],"_","_" if i+1<len(v) and e<v[i+1][0] else "SpaceAfter=No"])+"\n"
+    return u+"\n"
+  def chu_liu_edmonds(self,matrix):
+    h=numpy.argmax(matrix,axis=0)
+    x=[-1 if i==j else j for i,j in enumerate(h)]
+    for b in [lambda x,i,j:-1 if i not in x else x[i],lambda x,i,j:-1 if j<0 else x[j]]:
+      y=[]
+      while x!=y:
+        y=list(x)
+        for i,j in enumerate(x):
+          x[i]=b(x,i,j)
+    if max(x)<0:
+      return h
+    y,x=[i for i,j in enumerate(x) if j==max(x)],[i for i,j in enumerate(x) if j<max(x)]
+    z=matrix-numpy.max(matrix,axis=0)
+    m=numpy.block([[z[x,:][:,x],numpy.max(z[x,:][:,y],axis=1).reshape(len(x),1)],[numpy.max(z[y,:][:,x],axis=0),numpy.max(z[y,y])]])
+    k=[j if i==len(x) else x[j] if j<len(x) else y[numpy.argmax(z[y,x[i]])] for i,j in enumerate(self.chu_liu_edmonds(m))]
+    h=[j if i in y else k[x.index(i)] for i,j in enumerate(h)]
+    i=y[numpy.argmax(z[x[k[-1]],y] if k[-1]<len(x) else z[y,y])]
+    h[i]=x[k[-1]] if k[-1]<len(x) else i
+    return h
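At inference time `_forward` runs one masked copy of the sentence per token (mirroring the training data built by `maker.py`), and `postprocess` turns the resulting score matrix into a tree with `chu_liu_edmonds`, where `matrix[i,j]` scores token `i` heading token `j` and a diagonal maximum marks the root. Since the method only uses numpy, it can be exercised in isolation; a toy sketch (hypothetical scores, with the class above in scope, and the instance created without `__init__` purely for the demonstration):

```py
# Sketch only: token 0 should come out as root, heading token 1, which heads token 2.
import numpy
scores = numpy.array([[9., 8., 1.],
                      [2., 1., 7.],
                      [1., 2., 3.]])
pipe = UniversalDependenciesPipeline.__new__(UniversalDependenciesPipeline)
print(pipe.chu_liu_edmonds(scores))  # [0 0 1] -> heads of tokens 0, 1, 2
```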
vocab.json
ADDED
The diff for this file is too large to render.
See raw diff