KoichiYasuoka committed
Commit b4c4140 · 1 Parent(s): efb6584

initial release

Files changed (9)
  1. README.md +33 -0
  2. config.json +0 -0
  3. maker.py +99 -0
  4. pytorch_model.bin +3 -0
  5. special_tokens_map.json +37 -0
  6. tokenizer.json +0 -0
  7. tokenizer_config.json +68 -0
  8. ud.py +121 -0
  9. vocab.txt +0 -0
README.md ADDED
@@ -0,0 +1,33 @@
+ ---
+ language:
+ - "lzh"
+ tags:
+ - "classical chinese"
+ - "literary chinese"
+ - "ancient chinese"
+ - "token-classification"
+ - "pos"
+ - "dependency-parsing"
+ base_model: KoichiYasuoka/modernbert-large-classical-chinese
+ datasets:
+ - "universal_dependencies"
+ license: "apache-2.0"
+ pipeline_tag: "token-classification"
+ widget:
+ - text: "孟子見梁惠王"
+ ---
+
+ # modernbert-large-classical-chinese-ud-triangular
+
+ ## Model Description
+
+ This is a ModernBERT model pretrained on Classical Chinese texts and fine-tuned for POS-tagging and dependency-parsing (using `goeswith` for subwords), derived from [modernbert-large-classical-chinese](https://huggingface.co/KoichiYasuoka/modernbert-large-classical-chinese) and [UD_Classical_Chinese-Kyoto](https://github.com/UniversalDependencies/UD_Classical_Chinese-Kyoto).
+
+ ## How to Use
+
+ ```py
+ from transformers import pipeline
+ nlp=pipeline("universal-dependencies","KoichiYasuoka/modernbert-large-classical-chinese-ud-triangular",trust_remote_code=True,aggregation_strategy="simple")
+ print(nlp("孟子見梁惠王"))
+ ```
+
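The pipeline defined in `ud.py` (shown further below) returns its analysis as a CoNLL-U formatted string rather than a list of dicts. A minimal sketch of how a caller might split that string into its ten columns, assuming the output produced by `postprocess()`:

```py
# Sketch only: parse the CoNLL-U string returned by the pipeline above
# into (id, form, upos, head, deprel) tuples.
from transformers import pipeline

nlp=pipeline("universal-dependencies","KoichiYasuoka/modernbert-large-classical-chinese-ud-triangular",trust_remote_code=True,aggregation_strategy="simple")
conllu=nlp("孟子見梁惠王")
for line in conllu.splitlines():
  if line and not line.startswith("#"):
    cols=line.split("\t")  # ID FORM LEMMA UPOS XPOS FEATS HEAD DEPREL DEPS MISC
    print(cols[0],cols[1],cols[3],cols[6],cols[7])
```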
config.json ADDED
The diff for this file is too large to render. See raw diff
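config.json is too large to render here presumably because it stores the full `label2id`/`id2label` tables built by `maker.py` below. A hedged sketch of inspecting that label inventory without downloading the weights (`AutoConfig` fetches only the config file; `trust_remote_code=True` mirrors its use in `maker.py`):

```py
# Sketch: peek at the label set stored in config.json.
from transformers import AutoConfig

cfg=AutoConfig.from_pretrained("KoichiYasuoka/modernbert-large-classical-chinese-ud-triangular",trust_remote_code=True)
print(cfg.num_labels)                    # size of the label inventory
print(list(cfg.id2label.values())[:10])  # labels look like "UPOS|o" / "UPOS|x", optionally "|FEATS" and "|l-"/"|r-"+deprel
```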
 
maker.py ADDED
@@ -0,0 +1,99 @@
+ #! /usr/bin/python3
+ src="KoichiYasuoka/modernbert-large-classical-chinese"
+ tgt="KoichiYasuoka/modernbert-large-classical-chinese-ud-triangular"
+ url="https://github.com/UniversalDependencies/UD_Classical_Chinese-Kyoto"
+ import os
+ d=os.path.basename(url)
+ os.system("test -d "+d+" || git clone --depth=1 "+url)
+ os.system("for F in train dev test ; do cp "+d+"/*-$F.conllu $F.conllu ; done")
+ class UDTriangularDataset(object):
+   def __init__(self,conllu,tokenizer):
+     self.conllu=open(conllu,"r",encoding="utf-8")
+     self.tokenizer=tokenizer
+     self.seeks=[0]
+     label=set(["SYM|x","X|x"])
+     dep=set(["X|x|r-goeswith"])
+     s=self.conllu.readline()
+     while s!="":
+       if s=="\n":
+         self.seeks.append(self.conllu.tell())
+       else:
+         w=s.split("\t")
+         if len(w)==10:
+           if w[0].isdecimal():
+             p=w[3]
+             q="" if w[5]=="_" else "|"+w[5]
+             d=("|" if w[6]=="0" else "|l-" if int(w[0])<int(w[6]) else "|r-")+w[7]
+             label.add(p+"|o"+q)
+             label.add(p+"|x"+q)
+             dep.add(p+"|o"+q+d)
+             dep.add(p+"|x"+q+d)
+       s=self.conllu.readline()
+     lid={l:i for i,l in enumerate(sorted(label))}
+     for i,d in enumerate(sorted(dep),len(lid)):
+       lid[d]=i
+     self.label2id=lid
+   def __call__(*args):
+     lid={l:i for i,l in enumerate(sorted(set(sum([list(t.label2id) for t in args],[]))))}
+     for t in args:
+       t.label2id=lid
+     return lid
+   def __del__(self):
+     self.conllu.close()
+   __len__=lambda self:len(self.seeks)-1
+   def __getitem__(self,i):
+     s=self.seeks[i]
+     self.conllu.seek(s)
+     c,t=[],[""]
+     while t[0]!="\n":
+       t=self.conllu.readline().split("\t")
+       if len(t)==10 and t[0].isdecimal():
+         c.append(t)
+     v=self.tokenizer([t[1] for t in c],add_special_tokens=False)["input_ids"]
+     for i in range(len(v)-1,-1,-1):
+       for j in range(1,len(v[i])):
+         c.insert(i+1,[c[i][0],"_","_","X","_","_",c[i][0],"goeswith","_","_"])
+     y=["0"]+[t[0] for t in c]
+     h=[i if t[6]=="0" else y.index(t[6]) for i,t in enumerate(c,1)]
+     x=["o" if k>i or sum([1 if j==i+1 else 0 for j in h[i+1:]])>0 else "x" for i,k in enumerate(h)]
+     p=[t[3]+"|"+x[i] if t[5]=="_" else t[3]+"|"+x[i]+"|"+t[5] for i,t in enumerate(c)]
+     d=[t[7] if t[6]=="0" else "l-"+t[7] if int(t[0])<int(t[6]) else "r-"+t[7] for t in c]
+     v=sum(v,[])
+     ids=[self.tokenizer.cls_token_id]
+     upos=["SYM|x"]
+     for i,k in enumerate(v):
+       if len(v)<127 or x[i]=="o":
+         ids.append(k)
+         upos.append(p[i]+"|"+d[i] if h[i]==i+1 else p[i])
+         for j in range(i+1,len(v)):
+           ids.append(v[j])
+           upos.append(p[j]+"|"+d[j] if h[j]==i+1 else p[i]+"|"+d[i] if h[i]==j+1 else p[j])
+         ids.append(self.tokenizer.sep_token_id)
+         upos.append("SYM|x")
+     i=0
+     while len(ids)>8192:
+       try:
+         i=ids.index(self.tokenizer.sep_token_id,ids.index(self.tokenizer.sep_token_id,i+1)+1)-1
+       except:
+         break
+       while len(ids)>8192 and ids[i]!=self.tokenizer.sep_token_id:
+         if upos[i].endswith("|x"):
+           ids.pop(i)
+           upos.pop(i)
+           i-=1
+         else:
+           break
+     return {"input_ids":ids[:8192],"labels":[self.label2id[p] for p in upos[:8192]]}
+ from transformers import AutoTokenizer,AutoConfig,AutoModelForTokenClassification,DataCollatorForTokenClassification,TrainingArguments,Trainer
+ tkz=AutoTokenizer.from_pretrained(src)
+ trainDS=UDTriangularDataset("train.conllu",tkz)
+ devDS=UDTriangularDataset("dev.conllu",tkz)
+ testDS=UDTriangularDataset("test.conllu",tkz)
+ lid=trainDS(devDS,testDS)
+ cfg=AutoConfig.from_pretrained(src,num_labels=len(lid),label2id=lid,id2label={i:l for l,i in lid.items()},ignore_mismatched_sizes=True,trust_remote_code=True)
+ mdl=AutoModelForTokenClassification.from_pretrained(src,config=cfg,ignore_mismatched_sizes=True,trust_remote_code=True)
+ arg=TrainingArguments(num_train_epochs=3,per_device_train_batch_size=1,dataloader_pin_memory=False,output_dir=tgt,overwrite_output_dir=True,save_total_limit=2,learning_rate=5e-05,warmup_ratio=0.1,save_safetensors=False)
+ trn=Trainer(args=arg,data_collator=DataCollatorForTokenClassification(tkz),model=mdl,train_dataset=trainDS)
+ trn.train()
+ trn.save_model(tgt)
+ tkz.save_pretrained(tgt)
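`UDTriangularDataset.__getitem__` flattens each sentence into a "triangular" sequence: for every kept token i it emits token i, then all following tokens, then `[SEP]`, and the label at the position of a later token j carries the dependency relation (with `l-`/`r-` marking direction) whenever an arc links tokens i and j, and just the POS tag otherwise. A rough sketch of how quickly such sequences grow, assuming one subword per token and no pruning (`triangular_length` is only an illustrative helper, not part of maker.py):

```py
# Illustrative only: length of the triangular input built for an
# n-token sentence when every row is kept.
def triangular_length(n):
  return 1+sum((n-i)+1 for i in range(n))  # [CLS] + per row: remaining tokens + [SEP]

for n in (5,10,126):
  print(n,triangular_length(n))  # quadratic growth, hence the pruning and the 8192 cap above
```

For n=126 this gives 8128 tokens, just under the 8192 cap, which is presumably why the `len(v)<127` check keeps every row for shorter sentences and prunes `"x"` rows only beyond that.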
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bb8e8df139fa73232c28276261dbe9972cb560002cbff876a414c0a849dc04b9
+ size 1485600194
special_tokens_map.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "cls_token": {
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "[MASK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "[PAD]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "[SEP]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "[UNK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,68 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "4": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_basic_tokenize": true,
+   "do_lower_case": false,
+   "extra_special_tokens": {},
+   "mask_token": "[MASK]",
+   "model_input_names": [
+     "input_ids",
+     "attention_mask"
+   ],
+   "model_max_length": 1000000000000000019884624838656,
+   "never_split": [
+     "[CLS]",
+     "[PAD]",
+     "[SEP]",
+     "[UNK]",
+     "[MASK]"
+   ],
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": false,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizerFast",
+   "unk_token": "[UNK]"
+ }
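A quick, hedged sanity check (not part of the repository) that the files above resolve to a `BertTokenizerFast` with the special tokens declared in `special_tokens_map.json`:

```py
# Sketch: load the tokenizer and confirm its special tokens.
from transformers import AutoTokenizer

tkz=AutoTokenizer.from_pretrained("KoichiYasuoka/modernbert-large-classical-chinese-ud-triangular")
print(type(tkz).__name__)  # expected: BertTokenizerFast
print(tkz.cls_token,tkz.sep_token,tkz.pad_token,tkz.unk_token,tkz.mask_token)
print(tkz("孟子見梁惠王",add_special_tokens=False)["input_ids"])
```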
ud.py ADDED
@@ -0,0 +1,121 @@
+ import numpy
+ from transformers import TokenClassificationPipeline
+
+ class UniversalDependenciesPipeline(TokenClassificationPipeline):
+   def __init__(self,**kwargs):
+     super().__init__(**kwargs)
+     x=self.model.config.label2id
+     self.root=numpy.full((len(x)),-numpy.inf)
+     self.left_arc=numpy.full((len(x)),-numpy.inf)
+     self.right_arc=numpy.full((len(x)),-numpy.inf)
+     for k,v in x.items():
+       if k.endswith("|root"):
+         self.root[v]=0
+       elif k.find("|l-")>0:
+         self.left_arc[v]=0
+       elif k.find("|r-")>0:
+         self.right_arc[v]=0
+   def check_model_type(self,supported_models):
+     pass
+   def postprocess(self,model_outputs,**kwargs):
+     import torch
+     if "logits" not in model_outputs:
+       return "".join(self.postprocess(x,**kwargs) for x in model_outputs)
+     m=model_outputs["logits"][0].cpu().numpy()
+     k=numpy.argmax(m,axis=1).tolist()
+     x=[self.model.config.id2label[i].split("|")[1]=="o" for i in k[1:-1]]
+     v=model_outputs["input_ids"][0].tolist()
+     off=model_outputs["offset_mapping"][0].tolist()
+     for i,(s,e) in reversed(list(enumerate(off))):
+       if s<e:
+         d=model_outputs["sentence"][s:e]
+         j=len(d)-len(d.lstrip())
+         if j>0:
+           d=d.lstrip()
+           off[i][0]+=j
+         j=len(d)-len(d.rstrip())
+         if j>0:
+           d=d.rstrip()
+           off[i][1]-=j
+         if d.strip()=="":
+           off.pop(i)
+           v.pop(i)
+           x.pop(i-1)
+     if len(x)<127:
+       x=[True]*len(x)
+     else:
+       w=sum([len(x)-i+1 if b else 0 for i,b in enumerate(x)])+1
+       for i in numpy.argsort(numpy.max(m,axis=1)[1:-1]):
+         if x[i]==False and w+len(x)-i<8192:
+           x[i]=True
+           w+=len(x)-i+1
+     w=[self.tokenizer.cls_token_id]
+     for i,j in enumerate(x):
+       if j:
+         w+=v[i+1:]
+     with torch.no_grad():
+       e=self.model(input_ids=torch.tensor([w]).to(self.device))
+     m=e.logits[0].cpu().numpy()
+     w=len(v)-2
+     e=numpy.full((w,w,m.shape[-1]),m.min())
+     k=1
+     for i in range(w):
+       if x[i]:
+         e[i,i]=m[k]+self.root
+         k+=1
+         for j in range(1,w-i):
+           e[i+j,i]=m[k]+self.left_arc
+           e[i,i+j]=m[k]+self.right_arc
+           k+=1
+         k+=1
+     g=self.model.config.label2id["X|x|r-goeswith"]
+     m,r=numpy.max(e,axis=2),numpy.tri(e.shape[0])
+     for i in range(e.shape[0]):
+       for j in range(i+2,e.shape[1]):
+         r[i,j]=1
+         if numpy.argmax(e[i,j-1])==g and numpy.argmax(m[:,j-1])==i:
+           r[i,j]=r[i,j-1]
+     e[:,:,g]+=numpy.where(r==0,0,-numpy.inf)
+     m,p=numpy.max(e,axis=2),numpy.argmax(e,axis=2)
+     h=self.chu_liu_edmonds(m)
+     z=[i for i,j in enumerate(h) if i==j]
+     if len(z)>1:
+       k,h=z[numpy.argmax(m[z,z])],numpy.min(m)-numpy.max(m)
+       m[:,z]+=[[0 if j in z and (i!=j or i==k) else h for i in z] for j in range(m.shape[0])]
+       h=self.chu_liu_edmonds(m)
+     v=[(s,e) for s,e in off if s<e]
+     q=[self.model.config.id2label[p[j,i]].split("|") for i,j in enumerate(h)]
+     if "aggregation_strategy" in kwargs and kwargs["aggregation_strategy"]!="none":
+       for i,j in reversed(list(enumerate(q[1:],1))):
+         if j[-1]=="r-goeswith" and set([t[-1] for t in q[h[i]+1:i+1]])=={"r-goeswith"}:
+           h=[b if i>b else b-1 for a,b in enumerate(h) if i!=a]
+           v[i-1]=(v[i-1][0],v.pop(i)[1])
+           q.pop(i)
+         elif v[i-1][1]>v[i][0]:
+           h=[b if i>b else b-1 for a,b in enumerate(h) if i!=a]
+           v[i-1]=(v[i-1][0],v.pop(i)[1])
+           q.pop(i)
+     t=model_outputs["sentence"].replace("\n"," ")
+     u="# text = "+t+"\n"
+     for i,(s,e) in enumerate(v):
+       u+="\t".join([str(i+1),t[s:e],t[s:e],q[i][0],"_","_" if len(q[i])<4 else "|".join(q[i][2:-1]),str(0 if h[i]==i else h[i]+1),"root" if q[i][-1]=="root" else q[i][-1][2:],"_","_" if i+1<len(v) and e<v[i+1][0] else "SpaceAfter=No"])+"\n"
+     return u+"\n"
+   def chu_liu_edmonds(self,matrix):
+     h=numpy.argmax(matrix,axis=0)
+     x=[-1 if i==j else j for i,j in enumerate(h)]
+     for b in [lambda x,i,j:-1 if i not in x else x[i],lambda x,i,j:-1 if j<0 else x[j]]:
+       y=[]
+       while x!=y:
+         y=list(x)
+         for i,j in enumerate(x):
+           x[i]=b(x,i,j)
+       if max(x)<0:
+         return h
+     y,x=[i for i,j in enumerate(x) if j==max(x)],[i for i,j in enumerate(x) if j<max(x)]
+     z=matrix-numpy.max(matrix,axis=0)
+     m=numpy.block([[z[x,:][:,x],numpy.max(z[x,:][:,y],axis=1).reshape(len(x),1)],[numpy.max(z[y,:][:,x],axis=0),numpy.max(z[y,y])]])
+     k=[j if i==len(x) else x[j] if j<len(x) else y[numpy.argmax(z[y,x[i]])] for i,j in enumerate(self.chu_liu_edmonds(m))]
+     h=[j if i in y else k[x.index(i)] for i,j in enumerate(h)]
+     i=y[numpy.argmax(z[x[k[-1]],y] if k[-1]<len(x) else z[y,y])]
+     h[i]=x[k[-1]] if k[-1]<len(x) else i
+     return h
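In `postprocess`, the second forward pass fills a head-score matrix whose entry (i, j) scores token i as the head of token j, with the diagonal scoring roots; `chu_liu_edmonds` then repairs cycles and multiple roots in the greedy argmax solution. A toy sketch of just that greedy starting point (the matrix values below are made up, not model output):

```py
# Toy illustration, not the pipeline code: reading a head-score matrix.
# m[i,j] scores "token i heads token j"; m[i,i] scores "token i is root".
import numpy

m=numpy.array([[0.1,0.9,0.2],
               [0.8,0.3,0.7],
               [0.1,0.2,0.1]])
heads=numpy.argmax(m,axis=0)  # same first step as chu_liu_edmonds() above
print(heads.tolist())         # [1, 0, 1]: tokens 0 and 1 head each other, no root -> needs repair
```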
vocab.txt ADDED
The diff for this file is too large to render. See raw diff