---
tags:
- "japanese"
- "question-answering"
- "dependency-parsing"
datasets:
- "universal_dependencies"
license: "cc-by-sa-4.0"
pipeline_tag: "question-answering"
widget:
- text: "国語"
  context: "全学年にわたって小学校の国語の教科書に挿し絵が用いられている"
- text: "教科書"
  context: "全学年にわたって小学校の国語の教科書に挿し絵が用いられている"
- text: "の"
  context: "全学年にわたって小学校の国語[MASK]教科書に挿し絵が用いられている"
---

# deberta-base-japanese-aozora-ud-head

## Model Description

This is a DeBERTa(V2) model pretrained on 青空文庫 (Aozora Bunko) texts for dependency parsing (head detection on long-unit words) cast as question-answering: the `question` is a word in the sentence, and the answer span is its syntactic head. It is derived from [deberta-base-japanese-aozora](https://huggingface.co/KoichiYasuoka/deberta-base-japanese-aozora) and [UD_Japanese-GSDLUW](https://github.com/UniversalDependencies/UD_Japanese-GSDLUW). When the word given as `question` occurs more than once in the sentence, replace the occurrence you are asking about with `[MASK]` inside `context` to avoid ambiguity (as in the third widget example).

## How to Use

```py
import torch
from transformers import AutoTokenizer,AutoModelForQuestionAnswering
tokenizer=AutoTokenizer.from_pretrained("KoichiYasuoka/deberta-base-japanese-aozora-ud-head")
model=AutoModelForQuestionAnswering.from_pretrained("KoichiYasuoka/deberta-base-japanese-aozora-ud-head")
question="国語"
context="全学年にわたって小学校の国語の教科書に挿し絵が用いられている"
inputs=tokenizer(question,context,return_tensors="pt",return_offsets_mapping=True)
offsets=inputs.pop("offset_mapping").tolist()[0]
outputs=model(**inputs)
# the span with the highest start/end logits is the head of question
start,end=torch.argmax(outputs.start_logits),torch.argmax(outputs.end_logits)
print(context[offsets[start][0]:offsets[end][-1]])
```

or, for parsing whole sentences into dependency trees (with [ufal.chu-liu-edmonds](https://pypi.org/project/ufal.chu-liu-edmonds/)):

```py
class TransformersUD(object):
  def __init__(self,bert):
    import os
    from transformers import (AutoTokenizer,AutoModelForQuestionAnswering,
      AutoModelForTokenClassification,AutoConfig,TokenClassificationPipeline)
    self.tokenizer=AutoTokenizer.from_pretrained(bert)
    self.model=AutoModelForQuestionAnswering.from_pretrained(bert)
    d=os.path.join(bert,"tagger")
    if os.path.isdir(d):
      m=AutoModelForTokenClassification.from_pretrained(d)
    else:
      # download the tagger from the hub (hf_bucket_url is only available
      # in older transformers releases)
      from transformers.file_utils import hf_bucket_url
      c=AutoConfig.from_pretrained(hf_bucket_url(bert,"tagger/config.json"))
      m=AutoModelForTokenClassification.from_pretrained(
        hf_bucket_url(bert,"tagger/pytorch_model.bin"),config=c)
    self.tagger=TokenClassificationPipeline(model=m,tokenizer=self.tokenizer,
      aggregation_strategy="simple")
  def __call__(self,text):
    import numpy,torch,ufal.chu_liu_edmonds
    # split text into long-unit words, each with its POS/FEATS/DEPREL labels
    y=self.tagger(text)
    w=[(t["start"],t["end"],t["entity_group"].split("|")) for t in y]
    r=[text[s:e] for s,e,p in w]
    v=self.tokenizer(r,add_special_tokens=False)["input_ids"]
    # m[i+1,j+1] scores word j as head of word i; m[i+1,0] scores word i as root
    m=numpy.full((len(v)+1,len(v)+1),numpy.nan)
    for i,t in enumerate(v):
      # ask "what is the head of word i?" by masking word i in the context
      a=[[self.tokenizer.cls_token_id]+t+[self.tokenizer.sep_token_id]]
      a+=v[0:i]+[[self.tokenizer.mask_token_id]]+v[i+1:]+[[a[0][-1]]]
      b,c=[len(sum(a[0:j],[])) for j in range(1,len(a))],sum(a,[])
      d=self.model(input_ids=torch.tensor([c]),
        token_type_ids=torch.tensor([[0]*len(a[0])+[1]*(len(c)-len(a[0]))]))
      s,e=d.start_logits.tolist()[0],d.end_logits.tolist()[0]
      # head score = start logit of word j's first subtoken
      #            + end logit of its last subtoken
      for j in range(len(b)-1):
        m[i+1,0 if i==j else j+1]=s[b[j]]+e[b[j+1]-1]
    # decode the maximum spanning tree over the score matrix
    h=ufal.chu_liu_edmonds.chu_liu_edmonds(m)[0]
    u="# text = "+text.replace("\n"," ")+"\n"
    for i,(s,e,p) in enumerate(w,1):
      u+="\t".join([str(i),r[i-1],"_",p[0],"_","|".join(p[1:-1]),str(h[i]),
        p[-1],"_","_" if i<len(w) and e<w[i][0] else "SpaceAfter=No"])+"\n"
    return u+"\n"
```
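The parser can then be instantiated with this model. A minimal usage sketch, reusing the example sentence from above; it prints the dependency tree in CoNLL-U format:

```py
# build the parser from this model and parse the example sentence
nlp=TransformersUD("KoichiYasuoka/deberta-base-japanese-aozora-ud-head")
print(nlp("全学年にわたって小学校の国語の教科書に挿し絵が用いられている"))
```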