Upload 14 files

- README.md +4 -0
- added_tokens.json +1 -0
- bert_ner_model_loader.py +191 -0
- biobert_utils.py +160 -0
- config.json +34 -0
- eval_results.txt +4 -0
- model_config.json +38 -0
- packages.txt +1 -0
- pytorch_model.bin +3 -0
- requirements.txt +37 -0
- special_tokens_map.json +1 -0
- tokenizer_config.json +1 -0
- training_args.bin +3 -0
- vocab.txt +0 -0

README.md
ADDED
@@ -0,0 +1,4 @@
---
language:
- en
---

added_tokens.json
ADDED
@@ -0,0 +1 @@
{}

bert_ner_model_loader.py
ADDED
@@ -0,0 +1,191 @@
"""BERT NER Inference."""

from __future__ import absolute_import, division, print_function

import json
import os

import torch
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from nltk import word_tokenize
# from transformers import (BertConfig, BertForTokenClassification,
#                           BertTokenizer)
from pytorch_transformers import (BertForTokenClassification, BertTokenizer)


class BertNer(BertForTokenClassification):

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, valid_ids=None):
        sequence_output = self.bert(input_ids, token_type_ids, attention_mask, head_mask=None)[0]
        batch_size, max_len, feat_dim = sequence_output.shape
        valid_output = torch.zeros(batch_size, max_len, feat_dim, dtype=torch.float32, device='cpu')
        # valid_output = torch.zeros(batch_size, max_len, feat_dim, dtype=torch.float32, device='cuda' if torch.cuda.is_available() else 'cpu')
        # Keep only the hidden state of the first sub-token of each word.
        for i in range(batch_size):
            jj = -1
            for j in range(max_len):
                if valid_ids[i][j].item() == 1:
                    jj += 1
                    valid_output[i][jj] = sequence_output[i][j]
        sequence_output = self.dropout(valid_output)
        logits = self.classifier(sequence_output)
        return logits


class Ner:

    def __init__(self, model_dir: str):
        self.model, self.tokenizer, self.model_config = self.load_model(model_dir)
        self.label_map = self.model_config["label_map"]
        self.max_seq_length = self.model_config["max_seq_length"]
        self.label_map = {int(k): v for k, v in self.label_map.items()}
        self.device = "cpu"
        # self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.model = self.model.to(self.device)
        self.model.eval()

    def load_model(self, model_dir: str, model_config: str = "model_config.json"):
        model_config = os.path.join(model_dir, model_config)
        model_config = json.load(open(model_config))
        model = BertNer.from_pretrained(model_dir)
        tokenizer = BertTokenizer.from_pretrained(model_dir, do_lower_case=model_config["do_lower"])
        return model, tokenizer, model_config

    def tokenize(self, text: str):
        """Tokenize input and mark the first sub-token of every word as valid."""
        words = word_tokenize(text)
        tokens = []
        valid_positions = []
        for i, word in enumerate(words):
            token = self.tokenizer.tokenize(word)
            tokens.extend(token)
            for i in range(len(token)):
                if i == 0:
                    valid_positions.append(1)
                else:
                    valid_positions.append(0)
        # print("valid positions from text o/p:=>", valid_positions)
        return tokens, valid_positions

    def preprocess(self, text: str):
        """Build padded input ids, attention mask, segment ids and valid positions."""
        tokens, valid_positions = self.tokenize(text)
        # insert "[CLS]"
        tokens.insert(0, "[CLS]")
        valid_positions.insert(0, 1)
        # append "[SEP]"
        tokens.append("[SEP]")
        valid_positions.append(1)
        segment_ids = []
        for i in range(len(tokens)):
            segment_ids.append(0)
        input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
        # print("input ids with berttokenizer:=>", input_ids)
        input_mask = [1] * len(input_ids)
        while len(input_ids) < self.max_seq_length:
            input_ids.append(0)
            input_mask.append(0)
            segment_ids.append(0)
            valid_positions.append(0)
        return input_ids, input_mask, segment_ids, valid_positions

    def predict_entity(self, B_lab, I_lab, words, labels, entity_list):
        temp = []
        entity = []

        for word, (label, confidence), B_l, I_l in zip(words, labels, B_lab, I_lab):
            if ((label == B_l) or (label == I_l)) and label != 'O':
                # a new B- label starts a new entity span
                if label == B_l:
                    entity.append(temp)
                    temp = []
                    temp.append(label)
                temp.append(word)

        entity.append(temp)
        # print(entity)

        entity_name_label = []
        for entity_name in entity[1:]:
            for ent_key, ent_value in entity_list.items():
                if ent_key == entity_name[0]:
                    # entity_name_label.append(' '.join(entity_name[1:]) + ": " + ent_value)
                    entity_name_label.append([' '.join(entity_name[1:]), ent_value])

        return entity_name_label

    def predict(self, text: str):
        input_ids, input_mask, segment_ids, valid_ids = self.preprocess(text)
        # print("valid ids:=>", segment_ids)
        input_ids = torch.tensor([input_ids], dtype=torch.long, device=self.device)
        input_mask = torch.tensor([input_mask], dtype=torch.long, device=self.device)
        segment_ids = torch.tensor([segment_ids], dtype=torch.long, device=self.device)
        valid_ids = torch.tensor([valid_ids], dtype=torch.long, device=self.device)

        with torch.no_grad():
            logits = self.model(input_ids, segment_ids, input_mask, valid_ids)
        # print("logit values:=>", logits)
        logits = F.softmax(logits, dim=2)
        # print("logit values:=>", logits[0])
        logits_label = torch.argmax(logits, dim=2)
        logits_label = logits_label.detach().cpu().numpy().tolist()[0]
        # print("logits label value list:=>", logits_label)

        logits_confidence = [values[label].item() for values, label in zip(logits[0], logits_label)]

        logits = []
        pos = 0
        for index, mask in enumerate(valid_ids[0]):
            if index == 0:
                continue
            if mask == 1:
                logits.append((logits_label[index - pos], logits_confidence[index - pos]))
            else:
                pos += 1
        logits.pop()
        labels = [(self.label_map[label], confidence) for label, confidence in logits]
        words = word_tokenize(text)

        entity_list = {'B-PER': 'Person',
                       'B-FAC': 'Facility',
                       'B-LOC': 'Location',
                       'B-ORG': 'Organization',
                       'B-ART': 'Work Of Art',
                       'B-EVENT': 'Event',
                       'B-DATE': 'Date-Time Entity',
                       'B-TIME': 'Date-Time Entity',
                       'B-LAW': 'Law Terms',
                       'B-PRODUCT': 'Product',
                       'B-PERCENT': 'Percentage',
                       'B-MONEY': 'Currency',
                       'B-LANGUAGE': 'Language',
                       'B-NORP': 'Nationality / Religion / Political group',
                       'B-QUANTITY': 'Quantity',
                       'B-ORDINAL': 'Ordinal Number',
                       'B-CARDINAL': 'Cardinal Number'}

        B_labels = []
        I_labels = []
        for label, confidence in labels:
            if label[:1] == 'B':
                B_labels.append(label)
                I_labels.append('O')
            elif label[:1] == 'I':
                I_labels.append(label)
                B_labels.append('O')
            else:
                B_labels.append('O')
                I_labels.append('O')

        assert len(labels) == len(words) == len(I_labels) == len(B_labels)

        output = self.predict_entity(B_labels, I_labels, words, labels, entity_list)
        print(output)

        # output = [{"word":word,"tag":label,"confidence":confidence} for word,(label,confidence) in zip(words,labels)]
        return output
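
A minimal usage sketch for the Ner class above (not part of the uploaded files). It assumes NLTK's punkt data is available for word_tokenize and that the model directory holds a checkpoint whose model_config.json label map matches this file's general-domain entity_list (B-PER, B-LOC, ...); note that the model_config.json in this commit instead carries the biomedical tag set consumed by biobert_utils.py. The directory path and sentence are illustrative.

# Illustrative only: directory path, sentence and expected output are assumptions.
import nltk

nltk.download("punkt")  # nltk.word_tokenize needs the punkt tokenizer data once

from bert_ner_model_loader import Ner

ner = Ner("./")  # directory with pytorch_model.bin, config.json, model_config.json, vocab.txt
entities = ner.predict("Barack Obama visited Paris in 2015.")
# predict() returns [entity text, entity type] pairs, e.g.
# [['Barack Obama', 'Person'], ['Paris', 'Location']]
print(entities)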

biobert_utils.py
ADDED
@@ -0,0 +1,160 @@
"""BERT NER Inference."""

import json
import os
import torch
import torch.nn.functional as F
from nltk import word_tokenize
from pytorch_transformers import (BertForTokenClassification, BertTokenizer)


class BertNer(BertForTokenClassification):

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, valid_ids=None):
        sequence_output = self.bert(input_ids, token_type_ids, attention_mask, head_mask=None)[0]
        batch_size, max_len, feat_dim = sequence_output.shape
        # valid_output = torch.zeros(batch_size, max_len, feat_dim, dtype=torch.float32, device='cuda' if torch.cuda.is_available() else 'cpu')
        valid_output = torch.zeros(batch_size, max_len, feat_dim, dtype=torch.float32, device='cpu')
        # Keep only the hidden state of the first sub-token of each word.
        for i in range(batch_size):
            jj = -1
            for j in range(max_len):
                if valid_ids[i][j].item() == 1:
                    jj += 1
                    valid_output[i][jj] = sequence_output[i][j]
        sequence_output = self.dropout(valid_output)
        logits = self.classifier(sequence_output)
        return logits


class BIOBERT_Ner:

    def __init__(self, model_dir: str):
        self.model, self.tokenizer, self.model_config = self.load_model(model_dir)
        self.label_map = self.model_config["label_map"]
        self.max_seq_length = self.model_config["max_seq_length"]
        self.label_map = {int(k): v for k, v in self.label_map.items()}
        self.device = "cpu"
        # self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.model = self.model.to(self.device)
        self.model.eval()

    def load_model(self, model_dir: str, model_config: str = "model_config.json"):
        model_config = os.path.join(model_dir, model_config)
        model_config = json.load(open(model_config))
        model = BertNer.from_pretrained(model_dir)
        tokenizer = BertTokenizer.from_pretrained(model_dir, do_lower_case=model_config["do_lower"])
        return model, tokenizer, model_config

    def tokenize(self, text: str):
        """Tokenize input and mark the first sub-token of every word as valid."""
        words = word_tokenize(text)
        tokens = []
        valid_positions = []
        for i, word in enumerate(words):
            token = self.tokenizer.tokenize(word)
            tokens.extend(token)
            for i in range(len(token)):
                if i == 0:
                    valid_positions.append(1)
                else:
                    valid_positions.append(0)
        return tokens, valid_positions

    def preprocess(self, text: str):
        """Build padded input ids, attention mask, segment ids and valid positions."""
        tokens, valid_positions = self.tokenize(text)

        # insert "[CLS]"
        tokens.insert(0, "[CLS]")
        valid_positions.insert(0, 1)

        # append "[SEP]"
        tokens.append("[SEP]")
        valid_positions.append(1)
        segment_ids = []
        for i in range(len(tokens)):
            segment_ids.append(0)
        input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
        input_mask = [1] * len(input_ids)
        while len(input_ids) < self.max_seq_length:
            input_ids.append(0)
            input_mask.append(0)
            segment_ids.append(0)
            valid_positions.append(0)
        return input_ids, input_mask, segment_ids, valid_positions

    def predict_entity(self, B_lab, I_lab, words, labels, entity_list):
        temp = []
        entity = []

        for word, label, B_l, I_l in zip(words, labels, B_lab, I_lab):
            if ((label == B_l) or (label == I_l)) and label != 'O':
                # a new B- label starts a new entity span
                if label == B_l:
                    entity.append(temp)
                    temp = []
                    temp.append(label)
                temp.append(word)

        entity.append(temp)

        entity_name_label = []
        for entity_name in entity[1:]:
            for ent_key, ent_value in entity_list.items():
                if ent_key == entity_name[0]:
                    entity_name_label.append([' '.join(entity_name[1:]), ent_value])

        return entity_name_label

    def predict(self, text: str):
        print("text:", text)
        input_ids, input_mask, segment_ids, valid_ids = self.preprocess(text)
        input_ids = torch.tensor([input_ids], dtype=torch.long, device=self.device)
        input_mask = torch.tensor([input_mask], dtype=torch.long, device=self.device)
        segment_ids = torch.tensor([segment_ids], dtype=torch.long, device=self.device)
        valid_ids = torch.tensor([valid_ids], dtype=torch.long, device=self.device)

        with torch.no_grad():
            logits = self.model(input_ids, segment_ids, input_mask, valid_ids)
        logits = F.softmax(logits, dim=2)
        logits_label = torch.argmax(logits, dim=2)
        logits_label = logits_label.detach().cpu().numpy().tolist()[0]

        logits = []
        pos = 0
        for index, mask in enumerate(valid_ids[0]):
            if index == 0:
                continue
            if mask == 1:
                logits.append(logits_label[index - pos])
            else:
                pos += 1
        logits.pop()
        labels = [self.label_map[label] for label in logits]
        words = word_tokenize(text)

        entity_list = {'B-ANATOMY': 'Anatomy', 'B-GENE': 'Gene', 'B-CHEMICAL': 'Chemical', 'B-DISEASE': 'Disease', 'B-PROTEIN': 'Protein', 'B-ORGANISM': 'Organism', 'B-CANCER': 'Cancer', 'B-ORGAN': 'Organ', 'B-CELL': 'Cell', 'B-TISSUE': 'Tissue', 'B-PATHOLOGY_TERM': 'Pathology', 'B-COMPLEX': 'Complex', 'B-TAXON': 'Taxon'}

        B_labels = []
        I_labels = []
        for label in labels:
            if label[:1] == 'B':
                B_labels.append(label)
                I_labels.append('O')
            elif label[:1] == 'I':
                I_labels.append(label)
                B_labels.append('O')
            else:
                B_labels.append('O')
                I_labels.append('O')

        assert len(labels) == len(words) == len(I_labels) == len(B_labels)

        output = self.predict_entity(B_labels, I_labels, words, labels, entity_list)

        return output
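
A minimal usage sketch for BIOBERT_Ner (not part of the uploaded files). This is the class that matches the rest of this commit: model_config.json carries the biomedical label map and tokenizer_config.json sets do_lower_case to false. The directory path, sentence and printed result are illustrative assumptions; the actual spans depend on the model.

# Illustrative only: path, sentence and expected output are assumptions.
import nltk

nltk.download("punkt")  # required once for nltk.word_tokenize

from biobert_utils import BIOBERT_Ner

ner = BIOBERT_Ner("./")  # directory with pytorch_model.bin, config.json, model_config.json, vocab.txt
output = ner.predict("The BRCA1 mutation is associated with breast cancer.")
# predict() returns [entity text, entity type] pairs,
# e.g. [['BRCA1', 'Gene'], ['breast cancer', 'Cancer']]
print(output)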

config.json
ADDED
@@ -0,0 +1,34 @@
{
  "architectures": [
    "BertForMaskedLM"
  ],
  "attention_probs_dropout_prob": 0.1,
  "finetuning_task": null,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "id2label": {
    "0": "LABEL_0",
    "1": "LABEL_1"
  },
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "is_decoder": false,
  "label2id": {
    "LABEL_0": 0,
    "LABEL_1": 1
  },
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "num_labels": 27,
  "output_attentions": false,
  "output_hidden_states": false,
  "output_past": true,
  "pruned_heads": {},
  "torchscript": false,
  "type_vocab_size": 2,
  "use_bfloat16": false,
  "vocab_size": 28996
}
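
For reference, a small sketch that reads a few encoder hyperparameters from this file; plain json is used so it does not depend on any particular transformers version, and the field values quoted in the comments come straight from the config above.

import json

# Inspect the encoder shipped with this checkpoint.
with open("config.json") as f:
    config = json.load(f)

# BERT-base sized encoder: hidden_size 768, 12 layers, 12 attention heads
print(config["hidden_size"], config["num_hidden_layers"], config["num_attention_heads"])
# 28996-token cased WordPiece vocabulary
print(config["vocab_size"])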

eval_results.txt
ADDED
@@ -0,0 +1,4 @@
f1 = 0.6010022731969414
loss = 0.24734244643932496
precision = 0.6018625506596533
recall = 0.6001444515141614

model_config.json
ADDED
@@ -0,0 +1,38 @@
{
  "bert_model": "biobert-base-cased",
  "do_lower": false,
  "max_seq_length": 128,
  "num_labels": 29,
  "label_map": {
    "0": "O",
    "1": "B-ANATOMY",
    "2": "I-ANATOMY",
    "3": "B-GENE",
    "4": "I-GENE",
    "5": "B-CHEMICAL",
    "6": "I-CHEMICAL",
    "7": "B-DISEASE",
    "8": "I-DISEASE",
    "9": "B-PROTEIN",
    "10": "I-PROTEIN",
    "11": "B-ORGANISM",
    "12": "I-ORGANISM",
    "13": "B-CANCER",
    "14": "I-CANCER",
    "15": "B-ORGAN",
    "16": "I-ORGAN",
    "17": "B-CELL",
    "18": "I-CELL",
    "19": "B-TISSUE",
    "20": "I-TISSUE",
    "21": "B-PATHOLOGY_TERM",
    "22": "I-PATHOLOGY_TERM",
    "23": "B-COMPLEX",
    "24": "I-COMPLEX",
    "25": "B-TAXON",
    "26": "I-TAXON",
    "27": "[CLS]",
    "28": "[SEP]"
  }
}
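
The string keys of label_map are converted to integers at load time (see the __init__ methods above) and then used to decode the classifier's argmax indices into tags. A small sketch of that decoding step; the index sequence here is made up for illustration.

import json

# Decode a hypothetical sequence of predicted class indices with this label_map.
with open("model_config.json") as f:
    cfg = json.load(f)

label_map = {int(k): v for k, v in cfg["label_map"].items()}

predicted_ids = [0, 3, 4, 0, 7, 8, 0]  # made-up per-word argmax output
tags = [label_map[i] for i in predicted_ids]
print(tags)  # ['O', 'B-GENE', 'I-GENE', 'O', 'B-DISEASE', 'I-DISEASE', 'O']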

packages.txt
ADDED
@@ -0,0 +1 @@
git lfs update --force

pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:412399c4d81a36efcc63d3c6eebb37d9a442576b0e637eac08fd45d830b02efa
size 433372007

requirements.txt
ADDED
@@ -0,0 +1,37 @@
boto3==1.14.56
botocore==1.17.56
certifi==2020.6.20
chardet==3.0.4
click==7.1.2
docutils==0.15.2
fire==0.3.1
Flask==1.1.2
Flask-Cors==3.0.9
geoip2==3.0.0
idna==2.8
itsdangerous==1.1.0
Jinja2==2.11.2
jmespath==0.10.0
joblib==0.16.0
MarkupSafe==1.1.1
maxminddb==1.5.2
nltk==3.4.5
numpy==1.23.5
Pillow==7.2.0
python-dateutil==2.8.2
pytorch-transformers==1.2.0
pytz==2020.1
regex==2020.7.14
requests==2.22.0
s3transfer==0.3.3
sacremoses==0.0.43
sentencepiece==0.1.91
six==1.15.0
termcolor==1.1.0
torch==1.4.0
torchvision==0.5.0
tqdm==4.48.2
transformers==2.1.1
urllib3==1.25.6
uWSGI==2.0.19.1
Werkzeug==1.0.1

special_tokens_map.json
ADDED
@@ -0,0 +1 @@
{"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}

tokenizer_config.json
ADDED
@@ -0,0 +1 @@
{"do_lower_case": false, "init_inputs": []}

training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c8d04d6c200da456bab414d4ce7ba1a7473ac55f2c50f2a14f43782b55cbb225
size 1208

vocab.txt
ADDED
The diff for this file is too large to render. See the raw file.