Commit 3e0cc3d (parent 2274431): Upload 2 files

Files changed:
- app.py +57 -0
- bert_ner_model_loader.py +191 -0
app.py
ADDED
@@ -0,0 +1,57 @@
import os

import gradio as gr
import pandas as pd

from bert_ner_model_loader import Ner

# The fine-tuned BERT NER model files are expected alongside app.py.
cwd = os.getcwd()
bert_ner_model = cwd


def generate_emotion(article):
    """Run NER over the input text and return the entities as a DataFrame."""
    text = "Input sentence: "
    text += article

    model_ner = Ner(bert_ner_model)

    # predict() returns a list of [entity_text, entity_type] pairs.
    output = model_ner.predict(text)
    print(output)

    entities_found = []
    entity_types = []
    for entity_text, entity_type in output:
        entities_found.append(entity_text)
        entity_types.append(entity_type)

    result = {'Entities Found': entities_found, 'Entity Types': entity_types}
    return pd.DataFrame(result)


inputs = gr.Textbox(lines=10, label="Sentences", elem_id="inp_div")
outputs = [gr.Dataframe(row_count=(2, "dynamic"), col_count=(2, "fixed"),
                        label="Here is the Result",
                        headers=["Entities Found", "Entity Types"])]

demo = gr.Interface(
    generate_emotion,
    inputs,
    outputs,
    title="Entity Recognition For Input Text",
    description="Feel free to give your feedback",
    css=".gradio-container {background-color: lightgray} "
        "#inp_div {background-color: #7FB3D5}",
)
demo.launch()
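
To try the app locally, a minimal sketch (it assumes the fine-tuned model files and model_config.json sit in the repo root; nltk's word_tokenize also needs the punkt data):

pip install gradio pandas torch pytorch-transformers nltk
python -c "import nltk; nltk.download('punkt')"
python app.py  # Gradio serves on http://127.0.0.1:7860 by default
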
bert_ner_model_loader.py
ADDED
@@ -0,0 +1,191 @@
"""BERT NER Inference."""

from __future__ import absolute_import, division, print_function

import json
import os

import torch
import torch.nn.functional as F
from nltk import word_tokenize
# from transformers import (BertConfig, BertForTokenClassification,
#                           BertTokenizer)
from pytorch_transformers import BertForTokenClassification, BertTokenizer


class BertNer(BertForTokenClassification):
    """Token classifier that realigns sub-word outputs back to word positions."""

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, valid_ids=None):
        sequence_output = self.bert(input_ids, token_type_ids, attention_mask, head_mask=None)[0]
        batch_size, max_len, feat_dim = sequence_output.shape
        # Compact the hidden states of the first sub-word of each word to the
        # front of the sequence; continuation pieces are dropped.
        valid_output = torch.zeros(batch_size, max_len, feat_dim, dtype=torch.float32, device='cpu')
        # valid_output = torch.zeros(batch_size, max_len, feat_dim, dtype=torch.float32,
        #                            device='cuda' if torch.cuda.is_available() else 'cpu')
        for i in range(batch_size):
            jj = -1
            for j in range(max_len):
                if valid_ids[i][j].item() == 1:
                    jj += 1
                    valid_output[i][jj] = sequence_output[i][j]
        sequence_output = self.dropout(valid_output)
        logits = self.classifier(sequence_output)
        return logits
+
|
37 |
+
class Ner:
|
38 |
+
|
39 |
+
def __init__(self,model_dir: str):
|
40 |
+
self.model , self.tokenizer, self.model_config = self.load_model(model_dir)
|
41 |
+
self.label_map = self.model_config["label_map"]
|
42 |
+
self.max_seq_length = self.model_config["max_seq_length"]
|
43 |
+
self.label_map = {int(k):v for k,v in self.label_map.items()}
|
44 |
+
self.device = "cpu"
|
45 |
+
# self.device = "cuda" if torch.cuda.is_available() else "cpu"
|
46 |
+
self.model = self.model.to(self.device)
|
47 |
+
self.model.eval()
|
48 |
+
|
49 |
+
def load_model(self, model_dir: str, model_config: str = "model_config.json"):
|
50 |
+
model_config = os.path.join(model_dir,model_config)
|
51 |
+
model_config = json.load(open(model_config))
|
52 |
+
model = BertNer.from_pretrained(model_dir)
|
53 |
+
tokenizer = BertTokenizer.from_pretrained(model_dir, do_lower_case=model_config["do_lower"])
|
54 |
+
return model, tokenizer, model_config
|
55 |
+
|
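
load_model reads exactly three keys from model_config.json: label_map, max_seq_length and do_lower. An illustrative file (key names taken from the code above; the values here are hypothetical):

{
  "label_map": {"1": "O", "2": "B-PER", "3": "I-PER", "4": "B-ORG"},
  "max_seq_length": 128,
  "do_lower": false
}
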
    def tokenize(self, text: str):
        """Tokenize input into WordPieces, marking the first piece of each word."""
        words = word_tokenize(text)
        tokens = []
        valid_positions = []
        for word in words:
            token = self.tokenizer.tokenize(word)
            tokens.extend(token)
            for i in range(len(token)):
                valid_positions.append(1 if i == 0 else 0)
        # print("valid positions from text o/p:=>", valid_positions)
        return tokens, valid_positions
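
Concretely, with ner an Ner instance loaded as above (the split shown is hypothetical; the real pieces depend on the model's vocab.txt):

tokens, valid_positions = ner.tokenize("washing machine")
# tokens          -> ['wash', '##ing', 'machine']
# valid_positions -> [1, 0, 1]
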
    def preprocess(self, text: str):
        """Build padded input ids, attention mask, segment ids and valid positions."""
        tokens, valid_positions = self.tokenize(text)
        # insert "[CLS]"
        tokens.insert(0, "[CLS]")
        valid_positions.insert(0, 1)
        # append "[SEP]"
        tokens.append("[SEP]")
        valid_positions.append(1)
        segment_ids = [0] * len(tokens)
        input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
        # print("input ids with berttokenizer:=>", input_ids)
        input_mask = [1] * len(input_ids)
        # Zero-pad all four sequences up to max_seq_length.
        while len(input_ids) < self.max_seq_length:
            input_ids.append(0)
            input_mask.append(0)
            segment_ids.append(0)
            valid_positions.append(0)
        return input_ids, input_mask, segment_ids, valid_positions
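
Continuing the toy example with a hypothetical max_seq_length of 8:

input_ids, input_mask, segment_ids, valid_positions = ner.preprocess("washing machine")
# tokens fed in   : ['[CLS]', 'wash', '##ing', 'machine', '[SEP]']
# input_mask      : [1, 1, 1, 1, 1, 0, 0, 0]   real tokens, then padding
# segment_ids     : [0, 0, 0, 0, 0, 0, 0, 0]   single-sentence input
# valid_positions : [1, 1, 0, 1, 1, 0, 0, 0]   [CLS] and [SEP] count as valid
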
    def predict_entity(self, B_lab, I_lab, words, labels, entity_list):
        """Group BIO-tagged words into entities and map tags to display names."""
        temp = []
        entity = []

        for word, (label, confidence), B_l, I_l in zip(words, labels, B_lab, I_lab):
            if ((label == B_l) or (label == I_l)) and label != 'O':
                if label == B_l:
                    # A B- tag opens a new entity; flush the previous one.
                    entity.append(temp)
                    temp = []
                    temp.append(label)
                temp.append(word)

        entity.append(temp)
        # print(entity)

        # entity[0] is the empty list flushed before the first B- tag.
        entity_name_label = []
        for entity_name in entity[1:]:
            for ent_key, ent_value in entity_list.items():
                if ent_key == entity_name[0]:
                    # entity_name_label.append(' '.join(entity_name[1:]) + ": " + ent_value)
                    entity_name_label.append([' '.join(entity_name[1:]), ent_value])

        return entity_name_label
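
The grouping is easy to check on hand-built inputs. predict_entity never touches self, so once the module is imported it can be exercised without loading a model (toy tags; the confidences are made up):

words = ['Steve', 'Jobs', 'founded', 'Apple']
labels = [('B-PER', 0.99), ('I-PER', 0.98), ('O', 0.99), ('B-ORG', 0.97)]
B_lab = ['B-PER', 'O', 'O', 'B-ORG']
I_lab = ['O', 'I-PER', 'O', 'O']
entity_list = {'B-PER': 'Person', 'B-ORG': 'Organization'}

print(Ner.predict_entity(None, B_lab, I_lab, words, labels, entity_list))
# [['Steve Jobs', 'Person'], ['Apple', 'Organization']]
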
    def predict(self, text: str):
        input_ids, input_mask, segment_ids, valid_ids = self.preprocess(text)
        # print("valid ids:=>", segment_ids)
        input_ids = torch.tensor([input_ids], dtype=torch.long, device=self.device)
        input_mask = torch.tensor([input_mask], dtype=torch.long, device=self.device)
        segment_ids = torch.tensor([segment_ids], dtype=torch.long, device=self.device)
        valid_ids = torch.tensor([valid_ids], dtype=torch.long, device=self.device)

        with torch.no_grad():
            logits = self.model(input_ids, segment_ids, input_mask, valid_ids)
        # print("logit values:=>", logits)
        logits = F.softmax(logits, dim=2)
        # print("logit values:=>", logits[0])
        logits_label = torch.argmax(logits, dim=2)
        logits_label = logits_label.detach().cpu().numpy().tolist()[0]
        # print("logits label value list:=>", logits_label)

        logits_confidence = [values[label].item() for values, label in zip(logits[0], logits_label)]

        # Keep one (label, confidence) pair per valid position, skipping [CLS]
        # and dropping the trailing [SEP] prediction.
        logits = []
        pos = 0
        for index, mask in enumerate(valid_ids[0]):
            if index == 0:
                continue
            if mask == 1:
                logits.append((logits_label[index - pos], logits_confidence[index - pos]))
            else:
                pos += 1
        logits.pop()
        labels = [(self.label_map[label], confidence) for label, confidence in logits]
        words = word_tokenize(text)

        entity_list = {'B-PER': 'Person',
                       'B-FAC': 'Facility',
                       'B-LOC': 'Location',
                       'B-ORG': 'Organization',
                       'B-ART': 'Work Of Art',
                       'B-EVENT': 'Event',
                       'B-DATE': 'Date-Time Entity',
                       'B-TIME': 'Date-Time Entity',
                       'B-LAW': 'Law Terms',
                       'B-PRODUCT': 'Product',
                       'B-PERCENT': 'Percentage',
                       'B-MONEY': 'Currency',
                       'B-LANGUAGE': 'Language',
                       'B-NORP': 'Nationality / Religion / Political group',
                       'B-QUANTITY': 'Quantity',
                       'B-ORDINAL': 'Ordinal Number',
                       'B-CARDINAL': 'Cardinal Number'}

        # Split each tag into parallel B- and I- views for predict_entity.
        B_labels = []
        I_labels = []
        for label, confidence in labels:
            if label[:1] == 'B':
                B_labels.append(label)
                I_labels.append('O')
            elif label[:1] == 'I':
                I_labels.append(label)
                B_labels.append('O')
            else:
                B_labels.append('O')
                I_labels.append('O')

        assert len(labels) == len(words) == len(I_labels) == len(B_labels)

        output = self.predict_entity(B_labels, I_labels, words, labels, entity_list)
        print(output)

        # output = [{"word": word, "tag": label, "confidence": confidence}
        #           for word, (label, confidence) in zip(words, labels)]
        return output
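
End to end, a minimal usage sketch (assuming the fine-tuned model, vocab and model_config.json live in the current directory; which entities come back depends entirely on the weights):

from bert_ner_model_loader import Ner

ner = Ner(".")  # directory holding the fine-tuned model files
result = ner.predict("Steve Jobs founded Apple in California in 1976.")
# e.g. [['Steve Jobs', 'Person'], ['Apple', 'Organization'],
#       ['California', 'Location'], ['1976', 'Date-Time Entity']]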