chABSA / app.py
aino813's picture
Update app.py
de13b22
raw
history blame
1.43 kB
import gradio as gr
import torch
from transformers import BertJapaneseTokenizer, BertForSequenceClassification
# Japanese pre-trained BERT (Tohoku University, whole-word-masking variant) —
# used only for its tokenizer below; the classifier weights come from "models/".
MODEL_NAME = 'cl-tohoku/bert-base-japanese-whole-word-masking'
descriptions = '''BERTをchABSA-dataset※でファインチューニングしたモデルになります。
※chABSA-datasetは上場企業の有価証券報告書をベースに作成されたネガポジ用データセット'''
tokenizer = BertJapaneseTokenizer.from_pretrained(MODEL_NAME)
# Sequence-classification head loaded from the local "models/" directory —
# presumably the chABSA fine-tuned checkpoint (per the description above).
bert_sc_ = BertForSequenceClassification.from_pretrained("models/")
# Pin the model to CPU (Spaces free tier has no GPU); inference below matches this.
bert_sc = bert_sc_.to("cpu")
def func(text):
    """Classify a Japanese business sentence as negative or positive.

    Runs the chABSA fine-tuned BERT classifier on CPU and returns the
    predicted sentiment together with the model's confidence.

    Args:
        text: Input text to analyze (a single string).

    Returns:
        tuple[str, str]: (sentiment label "ネガティブ"/"ポジティブ",
        confidence string formatted as "信頼度:NN.N%").
    """
    encoding = tokenizer(
        text,
        padding="longest",
        return_tensors="pt",
    )
    # Model lives on CPU (see module setup); keep input tensors there too.
    encoding = {k: v.cpu() for k, v in encoding.items()}
    with torch.no_grad():
        output = bert_sc(**encoding)
    # Softmax once over the two classes; the original recomputed it per class.
    probs = torch.softmax(output.logits, dim=1)[0]
    # Index 0 = negative, index 1 = positive (matches the original mapping).
    idx = output.logits.argmax(-1).item()
    label = "ネガティブ" if idx == 0 else "ポジティブ"
    cos = f"信頼度:{probs[idx].item()*100:.1f}%"
    return label, cos
# Wire up the Gradio UI: a 3-line text box in, (label, confidence) pair out.
text_input = gr.Textbox(lines=3, placeholder="文章を入力してください")
app = gr.Interface(
    fn=func,
    inputs=text_input,
    outputs=["label", "label"],
    title="ビジネス文書のネガポジ分析",
    description=descriptions,
)
app.launch()