import gradio as gr
import numpy as np
import torch.nn.functional as F
from model import *
from transformers import BertTokenizer
# Inference runs on CPU; map_location remaps any GPU-saved tensors onto it.
DEVICE = torch.device('cpu')
PATH = 'checkpoints/'
# Load the fully pickled model object, then overwrite its parameters with the
# separately saved state dict (both remapped to CPU).
# NOTE(review): loading both the pickled model and its state dict is redundant
# unless the two checkpoint files can diverge — confirm both are required.
model = torch.load(PATH + 'model.pt', map_location=DEVICE)
model.load_state_dict(torch.load(PATH + 'model_state_dict.pt',
map_location=DEVICE))
# Switch to eval mode (disables dropout / uses running batch-norm stats).
model.eval()
# Tokenizer must match the pretrained backbone the model was fine-tuned from.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
def evaluate(text):
    """Classify *text* as Liberal vs. Conservative political sentiment.

    Args:
        text: Raw input string to classify.

    Returns:
        Dict mapping the two class labels to softmax probabilities,
        rounded to 2 decimal places (shape expected from the model:
        (1, 2) logits — assumes index 0 = Liberal, 1 = Conservative;
        TODO confirm label order against training code).
    """
    encoding = tokenizer.encode_plus(
        text,
        max_length=150,
        padding='max_length',
        truncation=True,
        add_special_tokens=True,
        return_token_type_ids=False,
        return_attention_mask=True,
        return_tensors='pt',
    )
    input_id = encoding['input_ids']
    attention_mask = encoding['attention_mask']
    # BUG FIX: the forward pass must run inside no_grad. Previously only the
    # numpy conversion was wrapped, so the softmax output carried a grad_fn
    # and `.numpy()` raised "Can't call numpy() on Tensor that requires grad".
    with torch.no_grad():
        logits = model(input_id, attention_mask)
        probs = F.softmax(logits, dim=1)
    result = np.round(probs.numpy(), 2).tolist()
    return {'Liberal': result[0][0], 'Conservative': result[0][1]}
# Wire the classifier into a simple text-in / label-out Gradio demo.
# FIX: removed a stray trailing " |" artifact after iface.launch() that made
# the final line a syntax error.
iface = gr.Interface(
    fn=evaluate,
    inputs='text',
    # Label component renders both class probabilities as a ranked list.
    outputs=gr.components.Label(num_top_classes=2),
    examples=[
        ["Biden speech draws 38.2 million U.S. TV viewers"],
        ["Biden's first State of the Union address in 67 seconds"],
    ],
    title='Political Sentiment Classification Using BERT Transformer',
)
iface.launch()