import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
import torch.nn.functional as F
import numpy as np

# Load the BERT tokenizer and the fine-tuned tweet classifier directly from the Hub.
tokenizer = AutoTokenizer.from_pretrained('bert-base-cased-finetuned-mrpc')
model = AutoModelForSequenceClassification.from_pretrained("jacklindsai/is_it_elon_musk")
model.eval()
def preprocess_text(text):
    # Tokenize, truncate/pad to 48 tokens, and return input ids plus attention mask.
    return tokenizer.encode_plus(text, truncation=True, padding='max_length',
                                 max_length=48, return_attention_mask=True)
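
# A minimal sanity check of the encoding step (the sample text below is only an
# illustrative assumption, not part of the original app): encode_plus should return
# a dict whose 'input_ids' and 'attention_mask' are padded/truncated to length 48.
# sample = preprocess_text("Starship will reach orbit soon")
# assert len(sample['input_ids']) == 48 and len(sample['attention_mask']) == 48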
device = torch.device('cpu')
def pred_is_elon_musk(text):
    # Encode the tweet and build batched long tensors of input ids and attention masks.
    encoded_text = preprocess_text(text)
    ids = torch.tensor([encoded_text['input_ids']], dtype=torch.long).to(device)
    masks = torch.tensor([encoded_text['attention_mask']], dtype=torch.long).to(device)
    # Run the classifier without tracking gradients.
    with torch.no_grad():
        results = model(input_ids=ids, token_type_ids=None, attention_mask=masks)
    logits = results['logits']
    # Probability of class 1 ("the tweet is from Elon Musk").
    prob = F.softmax(logits, dim=1)[0][1].item()
    prediction = np.argmax(logits.numpy(), axis=1).flatten()
    output1 = f"The predicted probability that the tweet is from Elon Musk is {prob*100:0.2f}%.\n"
    if 0.4 <= prob <= 0.6:
        output2 = "Therefore, maybe it's from Elon Musk or maybe not."
    elif prediction[0] == 1:
        output2 = "Therefore, it is probably from Elon Musk."
    else:
        output2 = "Therefore, it is probably not from Elon Musk."
    return output1 + output2
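
# A quick way to try the classifier outside the web UI (the example tweet text is a
# hypothetical assumption, not part of the original app); uncomment to run locally:
# print(pred_is_elon_musk("Tesla stock is too high imo"))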
iface = gr.Interface(pred_is_elon_musk, inputs="text", outputs="text",
                     title='“Is the tweet from Elon Musk?” Classifier',
                     theme="huggingface",
                     examples=["Now I'm going to buy McDonald's and fix all the ice cream machines...",
                               '"Real magic is only a sip away." (Actual slogan of Coca-Cola!!) 🤣🤣',
                               'Let’s make Twitter maximum fun!',
                               'I hope that even my worst critics remain on Twitter, because that is what free speech means'],
                     description="This app predicts whether a tweet is from Elon Musk using a fine-tuned BERT model. The model considers at most the first 48 tokens.")
iface.launch(inline=False)
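
# Note: on Hugging Face Spaces the interface above is served automatically after launch().
# For local debugging one could instead replace the launch call with a temporary public
# link (a standard Gradio option, not used by the original app):
# iface.launch(share=True)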