Spaces: Build error
import torch
from transformers import BertTokenizer
from torch.nn.functional import softmax
from google.colab import drive
import gradio as gr

drive.mount('/content/drive')

# Set the correct path for the model within the Hugging Face Space
model = torch.load('/content/drive/My Drive/Emotion/emotion_model.pth')
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

model.eval()  # Set the model to evaluation mode
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_emotions(text):
    inputs = tokenizer.encode_plus(text, return_tensors="pt", max_length=512, truncation=True, padding='max_length')
    input_ids = inputs['input_ids'].to(device)
    attention_mask = inputs['attention_mask'].to(device)
    with torch.no_grad():
        outputs = model(input_ids=input_ids, attention_mask=attention_mask)
    probabilities = softmax(outputs.logits, dim=-1).squeeze()
    emotions = ['Sadness', 'Joy', 'Love', 'Anger', 'Fear', 'Surprise']
    response = ", ".join(f"{emotion}: {prob * 100:.2f}%" for emotion, prob in zip(emotions, probabilities))
    return response

iface = gr.Interface(fn=predict_emotions, inputs="text", outputs="label")
iface.launch()
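The google.colab module and the Drive mount only exist inside Colab, so importing them is a likely reason the Space fails to start. One common workaround is to upload the checkpoint to the Space repository itself (or to a Hub repo) and load it from a local path instead of Google Drive. Below is a minimal sketch of that loading step, assuming the file is committed to the Space root as emotion_model.pth; the filename and label order are assumptions carried over from the code above, not something the Space guarantees.

# Minimal sketch: load the checkpoint inside a Hugging Face Space, no Google Drive.
# Assumes emotion_model.pth was uploaded to the root of the Space repository.
import torch
from transformers import BertTokenizer

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# map_location lets a GPU-saved checkpoint load on CPU-only Space hardware.
model = torch.load("emotion_model.pth", map_location=device)
model.to(device)
model.eval()
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

Note that the Space's requirements.txt still needs torch and transformers listed, and any leftover google.colab import will crash the app at startup even if the model path is fixed.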