# Gradio web app: classify input text into six emotions with a DistilBERT
# checkpoint exported to ONNX Runtime via Optimum.
import gradio as gr
from transformers import pipeline, AutoTokenizer
from optimum.onnxruntime import ORTModelForSequenceClassification
import torch

# NOTE(review): `pipeline` is imported but never used in the visible code —
# confirm before removing.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model_name = "Rahmat82/DistilBERT-finetuned-on-emotion"
# export=True converts the PyTorch checkpoint to ONNX on the fly at load time.
model = ORTModelForSequenceClassification.from_pretrained(model_name, export=True)
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
model.to(device)

def predict(query: str) -> dict:
    """Score *query* against six emotion labels.

    Returns a dict mapping each label (sadness, joy, love, anger, fear,
    surprise) to a float score, sorted by score in descending order.
    """
    inputs = tokenizer(query, return_tensors='pt')
    # BatchEncoding.to presumably moves the tensors in place, since the
    # return value is discarded here — confirm with the installed
    # transformers version.
    inputs.to(device)
    outputs = model(**inputs)
    # NOTE(review): sigmoid scores each emotion independently, so the six
    # values need not sum to 1; if the model was trained single-label
    # (cross-entropy), softmax would yield a proper distribution — confirm
    # the training objective before relying on these as probabilities.
    outputs = torch.sigmoid(outputs.logits)
    outputs = outputs.detach().cpu().numpy()
    # Label order must match the model's output index order.
    label2ids = {
        "sadness": 0,
        "joy": 1,
        "love": 2,
        "anger": 3,
        "fear": 4,
        "surprise": 5,
    }
    # Overwrite each label's index with the score at that index.
    for i, k in enumerate(label2ids.keys()):
        label2ids[k] = outputs[0][i]
    # Sort labels by score (highest first) and cast numpy scalars to Python
    # floats so the result is JSON-serializable for Gradio.
    label2ids = {k: float(v) for k, v in sorted(label2ids.items(), key=lambda item: item[1], reverse=True)}
    return label2ids

# NOTE(review): the source is truncated below — the description string and the
# remainder of the gr.Interface(...) call (fn, inputs, outputs, launch, etc.)
# are cut off in this view and are reproduced exactly as found.
demo = gr.Interface(
    theme = gr.themes.Soft(),
    title = "RHM Emotion Classifier 😊",
    description = "Beyond Words: Capturing the Essence of Emotion in Text