import gradio as gr
from transformers import pipeline, AutoTokenizer
from optimum.onnxruntime import ORTModelForSequenceClassification
import torch

# Run on GPU when available; the ONNX Runtime model is moved there below.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

model_name = "Rahmat82/DistilBERT-finetuned-on-emotion"
# export=True converts the PyTorch checkpoint to ONNX on the fly at load time.
model = ORTModelForSequenceClassification.from_pretrained(model_name, export=True)
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
model.to(device)

# Label order must match the classification head's output indices
# (index i of the logits vector corresponds to EMOTION_LABELS[i]).
EMOTION_LABELS = ("sadness", "joy", "love", "anger", "fear", "surprise")


def predict(query: str) -> dict:
    """Classify ``query`` into one of six emotions.

    Args:
        query: Free-form input text to classify.

    Returns:
        A dict mapping each emotion label to its probability, sorted by
        descending score — the shape Gradio's ``Label`` component expects.
    """
    inputs = tokenizer(query, return_tensors='pt')
    # BatchEncoding.to moves the contained tensors in place and returns self,
    # so no re-assignment is needed here.
    inputs.to(device)
    with torch.no_grad():  # pure inference: no autograd graph needed
        outputs = model(**inputs)
        # BUGFIX: this is a single-label 6-way classifier (trained with
        # cross-entropy), so softmax — a proper distribution over the six
        # classes — is the correct activation. The previous sigmoid scored
        # each logit independently and did not sum to 1.
        probs = torch.softmax(outputs.logits, dim=-1)
    probs = probs.cpu().numpy()[0]
    scores = {label: float(p) for label, p in zip(EMOTION_LABELS, probs)}
    # Sort by descending confidence for a stable, readable display order.
    return dict(sorted(scores.items(), key=lambda item: item[1], reverse=True))


demo = gr.Interface(
    theme=gr.themes.Soft(),
    title="RHM Emotion Classifier 😊",
    # Reproduced byte-for-byte from the original (literal newlines as \n).
    description="Beyond Words: Capturing the Essence of Emotion in Text\n\nOn GPU it is much faster 🚀\n\n",
    fn=predict,
    inputs=gr.components.Textbox(label='Write your text here', lines=3),
    outputs=gr.components.Label(label='Predictions', num_top_classes=6),
    allow_flagging='never',
    examples=[
        ["The gentle touch of your hand on mine is a silent promise that echoes through the corridors of my heart."],
        ["The rain mirrored the tears I couldn't stop, each drop a tiny echo of the ache in my heart. The world seemed muted, colors drained, and a heavy weight settled upon my soul."],
        ["Walking through the dusty attic, I stumbled upon a hidden door. With a mix of trepidation and excitement, I pushed it open, expecting cobwebs and forgotten junk. Instead, a flood of sunlight revealed a secret garden, blooming with vibrant flowers and buzzing with life. My jaw dropped in pure astonishment."],
    ],
)

demo.launch(share=True)