import gradio as gr
import numpy as np
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from transformers import pipeline

# Plutchik's eight basic emotions (Japanese); the order is assumed to match
# the emotion model's id2label mapping
emotion_labels = ['喜び', '悲しみ', '期待', '驚き', '怒り', '信頼', '恐れ', '嫌悪']
sentiment_labels = ['ポジティブ', 'ニュートラル', 'ネガティブ']

# Emotion classifier: Japanese BERT tokenizer + fine-tuned classification head
tokenizer = AutoTokenizer.from_pretrained("cl-tohoku/bert-base-japanese-whole-word-masking")
model = AutoModelForSequenceClassification.from_pretrained(
    "jingwora/language-emotion-classification-ja", num_labels=8
)

# Sentiment classifier: built once at startup instead of on every request
sentiment_clf = pipeline(
    model="lxyuan/distilbert-base-multilingual-cased-sentiments-student",
    return_all_scores=True,
)


def np_softmax(x):
    # Numerically stable softmax over a 1-D numpy array of logits
    x_exp = np.exp(x - np.max(x))
    return x_exp / x_exp.sum()


def emotion_classifier(text):
    model.eval()
    tokens = tokenizer(text, truncation=True, return_tensors="pt")
    tokens = tokens.to(model.device)
    with torch.no_grad():
        preds = model(**tokens)
    prob = np_softmax(preds.logits.cpu().numpy()[0])
    return {label: p.item() for label, p in zip(emotion_labels, prob)}


def sentiment_classifier(text):
    scores = sentiment_clf(text)[0]  # list of {'label': ..., 'score': ...}
    # Relabel in Japanese; relies on the pipeline returning scores in
    # positive / neutral / negative order
    return {ja: item['score'] for ja, item in zip(sentiment_labels, scores)}


examples = [
    ["このお店は本当に素晴らしいです!サービスも料理も満足できるものばかりでした。"],
    ["料理の味が期待外れでした。改善が必要ですね。"],
    ["サービスは普通ですが、特に不満もありません。"],
    ["価格と品質のバランスが取れていると思います。"],
]

demo = gr.Blocks(
    theme="freddyaboulton/dracula_revamped",
)

with demo:
    gr.Markdown(
        """
        # Emotion and Sentiment Classification
        Enter Japanese text and get the emotion probabilities and sentiment probabilities.
        """
    )
    text = gr.Textbox(lines=2)
    with gr.Row():
        gr.Examples(examples=examples, inputs=text)
    b1 = gr.Button("Emotion Classification")
    label1 = gr.Label(num_top_classes=8)
    b1.click(emotion_classifier, inputs=text, outputs=label1)
    b2 = gr.Button("Sentiment Analysis")
    label2 = gr.Label(num_top_classes=3)
    b2.click(sentiment_classifier, inputs=text, outputs=label2)

demo.launch()
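# Minimal sanity check without the Gradio UI (a sketch; assumes the models
# above have finished downloading). Run in a Python shell, or place these
# calls temporarily before demo.launch():
#
#   print(emotion_classifier(examples[0][0]))    # per-emotion probabilities
#   print(sentiment_classifier(examples[1][0]))  # per-sentiment probabilities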