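# Gradio Space: per-sentence emotion detection with a T5 emotion model
# (tokenizer: mrm8488/t5-base-finetuned-emotion), weights loaded from the local directory.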
import gradio as gr
import os
from datetime import date
import json
import csv
import datetime
import smtplib
from email.mime.text import MIMEText
import requests
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import gc
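# The tokenizer comes from the Hugging Face Hub; the model weights are expected
# to sit in this Space's working directory (config.json plus the checkpoint files).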
model_path = os.getcwd()
tokenizer = AutoTokenizer.from_pretrained("mrm8488/t5-base-finetuned-emotion")
model_base = AutoModelForSeq2SeqLM.from_pretrained(model_path)
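# Classify the emotion of a single sentence: the T5 model generates the emotion
# label (e.g. "joy", "sadness") as text, so we decode its first generated output.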
def get_emotion(text):
    # input_ids = tokenizer.encode(text + '</s>', return_tensors='pt')
    input_ids = tokenizer.encode(text, return_tensors='pt')
    output = model_base.generate(input_ids=input_ids,
                                 max_length=2)
    dec = [tokenizer.decode(ids) for ids in output]
    label = dec[0]
    gc.collect()
    return label
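# Split the submitted text into lines, run emotion detection on each non-empty
# line, and collect sentence/emotion pairs into the returned result dict.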
def generate_emotion(article):
    sen_list = article.splitlines()
    log_sen_list = []  # keeps the sentences that were actually processed
    results_dict = []
    results = []
    for sen in sen_list:
        if sen.strip():
            log_sen_list.append(sen)
            cur_result = get_emotion(sen)
            results.append(cur_result)
            results_dict.append(
                {
                    'sentence': sen,
                    'emotion': cur_result
                }
            )
    result = {
        'result': results_dict,
    }
    gc.collect()
    print("LENGTH of results ====> ", len(results))
    return result
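# Gradio UI: a multi-line textbox in, the result dict rendered as text out.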
inputs = gr.Textbox(lines=10, label="Sentences", elem_id="inp_div")
outputs = gr.Textbox(lines=10, label="Here is the Result", elem_id="inp_div")
demo = gr.Interface(
    generate_emotion,
    inputs,
    outputs,
    title="Emotion Detection",
    description="Feel free to give your feedback",
    css=".gradio-container {background-color: lightgray} #inp_div {background-color: #7FB3D5}"
)
demo.launch()