import re, time
import matplotlib.pyplot as plt
from threading import Timer
import gradio as gr
from transformers import (
    GPT2LMHeadModel, GPT2Tokenizer,
    AutoModelForSequenceClassification, AutoTokenizer,
    pipeline
)
# reference: https://huggingface.co/spaces/bentrevett/emotion-prediction
# and https://huggingface.co/spaces/tareknaous/Empathetic-DialoGPT
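
# The app has three parts:
#   - euc_100(): a console prototype of the CBT-style mood check-in dialogue
#   - euc_200(): rule-based safety screening plus a neural emotion classifier
#   - chat() + the Gradio interface: an empathetic DialoGPT chatbot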

def euc_100():
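    """Console prototype of the mood check-in flow (EUC-100).

    Asks the user to rate their overall mood on a 1-10 scale, then branches:
    a good mood (>= 6) prompts the user to recall what made them happy, while
    a bad mood walks through a short CBT-style questioning sequence and ends
    by offering reference articles.
    """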
    # 1,2,3. ask about the user's emotions and store the data
    print('How was your day?')
    print('On a scale of 1 to 10, how would you rate your emotions in the following categories:')  # ~ Baymax :)
    emotion_types = ['overall']  # , 'happiness', 'surprise', 'sadness', 'depression', 'anger', 'fear', 'anxiety'
    emotion_degree = []
    input_time = []
    for e in emotion_types:
        while True:
            x = input(f'{e}: ')
            if x.isnumeric() and (0 < int(x) < 11):
                emotion_degree.append(int(x))
                input_time.append(time.gmtime())
                break
            else:
                print('Invalid input, my friend :) Please enter a number from 1 to 10.')
    # 4. good mood
    if emotion_degree[0] >= 6:
        print('You seem to be in a good mood today. Is there anything you noticed that made you happy?')
        while True:
            # timer = Timer(10, ValueError)
            # timer.start()
            x = input('Your answer: ')
            if x == '':  # TODO: change this part to wait 10 seconds instead
                print('Is your good mood over?')
                print('Are there any other details that you would like to recall?')
                y = input('Your answer (Yes or No): ')
                if y == 'No':
                    break
            else:
                break
        print('I am glad that you are willing to share the experience with me. Thanks for letting me know.')
    # 5. bad mood
    else:
        questions = [
            'What specific thing is bothering you the most right now?',
            'Oh, I see. So when it is happening, what feelings or emotions did you have?',
            'And what did you think about those feelings or emotions at that time?',
            'Could you think of any evidence for your above-mentioned thought?',
        ]
        for q in questions:
            print(q)
            y = 'No'  # still in a bad mood
            while True:
                x = input('Your answer (example of answer here): ')
                if x == '':  # TODO: change this part to wait 10 seconds instead
                    print('Is your bad mood over?')
                    y = input('Your answer (Yes or No): ')
                    if y == 'Yes':
                        break
                else:
                    break
            if y == 'Yes':
                print('Nice to hear that.')
                break
        # reading interface here
        print('Here are some reference articles about bad emotions. You can take a look :)')

def load_neural_emotion_detector():
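    """Load the GoEmotions student DistilBERT model as a text-classification pipeline.

    return_all_scores=True keeps the score of every emotion label so the full
    distribution can be plotted; newer transformers versions express the same
    behaviour with top_k=None.
    """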
    model_name = "joeddav/distilbert-base-uncased-go-emotions-student"
    model = AutoModelForSequenceClassification.from_pretrained(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    pipe = pipeline('text-classification', model=model, tokenizer=tokenizer,
                    return_all_scores=True, truncation=True)
    return pipe

def sort_predictions(predictions):
    return sorted(predictions, key=lambda x: x['score'], reverse=True)

def plot_emotion_distribution(predictions):
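    """Plot the predicted emotion distribution as a bar chart (scores in [0, 1])."""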
    fig, ax = plt.subplots()
    ax.bar(x=list(range(len(predictions))),
           height=[p['score'] for p in predictions],
           tick_label=[p['label'] for p in predictions])
    ax.tick_params(rotation=90)
    ax.set_ylim(0, 1)
    plt.show()

def rulebase(text):
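    """Keyword-based safety screening.

    If the text contains a life-safety keyword together with an immediacy or
    manifestation keyword, the user is offered the Hong Kong Lifeline hotline.
    """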
    keywords = {
        'life_safety': ["death", "suicide", "murder", "to perish together", "jump off the building"],
        'immediacy': ["now", "immediately", "tomorrow", "today"],
        'manifestation': ["never stop", "every moment", "strong", "very"],
    }
    # if dangerous keywords/topics are found
    if re.search('|'.join(keywords['life_safety']), text) is not None and \
            sum(re.search('|'.join(keywords[k]), text) is not None
                for k in ['immediacy', 'manifestation']) >= 1:
        print('We noticed that you may need immediate professional assistance. Would you like to make a phone call? '
              'The Hong Kong Lifeline number is (852) 2382 0000.')
        x = input('Choose 1. "Dial the number" or 2. "No dangerous emotion here": ')
        if x == '1':
            print('Connecting you to the office.')
        else:
            print('Sorry for the misdetection. We just want to make sure that you can get immediate help when needed. '
                  'Would you mind if we send this conversation to the cloud to fine-tune the model?')
            y = input('Yes or No: ')
            if y == 'Yes':
                pass  # TODO: upload the conversation for fine-tuning

def euc_200(text, testing=True):
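    """Emotion check on a single message (EUC-200).

    Runs the keyword rulebase first, then (when testing is False) the neural
    emotion classifier. If a negative emotion is detected above the threshold,
    the user is offered professional scales; afterwards the CBT-style
    questioning sequence from euc_100 is run.
    """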
    # 2. use rules to judge the user's emotion
    rulebase(text)
    # 3. use ML
    if not testing:
        pipe = load_neural_emotion_detector()
        prediction = pipe(text)[0]
        prediction = sort_predictions(prediction)
        plot_emotion_distribution(prediction)
    # get the most probable emotion. TODO: modify this part, maybe sum the probabilities over all negative emotions
    threshold = 0.3
    emotion = {'label': 'sadness', 'score': 0.4} if testing else prediction[0]
    # then judge
    if emotion['label'] in ['surprise', 'sadness', 'anger', 'fear'] and emotion['score'] > threshold:
        print(f'It has come to our attention that you may suffer from {emotion["label"]}.')
        print('If you want to know more about yourself, '
              'some professional scales are provided to quantify your current status. '
              'After a period of time (maybe a week, a month, or two months) trying to follow the solutions we suggested, '
              'you can fill out these scales again to see if you have improved.')
        x = input('Fill in the form now (Okay or Later): ')
        if x == 'Okay':
            print('Display the form')
        else:
            print('Here are some reference articles about bad emotions. You can take a look :)')
    # 4. if neither of the above is triggered (TODO: clarify what 'satisfied' means here)
    questions = [
        'What specific thing is bothering you the most right now?',
        'Oh, I see. So when it is happening, what feelings or emotions did you have?',
        'And what did you think about those feelings or emotions at that time?',
        'Could you think of any evidence for your above-mentioned thought?',
    ]
    for q in questions:
        print(q)
        y = 'No'  # still in a bad mood
        while True:
            x = input('Your answer (example of answer here): ')
            if x == '':  # TODO: change this part to wait 10 seconds instead
                print('Is your bad mood over?')
                y = input('Your answer (Yes or No): ')
                if y == 'Yes':
                    break
            else:
                break
        if y == 'Yes':
            print('Nice to hear that.')
            break
    # reading interface here
    print('Here are some reference articles about bad emotions. You can take a look :)')

tokenizer = GPT2Tokenizer.from_pretrained("tareknaous/dialogpt-empathetic-dialogues")
model = GPT2LMHeadModel.from_pretrained("tareknaous/dialogpt-empathetic-dialogues")
model.eval()

def chat(message, history):
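    """Generate one DialoGPT reply for the Gradio chat interface.

    history is a list of (user, bot) tuples; the turns are joined with the EOS
    token, as DialoGPT expects, and only the newly generated tokens are decoded
    as the response.
    """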
    history = history or []
    eos = tokenizer.eos_token
    # DialoGPT is trained on turns separated by (and ending with) the EOS token
    input_str = eos.join([x for turn in history for x in turn] + [message]) + eos
    bot_input_ids = tokenizer.encode(input_str, return_tensors='pt')
    bot_output_ids = model.generate(bot_input_ids,
                                    max_length=1000,
                                    do_sample=True, top_p=0.9, temperature=0.8,
                                    pad_token_id=tokenizer.eos_token_id)
    response = tokenizer.decode(bot_output_ids[:, bot_input_ids.shape[-1]:][0],
                                skip_special_tokens=True)
    history.append((message, response))
    return history, history

if __name__ == '__main__':
    # euc_100()
    # euc_200('I am happy about my academic record.')
    title = "PsyPlus Empathetic Chatbot"
    description = "Gradio demo for the PsyPlus product, based on rule-based CBT and the conversational AI model DialoGPT."
    iface = gr.Interface(
        chat,
        ["text", "state"],
        ["chatbot", "state"],
        allow_screenshot=False,
        allow_flagging="never",
        title=title,
        description=description,
    )
    iface.launch(debug=True, server_name="0.0.0.0", server_port=2022, share=True)