import requests
import random
import time
import pandas as pd
import gradio as gr
import numpy as np
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from transformers import pipeline
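

# game3.py backs the "guess the speaker's gender" round of the human-vs-AI demo:
# read3() samples an utterance from data3_convai2_inferred.txt, func3() scores a
# round against the padmajabfrl/Gender-Classification pipeline, interpre3()
# returns the pre-computed token attributions, and func3_written() handles
# free-form user input with an on-the-fly SHAP explanation.
# The data file appears to store one Python-literal record per line (the text
# dict on even lines, the token-attribution list on odd lines) and is parsed
# with eval(), so it is assumed to be a trusted, locally bundled file.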
def read3(num_selected_former):
    fname = 'data3_convai2_inferred.txt'
    with open(fname, encoding='utf-8') as f:
        content = f.readlines()

    index_selected = random.randint(0, len(content) // 2 - 1)
    while index_selected == num_selected_former:
        index_selected = random.randint(0, len(content) // 2 - 1)
    text = eval(content[index_selected * 2])
    interpretation = eval(content[index_selected * 2 + 1])

    # Re-draw examples whose token list is too short or whose text contains
    # escape characters that break the display.
    min_len = 5
    tokens = [i[0] for i in interpretation][1:-1]
    while len(tokens) <= min_len or '\\' in text['text'] or '//' in text['text']:
        index_selected = random.randint(0, len(content) // 2 - 1)
        text = eval(content[index_selected * 2])
        interpretation = eval(content[index_selected * 2 + 1])
        tokens = [i[0] for i in interpretation][1:-1]

    res_tmp = [(i, 0) for i in text['text'].split(' ')]
    res = {"original": text['text'], "interpretation": res_tmp}
    return res, index_selected
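

# func3() plays one round on the sampled example `num_selected`: it folds the
# gold annotation (binary_label / binary_score) into a 0-100 scale, queries the
# gender-classification pipeline for the AI's guess, compares both guesses to
# the gold score with a +/-20 tolerance, updates the running human (num1) and
# machine (num2) scores, and returns the chatbot messages plus the scoreboard
# markdown shown in the Gradio UI.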
def func3(num_selected, human_predict, num1, num2, user_important):
    chatbot = []
    # num1: Human score; num2: AI score
    fname = 'data3_convai2_inferred.txt'
    with open(fname, encoding='utf-8') as f:
        content = f.readlines()
    text = eval(content[int(num_selected * 2)])
    interpretation = eval(content[int(num_selected * 2 + 1)])

    # Fold the gold annotation into a 0-100 scale (higher = more "female").
    if text['binary_label'] == 1:
        golden_label = int(50 * (1 - text['binary_score']))
    else:
        golden_label = int(50 * (1 + text['binary_score']))

    # (START) off-the-shelf version -- slow at the beginning
    # Load the model through a pipeline as a high-level helper.
    classifier = pipeline("text-classification", model="padmajabfrl/Gender-Classification")
    output = classifier([text['text']])
    print(output)
    out = output[0]
    # (END) off-the-shelf version

    if out['label'] == 'Female':
        ai_predict = int(100 * out['score'])
    else:
        ai_predict = 100 - int(100 * out['score'])

    user_select = "You focused on "
    flag_select = False
    if user_important == "":
        user_select += "nothing. Interesting! "
    else:
        user_select += "'" + user_important + "'. "
    # for i in range(len(user_marks)):
    #     if user_marks[i][1] != None and h1[i][0] not in ["P", "N"]:
    #         flag_select = True
    #         user_select += "'" + h1[i][0] + "'"
    #         if i == len(h1) - 1:
    #             user_select += ". "
    #         else:
    #             user_select += ", "
    # if not flag_select:
    #     user_select += "nothing. Interesting! "
    user_select += "Wanna see how the AI made the guess? Click here. ⬅️"

    if golden_label > 60:
        gender = ' (female)'
    elif golden_label < 40:
        gender = ' (male)'
    else:
        gender = ' (neutral)'

    if abs(golden_label - human_predict) <= 20 and abs(golden_label - ai_predict) <= 20:
        chatbot.append(("The correct answer is " + str(golden_label) + gender + ". Congratulations! 🎉 Both of you get the correct answer!", user_select))
        num1 += 1
        num2 += 1
    elif abs(golden_label - human_predict) > 20 and abs(golden_label - ai_predict) > 20:
        chatbot.append(("The correct answer is " + str(golden_label) + gender + ". Sorry.. No one gets the correct answer. But nice try! 😉", user_select))
    elif abs(golden_label - human_predict) <= 20 and abs(golden_label - ai_predict) > 20:
        chatbot.append(("The correct answer is " + str(golden_label) + gender + ". Great! 🎉 You are closer to the answer and better than AI!", user_select))
        num1 += 1
    else:
        chatbot.append(("The correct answer is " + str(golden_label) + gender + ". Sorry.. AI wins in this round.", user_select))
        num2 += 1

    tot_scores = ''' ### <p style="text-align: center;"> 🤖 Machine &ensp; ''' + str(int(num2)) + ''' &ensp; VS &ensp; ''' + str(int(num1)) + ''' &ensp; Human 👨👩 </p>'''

    num_tmp = max(num1, num2)
    y_lim_upper = (int((num_tmp + 3) / 10) + 1) * 10
    return ai_predict, chatbot, num1, num2, tot_scores
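

# interpre3() re-reads the sampled example and returns its pre-computed
# (token, attribution) pairs so the front-end can highlight which words drove
# the model's guess; the commented-out block below is an earlier variant that
# bucketed tokens into "+" / "-" marks around the median attribution value.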
def interpre3(num_selected):
    fname = 'data3_convai2_inferred.txt'
    with open(fname, encoding='utf-8') as f:
        content = f.readlines()
    text = eval(content[int(num_selected * 2)])
    interpretation = eval(content[int(num_selected * 2 + 1)])
    print(interpretation)

    res = {"original": text['text'], "interpretation": interpretation}
    # pos = []
    # neg = []
    # res = []
    # for i in interpretation:
    #     if i[1] > 0:
    #         pos.append(i[1])
    #     elif i[1] < 0:
    #         neg.append(i[1])
    #     else:
    #         continue
    # median_pos = np.median(pos)
    # median_neg = np.median(neg)
    # res.append(("P", "+"))
    # res.append(("/", None))
    # res.append(("N", "-"))
    # res.append(("Review:", None))
    # for i in interpretation:
    #     if i[1] > median_pos:
    #         res.append((i[0], "+"))
    #     elif i[1] < median_neg:
    #         res.append((i[0], "-"))
    #     else:
    #         res.append((i[0], None))
    return res
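

# func3_written() handles text typed by the user instead of a sampled example:
# the same classifier produces the AI score, and SHAP is run on the pipeline to
# build a per-token interpretation on the fly (the lang_written argument is
# accepted for interface compatibility but not used here).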
def func3_written(text_written, human_predict, lang_written):
    chatbot = []

    # (START) off-the-shelf version
    # tokenizer = AutoTokenizer.from_pretrained("nlptown/bert-base-multilingual-uncased-sentiment")
    # model = AutoModelForSequenceClassification.from_pretrained("nlptown/bert-base-multilingual-uncased-sentiment")
    classifier = pipeline("text-classification", model="padmajabfrl/Gender-Classification")
    output = classifier([text_written])
    print(output)
    out = output[0]
    # (END) off-the-shelf version

    if out['label'] == 'Female':
        ai_predict = int(100 * out['score'])
    else:
        ai_predict = 100 - int(100 * out['score'])

    if abs(ai_predict - human_predict) <= 20:
        chatbot.append(("AI gives it a close score! 🎉", "⬅️ Feel free to try another one! ⬅️"))
    else:
        chatbot.append(("AI thinks in a different way from human. 😉", "⬅️ Feel free to try another one! ⬅️"))

    import shap

    # return_all_scores is deprecated in recent transformers releases; top_k=None is the newer equivalent.
    gender_classifier = pipeline("text-classification", model="padmajabfrl/Gender-Classification", return_all_scores=True)
    explainer = shap.Explainer(gender_classifier)
    shap_values = explainer([text_written])

    # Pair each token with its SHAP value for output class index 1.
    interpretation = list(zip(shap_values.data[0], shap_values.values[0, :, 1]))
    res = {"original": text_written, "interpretation": interpretation}
    print(res)
    return res, ai_predict, chatbot
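

# Hypothetical smoke test (not part of the original Space): a minimal sketch of
# how the callbacks above fit together, assuming data3_convai2_inferred.txt sits
# next to this file and the padmajabfrl/Gender-Classification model can be
# downloaded. The Gradio UI that normally drives these functions is defined
# elsewhere in the repository.
if __name__ == "__main__":
    sample, idx = read3(num_selected_former=-1)
    print("sampled text:", sample["original"])
    ai_predict, chatbot, human_score, ai_score, scoreboard = func3(
        idx, human_predict=50, num1=0, num2=0, user_important="")
    print("AI guess:", ai_predict, "| messages:", chatbot)
    print(interpre3(idx)["interpretation"][:5])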