File size: 8,584 Bytes
da772cb 6c1c1da da772cb 6c1c1da da772cb 6c1c1da da772cb 6c1c1da da772cb 6c1c1da 3e91834 6c1c1da da772cb 3e91834 6c1c1da da772cb 6c1c1da da772cb 6c1c1da da772cb 6c1c1da 1493dfb 4e0d3f9 6c1c1da da772cb 6c1c1da da772cb 6c1c1da da772cb 6c1c1da da772cb 6c1c1da da772cb 6c1c1da da772cb 6c1c1da da772cb 3e91834 6c1c1da 3e91834 6c1c1da 3e91834 6c1c1da da772cb 6c1c1da da772cb 6c1c1da da772cb 6c1c1da da772cb 6c1c1da da772cb 34fb291 2d7a2b0 da772cb 6c1c1da da772cb 4ba6902 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 |
import gradio as gr
from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration
# Main conversational model (English-only); loaded once at import time so
# every chatbot() call reuses the same weights.
model_name = "facebook/blenderbot-400M-distill"
tokenizer = BlenderbotTokenizer.from_pretrained(model_name)
model = BlenderbotForConditionalGeneration.from_pretrained(model_name)
# declare translate model
def translate(text, mode):
    """Translate *text* with a liam168 OPUS-MT checkpoint.

    Parameters:
        text: input sentence (str).
        mode: "ztoe" for Chinese -> English, "etoz" for English -> Chinese.

    Returns:
        The raw pipeline output, a list like [{'translation_text': ...}].

    Raises:
        ValueError: for an unknown *mode* (the original fell through both
            branches and raised UnboundLocalError on the return statement).
    """
    from transformers import AutoModelWithLMHead, AutoTokenizer, pipeline

    # Checkpoint name and pipeline task for each supported direction.
    directions = {
        "ztoe": ("liam168/trans-opus-mt-zh-en", "translation_zh_to_en"),
        "etoz": ("liam168/trans-opus-mt-en-zh", "translation_en_to_zh"),
    }
    if mode not in directions:
        raise ValueError(f"unsupported translation mode: {mode!r}")

    # Cache constructed pipelines on the function object so repeated calls
    # do not reload the model weights every time (the original rebuilt the
    # model and tokenizer on every invocation).
    cache = getattr(translate, "_pipelines", None)
    if cache is None:
        cache = translate._pipelines = {}
    if mode not in cache:
        checkpoint, task = directions[mode]
        mt_model = AutoModelWithLMHead.from_pretrained(checkpoint)
        mt_tokenizer = AutoTokenizer.from_pretrained(checkpoint)
        cache[mode] = pipeline(task, model=mt_model, tokenizer=mt_tokenizer)
    return cache[mode](text, max_length=400)
# Module-level chat state; the Gradio app keeps these alive across calls.
chat_history=[]  # (bot_input, bot_response) pairs used as generation context
output=[]  # NOTE(review): appears unused — chatbot() writes to `outputsss` instead
bname=''  # bot name from the previous call; a change triggers a history reset
#declare emoji function
def add_emoji(response):
    """Insert the matching emoji right after each known keyword in *response*.

    Replacements run in a fixed order; because "like" precedes "dislike",
    "dislike" is decorated via its "like" suffix (original behavior kept).
    """
    decorations = (
        ("happy", "π"),
        ("sad", "π’"),
        ("sorry", "π"),
        ("love", "β€οΈ"),
        ("like", "π"),
        ("dislike", "π"),
        ("Why", "π₯Ί"),
        ("cat", "π±"),
        ("dog", "πΆ"),
        ("ε¨", "π"),
    )
    for word, symbol in decorations:
        response = response.replace(word, word + " " + symbol)
    return response
# Define the keywords and their corresponding keywords
def tran_shortform(response):
    """Lower-case *response* and expand chat short forms / emoji into words.

    Longer keywords are replaced before shorter ones so that e.g. "idk" is
    expanded as a whole instead of being clobbered by the "dk" substitution
    (the original insertion-order loop turned "idk" into "idon't know").

    Returns:
        The normalized, lower-cased text with short forms expanded.
    """
    keyword_shortform_dict = {
        "yolo": "You only live once",
        "lol": "funny",
        "nvm": "nevermind",
        "lmk": "tell me",
        "btw": "by the way",
        "dk": "don't know",
        "idk": "I don't know",
        "π": "happy",
        "π’": "sad",
        "π": "sorry",
        "β€οΈ": "love",
        "π": "like",
        "π": "dislike",
        "π₯Ί": "Why",
        "π±": "cat",
        "πΆ": "dog",
    }
    response = response.lower()
    # Longest keyword first; insertion order breaks ties (sorted is stable).
    for keyword in sorted(keyword_shortform_dict, key=len, reverse=True):
        response = response.replace(keyword, keyword_shortform_dict[keyword])
    return response
# Define the keywords and their corresponding keywords
def add_shortform(response):
    """Lower-case *response* and compress common phrases into short forms.

    Keys are lowercase because the text is lower-cased before matching (the
    original's capitalized keys "You only live once" and "By the way" could
    never match). Longer phrases are replaced first so "i don't know"
    becomes "IDK" rather than "i DK".

    Returns:
        The lower-cased text with matched phrases replaced by short forms.
    """
    keyword_shortform_dict = {
        "you only live once": "YOLO",
        "funny": "LOL",
        "laugh": "LOL",
        "nevermind": "nvm",
        "sorry": "sorryyyyy",
        "tell me": "LMK",
        "by the way": "BTW",
        "i don't know": "IDK",
        "don't know": "DK",
    }
    response = response.lower()
    # Longest phrase first; insertion order breaks ties (sorted is stable).
    for keyword in sorted(keyword_shortform_dict, key=len, reverse=True):
        response = response.replace(keyword, keyword_shortform_dict[keyword])
    return response
#------------------------------------------------------------------------------------------------------
#Chatbot start
def chatbot(text,name):
    """Run one chat turn and return the full display history.

    Args:
        text: the user's raw message (English or Chinese).
        name: bot name typed by the user; empty string selects the default
            "Mr. Chatbot". Changing the name resets all chat state.

    Returns:
        outputsss: list of (user_text, bot_response) pairs for gr.Chatbot.
    """
    global chat_history  # (bot_input, bot_response) pairs fed back to the model
    global outputsss  # (user_text, bot_response) pairs shown to the user
    global bname  # bot name from the previous call, used to detect renames
    global raw  # unmodified user input, kept only for display
    raw=text  # keep the original for display; `text` is normalized below
    # Default name when the user leaves the name box empty.
    if name=='':
        name="Mr. Chatbot"
    # The app keeps running between calls; renaming the bot resets both the
    # model-facing history and the displayed history.
    if bname != name:
        chat_history= []
        outputsss=[]
    bname=name  # remember the name for the next call
    # Expand chat short forms and emoji into plain English before generation.
    text = tran_shortform(text)
    # Heuristic language detection: any CJK unified ideograph => Chinese.
    is_chinese = any(0x4e00 <= ord(char) <= 0x9fff for char in text.lower())
    if is_chinese:
        # Translate to English for the English-only Blenderbot model.
        text = translate(text,"ztoe")
        # Stringify the pipeline output (a list like
        # [{'translation_text': '...'}]) ...
        text=f"{text}"
        # ... then strip the "[{'translation_text': '" prefix (23 chars)
        # and the "'}]" suffix (3 chars) to keep only the translation.
        text=text[23:(len(text)-3)]
    # Canned responses that override the generated reply for common phrases.
    keyword_responses = {
        "how are you": "I'm doing wellπ, thank you for asking!",
        "bye": "Goodbye!ππ»",
        "bye.": "Goodbye!ππ»",
        "thank you": "You're welcome!π",
        "thank you.": "You're welcome!π",
        "hello": f'I am {bname}. Nice to meet you!π',
        "hello.": f'I am {bname}. Nice to meet you!π',
        "hi": f'I am {bname}. Nice to meet you!π',
    }
    # Generate a reply, conditioning on the previous bot message if any.
    if len(chat_history) > 0:
        # Use the last bot response as context, joined with the EOS token.
        last_message = chat_history[-1][1]
        encoded_input = tokenizer.encode(last_message + tokenizer.eos_token + text, return_tensors='pt')
        generated = model.generate(encoded_input, max_length=1024, do_sample=True)
        response = tokenizer.decode(generated[0], skip_special_tokens=True)
        response=f"{response}"
    else:
        # First turn: generate from the user text alone.
        encoded_input = tokenizer(text, return_tensors='pt')
        generated = model.generate(**encoded_input)
        response = tokenizer.batch_decode(generated, skip_special_tokens=True)[0]
        response=f"{response}"
    # Decorate the generated text with emoji ...
    response = add_emoji(response)
    # ... and compress common phrases back into chat short forms.
    response = add_shortform(response)
    # Canned responses take priority over the generated text.
    if text.lower() in keyword_responses:
        response = keyword_responses[text.lower()]
    # Record the English-side exchange for the next generation step.
    chat_history.append((text,response))
    # If the input was Chinese, translate the reply back to Chinese.
    if is_chinese:
        response = translate(response,"etoz")
        # Convert simplified Chinese to traditional Chinese.
        from hanziconv import HanziConv
        response = HanziConv.toTraditional(f"{response}")
        response = f"{response} "
        # Strip the stringified pipeline wrapper (see the "ztoe" branch);
        # the trailing space appended above makes the suffix 4 characters.
        response=response[23:(len(response)-4)]
    # If normalization changed the input, show the change in parentheses.
    if raw.lower() == text.lower():
        outputsss.append((raw,response))
    else:
        outputsss.append((f"{raw} ({text})", response))
    return (outputsss)
#------------------------------------------------------------------------------------------------------
# Build and launch the web UI. chatbot() returns the whole (user, bot)
# history, which the gr.Chatbot component renders as a conversation.
# Fixes vs. original: stray trailing "|" artifact removed, placeholder typo
# "somehting" corrected, and the disclaimer HTML repaired (<oi> -> <ol>,
# missing ">" in one <li>, missing sentence break after "advice").
iface = gr.Interface(fn=chatbot,
                     inputs=[gr.inputs.Textbox(label="Chatπ", placeholder="Say somethingπ¬"),
                             gr.inputs.Textbox(label="Name the BotπΊ", placeholder="give me a nameπ")],
                     outputs=[gr.Chatbot(label="Chat HereβΊοΈ")],
                     title="πΈEmphatic Chatbotβ€οΈ",
                     allow_flagging=False,
                     layout="vertical",
                     theme='gstaff/xkcd',  # credit: https://huggingface.co/spaces/gstaff/xkcd/tree/main
                     examples=[["δ½ ε₯½"], ["Hello"], ["I am sad"], ["ζεΎε·εΏ"]],
                     article="<H3>Disclaimer</H3><br><ol><li>The chatbot does not have emotions or the ability to empathize in the same way that humans do</li><li>The chatbot can simulate empathy by recognizing and responding to certain emotional cues in language, but responses are generated by algorithms and not based on personal experiences or feelings</li><li>The information and advice provided by the chatbot should not be used as a substitute for professional medical, psychological, or legal advice. Users should always consult with qualified professionals in these fields for personalized recommendations that take into account their individual circumstances</li><li>The chatbot is not infallible and may encounter technical difficulties or make errors that can impact its performance</li><li>The information presented by the chatbot should always be cross-checked and authenticated before any action is taken based on it</li><li>Interactions with the chatbot may be subject to recording or monitoring for the purposes of quality assurance and training</li><li>User privacy and data protection should always remain a top priority</li><li>Any information collected by the chatbot must be handled in compliance with relevant data protection laws and regulations.</li></ol>")
iface.launch()