import gradio as gr
from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration

model_name = "facebook/blenderbot-400M-distill"
tokenizer = BlenderbotTokenizer.from_pretrained(model_name)
model = BlenderbotForConditionalGeneration.from_pretrained(model_name)


# declare the translation helper (loads the translation model on demand)
def translate(text, mode):
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline

    if mode == "ztoe":
        mode_name = 'liam168/trans-opus-mt-zh-en'
        trans_model = AutoModelForSeq2SeqLM.from_pretrained(mode_name)
        trans_tokenizer = AutoTokenizer.from_pretrained(mode_name)
        translation = pipeline("translation_zh_to_en", model=trans_model, tokenizer=trans_tokenizer)
        translate_result = translation(text, max_length=400)
    if mode == "etoz":
        mode_name = 'liam168/trans-opus-mt-en-zh'
        trans_model = AutoModelForSeq2SeqLM.from_pretrained(mode_name)
        trans_tokenizer = AutoTokenizer.from_pretrained(mode_name)
        translation = pipeline("translation_en_to_zh", model=trans_model, tokenizer=trans_tokenizer)
        translate_result = translation(text, max_length=400)
    # the pipeline returns a list like [{'translation_text': '...'}]; return only the text
    return translate_result[0]['translation_text']


# initial lists and variables
chat_history = []   # history fed back to the chatbot
outputsss = []      # history shown to the user
bname = ''          # the previous bot name


# declare emoji function: append a matching emoji after each keyword in the response
def add_emoji(response):
    keyword_emoji_dict = {
        "happy": "😀",
        "sad": "😢",
        "sorry": "😞",
        "love": "❤️",
        "like": "👍",
        "dislike": "👎",
        "Why": "🥺",
        "cat": "🐱",
        "dog": "🐶",
        "嗨": "😎",
    }
    for keyword, emoji in keyword_emoji_dict.items():
        response = response.replace(keyword, f"{keyword} {emoji}")
    return response


# Expand short forms and emoji in the user's input into plain English
def tran_shortform(response):
    # note: "idk" is listed before "dk" so the longer short form is replaced first
    keyword_shortform_dict = {
        "yolo": "You only live once",
        "lol": "funny",
        "nvm": "nevermind",
        "lmk": "tell me",
        "btw": "by the way",
        "idk": "I don't know",
        "dk": "don't know",
        "😀": "happy",
        "😢": "sad",
        "😞": "sorry",
        "❤️": "love",
        "👍": "like",
        "👎": "dislike",
        "🥺": "Why",
        "🐱": "cat",
        "🐶": "dog",
    }
    response = response.lower()
    for keyword, st in keyword_shortform_dict.items():
        response = response.replace(keyword, f"{st}")
    return response


# Replace common phrases in the bot's response with short forms
def add_shortform(response):
    # note: keys are lowercase because the response is lowercased first,
    # and "i don't know" is listed before "don't know" so the longer phrase wins
    keyword_shortform_dict = {
        "you only live once": "YOLO",
        "funny": "LOL",
        "laugh": "LOL",
        "nevermind": "nvm",
        "sorry": "sorryyyyy",
        "tell me": "LMK",
        "by the way": "BTW",
        "i don't know": "IDK",
        "don't know": "DK",
    }
    response = response.lower()
    for keyword, st in keyword_shortform_dict.items():
        response = response.replace(keyword, f"{st}")
    return response


# ------------------------------------------------------------------------------------------------------
# Chatbot start
def chatbot(text, name):
    global chat_history   # history fed back to the chatbot
    global outputsss      # history shown to the user
    global bname          # the previous bot name
    global raw            # the text entered by the user; it is never modified below
    raw = text            # keep the raw text for display; `text` is what the chatbot sees

    # if the user does not name the chatbot
    if name == '':
        name = "Mr. Chatbot"

    # the program keeps running on the website, so when the chatbot's name changes,
    # the history and the chatbot are refreshed
    if bname != name:
        chat_history = []
        outputsss = []
    bname = name  # save the name

    # expand short forms and emoji into English
    text = tran_shortform(text)

    # detect the language of the input: does it contain any Chinese characters?
    is_chinese = any(0x4e00 <= ord(char) <= 0x9fff for char in text)

    # if the input is Chinese, translate it to English for the chatbot
    if is_chinese:
        text = translate(text, "ztoe")

    # standard responses for common greetings and phrases
    keyword_responses = {
        "how are you": "I'm doing well😄, thank you for asking!",
        "bye": "Goodbye!👊🏻",
        "bye.": "Goodbye!👊🏻",
        "thank you": "You're welcome!😃",
        "thank you.": "You're welcome!😃",
        "hello": f'I am {bname}. Nice to meet you!😎',
        "hello.": f'I am {bname}. Nice to meet you!😎',
        "hi": f'I am {bname}. Nice to meet you!😎',
    }

    # Generate a response based on the previous messages
    if len(chat_history) > 0:
        # Get the last message from the chat history
        last_message = chat_history[-1][1]
        # Generate a response conditioned on the last message
        encoded_input = tokenizer.encode(last_message + tokenizer.eos_token + text,
                                         return_tensors='pt', truncation=True)
        generated = model.generate(encoded_input, max_length=1024, do_sample=True)
        response = tokenizer.decode(generated[0], skip_special_tokens=True)
    else:
        # If there is no previous message, generate a response from the input alone
        encoded_input = tokenizer(text, return_tensors='pt')
        generated = model.generate(**encoded_input)
        response = tokenizer.batch_decode(generated, skip_special_tokens=True)[0]

    # Add emojis to the response
    response = add_emoji(response)
    # Add short forms to the response
    response = add_shortform(response)

    # Output the standard response if the input matches a keyword
    if text.lower() in keyword_responses:
        response = keyword_responses[text.lower()]

    # update history
    chat_history.append((text, response))

    # If the input was Chinese, translate the response back to Chinese
    if is_chinese:
        response = translate(response, "etoz")
        # convert simplified Chinese to traditional Chinese
        from hanziconv import HanziConv
        response = HanziConv.toTraditional(response)

    # if the input was amended, show the amended text in brackets
    if raw.lower() == text.lower():
        outputsss.append((raw, response))
    else:
        outputsss.append((f"{raw} ({text})", response))

    return outputsss


# ------------------------------------------------------------------------------------------------------
iface = gr.Interface(
    fn=chatbot,
    inputs=[gr.Textbox(label="Chat😃", placeholder="Say something💬"),
            gr.Textbox(label="Name the Bot😺", placeholder="give me a name😉")],
    outputs=[gr.Chatbot(label="Chat Here☺️")],
    title="🌸Empathic Chatbot❤️",
    allow_flagging="never",
    theme='gstaff/xkcd',  # credit: https://huggingface.co/spaces/gstaff/xkcd/tree/main
    examples=[["你好", ""], ["Hello", ""], ["I am sad", ""], ["我很傷心", ""]],
    article="""

Disclaimer


  • The chatbot does not have emotions or the ability to empathize in the same way that humans do
  • The chatbot can simulate empathy by recognizing and responding to certain emotional cues in language, but responses are generated by algorithms and not based on personal experiences or feelings
  • The information and advice provided by the chatbot should not be used as a substitute for professional medical, psychological, or legal advice. Users should always consult with qualified professionals in these fields for personalized recommendations that take into account their individual circumstances
  • The chatbot is not infallible and may encounter technical difficulties or make errors that can impact its performance
  • The information presented by the chatbot should always be cross-checked and authenticated before any action is taken based on it
  • Interactions with the chatbot may be subject to recording or monitoring for the purposes of quality assurance and training
  • Any information collected by the chatbot must be handled in compliance with relevant data protection laws and regulations.
  • ") #.launch(share=True) iface.launch()