Himhimhim committed on
Commit
312c56c
•
1 Parent(s): 46f3794

Delete final.ipynb

Files changed (1)
  1. final.ipynb +0 -228
final.ipynb DELETED
@@ -1,228 +0,0 @@
- {
-  "cells": [
-   {
-    "attachments": {},
-    "cell_type": "markdown",
-    "metadata": {},
-    "source": []
-   },
-   {
-    "cell_type": "code",
-    "execution_count": null,
-    "metadata": {},
-    "outputs": [],
-    "source": [
-     "!pip install --user gradio\n",
-     "!pip install --user transformers\n",
-     "!pip install --user HanziConv"
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": null,
-    "metadata": {},
-    "outputs": [],
-    "source": [
-     "#Unicode\n",
-     "import gradio as gr\n",
-     "from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration\n",
-     "\n",
-     "model_name = \"facebook/blenderbot-400M-distill\"\n",
-     "tokenizer = BlenderbotTokenizer.from_pretrained(model_name)\n",
-     "model = BlenderbotForConditionalGeneration.from_pretrained(model_name)\n",
-     "\n",
-     "def translate(text, mode):\n",
-     "    if mode == \"ztoe\":\n",
-     "        from transformers import AutoModelWithLMHead, AutoTokenizer, pipeline\n",
-     "        mode_name = 'liam168/trans-opus-mt-zh-en'\n",
-     "        model = AutoModelWithLMHead.from_pretrained(mode_name)\n",
-     "        tokenizer = AutoTokenizer.from_pretrained(mode_name)\n",
-     "        translation = pipeline(\"translation_zh_to_en\", model=model, tokenizer=tokenizer)\n",
-     "        translate_result = translation(text, max_length=400)\n",
-     "    if mode == \"etoz\":\n",
-     "        from transformers import AutoModelWithLMHead, AutoTokenizer, pipeline\n",
-     "        mode_name = 'liam168/trans-opus-mt-en-zh'\n",
-     "        model = AutoModelWithLMHead.from_pretrained(mode_name)\n",
-     "        tokenizer = AutoTokenizer.from_pretrained(mode_name)\n",
-     "        translation = pipeline(\"translation_en_to_zh\", model=model, tokenizer=tokenizer)\n",
-     "\n",
-     "        #translation = pipeline(\"translation_en_to_zh\", model=model, tokenizer=tokenizer)\n",
-     "        translate_result = translation(text, max_length=400)\n",
-     "\n",
-     "    return translate_result\n",
-     "\n",
-     "\n",
-     "chat_history = []\n",
-     "#chat_history.append(f\"Hello i am your first bot friend🤓. Give me a name and say something!\")\n",
-     "\n",
-     "\n",
-     "def add_emoji(response):\n",
-     "    # Define the keywords and their corresponding emojis\n",
-     "    keyword_emoji_dict = {\n",
-     "        \"happy\": \"😀\",\n",
-     "        \"sad\": \"😒\",\n",
-     "        \"sorry\": \"😞\",\n",
-     "        \"love\": \"❤️\",\n",
-     "        \"like\": \"👍\",\n",
-     "        \"dislike\": \"👎\",\n",
-     "        \"Why\": \"🥺\",\n",
-     "        \"cat\": \"🐱\",\n",
-     "        \"dog\": \"🐶\",\n",
-     "        \"嗨\": \"😎\"\n",
-     "\n",
-     "    }\n",
-     "    for keyword, emoji in keyword_emoji_dict.items():\n",
-     "        response = response.replace(keyword, f\"{keyword} {emoji}\")\n",
-     "    return response\n",
-     "\n",
-     "def add_shortform(response):\n",
-     "    # Define the keywords and their corresponding shortforms\n",
-     "    keyword_shortform_dict = {\n",
-     "        \"You only live once\": \"YOLO\",\n",
-     "        \"funny\": \"LOL\",\n",
-     "        \"laugh\": \"LOL\",\n",
-     "        \"nevermind\": \"nvm\",\n",
-     "        \"sorry\": \"sorryyyyy\",\n",
-     "        \"tell me\": \"LMK\",\n",
-     "        \"By the way\": \"BTW\",\n",
-     "        \"don't know\": \"DK\",\n",
-     "        \"do not know\": \"IDK\"\n",
-     "\n",
-     "\n",
-     "    }\n",
-     "    for keyword, st in keyword_shortform_dict.items():\n",
-     "        response = response.replace(keyword, f\"{st}\")\n",
-     "    return response\n",
-     "\n",
-     "def chatbot(text, name):\n",
-     "    global chat_history\n",
-     "    global Itext\n",
-     "    global bname\n",
-     "    bname = name\n",
-     "    Itext = text\n",
-     "\n",
-     "\n",
-     "\n",
-     "\n",
-     "    # Try to detect the language of the input text\n",
-     "\n",
-     "    # Lowercase the text and check whether it contains any Chinese characters\n",
-     "    is_chinese = any(0x4e00 <= ord(char) <= 0x9fff for char in text.lower())\n",
-     "    if is_chinese:\n",
-     "\n",
-     "        text = translate(text, \"ztoe\")\n",
-     "\n",
-     "        text = f\"{text}\"\n",
-     "        text = text[23:(len(text) - 3)]  # strip the [{'translation_text': '...'}] wrapper\n",
-     "\n",
-     "\n",
-     "    # Keyword-triggered responses for common greetings and phrases\n",
-     "    keyword_responses = {\n",
-     "        #\"hello\": f\"I'm {name} 😄, nice to meet you!\",\n",
-     "        \"how are you\": \"I'm doing well😄, thank you for asking!\",\n",
-     "        \"bye\": \"Goodbye!👊🏻\",\n",
-     "        \"thank you\": \"You're welcome!😃\",\n",
-     "        \"hello\": f'I am {bname}. Nice to meet you!😎',\n",
-     "        \"Hello\": f'I am {bname}. Nice to meet you!😎',\n",
-     "        \"Hi\": f'I am {bname}. Nice to meet you!😎',\n",
-     "        \"hi\": f'I am {bname}. Nice to meet you!😎',\n",
-     "\n",
-     "\n",
-     "    }\n",
-     "\n",
-     "    # Generate a response based on the previous messages\n",
-     "    if len(chat_history) > 0:\n",
-     "        # Get the last message from the chat history\n",
-     "        last_message = chat_history[-1][1]\n",
-     "        # Generate a response based on the last message\n",
-     "        encoded_input = tokenizer.encode(last_message + tokenizer.eos_token + text, return_tensors='pt')\n",
-     "        generated = model.generate(encoded_input, max_length=1024, do_sample=True)\n",
-     "        response = tokenizer.decode(generated[0], skip_special_tokens=True)\n",
-     "        response = f\"{response}\"\n",
-     "    else:\n",
-     "        # If there is no previous message, generate a response using the default method\n",
-     "        encoded_input = tokenizer(text, return_tensors='pt')\n",
-     "        generated = model.generate(**encoded_input)\n",
-     "        response = tokenizer.batch_decode(generated, skip_special_tokens=True)[0]\n",
-     "        response = f\"{response}\"\n",
-     "    if text in keyword_responses:\n",
-     "        response = keyword_responses[text]\n",
-     "        #break\n",
-     "\n",
-     "    # If the input language was Chinese, translate the response back to Chinese\n",
-     "    # if input_lang == \"zh\":\n",
-     "    if is_chinese:\n",
-     "        from hanziconv import HanziConv\n",
-     "        response = translate(response, \"etoz\")\n",
-     "        response = HanziConv.toTraditional(f\"{response}\")\n",
-     "        response = f\"{response} \"\n",
-     "        response = response[23:(len(response) - 4)]  # strip the [{'translation_text': '...'}] wrapper\n",
-     "    else:\n",
-     "        response = response\n",
-     "\n",
-     "    # Add emojis and shortforms to the response\n",
-     "    response = add_emoji(response)\n",
-     "    response = add_shortform(response)\n",
-     "    chat_history.append((Itext, response))\n",
-     "\n",
-     "\n",
-     "    # Format the chat history as an HTML string for display\n",
-     "    history_str = \"\"\n",
-     "    for name, msg in chat_history:\n",
-     "        history_str += f\"<strong>{name}:</strong> {msg}<br>\"\n",
-     "    # Return the chat history for the Chatbot component\n",
-     "\n",
-     "\n",
-     "\n",
-     "    return chat_history\n",
-     "\n",
-     "\n",
-     "gr.Interface(fn=chatbot,\n",
-     "\n",
-     "\n",
-     "             inputs=[gr.inputs.Textbox(label=\"Chat\", placeholder=\"Say something\"),\n",
-     "                     gr.inputs.Textbox(label=\"Name the Bot\", placeholder=\"give me a name\")],\n",
-     "             outputs=[gr.Chatbot(label=\"Chat Here\")],\n",
-     "\n",
-     "             title=\"Empathetic Chatbot\",\n",
-     "             allow_flagging=False,\n",
-     "             layout=\"vertical\",\n",
-     "             #theme=\"default\",\n",
-     "             #theme=\"darkpeach\",\n",
-     "             theme='gstaff/xkcd',\n",
-     "\n",
-     "\n",
-     "\n",
-     "\n",
-     "             #theme=gr.themes.Soft(),\n",
-     "             examples=[[\"你好\"], [\"Hello\"]]\n",
-     "             ).launch()\n",
-     "\n",
-     "\n",
-     "\n"
-    ]
-   }
-  ],
-  "metadata": {
-   "kernelspec": {
-    "display_name": "Python 3",
-    "language": "python",
-    "name": "python3"
-   },
-   "language_info": {
-    "codemirror_mode": {
-     "name": "ipython",
-     "version": 3
-    },
-    "file_extension": ".py",
-    "mimetype": "text/x-python",
-    "name": "python",
-    "nbconvert_exporter": "python",
-    "pygments_lexer": "ipython3",
-    "version": "3.9.16"
-   },
-   "orig_nbformat": 4
-  },
-  "nbformat": 4,
-  "nbformat_minor": 2
- }
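
Note on the deleted cell: translate() returns the raw transformers pipeline output, and the notebook recovers the translated string by slicing its string form (text[23:(len(text) - 3)]). A minimal sketch of that same step, indexing the pipeline result instead of slicing, assuming the same liam168/trans-opus-mt-zh-en checkpoint used above:

    from transformers import pipeline

    # The translation pipeline returns a list of dicts such as
    # [{'translation_text': '...'}]; index it rather than slicing its repr.
    translation = pipeline("translation_zh_to_en", model="liam168/trans-opus-mt-zh-en")
    result = translation("你好", max_length=400)
    text = result[0]["translation_text"]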