gorkemgoknar committed
Commit b3b32bd
1 Parent(s): fba635a

Update app.py


chatbot interface

Files changed (1)
  1. app.py +40 -26
app.py CHANGED
@@ -1,30 +1,44 @@
import gradio as gr
import random
-
-def chat(message):
-    history = gr.get_state() or []
-    if message.startswith("How many"):
-        response = random.randint(1,10)
-    elif message.startswith("How"):
-        response = random.choice(["Great", "Good", "Okay", "Bad"])
-    elif message.startswith("Where"):
-        response = random.choice(["Here", "There", "Somewhere"])
-    else:
-        response = "I don't know"
-    history.append((message, response))
-    gr.set_state(history)
-    html = "<div class='chatbot'>"
-    for user_msg, resp_msg in history:
-        html += f"<div class='user_msg'>{user_msg}</div>"
-        html += f"<div class='resp_msg'>{resp_msg}</div>"
-    html += "</div>"
-    return html
-
-iface = gr.Interface(chat, "text", "html", css="""
-    .chatbox {display:flex;flex-direction:column}
-    .user_msg, .resp_msg {padding:4px;margin-bottom:4px;border-radius:4px;width:80%}
-    .user_msg {background-color:cornflowerblue;color:white;align-self:start}
-    .resp_msg {background-color:lightgray;align-self:self-end}
-""", allow_screenshot=False, allow_flagging=False)
+from transformers import AutoConfig
+from transformers import GPT2Tokenizer, GPT2LMHeadModel
+
+# load the GPT-2 chatbot checkpoint and its tokenizer from the Hugging Face Hub
+config = AutoConfig.from_pretrained('gorkemgoknar/gpt2chatbotenglish')
+model = GPT2LMHeadModel.from_pretrained('gorkemgoknar/gpt2chatbotenglish', config=config)
+
+tokenizer = GPT2Tokenizer.from_pretrained('gorkemgoknar/gpt2chatbotenglish')
+tokenizer.model_max_length = 1024
+
+
+def get_chat_response(name, input_txt="Hello, what is your name?"):
+    # prefix the prompt with the selected persona so the model answers in character
+    personality = "My name is " + name
+
+    bot_input_ids = tokenizer.encode(personality + tokenizer.eos_token + input_txt + tokenizer.eos_token, return_tensors='pt')
+
+    # sampling settings chosen as a balance between response quality and speed
+    chat_history_ids = model.generate(
+        bot_input_ids, max_length=50,
+        pad_token_id=tokenizer.eos_token_id,
+        no_repeat_ngram_size=3,
+        do_sample=True,
+        top_k=60,
+        top_p=0.8,
+        temperature=1.3
+    )
+
+    # decode only the newly generated tokens, skipping the prompt and special tokens
+    out_str = tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)
+    return out_str
+
+
+personality_choices = ["Gandalf", "Riddick", "Macleod", "Morpheus", "Neo", "Spock", "Vader", "Indy"]
+
+examples = [["Gandalf", "What is your name?"]]
+
+iface = gr.Interface(fn=get_chat_response, inputs=[gr.inputs.Dropdown(personality_choices), "text"], outputs="text", examples=examples)
+
if __name__ == "__main__":
-    iface.launch()
+    iface.launch(debug=False)
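
For a quick sanity check of the new chat function outside the Gradio UI, it can be imported and called directly. The snippet below is an illustrative sketch, not part of the commit; it assumes app.py is importable from the working directory and that the gorkemgoknar/gpt2chatbotenglish checkpoint downloads successfully.

# local smoke test for the updated app.py (illustrative only)
from app import get_chat_response

# the persona should be one of personality_choices; output varies because sampling is enabled
print(get_chat_response("Gandalf", "What is your name?"))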