darpan-jain committed on
Commit
030066b
1 Parent(s): cb7a867

Include chatbot methods within the Blocks flow

Browse files
Files changed (1) hide show
  1. app.py +26 -20
app.py CHANGED
@@ -1,7 +1,8 @@
1
  from transformers import AutoTokenizer, AutoModelForCausalLM
2
  import gradio as gr
3
  import torch
4
-
 
5
 
6
  # tokenizer = AutoTokenizer.from_pretrained("chavinlo/gpt4-x-alpaca")
7
  # model = AutoModelForCausalLM.from_pretrained("chavinlo/gpt4-x-alpaca")
@@ -9,33 +10,38 @@ import torch
9
  tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
10
  model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")
11
 
12
- def user(message, history):
13
- return "", history + [[message, None]]
14
-
15
- def bot(history):
16
- user_message = history[-1][0]
17
- new_user_input_ids = tokenizer.encode(user_message + tokenizer.eos_token, return_tensors='pt')
18
-
19
- # append the new user input tokens to the chat history
20
- bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)
21
-
22
- # generate a response
23
- history = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id).tolist()
 
 
24
 
25
- # convert the tokens to text, and then split the responses into lines
26
- response = tokenizer.decode(history[0]).split("<|endoftext|>")
27
- response = [(response[i], response[i+1]) for i in range(0, len(response)-1, 2)] # convert to tuples of list
28
- return history
29
-
30
- ## Use the above methods to create the Chatbot interface
31
  with gr.Blocks() as demo:
32
  chatbot = gr.Chatbot()
33
  msg = gr.Textbox()
34
  clear = gr.Button("Clear")
35
 
 
 
 
 
 
 
 
 
 
36
  msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
37
  bot, chatbot, chatbot
38
  )
39
  clear.click(lambda: None, None, chatbot, queue=False)
40
 
41
- demo.launch()
 
1
  from transformers import AutoTokenizer, AutoModelForCausalLM
2
  import gradio as gr
3
  import torch
4
+ import random
5
+ import time
6
 
7
  # tokenizer = AutoTokenizer.from_pretrained("chavinlo/gpt4-x-alpaca")
8
  # model = AutoModelForCausalLM.from_pretrained("chavinlo/gpt4-x-alpaca")
 
10
  tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
11
  model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")
12
 
13
+ # def bot(history):
14
+ # user_message = history[-1][0]
15
+ # new_user_input_ids = tokenizer.encode(user_message + tokenizer.eos_token, return_tensors='pt')
16
+ #
17
+ # # append the new user input tokens to the chat history
18
+ # bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)
19
+ #
20
+ # # generate a response
21
+ # history = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id).tolist()
22
+ #
23
+ # # convert the tokens to text, and then split the responses into lines
24
+ # response = tokenizer.decode(history[0]).split("<|endoftext|>")
25
+ # response = [(response[i], response[i+1]) for i in range(0, len(response)-1, 2)] # convert to tuples of list
26
+ # return history
27
 
 
 
 
 
 
 
28
  with gr.Blocks() as demo:
29
  chatbot = gr.Chatbot()
30
  msg = gr.Textbox()
31
  clear = gr.Button("Clear")
32
 
33
+ def user(user_message, history):
34
+ return "", history + [[user_message, None]]
35
+
36
+ def bot(history):
37
+ bot_message = random.choice(["Yes", "No"])
38
+ history[-1][1] = bot_message
39
+ time.sleep(1)
40
+ return history
41
+
42
  msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
43
  bot, chatbot, chatbot
44
  )
45
  clear.click(lambda: None, None, chatbot, queue=False)
46
 
47
+ demo.launch()