masapasa committed on
Commit
65c37f9
1 Parent(s): edf41f7
Files changed (1) hide show
  1. app.py +22 -5
app.py CHANGED
@@ -1,16 +1,33 @@
1
  import gradio as gr
 
 
2
 
3
def update(name):
    """Return a short Gradio welcome greeting for *name*."""
    greeting = "Welcome to Gradio, {}!".format(name)
    return greeting
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
 
6
# Build the demo UI: a name textbox, an output textbox, and a Run button
# wired to update().
demo = gr.Blocks()

with demo:
    gr.Markdown("Start typing below and then click **Run** to see the output.")
    with gr.Row():
        inp = gr.Textbox(placeholder="What is your name?")  # user input
        out = gr.Textbox()  # greeting shown here
    btn = gr.Button("Run")
    # Clicking Run sends the textbox value to update() and displays the result.
    btn.click(fn=update, inputs=inp, outputs=out)

demo.launch()
1
  import gradio as gr
2
+ from transformers import AutoModelForCausalLM, AutoTokenizer
3
+ import torch
4
 
5
# Load the DialoGPT conversational model and its tokenizer once at startup.
# NOTE(review): from_pretrained downloads weights from the Hugging Face Hub on
# first run — requires network access and several hundred MB of disk.
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")
7
+
8
def predict(input, history=None):
    """Generate the next DialoGPT reply for *input* given the conversation.

    Parameters
    ----------
    input : str
        The new user message. (Name kept for caller compatibility even
        though it shadows the builtin.)
    history : list | None
        Token-id history from the previous turn, as returned by this
        function, or None/[] for a fresh conversation.

    Returns
    -------
    (response, history)
        response: list of (user_utterance, bot_utterance) tuples suitable
        for a chatbot display; history: full token-id sequence to pass back
        on the next turn.
    """
    # Was `history=[]` — a mutable default shared across every call and
    # every user session, so all users leaked into one conversation.
    history = [] if history is None else history

    # tokenize the new input sentence, terminated by the EOS token
    new_user_input_ids = tokenizer.encode(input + tokenizer.eos_token, return_tensors='pt')

    # append the new user input tokens to the chat history.
    # Guard the empty case: torch.cat of an empty 1-D LongTensor with a 2-D
    # id tensor is a dimension mismatch on recent torch versions.
    if history:
        bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)
    else:
        bot_input_ids = new_user_input_ids

    # generate a response; history becomes the full generated id sequence
    history = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id).tolist()

    # convert the tokens to text, then split utterances on the EOS marker
    response = tokenizer.decode(history[0]).split("<|endoftext|>")
    # pair alternating utterances as (user, bot) tuples
    response = [(response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)]
    return response, history
22
 
23
# Build the chat UI. The original wiring was broken: gr.Textbox was given the
# list ["text", "state"] as its value, predict's two return values were mapped
# onto a single Textbox, and no state component carried the token history
# between turns. Use gr.State for the history and gr.Chatbot for the paired
# (user, bot) utterances that predict() returns.
demo = gr.Blocks()

with demo:
    gr.Markdown("Start typing below and then click **Run** to see the output.")
    # Per-session token-id history, threaded through predict() on each click.
    state = gr.State([])
    with gr.Row():
        inp = gr.Textbox(placeholder="What is your name?")  # user message
        out = gr.Chatbot()  # displays list of (user, bot) tuples
    btn = gr.Button("Run")
    # predict returns (response, history): response feeds the chatbot,
    # history is written back into the session state for the next turn.
    btn.click(fn=predict, inputs=[inp, state], outputs=[out, state])

demo.launch()