darpan-jain committed · commit 61ca66a · 1 parent: 030066b
`bot` method now replaced with responses from DialoGPT
app.py
CHANGED
@@ -10,20 +10,6 @@ import time
 tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
 model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")
 
-# def bot(history):
-#     user_message = history[-1][0]
-#     new_user_input_ids = tokenizer.encode(user_message + tokenizer.eos_token, return_tensors='pt')
-#
-#     # append the new user input tokens to the chat history
-#     bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)
-#
-#     # generate a response
-#     history = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id).tolist()
-#
-#     # convert the tokens to text, and then split the responses into lines
-#     response = tokenizer.decode(history[0]).split("<|endoftext|>")
-#     response = [(response[i], response[i+1]) for i in range(0, len(response)-1, 2)] # convert to tuples of list
-#     return history
 
 with gr.Blocks() as demo:
     chatbot = gr.Chatbot()
@@ -34,11 +20,25 @@ with gr.Blocks() as demo:
         return "", history + [[user_message, None]]
 
     def bot(history):
-
-
-
+        user_message = history[-1][0]
+        new_user_input_ids = tokenizer.encode(user_message + tokenizer.eos_token, return_tensors='pt')
+
+        # append the new user input tokens to the chat history
+        bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)
+
+        # generate a response
+        history = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id).tolist()
+
+        # convert the tokens to text, and then split the responses into lines
+        response = tokenizer.decode(history[0]).split("<|endoftext|>")
+        response = [(response[i], response[i+1]) for i in range(0, len(response)-1, 2)] # convert to tuples of list
+
+        # Add the new response to the chat history
+        history = history + [tokenizer.encode(response[0][0] + tokenizer.eos_token, return_tensors='pt')]
+
         return history
 
+
     msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
         bot, chatbot, chatbot
    )
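For reference, DialoGPT's usual multi-turn pattern keeps the running conversation as a token-ID tensor, separate from the [user, bot] string pairs a Gradio Chatbot displays. The sketch below is not the committed app.py: the helper name generate_reply and the state variable chat_history_ids are illustrative assumptions; only the model checkpoint and the generation arguments (max_length=1000, pad_token_id=tokenizer.eos_token_id) are taken from the commit above.

# Sketch only (assumed helper, not part of this commit): multi-turn DialoGPT generation
# that carries conversation state as token IDs rather than display strings.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")

def generate_reply(user_message, chat_history_ids=None):
    # Encode the new user turn, terminated with the EOS token DialoGPT expects.
    new_ids = tokenizer.encode(user_message + tokenizer.eos_token, return_tensors="pt")
    # Append the new turn to the running conversation tensor (token IDs, not chat strings).
    bot_input_ids = new_ids if chat_history_ids is None else torch.cat([chat_history_ids, new_ids], dim=-1)
    # Generate a continuation with the same arguments used in the committed bot().
    chat_history_ids = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id)
    # Decode only the tokens produced after the prompt to get the bot's reply text.
    reply = tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)
    return reply, chat_history_ids

# Example: two turns, threading the token history through both calls.
# reply1, ids = generate_reply("Hello! How are you?")
# reply2, ids = generate_reply("Tell me something interesting.", ids)

In a Blocks app, that token-ID state could live in its own component (for example a gr.State) so the displayed chat history never has to be re-encoded each turn.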