kingabzpro committed
Commit • fadb0e3
1 Parent(s): 3a2bb98
Improved the Chatbot
app.py
CHANGED
@@ -9,12 +9,12 @@ description = """
 <p>
 <center>
 The bot was trained on Rick and Morty dialogues Kaggle Dataset using DialoGPT.
-<img src="https://huggingface.co/spaces/kingabzpro/Rick_and_Morty_Bot/
+<img src="https://huggingface.co/spaces/kingabzpro/Rick_and_Morty_Bot/img/rick.png" alt="rick" width="200"/>
 </center>
 </p>
 """
 article = "<p style='text-align: center'><a href='https://medium.com/geekculture/discord-bot-using-dailogpt-and-huggingface-api-c71983422701' target='_blank'>Complete Tutorial</a></p><p style='text-align: center'><a href='https://dagshub.com/kingabzpro/DailoGPT-RickBot' target='_blank'>Project is Available at DAGsHub</a></p></center><center><img src='https://visitor-badge.glitch.me/badge?page_id=kingabzpro/Rick_and_Morty_Bot' alt='visitor badge'></center></p>"
-
+examples = [["How are you Rick?"]]
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch

@@ -29,14 +29,23 @@ def predict(input, history=[]):
     bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)

     # generate a response
-    history = model.generate(bot_input_ids, max_length=
+    history = model.generate(bot_input_ids, max_length=4000, pad_token_id=tokenizer.eos_token_id).tolist()

-    # convert the tokens to text, and then split the responses into
+    # convert the tokens to text, and then split the responses into lines
     response = tokenizer.decode(history[0]).split("<|endoftext|>")
+    #print('decoded_response-->>'+str(response))
     response = [(response[i], response[i+1]) for i in range(0, len(response)-1, 2)] # convert to tuples of list
+    #print('response-->>'+str(response))
     return response, history

-gr.Interface(fn
+gr.Interface(fn=predict,
+             title=title,
+             description=description,
+             examples=examples,
+             flagging_callback = hf_writer,
+             allow_flagging = "manual",
+             inputs=["text", "state"],
+             outputs=["chatbot", "state"]).launch()

 #theme ="grass",
 #title = title,
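Taken together, the changes wire a DialoGPT-style predict function into a Gradio Interface that carries the token history in a "state" component and renders the turns in a "chatbot" component. The sketch below reassembles app.py roughly as it would read after this commit; it is only a sketch, and everything not visible in the diff is an assumption: the gradio import, the exact fine-tuned checkpoint (a generic DialoGPT id is used as a placeholder), how the user input is tokenized, the title string, and the hf_writer flagging callback (assumed here to be a gr.HuggingFaceDatasetSaver with a placeholder token and dataset name).

import gradio as gr                                  # assumption: imported near the top of app.py
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

title = "Rick and Morty Bot"                         # assumption: exact title string not shown in the diff
description = """..."""                              # shown in the diff: HTML block with the rick.png image
examples = [["How are you Rick?"]]

# assumption: the Space loads a DialoGPT checkpoint fine-tuned on the Rick and Morty
# dialogues; the real model id is not part of this diff, so a generic one stands in
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")

# assumption: flagged examples are written to a Hugging Face dataset;
# token and dataset name here are placeholders
hf_writer = gr.HuggingFaceDatasetSaver("hf_...", "Rick_bot_flags")

def predict(input, history=[]):
    # assumption: the new user turn is encoded with a trailing end-of-text token,
    # the usual DialoGPT convention
    new_user_input_ids = tokenizer.encode(input + tokenizer.eos_token, return_tensors="pt")

    # append the new turn to the running token history (shown in the diff)
    bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)

    # generate a response; pad_token_id silences the GPT-2 "no pad token" warning
    history = model.generate(bot_input_ids, max_length=4000, pad_token_id=tokenizer.eos_token_id).tolist()

    # decode the whole conversation and split it into turns on the eos marker
    response = tokenizer.decode(history[0]).split("<|endoftext|>")

    # pair the turns into (user, bot) tuples, the format the "chatbot" output expects
    response = [(response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)]
    return response, history

gr.Interface(fn=predict,
             title=title,
             description=description,
             examples=examples,
             flagging_callback=hf_writer,
             allow_flagging="manual",
             inputs=["text", "state"],      # text box plus a hidden state carrying the token history
             outputs=["chatbot", "state"]).launch()

Because both inputs and outputs include "state", Gradio feeds the history returned by one call back in as the history argument of the next, which is what lets the otherwise stateless predict function hold a conversation; allow_flagging="manual" with flagging_callback=hf_writer adds a Flag button that passes the current example to the configured callback.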
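The split-and-pair step in predict is the least obvious line of the diff; a toy run of just that logic, with a hand-written decoded string instead of real model output, shows how the conversation text becomes the list of (user, bot) tuples that the chatbot output renders:

# Toy illustration of the split/pair logic from predict, independent of the model.
decoded = "How are you Rick?<|endoftext|>I'm fine, Morty. *burp*<|endoftext|>"
turns = decoded.split("<|endoftext|>")   # ['How are you Rick?', "I'm fine, Morty. *burp*", '']
pairs = [(turns[i], turns[i + 1]) for i in range(0, len(turns) - 1, 2)]
print(pairs)                             # [('How are you Rick?', "I'm fine, Morty. *burp*")]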