BarBar288 committed on
Commit
2e80b13
1 Parent(s): 5000565

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +42 -0
app.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Fix: the original imported `AutoModelForCasualLM` (a misspelling of
# `AutoModelForCausalLM`), which does not exist in transformers and
# raises ImportError before the app can start.
from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr
import torch

# UI metadata shown by the Gradio interface.
title = "AI Chatbot"
description = "A State-of-the-Art Large-scale Pretrained Response generation model (DialoGPT)"
examples = [["How are you?"]]

# Load tokenizer and model once at import time so every chat request
# reuses the same weights (downloads from the HF hub on first run).
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-large")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-large")
def predict(input, history=[]):
    """Generate one DialoGPT reply and return the updated chat state.

    ``input`` is the user's new message; ``history`` is the flat list of
    token ids accumulated over the conversation (Gradio "state").
    Returns ``(pairs, history)`` where ``pairs`` is a list of
    (user, bot) message tuples for the chatbot widget.
    """
    # Encode the new user turn, terminated by the EOS token.
    user_ids = tokenizer.encode(
        input + tokenizer.eos_token, return_tensors="pt"
    )

    # Prepend the prior conversation tokens to form the model prompt.
    prompt_ids = torch.cat([torch.LongTensor(history), user_ids], dim=-1)

    # Let the model extend the conversation; the full token sequence
    # (prompt + reply) becomes the new history.
    generated = model.generate(
        prompt_ids, max_length=4000, pad_token_id=tokenizer.eos_token_id
    )
    history = generated.tolist()

    # Decode and split on the EOS marker so each turn is its own string.
    turns = tokenizer.decode(history[0]).split("<|endoftext|>")

    # Group consecutive turns into (user, bot) pairs for display.
    paired = []
    for idx in range(0, len(turns) - 1, 2):
        paired.append((turns[idx], turns[idx + 1]))

    return paired, history
# Wire the chat function into a Gradio UI and start the server.
interface_config = {
    "fn": predict,
    "title": title,
    "description": description,
    "examples": examples,
    "inputs": ["text", "state"],       # user message + hidden token history
    "outputs": ["chatbot", "state"],   # rendered chat + updated history
    "theme": "finlaymacklon/boxy_violet",
}
demo = gr.Interface(**interface_config)
demo.launch()