william4416 committed on
Commit
85d3e5e
1 Parent(s): 0944d81

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -14
app.py CHANGED
@@ -1,19 +1,17 @@
1
# External dependencies: Gradio serves the web UI, Transformers supplies the
# DialoGPT model, torch provides tensor operations, json parses data files.
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import json

# Instantiate the tokenizer and model a single time at module load so every
# request shares the same weights.
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-large")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-large")
9
- def predict(input_text, state=[]):
 
10
  # tokenize the new input sentence
11
  new_user_input_ids = tokenizer.encode(
12
- input_text + tokenizer.eos_token, return_tensors="pt"
13
  )
14
 
15
  # append the new user input tokens to the chat history
16
- bot_input_ids = torch.cat([torch.LongTensor(state), new_user_input_ids], dim=-1)
17
 
18
  # generate a response
19
  history = model.generate(
@@ -21,13 +19,15 @@ def predict(input_text, state=[]):
21
  ).tolist()
22
 
23
  # convert the tokens to text, and then split the responses into lines
24
- response = tokenizer.decode(history[0]).split(" ")
 
25
  response = [
26
  (response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)
27
  ] # convert to tuples of list
 
28
  return response, history
29
 
30
def read_json_file(file_path):
    """Return the object deserialized from the JSON file at *file_path*."""
    with open(file_path, 'r') as handle:
        return json.load(handle)
@@ -46,15 +46,14 @@ def main():
46
  if __name__ == "__main__":
47
  main()
48
 
49
# Page metadata shown on the Gradio interface.
title = "Chat with AI"
description = "This AI chatbot can respond to your queries using a DialoGPT-based model and additional learned responses."
examples = [["Hello, how are you?"], ["What is the weather today?"]]

# Wire predict into a two-input / two-output text interface and start serving.
gr.Interface(
    fn=predict,
    title=title,
    description=description,
    inputs=["text", "text"],
    outputs=["text", "text"],
    theme="finlaymacklon/boxy_violet",
).launch()
 
 
1
# NOTE(review): this revision dropped `import gradio as gr`, `import torch`,
# and `import json` while the file still uses gr.Interface, torch.cat /
# torch.LongTensor, and json.load — restoring them here to avoid NameError
# at import time.
import json

import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the pretrained DialoGPT-large conversational model and its tokenizer
# once at startup; both are reused for every chat request.
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-large")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-large")
6
+
7
+ def predict(input, history=[]):
8
  # tokenize the new input sentence
9
  new_user_input_ids = tokenizer.encode(
10
+ input + tokenizer.eos_token, return_tensors="pt"
11
  )
12
 
13
  # append the new user input tokens to the chat history
14
+ bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)
15
 
16
  # generate a response
17
  history = model.generate(
 
19
  ).tolist()
20
 
21
  # convert the tokens to text, and then split the responses into lines
22
+ response = tokenizer.decode(history[0]).split("<|endoftext|>")
23
+ # print('decoded_response-->>'+str(response))
24
  response = [
25
  (response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)
26
  ] # convert to tuples of list
27
+ # print('response-->>'+str(response))
28
  return response, history
29
 
30
def read_json_file(file_path):
    """Load a JSON document from disk.

    Args:
        file_path: Path to a JSON-encoded text file.

    Returns:
        The deserialized Python object (typically a dict or list).

    Raises:
        OSError: If the file cannot be opened.
        json.JSONDecodeError: If the contents are not valid JSON.
    """
    with open(file_path, 'r') as file:
        return json.load(file)
 
46
  if __name__ == "__main__":
47
  main()
48
 
49
+
 
 
50
 
51
# NOTE(review): this revision deleted the title/description/examples
# definitions while still referencing them below, which raises NameError at
# startup — restore them (values taken from the previous revision).
title = "Chat with AI"
description = "This AI chatbot can respond to your queries using a DialoGPT-based model and additional learned responses."
examples = [["Hello, how are you?"], ["What is the weather today?"]]

# Build and launch the chat UI: free-text input plus hidden conversation
# state in; chatbot transcript plus updated state out — matching predict's
# (response, history) return pair.
gr.Interface(
    fn=predict,
    title=title,
    description=description,
    examples=examples,
    inputs=["text", "state"],
    outputs=["chatbot", "state"],
    theme="finlaymacklon/boxy_violet",
).launch()