william4416 committed on
Commit
23c5a04
1 Parent(s): 7617e4a

Update app.py

Files changed (1)
  app.py +12 -8
app.py CHANGED
@@ -19,15 +19,16 @@ known_questions_answers = {
     "Do you speak English?": "I can understand and respond to English questions.",
 }
 
-def predict(input, history=[]):
+def predict(input_text, chatbot_state):
     response = None
+    history = chatbot_state
 
     # Check if the input question is in the known question-answer pairs
-    if input in known_questions_answers:
-        response = known_questions_answers[input]
+    if input_text in known_questions_answers:
+        response = known_questions_answers[input_text]
     else:
         # Tokenize the new user input sentence
-        new_user_input_ids = tokenizer.encode(input + tokenizer.eos_token, return_tensors="pt")
+        new_user_input_ids = tokenizer.encode(input_text + tokenizer.eos_token, return_tensors="pt")
         # Append the new user input tokens to the chat history
         bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)
         # Generate a response
@@ -37,19 +38,22 @@ def predict(input, history=[]):
     # Convert tokens to text, and split the response into lines
     response = tokenizer.decode(history[0], skip_special_tokens=True)
 
-    print("Response:", response) # Debug statement
-    return response
+    return response, history
 
 def main():
     # You can add logic here to read known question-answer pairs, for example, from a JSON file
     pass
 
+textbox_output = gr.outputs.Textbox(label="Chatbot Response")
+state_input = "text"
+state_output = "state"
+
 gr.Interface(
     fn=predict,
     title=title,
     description=description,
     examples=examples,
-    inputs=["text", "state"],
-    outputs="text",
+    inputs=["text", state_input],
+    outputs=[textbox_output, state_output],
     theme="finlaymacklon/boxy_violet",
 ).launch()
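
For context, here is a minimal sketch of the state round-trip this commit wires up, separate from the committed code: Gradio's "state" component feeds the second return value of the function back in as the second argument on the next call, which is how the chat history persists between turns. The echo response below is a stand-in for the DialoGPT generation in app.py, and the sketch assumes a Gradio release from the 3.x era that still accepts the "state" string shortcut.

import gradio as gr

def predict(input_text, chatbot_state):
    # Gradio passes None for a fresh session, so fall back to an empty history
    history = chatbot_state or []
    response = f"You said: {input_text}"  # stand-in for the model call above
    history.append((input_text, response))
    # The first value fills the text output; the second is written back into "state"
    return response, history

gr.Interface(
    fn=predict,
    inputs=["text", "state"],
    outputs=["text", "state"],
).launch()

Note that gr.outputs.Textbox, used for textbox_output in the diff, is the legacy output API; in current Gradio releases the equivalent components are spelled gr.Textbox and gr.State.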