kingabzpro committed
Commit 1b2f46a
1 Parent(s): 562ea7d

Update app.py

Files changed (1):
    app.py +3 -3
app.py CHANGED
@@ -31,9 +31,9 @@ def predict(input, history=[]):
     # generate a response
     history = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id).tolist()
 
-    # convert the tokens to text, and then split the responses into lines
-    response = tokenizer.decode(history[0]).replace("<|endoftext|>", "\n")
-
+    # convert the tokens to text, then split the conversation into the format the chatbot expects
+    response = tokenizer.decode(history[0]).split("<|endoftext|>")
+    response = [(response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)]  # convert to a list of (user, bot) tuples
     return response, history
 
 gr.Interface(fn=predict, inputs=["textbox", "state"], outputs=["chatbot", "state"], allow_flagging="manual", theme="grass", title=title, flagging_callback=hf_writer, description=description, article=article).launch(enable_queue=True)  # customizes the input component
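
For context, a minimal sketch of what the new post-processing produces. It assumes a DialoGPT-style history in which user and bot turns alternate, each terminated by the <|endoftext|> token; the sample string below is illustrative, not taken from the app:

# Illustrative sample only: a decoded DialoGPT history alternates user and
# bot turns, each ending with the <|endoftext|> token.
decoded = "Hi there<|endoftext|>Hello! How can I help?<|endoftext|>Tell me a joke<|endoftext|>Why did the chicken cross the road?<|endoftext|>"

# Splitting on the token yields [user, bot, user, bot, ..., ""]; the
# trailing empty string comes from the final separator.
turns = decoded.split("<|endoftext|>")

# Pairing consecutive turns produces the list of (user, bot) tuples that
# Gradio's "chatbot" output component renders as a conversation.
pairs = [(turns[i], turns[i + 1]) for i in range(0, len(turns) - 1, 2)]
print(pairs)
# [('Hi there', 'Hello! How can I help?'),
#  ('Tell me a joke', 'Why did the chicken cross the road?')]

This is why the old replace("<|endoftext|>", "\n") call no longer fit: it returned one newline-joined string, whereas the chatbot output component expects a list of (user, bot) message pairs.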