vilarin committed on
Commit
fd6304d
β€’
1 Parent(s): 3b9cb87

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -57,15 +57,15 @@ tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
57
 
58
  @spaces.GPU
59
  def stream_chat(message: str, history: list, temperature: float, max_new_tokens: int):
60
- print("message:"+ message)
61
- print("history:"+ history)
62
  conversation = []
63
  for prompt, answer in history:
64
  conversation.extend([{"role": "user", "content": prompt}, {"role": "assistant", "content": answer}])
65
 
66
  conversation.append({"role": "user", "content": message})
67
 
68
- print("conversation:"+ conversation)
69
 
70
  input_ids = tokenizer.apply_chat_template(conversation, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)
71
 
 
57
 
58
  @spaces.GPU
59
  def stream_chat(message: str, history: list, temperature: float, max_new_tokens: int):
60
+ print(f'message is - {message}')
61
+ print(f'history is - {history}')
62
  conversation = []
63
  for prompt, answer in history:
64
  conversation.extend([{"role": "user", "content": prompt}, {"role": "assistant", "content": answer}])
65
 
66
  conversation.append({"role": "user", "content": message})
67
 
68
+ print(f"Conversation is -\n{conversation}")
69
 
70
  input_ids = tokenizer.apply_chat_template(conversation, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)
71