while-nalu committed on
Commit
eadcaf7
1 Parent(s): 0b3fe04

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -0
app.py CHANGED
# --- Chat turn: read user input, run the model, record the reply ---
# NOTE(review): reconstructed from a diff-view paste; interleaved line numbers
# dropped and conventional indentation restored. `st`, `tokenizer`, `model`,
# `device`, and `messages` are defined earlier in app.py, outside this chunk.

# Get user input
user_input = st.text_input("Your message")

# Generate response
if user_input:
    # Record the user's turn before building the prompt.
    messages.append({"role": "user", "content": user_input})
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
    )
    model_inputs = tokenizer([text], return_tensors="pt").to(device)
    generated_ids = model.generate(
        model_inputs.input_ids,
        max_new_tokens=512,
    )  # NOTE(review): the closing paren was elided between diff hunks (line 53) — reconstructed; confirm against the full file
    # Drop the prompt tokens so only the newly generated continuation remains.
    generated_ids = [
        output_ids[len(input_ids):]
        for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]
    response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
    # Record the assistant's turn so the next prompt includes it.
    messages.append({"role": "assistant", "content": response})

# Display response
# --- Chat turn (post-commit version): adds five debug print statements ---
# NOTE(review): reconstructed from a diff-view paste; interleaved line numbers
# dropped and conventional indentation restored. `st`, `tokenizer`, `model`,
# `device`, and `messages` are defined earlier in app.py, outside this chunk.
# NOTE(review): the bare print(...) calls below look like leftover stdout
# debugging — consider `logging` or removal once the issue is diagnosed.

# Get user input
user_input = st.text_input("Your message")
print("received!")

# Generate response
if user_input:
    # Record the user's turn before building the prompt.
    messages.append({"role": "user", "content": user_input})
    print("good!")
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
    )
    model_inputs = tokenizer([text], return_tensors="pt").to(device)
    print("good!")
    generated_ids = model.generate(
        model_inputs.input_ids,
        max_new_tokens=512,
    )  # NOTE(review): the closing paren was elided between diff hunks (line 56) — reconstructed; confirm against the full file
    # Drop the prompt tokens so only the newly generated continuation remains.
    generated_ids = [
        output_ids[len(input_ids):]
        for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]
    print("good!")
    response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
    print("good!")
    # Record the assistant's turn so the next prompt includes it.
    messages.append({"role": "assistant", "content": response})

# Display response