supernovamutinda committed on
Commit
df62058
1 Parent(s): 856b14c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -11
app.py CHANGED
@@ -26,17 +26,17 @@ if choice == "Meal Suggester":
26
  st.write(f"you have said {user_input}")
27
 
28
  # We use the tokenizer's chat template to format each message - see https://huggingface.co/docs/transformers/main/en/chat_templating
29
- messages = [
30
- {
31
- "role": "system",
32
- "content": "You are a friendly chatbot who always responds in the style of a pirate",
33
- },
34
- {"role": "user", "content": f"{user_input}"},
35
- ]
36
- prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
37
- outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
38
- st.write(outputs[0]["generated_text"])
39
- # <|system|>
40
  # You are a friendly chatbot who always responds in the style of a pirate.</s>
41
  # <|user|>
42
  # How many helicopters can a human eat in one sitting?</s>
 
26
  st.write(f"you have said {user_input}")
27
 
28
  # We use the tokenizer's chat template to format each message - see https://huggingface.co/docs/transformers/main/en/chat_templating
29
+ messages = [
30
+ {
31
+ "role": "system",
32
+ "content": "You are a friendly chatbot who always responds in the style of a pirate",
33
+ },
34
+ {"role": "user", "content": f"{user_input}"},
35
+ ]
36
+ prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
37
+ outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
38
+ st.write(outputs[0]["generated_text"])
39
+ # <|system|>
40
  # You are a friendly chatbot who always responds in the style of a pirate.</s>
41
  # <|user|>
42
  # How many helicopters can a human eat in one sitting?</s>