supernovamutinda committed
Commit 856b14c
1 Parent(s): e64b0f5

Update app.py

Files changed (1)
  1. app.py +4 -3
app.py CHANGED
@@ -21,8 +21,9 @@ if choice == "Meal Suggester":

    pipe = pipeline("text-generation", model="HuggingFaceH4/zephyr-7b-beta", torch_dtype=torch.bfloat16, device_map="auto")

-   user_input = st.input("insert text here", )
-   st.write(user_input)
+   user_input = st.chat_input("Say something")
+   if user_input:
+       st.write(f"you have said {user_input}")

    # We use the tokenizer's chat template to format each message - see https://huggingface.co/docs/transformers/main/en/chat_templating
    messages = [
@@ -30,7 +31,7 @@ if choice == "Meal Suggester":
            "role": "system",
            "content": "You are a friendly chatbot who always responds in the style of a pirate",
        },
-       {"role": "user", "content": "How many Ships can a human eat in one sitting?"},
+       {"role": "user", "content": f"{user_input}"},
    ]
    prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
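For context, a minimal sketch of how the updated snippet might fit into a runnable app.py. Only the lines shown in the diff come from the commit; the imports, the sidebar `choice` widget, the decision to keep generation inside the `if user_input:` guard, and the final `st.write(outputs[0]["generated_text"])` line are assumptions added for illustration.

```python
import streamlit as st
import torch
from transformers import pipeline

# Assumed navigation widget; the diff only shows the "Meal Suggester" branch.
choice = st.sidebar.selectbox("Menu", ["Meal Suggester"])

if choice == "Meal Suggester":
    pipe = pipeline(
        "text-generation",
        model="HuggingFaceH4/zephyr-7b-beta",
        torch_dtype=torch.bfloat16,
        device_map="auto",
    )

    user_input = st.chat_input("Say something")
    if user_input:
        st.write(f"you have said {user_input}")

        # Format the conversation with the tokenizer's chat template.
        messages = [
            {
                "role": "system",
                "content": "You are a friendly chatbot who always responds in the style of a pirate",
            },
            {"role": "user", "content": f"{user_input}"},
        ]
        prompt = pipe.tokenizer.apply_chat_template(
            messages, tokenize=False, add_generation_prompt=True
        )
        outputs = pipe(
            prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95
        )

        # The text-generation pipeline returns a list of dicts; "generated_text"
        # holds the prompt plus the model's continuation.
        st.write(outputs[0]["generated_text"])
```

If only the model's reply is wanted (without the echoed prompt), the text-generation pipeline also accepts `return_full_text=False`.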