JamalAG committed on
Commit
ce7457e
1 Parent(s): 04d4458

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -18
app.py CHANGED
import streamlit as st
from transformers import pipeline
import torch

# Hub id of the chat model; also reused as the local save path below.
MODEL_PATH = "HuggingFaceH4/zephyr-7b-beta"


def main():
    """Render a minimal Streamlit chat UI backed by a text-generation pipeline."""
    st.title("Chatbot with Hugging Face Model")

    # Try the local copy first; on any load failure, fall back to downloading
    # from the Hub and save a local copy for the next run.
    try:
        pipe = pipeline("text-generation", model=MODEL_PATH, torch_dtype=torch.bfloat16, device_map="auto")
    except Exception:
        # Was a bare `except:`, which also swallows SystemExit/KeyboardInterrupt.
        st.warning("Model not found locally. Downloading and saving the model. Please wait...")
        # Use MODEL_PATH instead of repeating the hub-id string literal.
        pipe = pipeline("text-generation", model=MODEL_PATH, torch_dtype=torch.bfloat16, device_map="auto")
        pipe.save_pretrained(MODEL_PATH)

    # Chat prompt: fixed system persona + free-form user input.
    messages = [
        {"role": "system", "content": "You are a friendly chatbot who always responds in the style of a pirate"},
        {"role": "user", "content": st.text_input("User Input", "How many helicopters can a human eat in one sitting?")},
    ]

    # Render the model's chat template, then sample a completion.
    prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)

    # Display generated text (pipeline output includes the prompt text).
    st.text(outputs[0]["generated_text"])


if __name__ == "__main__":
    main()
import streamlit as st
from transformers import pipeline
import torch


# Cache the pipeline across Streamlit reruns: without this, the 7B model is
# re-loaded on every widget interaction, since Streamlit reruns main() each time.
# NOTE(review): st.cache_resource requires Streamlit >= 1.18 — confirm the
# deployed version.
@st.cache_resource
def _load_pipeline():
    """Load the zephyr-7b-beta text-generation pipeline once per process."""
    return pipeline(
        "text-generation",
        model="HuggingFaceH4/zephyr-7b-beta",
        torch_dtype=torch.bfloat16,
        device_map="auto",
    )


def main():
    """Streamlit chat UI: take user text, generate a reply on button press."""
    st.title("Chatbot with Hugging Face Model")

    pipe = _load_pipeline()

    user_input = st.text_area("Enter Text")
    # Define chat messages: fixed system persona + the user's text.
    messages = [
        {"role": "system", "content": "You are a friendly chatbot who always responds in kind way."},
        {"role": "user", "content": user_input},
    ]
    submit = st.button('Generate')
    if submit:
        # Render the model's chat template, then sample a completion.
        prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)

        # Display generated text (pipeline output includes the prompt text).
        st.text(outputs[0]["generated_text"])


if __name__ == "__main__":
    main()