import os

import streamlit as st
import torch
from transformers import pipeline

# Hub ID of the model; also used as the directory for the local copy.
MODEL_PATH = "HuggingFaceH4/zephyr-7b-beta"


@st.cache_resource
def load_pipeline():
    # Streamlit reruns the script on every interaction, so cache the pipeline
    # to avoid reloading the 7B model each time.
    # Check if the model is already saved locally
    if os.path.isdir(MODEL_PATH):
        return pipeline("text-generation", model=MODEL_PATH,
                        torch_dtype=torch.bfloat16, device_map="auto")
    # If not saved, download the model from the Hub and save a local copy
    st.warning("Model not found locally. Downloading and saving the model. Please wait...")
    pipe = pipeline("text-generation", model="HuggingFaceH4/zephyr-7b-beta",
                    torch_dtype=torch.bfloat16, device_map="auto")
    pipe.save_pretrained(MODEL_PATH)
    return pipe


def main():
    st.title("Chatbot with Hugging Face Model")
    pipe = load_pipeline()

    # Define chat messages: a fixed system prompt plus the user's input
    messages = [
        {"role": "system", "content": "You are a friendly chatbot who always responds in the style of a pirate"},
        {"role": "user", "content": st.text_input("User Input", "How many helicopters can a human eat in one sitting?")},
    ]

    # Generate a response using the model's chat template
    prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)

    # Display only the reply: the pipeline output includes the prompt,
    # so slice it off before rendering
    st.text(outputs[0]["generated_text"][len(prompt):])


if __name__ == "__main__":
    main()
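To try the app, save the script (e.g. as app.py, name assumed) and launch it with streamlit run app.py. Note that zephyr-7b-beta is a 7-billion-parameter model, so the first run downloads several gigabytes of weights; device_map="auto" places the model on a GPU if one is available and falls back to CPU otherwise, where generation will be slow.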