workout_4 / app.py
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Streamlit's cache decorator so the model and tokenizer are loaded only once per process
@st.cache_resource
def load_pipeline():
    model_name = "NousResearch/Llama-2-7b-chat-hf"  # Replace with your actual model name
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    chat_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer, max_length=300)
    return chat_pipeline
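# Note: a 7B-parameter model in the default float32 precision needs roughly
# 28 GB of memory. A common memory-saving variant (an assumption, not part of
# the original app) is to load the weights in half precision:
#     import torch
#     model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16)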
# Initialize the pipeline
chat_pipeline = load_pipeline()
st.title("Interact with Your Model")
# User input
user_input = st.text_area("Enter your prompt:", "")
if st.button("Submit"):
    if user_input:
        try:
            # Generate text based on the input
            generated_text = chat_pipeline(user_input)[0]['generated_text']
            st.write(generated_text)
        except Exception as e:
            st.error(f"Error generating text: {e}")
    else:
        st.write("Please enter a prompt.")
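# The pipeline call also forwards standard generation kwargs, so sampling can
# be tuned per request. A sketch with hypothetical settings (not part of the
# original app):
#     chat_pipeline(user_input, do_sample=True, temperature=0.7, max_new_tokens=256)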