import streamlit as st
import logging
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
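
# Assumed dependencies (not pinned anywhere in the original): streamlit,
# transformers, and a model backend such as torch, e.g.
#   pip install streamlit transformers torch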
# Set the logger to display only CRITICAL messages
logging.basicConfig(level=logging.CRITICAL)

# Cache the model and tokenizer so they load once, not on every Streamlit rerun
# (st.cache_resource is the current replacement for the deprecated
# st.experimental_singleton)
@st.cache_resource
def load_model():
    model_name = "Abbeite/trail_wl"  # Replace with your actual model name
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    return model, tokenizer

model, tokenizer = load_model()
# Function to generate text with the model
def generate_text(prompt):
    formatted_prompt = f"[INST] {prompt} [/INST]"  # Format the prompt according to your specification
    pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_length=300)
    result = pipe(formatted_prompt)
    return result[0]['generated_text']
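
# A minimal alternative sketch (an assumption, not part of the original app):
# cache the pipeline itself with st.cache_resource so it is built once instead
# of on every generate_text call; the helper name load_pipeline is hypothetical.
# @st.cache_resource
# def load_pipeline():
#     return pipeline("text-generation", model=model, tokenizer=tokenizer, max_length=300)
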
st.title("Interact with Your Model")
# User input
user_input = st.text_area("Enter your prompt:", "")
if st.button("Submit"):
    if user_input:
        # Generate text based on the input
        generated_text = generate_text(user_input)
        st.write(generated_text)
    else:
        st.write("Please enter a prompt.")