# FinWise AI — Streamlit app (Hugging Face Space)
# NOTE(review): the original header lines ("Spaces: / Sleeping / Sleeping") were
# Hugging Face page chrome captured by scraping, not part of the program.
import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
# Initialize the model and tokenizer.
model_id = "meta-llama/Meta-Llama-3-8B"


@st.cache_resource  # cache across Streamlit reruns: avoid reloading an 8B model on every widget interaction
def load_model():
    """Load the Llama-3 model and tokenizer and return a text-generation pipeline.

    Llama-3 ships without a pad token, so a dedicated ``<pad>`` special token is
    added and the model's embedding matrix is resized to match the grown
    vocabulary. ``device_map="auto"`` lets accelerate place the weights on the
    available device(s).

    Returns:
        transformers.Pipeline: a "text-generation" pipeline wrapping the model.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16)
    tokenizer.add_special_tokens({"pad_token": "<pad>"})
    model.resize_token_embeddings(len(tokenizer))
    return pipeline("text-generation", model=model, tokenizer=tokenizer, device_map="auto")


pipe = load_model()
# Streamlit app UI
st.title("FinWise AI π")
st.write("Your AI-powered financial advisor")

user_input = st.text_area("Enter your query about stock market investments:", "")

if st.button("Get Insights"):
    if user_input:
        with st.spinner("Generating insights..."):
            try:
                # BUG FIX: the original passed `tokenizer.pad_token_id`, but
                # `tokenizer` is local to load_model() and is not in scope here,
                # so generation always raised NameError. The pipeline exposes its
                # tokenizer, so reach the pad id through `pipe.tokenizer`.
                result = pipe(
                    user_input,
                    max_length=100,
                    num_return_sequences=1,
                    truncation=True,
                    pad_token_id=pipe.tokenizer.pad_token_id,
                )
                st.success("Here are your insights:")
                st.write(result[0]['generated_text'])
            except Exception as e:
                # Surface any generation failure in the UI rather than crashing the app.
                st.error(f"An error occurred: {e}")
    else:
        # Guard: the button was pressed with an empty query.
        st.error("Please enter a query.")