# finQA / app.py
import streamlit as st
from transformers import pipeline
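
# Assumed runtime dependencies (not pinned in the source): streamlit,
# transformers, and a torch backend; peft and accelerate are likely also
# required since the checkpoint below is a LoRA finetune.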
# Streamlit app
st.title("Finance Question Answering with Llama 3.1")
st.write("Enter a financial question and get an answer from the finetuned Llama 3.1 model.")
# Model loading
model_path = "deadbeee/finQA-llama3.1-finetuned-LoRA"
@st.cache_resource
def load_pipeline():
    """Load the finetuned model once and cache it across Streamlit reruns."""
    try:
        return pipeline("text-generation", model=model_path)
    except Exception as e:
        st.error(f"Error loading model: {e}")
        return None
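
# Hedged alternative: if the Hub repo ships only LoRA adapter weights
# (an adapter_config.json rather than merged model weights), pipeline()
# depends on the transformers/peft integration. A minimal sketch that loads
# the adapter explicitly, assuming the `peft` package is installed:
#
#     from peft import AutoPeftModelForCausalLM
#     from transformers import AutoTokenizer
#
#     model = AutoPeftModelForCausalLM.from_pretrained(model_path)
#     tokenizer = AutoTokenizer.from_pretrained(model_path)
#     pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)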
pipe = load_pipeline()
if pipe is None:
    st.stop()
# Text input
user_input = st.text_input("Your question:")
if user_input:
    # Chat-style input for the text-generation pipeline
    messages = [
        {"role": "user", "content": user_input},
    ]
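    # Hedged option: a system turn could steer the model toward concise finance
    # answers; whether the finetune expects one is an assumption, so it is left
    # commented out:
    # messages.insert(0, {"role": "system", "content": "You are a financial QA assistant."})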
    # Generate response (spinner keeps the UI informative during inference)
    with st.spinner("Generating answer..."):
        response = pipe(messages, max_new_tokens=512, do_sample=True, temperature=0.7)
    # With chat-style input, generated_text holds the full message list,
    # so the assistant's reply is the last turn.
    st.write("Model response:")
    st.write(response[0]["generated_text"][-1]["content"])
# Streamlit executes this script top to bottom on every interaction,
# so no __main__ guard is needed. Run with: streamlit run app.py