import streamlit as st
from transformers import pipeline
st.title("Finance Question Answering with Llama 3.1") |
|
|
|
st.write("Enter a financial question and get an answer from the finetuned Llama 3.1 model.") |
|
|
|
|
|
model_path = "deadbeee/finQA-llama3.1-finetuned-LoRA" |
|
@st.cache_resource |
|
def load_pipeline(): |
|
try: |
|
return pipeline("text-generation", model=model_path) |
|
except Exception as e: |
|
st.error(f"Error loading model: {str(e)}") |
|
return None |
|
|
|
pipe = load_pipeline() |
|
|
|
if pipe is None: |
|
st.stop() |
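
# model_path points at what appears to be a LoRA adapter repo; pipeline()
# can resolve such repos automatically when the `peft` package is installed.
# A rough hand-rolled equivalent (a sketch only, untested against this
# specific repo) would be:
#
#   from peft import AutoPeftModelForCausalLM
#   from transformers import AutoTokenizer
#
#   model = AutoPeftModelForCausalLM.from_pretrained(model_path)
#   tokenizer = AutoTokenizer.from_pretrained(model_path)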

user_input = st.text_input("Your question:")

if user_input:
    # The fine-tuned chat model expects messages in role/content format.
    messages = [
        {"role": "user", "content": user_input},
    ]
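
    # A system message could be prepended to steer answers toward finance
    # (illustrative wording, not part of the original app):
    # messages.insert(0, {"role": "system", "content": "You are a concise financial QA assistant."})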

    response = pipe(messages, max_new_tokens=512, do_sample=True, temperature=0.7)

    # With chat-style input, `generated_text` holds the full conversation,
    # including the input messages; the answer is the last (assistant) entry.
    st.write("Model response:")
    st.write(response[0]["generated_text"][-1]["content"])
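
    # For token-by-token output one could pair transformers'
    # TextIteratorStreamer with st.write_stream; a minimal sketch, assuming
    # the pipeline exposes its model/tokenizer (generation runs in a thread):
    #
    #   from threading import Thread
    #   from transformers import TextIteratorStreamer
    #
    #   streamer = TextIteratorStreamer(pipe.tokenizer, skip_prompt=True, skip_special_tokens=True)
    #   input_ids = pipe.tokenizer.apply_chat_template(
    #       messages, add_generation_prompt=True, return_tensors="pt"
    #   ).to(pipe.model.device)
    #   Thread(target=pipe.model.generate,
    #          kwargs=dict(input_ids=input_ids, streamer=streamer, max_new_tokens=512)).start()
    #   st.write_stream(streamer)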


if __name__ == "__main__":
    # Streamlit executes this script top to bottom under `streamlit run`,
    # where __name__ is "__main__", so this line always renders last.
    st.write("Running the Streamlit app.")