neuraldevx committed
Commit 10c7bc9 • 1 Parent(s): 56c2a8e

Update app.py

Files changed (1)
  1. app.py +23 -22
app.py CHANGED
@@ -1,28 +1,29 @@
-import os
-from transformers import AutoTokenizer, AutoModelForCausalLM
 import streamlit as st
+from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
 import torch
 
-# Set the Hugging Face token
-hf_token = os.getenv("HF_TOKEN")
+# Initialize the model and tokenizer
+model_id = "meta-llama/Meta-Llama-3-8B"
 
-# Load the model and tokenizer
-tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B", use_auth_token=hf_token)
-model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B", use_auth_token=hf_token, torch_dtype=torch.bfloat16)
+@st.cache(allow_output_mutation=True)
+def load_model():
+    tokenizer = AutoTokenizer.from_pretrained(model_id)
+    model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16)
+    return pipeline("text-generation", model=model, tokenizer=tokenizer, device_map="auto")
 
-st.title("FinWise AI")
-st.write("Enter a message to generate a response:")
+pipe = load_model()
 
-messages = st.text_area("Message", "Hey, how are you doing today?")
-if st.button("Generate Response"):
-    with st.spinner("Generating response..."):
-        inputs = tokenizer(messages, return_tensors="pt")
-        outputs = model.generate(
-            inputs.input_ids,
-            max_new_tokens=256,
-            do_sample=True,
-            temperature=0.6,
-            top_p=0.9
-        )
-        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
-        st.write(response)
+# Streamlit app UI
+st.title("FinWise AI 🏆")
+st.write("Your AI-powered financial advisor")
+
+user_input = st.text_area("Enter your query about stock market investments:", "")
+
+if st.button("Get Insights"):
+    if user_input:
+        with st.spinner("Generating insights..."):
+            result = pipe(user_input, max_length=100, num_return_sequences=1)
+            st.success("Here are your insights:")
+            st.write(result[0]['generated_text'])
+    else:
+        st.error("Please enter a query.")
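
One behavioral change worth flagging: the old revision passed use_auth_token=hf_token when loading the model, while the new revision passes no credentials at all. meta-llama/Meta-Llama-3-8B is a gated repository on the Hub, so from_pretrained will only succeed if credentials reach huggingface_hub another way (for example, an HF_TOKEN secret in the Space environment, which huggingface_hub reads automatically). If the token should be passed explicitly, recent transformers releases prefer the token keyword over the deprecated use_auth_token; a minimal sketch, assuming the same HF_TOKEN variable the old revision used:

import os
from transformers import AutoTokenizer, AutoModelForCausalLM

# Assumes HF_TOKEN is set in the environment, as in the old revision of app.py.
hf_token = os.getenv("HF_TOKEN")

# `token` supersedes the deprecated `use_auth_token` kwarg in recent transformers.
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B", token=hf_token)
model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B", token=hf_token)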
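The @st.cache(allow_output_mutation=True) decorator in the new code is Streamlit's legacy caching API; it has since been deprecated in favor of st.cache_resource, which is intended precisely for unserializable global resources such as a transformers pipeline. A sketch of the same loader under that assumption (a current Streamlit release, same model_id):

import streamlit as st
import torch
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM

model_id = "meta-llama/Meta-Llama-3-8B"

@st.cache_resource  # modern replacement for st.cache(allow_output_mutation=True)
def load_model():
    # Load once per process and reuse the pipeline across reruns
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16)
    return pipeline("text-generation", model=model, tokenizer=tokenizer, device_map="auto")

pipe = load_model()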
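Finally, the new call uses max_length=100, which counts prompt tokens plus generated tokens, so a long query leaves little room for the answer; it is also a much tighter budget than the old generate() call's max_new_tokens=256. Because a text-generation pipeline forwards generation keyword arguments to model.generate(), the old budget and sampling settings can be carried over directly. A sketch of a drop-in replacement for the pipe(...) line inside the button handler:

# max_new_tokens bounds only the completion, independent of prompt length;
# the sampling settings mirror the old revision's generate() call.
result = pipe(
    user_input,
    max_new_tokens=256,
    do_sample=True,
    temperature=0.6,
    top_p=0.9,
    num_return_sequences=1,
)
st.write(result[0]["generated_text"])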