Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,20 +1,32 @@
|
|
1 |
import streamlit as st
|
2 |
import os
|
3 |
from together import Together
|
|
|
4 |
|
5 |
-
# Initialize the Together client for the Llama-3 chat model. The API key is
# read from the TOGETHER_API_KEY environment variable (None if unset, in which
# case API calls will fail at request time).
client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
|
7 |
|
8 |
def get_response(question):
    """Send *question* to the Llama-3 chat model and return its reply text.

    Parameters
    ----------
    question : str
        The user's question, sent as a single user-role message.

    Returns
    -------
    str
        The content of the model's first choice.
    """
    completion = client.chat.completions.create(
        model="meta-llama/Llama-3-8b-chat-hf",
        messages=[{"role": "user", "content": question}],
    )
    first_choice = completion.choices[0]
    return first_choice.message.content
|
14 |
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
import streamlit as st
|
2 |
import os
|
3 |
from together import Together
|
4 |
+
import PyPDF2
|
5 |
|
|
|
# Initialize the Together client; the API key comes from the TOGETHER_API_KEY
# environment variable (None if unset — requests will then fail with an auth error).
client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
|
7 |
|
def get_response(question, pdf_files):
    """Answer *question* using the text of the uploaded PDFs as context.

    Parameters
    ----------
    question : str
        The user's question.
    pdf_files : iterable
        File-like PDF objects (e.g. Streamlit UploadedFile instances).

    Returns
    -------
    str
        The model's reply text.
    """
    texts = [get_pdf_content(pdf) for pdf in pdf_files]
    combined_text = "\n".join(texts)
    # BUG FIX: `chat.completions.create` has no `context=` keyword — passing
    # one raises TypeError (or at best is silently ignored), so the PDF text
    # never reached the model. Supply it through the messages instead, as a
    # system message that frames the user's question.
    response = client.chat.completions.create(
        model="meta-llama/Llama-3-8b-chat-hf",
        messages=[
            {
                "role": "system",
                "content": (
                    "Answer the user's question using the following document "
                    "text as context:\n" + combined_text
                ),
            },
            {"role": "user", "content": question},
        ],
    )
    return response.choices[0].message.content
|
17 |
|
def get_pdf_content(pdf_file):
    """Extract and concatenate the text of every page in *pdf_file*.

    Parameters
    ----------
    pdf_file : file-like
        A binary file object containing a PDF (e.g. Streamlit UploadedFile).

    Returns
    -------
    str
        The extracted text of all pages, concatenated in page order. Pages
        with no extractable text contribute the empty string.
    """
    reader = PyPDF2.PdfReader(pdf_file)
    # BUG FIX: PyPDF2's extract_text() may return None for pages without
    # extractable text (e.g. scanned images); the original `text += ...` would
    # then raise TypeError. Guard with `or ""` and build the result with a
    # single join instead of repeated string concatenation.
    return "".join(page.extract_text() or "" for page in reader.pages)
|
24 |
+
|
# --- Streamlit UI -----------------------------------------------------------
st.title('Llama-3 Chatbot with PDF Input')

uploaded_files = st.file_uploader("Upload PDF", accept_multiple_files=True, type=['pdf'])
question = st.text_input("Ask me anything based on the uploaded PDFs!")

# Query the model only when the button was clicked AND the user supplied both
# at least one PDF and a non-empty question (short-circuit keeps the original
# nested-if behavior).
if st.button("Answer") and uploaded_files and question:
    answer = get_response(question, uploaded_files)
    st.text_area("Answer:", value=answer, height=300)
|