Update app.py

app.py CHANGED
Removed (the previous version of the file; most deleted lines are truncated in this diff view, so only fragments survive): the same imports in a different order, including `from llama_index.core import Settings`; model configuration via `Settings.llm = HuggingFaceInferenceAPI(` and a second `Settings.` assignment for the embedding model; directory setup with two `os.makedirs(` calls; a `def process_query(query):` whose prompt was assembled as a `chat_text_qa_msgs` list and passed through `text_qa_template = ChatPromptTemplate.from_messages(chat_text_qa_msgs)`; and sidebar status strings "Processing..." and "Done".

Added (the updated app.py, reconstructed from the flattened diff; comments flag the places where the committed code would not run as written):
```python
import os
import base64
from dotenv import load_dotenv
import streamlit as st
from llama_index.core import StorageContext, load_index_from_storage, VectorStoreIndex, SimpleDirectoryReader, ChatPromptTemplate, Settings
from llama_index.llms.huggingface import HuggingFaceInferenceAPI
from llama_index.embeddings.huggingface import HuggingFaceEmbedding

# Load environment variables
load_dotenv()

# Configure the LLM and embedding model. llama_index reads Settings.llm and
# Settings.embed_model; the commit assigned Settings.ai_model and
# Settings.embedding_model (which llama_index ignores) and also dropped the
# Settings import, so both are restored here.
Settings.llm = HuggingFaceInferenceAPI(
    model_name="google/gemma-1.1-7b-it",
    tokenizer_name="google/gemma-1.1-7b-it",
    context_window=3000,
    # One unchanged line is elided between diff hunks here; given load_dotenv()
    # above, an auth argument like token=os.getenv("HF_TOKEN") is plausible,
    # but the line is not recoverable from this view.
    max_new_tokens=512,
    generate_kwargs={"temperature": 0.1},
)
Settings.embed_model = HuggingFaceEmbedding(
    model_name="BAAI/bge-small-en-v1.5"
)
```
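load_dotenv() only has an effect if a .env file ships with the Space; for a gated model such as google/gemma-1.1-7b-it it would typically hold a single line like HF_TOKEN=hf_xxx (hypothetical value), which the Hugging Face client libraries read from the environment.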
```python
# Define directories for data storage
DATA_STORAGE_DIR = "./data_storage"
DOCUMENTS_DIR = "documents"

# Create directories if they do not exist
os.makedirs(DOCUMENTS_DIR, exist_ok=True)
os.makedirs(DATA_STORAGE_DIR, exist_ok=True)

def show_pdf(file_path):
    """Display a PDF file in the Streamlit app."""
    with open(file_path, "rb") as file:
        encoded_pdf = base64.b64encode(file.read()).decode('utf-8')
    pdf_html = f'<iframe src="data:application/pdf;base64,{encoded_pdf}" width="100%" height="600" type="application/pdf"></iframe>'
    st.markdown(pdf_html, unsafe_allow_html=True)
```
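show_pdf is defined but never called in this version of the file; a plausible call site (an assumption, not part of the commit) would be right after the upload is saved in the sidebar:

```python
# Hypothetical: preview the saved upload in the main pane.
show_pdf("documents/uploaded_document.pdf")
```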
```python
def ingest_data():
    """Load and index documents from the specified directory."""
    document_files = SimpleDirectoryReader(DOCUMENTS_DIR).load_data()
    # The commit also created an unused StorageContext.from_defaults() here;
    # VectorStoreIndex builds its own storage context, so that line is dropped.
    document_index = VectorStoreIndex.from_documents(document_files)
    document_index.storage_context.persist(persist_dir=DATA_STORAGE_DIR)
```
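A minimal round-trip check of the ingest/persist logic, runnable outside Streamlit once a PDF has been saved into documents/ (the sample question is a placeholder):

```python
ingest_data()                                         # build and persist the index
print(process_query("What is this document about?"))  # load it back and query
```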
```python
def process_query(user_query):
    """Handle user queries by searching the indexed documents."""
    storage_context = StorageContext.from_defaults(persist_dir=DATA_STORAGE_DIR)
    document_index = load_index_from_storage(storage_context)
    chat_template = ChatPromptTemplate.from_messages([
        (
            "user",
            # Plain string, not the committed f-string: {context_str} and
            # {query_str} are llama_index template variables filled in at
            # query time, and an f-string would raise NameError on both names.
            """You are a Q&A assistant named CHATTO, created by Prateek Mohan. You have a specific response programmed for when users specifically ask about your creator, Prateek Mohan. The response is: "I was created by Prateek Mohan, an enthusiast in Artificial Intelligence. He is dedicated to solving complex problems and delivering innovative solutions. With a strong focus on machine learning, deep learning, Python, generative AI, NLP, and computer vision, Prateek is passionate about pushing the boundaries of AI to explore new possibilities." For all other inquiries, your main goal is to provide answers as accurately as possible, based on the instructions and context you have been given. If a question does not match the provided context or is outside the scope of the document, kindly advise the user to ask questions within the context of the document.
            Context:
            {context_str}
            Question:
            {query_str}
            """
        )
    ])

    query_engine = document_index.as_query_engine(text_qa_template=chat_template)
    answer = query_engine.query(user_query)

    if hasattr(answer, 'response'):
        return answer.response
    # Two unchanged lines (a fallback branch) are elided between diff hunks
    # here and are not recoverable from this view.
    else:
        return "Sorry, I couldn't find an answer."
```
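To sanity-check the template wiring (assuming chat_template is lifted to module scope; the argument values are placeholders), the prompt can be rendered directly:

```python
rendered = chat_template.format(
    context_str="(retrieved passage text)",
    query_str="Who created you?",
)
print(rendered)
```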
```python
# Custom CSS to make the overall background white
def local_css(file_name):
    with open(file_name) as f:
        st.markdown(f'<style>{f.read()}</style>', unsafe_allow_html=True)

# Initialize the Streamlit app
st.title("Interactive PDF Assistant")
st.markdown("Developed by Prateek Mohan")

# Apply custom CSS
local_css("style.css")
```
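local_css raises FileNotFoundError if the Space does not ship a style.css. A sketch of a defensive bootstrap, inferring the stylesheet's intent from the comment above (the .stApp selector and the white background are assumptions, not part of the commit):

```python
# Hypothetical: write a default stylesheet when none exists.
if not os.path.exists("style.css"):
    with open("style.css", "w") as f:
        f.write(".stApp { background-color: white; }")
```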
```python
if 'conversation_history' not in st.session_state:
    st.session_state.conversation_history = [{'role': 'assistant', "content": 'Hello! Upload a PDF and ask me anything about its content.'}]

with st.sidebar:
    st.title("Options:")
    uploaded_pdf = st.file_uploader("Upload PDF Files Here")
    if st.button("Process PDF"):
        # Guard added: st.file_uploader returns None until a file is chosen,
        # and the committed code would crash on getbuffer() in that case.
        if uploaded_pdf is None:
            st.warning("Please upload a PDF first.")
        else:
            with st.spinner("Processing PDF..."):
                pdf_path = "documents/uploaded_document.pdf"
                with open(pdf_path, "wb") as f:
                    f.write(uploaded_pdf.getbuffer())
                ingest_data()  # Process and index the PDF
                st.success("PDF processed successfully")
```
```python
user_input = st.chat_input("Ask me anything about the PDF content:")
if user_input:
    st.session_state.conversation_history.append({'role': 'user', "content": user_input})
    assistant_response = process_query(user_input)
    st.session_state.conversation_history.append({'role': 'assistant', "content": assistant_response})

for message in st.session_state.conversation_history:
    with st.chat_message(message['role']):
        st.write(message['content'])
```
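The commit does not touch dependencies, but the imports above imply roughly this requirements set for the Space (current PyPI names, versions left unpinned as an assumption): streamlit, python-dotenv, llama-index, llama-index-llms-huggingface, and llama-index-embeddings-huggingface.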