Commit fd73551
Parent(s): cd563d5

Upload 3 files

- .env +1 -0
- app.py +100 -0
- requirements.txt +9 -0
.env
ADDED
@@ -0,0 +1 @@
+GOOGLE_API_KEY="AIzaSyDs9p7qZMOl8XG_th5QPZ7ILgMlhzSxSBw"
app.py
ADDED
@@ -0,0 +1,100 @@
+import streamlit as st
+from PyPDF2 import PdfReader
+from langchain.text_splitter import RecursiveCharacterTextSplitter
+import os
+from langchain_google_genai import GoogleGenerativeAIEmbeddings
+import google.generativeai as genai
+from langchain.vectorstores import FAISS
+from langchain_google_genai import ChatGoogleGenerativeAI
+from langchain.chains.question_answering import load_qa_chain
+from langchain.prompts import PromptTemplate
+from dotenv import load_dotenv
+
+# Load environment variables
+load_dotenv()
+api_key = os.getenv("GOOGLE_API_KEY")
+genai.configure(api_key=api_key)
+
+def get_pdf_text(pdf_docs):
+    text = ""
+    for pdf in pdf_docs:
+        pdf_reader = PdfReader(pdf)
+        for page in pdf_reader.pages:
+            text += page.extract_text() or ""  # extract_text() may return None
+    return text
+
+def get_text_chunks(text):
+    text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000, chunk_overlap=1000)
+    chunks = text_splitter.split_text(text)
+    return chunks
+
+def get_vector_store(text_chunks):
+    embedding_function = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
+    vector_store = FAISS.from_texts(text_chunks, embedding=embedding_function)
+    vector_store.save_local("faiss_index")
+
+def get_conversational_chain():
+    prompt_template = """
+    Answer the question in as much detail as possible from the provided context. Make sure to provide all the details. If the answer is not in the
+    provided context, just say, "answer is not available in the context"; don't provide a wrong answer.\n\n
+    Context:\n {context}\n
+    Question: \n{question}\n
+
+    Answer:
+    """
+    model = ChatGoogleGenerativeAI(model="gemini-1.5-pro-latest", temperature=0.3)
+    prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
+    chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
+    return chain
+
+def user_input(user_question):
+    embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
+    new_db = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)  # index is created locally by this app
+    docs = new_db.similarity_search(user_question)
+    chain = get_conversational_chain()
+    response = chain({"input_documents": docs, "question": user_question}, return_only_outputs=True)
+    return response["output_text"]
+
+# Main function
+def main():
+    st.header("ChatBot")
+
+    if "messages" not in st.session_state:
+        st.session_state.messages = []
+
+    with st.form(key="uploader_form"):
+        pdf_docs = st.file_uploader("Upload your PDF Files", accept_multiple_files=True)
+        submit_button = st.form_submit_button(label="Submit & Process")
+
+    if submit_button:
+        if pdf_docs:
+            with st.spinner("Processing..."):
+                raw_text = get_pdf_text(pdf_docs)
+                text_chunks = get_text_chunks(raw_text)
+                get_vector_store(text_chunks)
+            st.success("Processing completed successfully.")
+        else:
+            st.warning("Please upload at least one PDF file.")
+
+    # Display chat messages from history on app rerun
+    for message in st.session_state.messages:
+        with st.chat_message(message["role"]):
+            st.markdown(message["content"])
+
+    # React to user input
+    if prompt := st.chat_input("Ask a question from the PDF files"):
+        # Display user message in chat message container
+        st.chat_message("user").markdown(prompt)
+        # Add user message to chat history
+        st.session_state.messages.append({"role": "user", "content": prompt})
+
+        response = user_input(prompt)
+        # Display assistant response in chat message container
+        with st.chat_message("assistant"):
+            st.markdown(response)
+        # Add assistant response to chat history
+        st.session_state.messages.append({"role": "assistant", "content": response})
+
+if __name__ == "__main__":
+    main()
+
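For a quick check outside the Streamlit UI, the same pipeline can be driven from a plain script. A minimal sketch, assuming app.py is importable from the working directory, a valid GOOGLE_API_KEY in .env, and a hypothetical local file sample.pdf:

# sanity_check.py -- hypothetical helper, not part of this commit
from app import get_pdf_text, get_text_chunks, get_vector_store, user_input

with open("sample.pdf", "rb") as f:  # PdfReader accepts file-like objects
    raw_text = get_pdf_text([f])

chunks = get_text_chunks(raw_text)
get_vector_store(chunks)  # writes the local faiss_index folder

print(user_input("What is this document about?"))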
requirements.txt
ADDED
@@ -0,0 +1,9 @@
+streamlit
+streamlit-option-menu
+google-generativeai
+python-dotenv
+langchain
+PyPDF2
+chromadb
+faiss-cpu
+langchain_google_genai