import streamlit as st
from PyPDF2 import PdfReader
from langchain_text_splitters import RecursiveCharacterTextSplitter
import os
from langchain_google_genai import GoogleGenerativeAIEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.chains.question_answering import load_qa_chain
from langchain.prompts import PromptTemplate
from streamlit_chromadb_connection.chromadb_connection import ChromadbConnection
# Configuration for streamlit_chromadb_connection (imported above but not used
# in the flow below, which talks to Chroma through LangChain directly).
configuration = {
    "client": "PersistentClient",
    "path": "/tmp/.chroma"
}
st.set_page_config(page_title="Document Genie", layout="wide")
st.markdown("""
## Document Genie: Get instant insights from your Documents
This chatbot is built using the Retrieval-Augmented Generation (RAG) framework, leveraging Google's Generative AI model Gemini-PRO. It processes uploaded PDF documents by breaking them down into manageable chunks, creates a searchable vector store, and generates accurate answers to user queries. This advanced approach ensures high-quality, contextually relevant responses for an efficient and effective user experience.
### How It Works
Follow these simple steps to interact with the chatbot:
1. **Upload Your Documents**: The system accepts multiple PDF files at once, analyzing the content to provide comprehensive insights.
2. **Ask a Question**: After processing the documents, ask any question related to the content of your uploaded documents for a precise answer.
""")
def get_pdf(pdf_docs):
    """Read every uploaded PDF and concatenate the extracted text."""
    text = ""
    for pdf in pdf_docs:
        pdf_reader = PdfReader(pdf)
        for page in pdf_reader.pages:
            # extract_text() can return None for image-only pages
            text += page.extract_text() or ""
    return text
def text_splitter(text):
    """Split the raw text into overlapping chunks for embedding."""
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=500,
        chunk_overlap=20,
        separators=["\n\n", "\n", " ", ".", ","])
    chunks = splitter.split_text(text)
    return chunks
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
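# The Gemini API key is expected to be present in the environment; loading it
# from a .env file (python-dotenv) or st.secrets would also work, but that
# setup is not included here.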
def embedding(chunks):
    """Embed the text chunks and persist them in a local Chroma store."""
    embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
    # The chunks are plain strings, so build the store with from_texts and
    # persist it to ./chroma_db so user_call() can reload it later.
    # With older chromadb releases an explicit db.persist() call may be needed.
    db = Chroma.from_texts(chunks, embeddings, persist_directory="./chroma_db")
    return db
def get_conversational_chain():
    """Build a stuff-type QA chain whose prompt keeps Gemini grounded in the retrieved context."""
    prompt_template = """
    Answer the question in as much detail as possible from the provided context.
    If the answer is not in the provided context, just say "answer is not available in the context";
    do not give a wrong answer.

    Context:\n{context}\n
    Question:\n{question}\n
    Answer:
    """
    model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3, google_api_key=GOOGLE_API_KEY)
    prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
    chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
    return chain
def user_call(query):
    """Answer a user query from the persisted Chroma store."""
    embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
    # Reload the vector store that embedding() persisted to ./chroma_db.
    db3 = Chroma(persist_directory="./chroma_db", embedding_function=embeddings)
    docs = db3.similarity_search(query)
    chain = get_conversational_chain()
    # Calling the chain like a function is deprecated in newer LangChain
    # releases; chain.invoke(...) is the replacement.
    response = chain({"input_documents": docs, "question": query}, return_only_outputs=True)
    st.write("Reply: ", response["output_text"])
def main():
    st.header("Chat with your pdf💁")
    query = st.text_input("Ask a Question from the PDF Files", key="query")
    if query:
        user_call(query)
    with st.sidebar:
        st.title("Menu:")
        pdf_docs = st.file_uploader("Upload your PDF Files and Click on the Submit & Process Button", accept_multiple_files=True, key="pdf_uploader")
        if st.button("Submit & Process", key="process_button"):
            with st.spinner("Processing..."):
                raw_text = get_pdf(pdf_docs)
                text_chunks = text_splitter(raw_text)
                embedding(text_chunks)
                st.success("Done")

if __name__ == "__main__":
    main()
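# To launch the app locally (assuming this file is saved as app.py, with
# GOOGLE_API_KEY exported in the environment):
#   streamlit run app.py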