sadak committed on
Commit
020889a
1 Parent(s): 2ef7577

Update app.py

Files changed (1)
  app.py +133 -126
app.py CHANGED
@@ -10,142 +10,149 @@ import os
  import google.generativeai as genai
  from dotenv import load_dotenv
 
- load_dotenv() # Load all env variables
  genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
 
 
- def get_pdf_text(pdf_docs):
-     text=""
-     for pdf in pdf_docs:
-         pdf_reader= PdfReader(pdf)
-         for page in pdf_reader.pages:
-             text+= page.extract_text()
-     return text
 
 
  def get_text_chunks(text):
-     text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000, chunk_overlap=1000)
-     chunks = text_splitter.split_text(text)
-     return chunks
 
 
- def get_vector_store(text_chunks):
-     embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
-     vector_store = FAISS.from_texts(text_chunks, embedding=embeddings)
-     vector_store.save_local("faiss_index")
-
-
- def get_conversational_chain():
-     prompt_template = """
-     Answer the question as detailed as possible from the provided context, make sure to provide all the details, if the answer is not in
-     provided context just say, "answer is not available in the context", don't provide the wrong answer\n\n
-     Context:\n {context}?\n
-     Question: \n{question}\n
-
-     Answer:
-     """
-
-     model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)
-     prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
-     chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
-
-     return chain
-
-
- def user_input(user_question):
-     embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
-     new_db = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)
-     docs = new_db.similarity_search(user_question)
-     chain = get_conversational_chain()
-     response = chain({"input_documents": docs, "question": user_question}, return_only_outputs=True)
-     st.write("Reply: ", response["output_text"])
-
-
- ## function to load Gemini Pro model and get responses
- model = genai.GenerativeModel("gemini-pro-vision")
- def get_gemini_response(input, image=None):
-     if image is not None and model.is_image_model:
-         response = model.generate_content([input, image])
-     else:
-         response = model.generate_content(input)
-     return response.text
-
-
- ## Initialize our Streamlit app
-
- st.set_page_config(page_title='Combined Streamlit Application')
- st.header("Streamlit Application")
-
- # Define the different applications
- applications = {
-     "PDF Chat": "pdf_chat",
-     "Image Chat": "image_chat",
-     "Q&A Chat": "qa_chat"
- }
-
- # Render the dropdown menu
- selected_app = st.sidebar.selectbox("Select Application", list(applications.keys()))
-
- # Function to display PDF Chat application
- def pdf_chat():
-     st.header("PDF Chat Application")
-     user_question = st.text_input("Ask a Question from the PDF Files")
-     if user_question:
-         user_input(user_question)
-
-     pdf_docs = st.file_uploader("Upload your PDF Files and Click on the Submit & Process Button", accept_multiple_files=True)
-     if st.button("Submit & Process"):
-         with st.spinner("Processing..."):
-             raw_text = get_pdf_text(pdf_docs)
-             text_chunks = get_text_chunks(raw_text)
-             get_vector_store(text_chunks)
-             st.success("Done")
-
- # Function to display Image Chat application
- def image_chat():
-     st.header("Image Chat Application")
-     input_text = st.text_input("Input for Gemini Pro:", key="input_gemini")
-     uploaded_file = st.file_uploader("Choose an image...", type="jpg")
-     if uploaded_file is not None:
-         image = Image.open(uploaded_file)
-         st.image(image, caption="Uploaded Image", use_column_width=True)
-
-     submit_gemini = st.button("Ask Gemini Pro")
-
-     if submit_gemini:
-         response_gemini = get_gemini_response(input_text, image)
-         st.subheader("Gemini Pro Response:")
-         st.write(response_gemini)
-
- # Function to display Q&A Chat application
- def qa_chat():
-     st.header("Q&A Chat Application")
-     # Initialize session state for chat history if it doesn't exist
-     if 'chat_history' not in st.session_state:
-         st.session_state['chat_history'] = []
-
-     input_qa = st.text_area("Input for Q&A:", key="input_qa")
-     submit_qa = st.button("Ask the question")
-
-     if submit_qa and input_qa:
-         response_qa = get_gemini_response(input_qa)
-         # Add user query and response to session state chat history
-         st.session_state['chat_history'].append(("You", input_qa))
-         st.subheader("Q&A Response:")
-         for chunk in response_qa:
-             st.write(chunk.text)
-             st.session_state['chat_history'].append(("Gemini Pro", chunk.text))
-
-     st.subheader("Q&A Chat History:")
-     for role, text in st.session_state['chat_history']:
-         st.write(f"{role}: {text}")
-
  # Map selected application to corresponding function
  selected_app_func = {
-     "PDF Chat": pdf_chat,
-     "Image Chat": image_chat,
-     "Q&A Chat": qa_chat
  }
 
  # Run the selected application function
- selected_app_func[selected_app]()
 
  import google.generativeai as genai
  from dotenv import load_dotenv
 
+ # Load environment variables
+ load_dotenv()
  genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
 
 
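+ # PdfReader works directly on the in-memory file objects returned by st.file_uploader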
+ def process_pdfs(pdf_files):
+     """
+     Extracts text from uploaded PDFs and splits them into chunks.
+     """
+     text = ""
+     for pdf in pdf_files:
+         reader = PdfReader(pdf)
+         for page in reader.pages:
+             # extract_text() can return None for image-only pages
+             text += page.extract_text() or ""
+     return get_text_chunks(text)
 
 
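+ # 10,000-character chunks with 1,000 characters of overlap preserve context across chunk boundaries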
  def get_text_chunks(text):
+     """
+     Splits the text into manageable chunks for processing.
+     """
+     splitter = RecursiveCharacterTextSplitter(chunk_size=10000, chunk_overlap=1000)
+     return splitter.split_text(text)
+
+
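+ # The index is persisted to the local "faiss_index" folder so it can be reloaded later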
+ def create_vector_store(text_chunks):
+     """
+     Embeds the chunks, saves the FAISS index locally, and returns the store.
+     """
+     embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
+     vector_store = FAISS.from_texts(text_chunks, embedding=embeddings)
+     # save_local() returns None, so save and return in separate steps
+     vector_store.save_local("faiss_index")
+     return vector_store
+
+
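+ # A "stuff" chain inserts all retrieved documents directly into a single prompt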
+ def build_qa_chain():
+     """
+     Defines the prompt template and loads the question-answering chain.
+     """
+     prompt_template = """
+     Answer the question in detail, considering the provided context.
+     If the answer is not available, state "answer unavailable".
+
+     Context: {context}
+     Question: {question}
+
+     Answer:
+     """
+     model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)
+     prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
+     return load_qa_chain(model, chain_type="stuff", prompt=prompt)
+
+
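+ # similarity_search embeds the question and returns the closest matching chunks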
+ def answer_question(question, vector_store):
+     """
+     Searches the vector store and runs the QA chain over the matching documents.
+     """
+     docs = vector_store.similarity_search(question)
+     qa_chain = build_qa_chain()
+     response = qa_chain({"input_documents": docs, "question": question}, return_only_outputs=True)
+     return response["output_text"]
+
+
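+ # Direct Gemini call used by the image and Q&A apps; no retrieval involved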
+ def get_gemini_response(input_text, image=None):
+     """
+     Generates a response from Gemini, using the vision model when an image is supplied.
+     """
+     if image is not None:
+         model = genai.GenerativeModel("gemini-pro-vision")
+         response = model.generate_content([input_text, image])
+     else:
+         # gemini-pro-vision rejects text-only prompts, so fall back to the text model
+         model = genai.GenerativeModel("gemini-pro")
+         response = model.generate_content(input_text)
+     return response.text
+
+
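+ # Streamlit reruns the script top to bottom on every interaction, so each
+ # app function rebuilds its widgets from current state on each run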
+ def pdf_chat_app():
+     """
+     Handles the PDF chat functionality.
+     """
+     st.header("PDF Chat Application")
+     user_question = st.text_input("Ask a Question from the PDF Files")
+     uploaded_files = st.file_uploader("Upload PDF Files", accept_multiple_files=True)
+
+     vector_store = None
+     if uploaded_files:
+         with st.spinner("Processing..."):
+             text_chunks = process_pdfs(uploaded_files)
+             vector_store = create_vector_store(text_chunks)
+             st.success("Done!")
+
+     if user_question and vector_store is not None:
+         answer = answer_question(user_question, vector_store)
+         st.write("Reply:", answer)
+
+
+ def image_chat_app():
+     """
+     Handles the image chat functionality.
+     """
+     st.header("Image Chat Application")
+     input_text = st.text_input("Input for Gemini Pro:")
+     uploaded_image = st.file_uploader("Choose an image...", type="jpg")
+     image = None  # avoid a NameError when no image has been uploaded
+     if uploaded_image is not None:
+         image = Image.open(uploaded_image)
+         st.image(image, caption="Uploaded Image", use_column_width=True)
+
+     submit_gemini = st.button("Ask Gemini Pro")
+
+     if submit_gemini:
+         response_gemini = get_gemini_response(input_text, image)
+         st.subheader("Gemini Pro Response:")
+         st.write(response_gemini)
+
+
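+ # Unlike local variables, st.session_state survives Streamlit's reruns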
+ def qa_chat_app():
+     """
+     Handles the Q&A chat functionality.
+     """
+     st.header("Q&A Chat Application")
+     # Initialize session state for chat history if it doesn't exist
+     if 'chat_history' not in st.session_state:
+         st.session_state['chat_history'] = []
+
+     input_qa = st.text_area("Input for Q&A:", key="input_qa")
+     submit_qa = st.button("Ask the question")
+
+     if submit_qa and input_qa:
+         # get_gemini_response returns the full reply as a single string
+         response_qa = get_gemini_response(input_qa)
+         # Add user query and response to session state chat history
+         st.session_state['chat_history'].append(("You", input_qa))
+         st.subheader("Q&A Response:")
+         st.write(response_qa)
+         st.session_state['chat_history'].append(("Gemini Pro", response_qa))
+
+     st.subheader("Q&A Chat History:")
+     for role, text in st.session_state['chat_history']:
+         st.write(f"{role}: {text}")
 
 
+ # Initialize the app and render the sidebar selector used below
+ st.set_page_config(page_title='Combined Streamlit Application')
+ st.header("Streamlit Application")
+ selected_app = st.sidebar.selectbox("Select Application", ["PDF Chat", "Image Chat", "Q&A Chat"])
+
  # Map selected application to corresponding function
  selected_app_func = {
+     "PDF Chat": pdf_chat_app,
+     "Image Chat": image_chat_app,
+     "Q&A Chat": qa_chat_app
  }
 
  # Run the selected application function
+ selected_app_func[selected_app]()