sadak committed on
Commit f0996a3
1 Parent(s): a1b27a4

Update app.py

Files changed (1)
  1. app.py +98 -72
app.py CHANGED
@@ -1,45 +1,73 @@
 from dotenv import load_dotenv
-load_dotenv() # Load all env variables
-
 import streamlit as st
-from PyPDF2 import PdfReader
-from langchain.text_splitter import RecursiveCharacterTextSplitter
 import os
-from langchain_google_genai import GoogleGenerativeAIEmbeddings
 import google.generativeai as genai
 from langchain.vectorstores import FAISS
-from langchain_google_genai import ChatGoogleGenerativeAI
 from langchain.chains.question_answering import load_qa_chain
 from langchain.prompts import PromptTemplate
-from PIL import Image
 
-## Function to load gemini pro model and get responses
-def get_gemini_response(input, image=None):
-    model = genai.GenerativeModel("gemini-pro-vision")
-    if input != "":
-        response = model.generate_content([input, image])
-    else:
-        response = model.generate_content(image)
     return response.text
 
 def get_pdf_text(pdf_docs):
-    text=""
     for pdf in pdf_docs:
-        pdf_reader= PdfReader(pdf)
         for page in pdf_reader.pages:
-            text+= page.extract_text()
-    return text
 
 def get_text_chunks(text):
     text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000, chunk_overlap=1000)
     chunks = text_splitter.split_text(text)
     return chunks
 
 def get_vector_store(text_chunks):
-    embeddings = GoogleGenerativeAIEmbeddings(model = "models/embedding-001")
     vector_store = FAISS.from_texts(text_chunks, embedding=embeddings)
     vector_store.save_local("faiss_index")
 
 def get_conversational_chain():
     prompt_template = """
     Answer the question as detailed as possible from the provided context, make sure to provide all the details, if the answer is not in
@@ -50,59 +78,57 @@ def get_conversational_chain():
     Answer:
     """
     model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)
-    prompt = PromptTemplate(template = prompt_template, input_variables = ["context", "question"])
     chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
     return chain
 
-def user_input(user_question):
-    embeddings = GoogleGenerativeAIEmbeddings(model = "models/embedding-001")
-    new_db = FAISS.load_local("faiss_index", embeddings,allow_dangerous_deserialization=True)
-    docs = new_db.similarity_search(user_question)
-    chain = get_conversational_chain()
-    response = chain({"input_documents":docs, "question": user_question}, return_only_outputs=True)
-    return response["output_text"]
-
-def main():
-    st.set_page_config(page_title='Combined Demo')
-    st.header("Combined Application")
-
-    app_mode = st.sidebar.selectbox("Choose the App Mode", ["Gemini Q&A", "PDF Q&A", "Gemini Image"])
-
-    if app_mode == "Gemini Q&A":
-        st.subheader("Gemini LLM Application")
-        user_question = st.text_input("Input")
-        if st.button("Ask the question"):
-            response = get_gemini_response(user_question)
-            st.subheader("The Response is :")
-            st.write(response)
-
-    elif app_mode == "PDF Q&A":
-        st.subheader("Chat with PDF using Gemini💁")
-        user_question = st.text_input("Ask a Question from the PDF Files")
-        pdf_docs = st.file_uploader("Upload your PDF Files and Click on the Submit & Process Button", accept_multiple_files=True)
-        if st.button("Submit & Process"):
-            with st.spinner("Processing..."):
-                raw_text = get_pdf_text(pdf_docs)
-                text_chunks = get_text_chunks(raw_text)
-                get_vector_store(text_chunks)
-                st.success("Done")
-
-        if user_question:
-            response = user_input(user_question)
-            st.write("Reply: ", response)
-
-    elif app_mode == "Gemini Image":
-        st.subheader("Gemini LLM Application for Image")
-        input_text = st.text_input("Input")
-        uploaded_file = st.file_uploader("Choose an image...", type="jpg")
-        if uploaded_file is not None:
-            image = Image.open(uploaded_file)
-            st.image(image, caption="Uploaded Image", use_column_width=True)
-
-            if st.button("Tell me about the image"):
-                response = get_gemini_response(input_text, image=image)
-                st.subheader("The Response is :")
-                st.write(response)
-
-if __name__ == "__main__":
-    main()
 
 from dotenv import load_dotenv
 import streamlit as st
 import os
+import sqlite3
 import google.generativeai as genai
+from PIL import Image
+from PyPDF2 import PdfReader
+from langchain.text_splitter import RecursiveCharacterTextSplitter
+from langchain_google_genai import GoogleGenerativeAIEmbeddings, ChatGoogleGenerativeAI
 from langchain.vectorstores import FAISS
 from langchain.chains.question_answering import load_qa_chain
 from langchain.prompts import PromptTemplate
 
+load_dotenv() # Load all env variables
+genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
+
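+# Note: assumes GOOGLE_API_KEY is supplied by the .env file loaded above
+# (or by the environment), e.g.:
+#   GOOGLE_API_KEY=your-key-here
+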
+# Function to load the Google Gemini model and provide responses
+def get_gemini_response(question, prompt=None, image=None):
+    model = genai.GenerativeModel("gemini-pro-vision" if image else "gemini-pro")
+    context = prompt[0] if prompt else None
+    # Drop missing parts so generate_content never receives None
+    inputs = [part for part in (context, question, image) if part is not None]
+    response = model.generate_content(inputs)
+    return response.text
+
+# Function to run a SQL query against a SQLite database and return all rows
+def read_sql_query(sql, db):
+    conn = sqlite3.connect(db)
+    cur = conn.cursor()
+    cur.execute(sql)
+    rows = cur.fetchall()
+    conn.close()
+    return rows
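+# Usage sketch (hypothetical: no database ships with this app, so the file and
+# table names below are placeholders for illustration only):
+#   rows = read_sql_query("SELECT * FROM STUDENT;", "student.db")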
 
+# Function to load PDF text
 def get_pdf_text(pdf_docs):
+    text = ""
     for pdf in pdf_docs:
+        pdf_reader = PdfReader(pdf)
         for page in pdf_reader.pages:
+            text += page.extract_text()
+    return text
 
+# Function to split text into chunks
 def get_text_chunks(text):
     text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000, chunk_overlap=1000)
     chunks = text_splitter.split_text(text)
     return chunks
 
+# Function to create a vector store
 def get_vector_store(text_chunks):
+    embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
     vector_store = FAISS.from_texts(text_chunks, embedding=embeddings)
     vector_store.save_local("faiss_index")
 
+# Function to get the conversational QA chain
 def get_conversational_chain():
     prompt_template = """
     Answer the question as detailed as possible from the provided context, make sure to provide all the details, if the answer is not in
 
     Answer:
     """
     model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)
+    prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
     chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
     return chain
 
+# Streamlit App
+st.set_page_config(page_title="Consolidated Application")
+
+st.header("Consolidated Application")
+
+# Gemini LLM Application
+st.subheader("Gemini LLM Application")
+
+# Input for Gemini LLM Application
+gemini_input = st.text_input("Input for Gemini LLM", key="gemini_input")
+
+# File uploader for an optional image (a unique key avoids a DuplicateWidgetID
+# clash with the identical uploader in the image-description section below)
+uploaded_image = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"], key="gemini_image")
+
+# Button to generate response for Gemini LLM Application
+if st.button("Generate Response for Gemini LLM"):
+    # Convert the uploaded file to a PIL image before passing it to Gemini
+    image = Image.open(uploaded_image) if uploaded_image else None
+    gemini_response = get_gemini_response(gemini_input, image=image)
+    st.subheader("Gemini LLM Response")
+    st.write(gemini_response)
+
+# Chat with PDF using Gemini
+st.subheader("Chat with PDF using Gemini")
+
+# Input for Chat with PDF
+pdf_question = st.text_input("Ask a Question from the PDF Files", key="pdf_question")
+
+# File uploader for PDF
+uploaded_pdf = st.file_uploader("Upload your PDF Files", type="pdf", accept_multiple_files=True)
+
+# Button to process the PDFs into a local FAISS index
+if st.button("Submit & Process PDF"):
+    raw_text = get_pdf_text(uploaded_pdf)
+    text_chunks = get_text_chunks(raw_text)
+    get_vector_store(text_chunks)
+    st.success("PDF Processing Complete")
+
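+# Sketch, not part of the original file: pdf_question above is collected but
+# never answered; this mirrors the user_input() helper removed by this commit.
+if pdf_question:
+    embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
+    pdf_db = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)
+    docs = pdf_db.similarity_search(pdf_question)
+    chain = get_conversational_chain()
+    answer = chain({"input_documents": docs, "question": pdf_question}, return_only_outputs=True)
+    st.write("Reply: ", answer["output_text"])
+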
+# Image Description Generation
+st.subheader("Image Description Generation")
+
+# Input for Image Description Generation
+image_input = st.text_input("Input for Image Description Generation", key="image_input")
+
+# File uploader for the image to describe (distinct name and key so it does
+# not collide with the uploader in the Gemini LLM section above)
+uploaded_desc_image = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"], key="desc_image")
+
+# Button to generate the image description
+if st.button("Generate Image Description"):
+    image = Image.open(uploaded_desc_image) if uploaded_desc_image else None
+    image_response = get_gemini_response(image_input, image=image)
+    st.subheader("Image Description")
+    st.write(image_response)