sadak committed
Commit 9cc03e8
Parent: a692bf7

Update app.py

Files changed (1): app.py (+98, -71)
app.py CHANGED
@@ -1,28 +1,81 @@
  from dotenv import load_dotenv
- import streamlit as st
- import os
- import sqlite3
- import google.generativeai as genai
- from PIL import Image
  from PyPDF2 import PdfReader
  from langchain.text_splitter import RecursiveCharacterTextSplitter
  from langchain_google_genai import GoogleGenerativeAIEmbeddings, ChatGoogleGenerativeAI
  from langchain.vectorstores import FAISS
  from langchain.chains.question_answering import load_qa_chain
  from langchain.prompts import PromptTemplate
+ import streamlit as st
+ import os
+ import sqlite3
+ import google.generativeai as genai
+
+ # Load environment variables
+ load_dotenv()

- load_dotenv() # Load all env variables
+ # Configure Genai Key
  genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))

- # Function to load Google Gemini Model and provide responses
- def get_gemini_response(question, prompt=None, image=None):
-     model = genai.GenerativeModel("gemini-pro-vision" if image else "gemini-pro")
-     context = prompt[0] if prompt else None
-     inputs = [question, context, image] if prompt else [question, image]
-     response = model.generate_content(inputs)
-     return response.text
-
- # Function to load PDF text
+ # Initialize Streamlit app
+ st.set_page_config(page_title="Q&A Demo")
+
+ st.header("Gemini LLM Application")
+
+ # Initialize session state for chat history if it doesn't exist
+ if 'chat_history' not in st.session_state:
+     st.session_state['chat_history'] = []
+
+ # Load Google Gemini Model
+ model = genai.GenerativeModel("gemini-pro")
+ chat = model.start_chat(history=[])
+
+ # Function to get response from Gemini model (streamed)
+ def get_gemini_response(question):
+     response = chat.send_message(question, stream=True)
+     return response
+
+ # Function to read SQL query from database
+ def read_sql_query(sql, db):
+     conn = sqlite3.connect(db)
+     cur = conn.cursor()
+     cur.execute(sql)
+     rows = cur.fetchall()
+     conn.commit()
+     conn.close()
+     return rows
+
+ # Define prompt for Gemini model
+ prompt = """
+ You are an expert in converting English questions to SQL query!
+ The SQL database has the name STUDENT and has the following columns - NAME, CLASS, SECTION \n\nFor example,\nExample 1 - How many entries of records are present?,
+ the SQL command will be something like this SELECT COUNT(*) FROM STUDENT ;
+ \nExample 2 - Tell me all the students studying in Data Science class?,
+ the SQL command will be something like this SELECT * FROM STUDENT
+ where CLASS="Data Science"; And show me the data in tabular format if possible.
+ also the sql code should not have ``` in beginning or end and sql word in output
+ """
+
+ # Streamlit UI
+ input_text = st.text_area("Input: ", key="input")
+ submit_button = st.button("Ask the question")
+
+ if submit_button and input_text:
+     # Get response from Gemini model
+     response = get_gemini_response(input_text)
+
+     # Add user query and response to session state chat history
+     st.session_state['chat_history'].append(("You", input_text))
+     st.subheader("The Response is")
+     for chunk in response:
+         st.write(chunk.text)
+         st.session_state['chat_history'].append(("Bot", chunk.text))
+
+ # Display chat history
+ st.subheader("The Chat History is")
+ for role, text in st.session_state['chat_history']:
+     st.write(f"{role}: {text}")
+
+ # Function to get text from PDF
  def get_pdf_text(pdf_docs):
      text = ""
      for pdf in pdf_docs:
@@ -37,23 +90,15 @@ def get_text_chunks(text):
      chunks = text_splitter.split_text(text)
      return chunks

- # Function to create a vector store
+ # Function to create vector store
  def get_vector_store(text_chunks):
      embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
      vector_store = FAISS.from_texts(text_chunks, embedding=embeddings)
      vector_store.save_local("faiss_index")

- # Function to retrieve query from the database
- def read_sql_query(sql, db):
-     conn = sqlite3.connect(db)
-     cur = conn.cursor()
-     cur.execute(sql)
-     rows = cur.fetchall()
-     conn.close()
-     return rows
-
- # Function to get conversational chain
+ # Function to initialize conversational chain
  def get_conversational_chain():
+     model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)
      prompt_template = """
      Answer the question as detailed as possible from the provided context, make sure to provide all the details, if the answer is not in
      provided context just say, "answer is not available in the context", don't provide the wrong answer\n\n
@@ -62,53 +107,35 @@ def get_conversational_chain():

      Answer:
      """
-     model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)
      prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
      chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
      return chain

- # Streamlit App
- st.set_page_config(page_title="Consolidated Application")
-
- st.header("Consolidated Application")
-
- # Dropdown menu for selecting functionality
- selected_option = st.sidebar.selectbox("Select an option", ["Gemini LLM", "Chat with PDF", "Image Description Generation"])
-
- if selected_option == "Gemini LLM":
-     st.subheader("Gemini LLM Application")
-
-     gemini_input = st.text_input("Input for Gemini LLM", key="gemini_input")
-
-     uploaded_image = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
-
-     if st.button("Generate Response for Gemini LLM"):
-         gemini_response = get_gemini_response(gemini_input, image=uploaded_image)
-         st.subheader("Gemini LLM Response")
-         st.write(gemini_response)
-
- elif selected_option == "Chat with PDF":
-     st.subheader("Chat with PDF using Gemini")
-
-     pdf_question = st.text_input("Ask a Question from the PDF Files", key="pdf_question")
-     uploaded_pdf = st.file_uploader("Upload your PDF Files", type="pdf", accept_multiple_files=True)
-
-     if st.button("Submit & Process PDF"):
-         raw_text = get_pdf_text(uploaded_pdf)
-         text_chunks = get_text_chunks(raw_text)
-         get_vector_store(text_chunks)
-         st.success("PDF Processing Complete")
-
- elif selected_option == "Image Description Generation":
-     st.subheader("Image Description Generation")
-
-     image_input = st.text_input("Input for Image Description Generation", key="image_input")
-     uploaded_image = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
-
-     if st.button("Generate Image Description"):
-         image_response = get_gemini_response(image_input, image=uploaded_image)
-         st.subheader("Image Description")
-         st.write(image_response)
-
-
-
+ # Function to process user input for PDF interaction
+ def user_input(user_question):
+     embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
+     new_db = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)
+     docs = new_db.similarity_search(user_question)
+     chain = get_conversational_chain()
+     response = chain({"input_documents": docs, "question": user_question}, return_only_outputs=True)
+     st.write("Reply: ", response["output_text"])
+
+ # Main function
+ def main():
+     # Page config is already set once at the top of the script
+     st.header("Chat with PDF using Gemini💁")
+     user_question = st.text_input("Ask a Question from the PDF Files")
+     if user_question:
+         user_input(user_question)
+     with st.sidebar:
+         st.title("Menu:")
+         pdf_docs = st.file_uploader("Upload your PDF Files and Click on the Submit & Process Button", accept_multiple_files=True)
+         if st.button("Submit & Process"):
+             with st.spinner("Processing..."):
+                 raw_text = get_pdf_text(pdf_docs)
+                 text_chunks = get_text_chunks(raw_text)
+                 get_vector_store(text_chunks)
+                 st.success("Done")
+
+ if __name__ == "__main__":
+     main()
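
The new app.py prompts Gemini for SQL against a STUDENT table (columns NAME, CLASS, SECTION, per the prompt string) and executes it with read_sql_query(), but the commit never shows the database being created. A minimal seeding sketch, assuming a local SQLite file named student.db; the file name and sample rows are illustrative, not part of the commit:

# seed_student_db.py - hypothetical setup script, not part of this commit.
import sqlite3

conn = sqlite3.connect("student.db")  # assumed file name
cur = conn.cursor()

# Table and columns match the schema described in the prompt string of app.py.
cur.execute("CREATE TABLE IF NOT EXISTS STUDENT (NAME TEXT, CLASS TEXT, SECTION TEXT)")

# Illustrative rows so the prompt's example queries return data.
cur.executemany(
    "INSERT INTO STUDENT (NAME, CLASS, SECTION) VALUES (?, ?, ?)",
    [
        ("Alice", "Data Science", "A"),
        ("Bob", "Data Science", "B"),
        ("Charlie", "DevOps", "A"),
    ],
)

conn.commit()
conn.close()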
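One loose end in the added code: `prompt` and `read_sql_query()` are defined but never called; the submit handler sends `input_text` straight to the chat session. A hedged sketch of how the two could be wired together, reusing the names defined in app.py (the helper name answer_with_sql, the one-shot generate_content call, and the student.db path are assumptions, not the commit's behavior):

# Hypothetical glue, not in this commit. Assumes app.py's genai setup,
# `prompt` string, and read_sql_query() are in scope.
def answer_with_sql(question, db="student.db"):  # db path is an assumption
    sql_model = genai.GenerativeModel("gemini-pro")
    # `prompt` instructs the model to return bare SQL (no ``` fences, no "sql" tag)
    response = sql_model.generate_content([prompt, question])
    sql = response.text.strip()
    return sql, read_sql_query(sql, db)

# Example:
# sql, rows = answer_with_sql("Tell me all the students studying in Data Science class?")
# st.write(sql)
# for row in rows:
#     st.write(row)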