Update app.py
app.py
CHANGED
```diff
@@ -1,45 +1,73 @@
 from dotenv import load_dotenv
-load_dotenv()  # Load all env variables
-
 import streamlit as st
-from PyPDF2 import PdfReader
-from langchain.text_splitter import RecursiveCharacterTextSplitter
 import os
-
+import sqlite3
 import google.generativeai as genai
+from PIL import Image
+from PyPDF2 import PdfReader
+from langchain.text_splitter import RecursiveCharacterTextSplitter
+from langchain_google_genai import GoogleGenerativeAIEmbeddings, ChatGoogleGenerativeAI
 from langchain.vectorstores import FAISS
-from langchain_google_genai import ChatGoogleGenerativeAI
 from langchain.chains.question_answering import load_qa_chain
 from langchain.prompts import PromptTemplate
-from PIL import Image
 
-… (deleted old lines 16-22 were not captured in full; one surviving fragment reads "else") …
+load_dotenv()  # Load all env variables
+genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
+
+# Function to load Google Gemini Model and provide responses
+def get_gemini_response(question, prompt=None, image=None):
+    model = genai.GenerativeModel("gemini-pro-vision" if image else "gemini-pro")
+    context = prompt[0] if prompt else None
+    inputs = [question, context, image] if prompt else [question, image]
+    response = model.generate_content(inputs)
+    return response.text
+
+# Function to retrieve query from the database
+def read_sql_query(sql, db):
+    conn = sqlite3.connect(db)
+    cur = conn.cursor()
+    cur.execute(sql)
+    rows = cur.fetchall()
+    conn.close()
+    return rows
+
+# Function to load Gemini Pro model and get responses
+def get_gemini_response(question, prompt=None):
+    model = genai.GenerativeModel("gemini-pro")
+    response = model.generate_content([prompt[0], question]) if prompt else model.generate_content(question)
     return response.text
 
+# Function to load PDF text
 def get_pdf_text(pdf_docs):
-    text=""
+    text = ""
     for pdf in pdf_docs:
-        pdf_reader= PdfReader(pdf)
+        pdf_reader = PdfReader(pdf)
         for page in pdf_reader.pages:
-            text+= page.extract_text()
-    return
+            text += page.extract_text()
+    return text
 
+# Function to split text into chunks
 def get_text_chunks(text):
     text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000, chunk_overlap=1000)
     chunks = text_splitter.split_text(text)
     return chunks
 
+# Function to create a vector store
 def get_vector_store(text_chunks):
-    embeddings = GoogleGenerativeAIEmbeddings(model…
+    embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
     vector_store = FAISS.from_texts(text_chunks, embedding=embeddings)
     vector_store.save_local("faiss_index")
 
+# Function to retrieve query from the database
+def read_sql_query(sql, db):
+    conn = sqlite3.connect(db)
+    cur = conn.cursor()
+    cur.execute(sql)
+    rows = cur.fetchall()
+    conn.close()
+    return rows
+
+# Function to get conversational chain
 def get_conversational_chain():
     prompt_template = """
     Answer the question as detailed as possible from the provided context, make sure to provide all the details, if the answer is not in
```
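Worth flagging in the hunk above: the new file defines `get_gemini_response` twice, and it also defines `read_sql_query` twice (the second copy appears again in the next hunk). The second, text-only `get_gemini_response` shadows the vision-capable one, so a later call that passes `image=` raises a `TypeError` for an unexpected keyword. A minimal consolidation sketch follows; it is not part of the commit, and the `image_file` handling (opening Streamlit's file-like `UploadedFile` with PIL before handing it to `gemini-pro-vision`) is an assumption about the intent:

```python
import os
import google.generativeai as genai
from PIL import Image

genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))

# Hypothetical merged helper (not in the commit): keeps both the text-only
# and the vision paths reachable under one name.
def get_gemini_response(question, prompt=None, image_file=None):
    # Streamlit's uploader yields a file-like object; gemini-pro-vision
    # expects a PIL image, so open it before building the request.
    image = Image.open(image_file) if image_file is not None else None
    model = genai.GenerativeModel("gemini-pro-vision" if image is not None else "gemini-pro")
    parts = [question]
    if prompt:
        parts.insert(0, prompt[0])  # optional instruction/context string
    if image is not None:
        parts.append(image)
    response = model.generate_content(parts)
    return response.text
```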
```diff
@@ -50,59 +78,57 @@ def get_conversational_chain():
     Answer:
     """
     model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)
-    prompt = PromptTemplate(template…
+    prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
     chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
     return chain
 
```
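Nothing in the visible diff connects this chain, or the `pdf_question` input added below, to the saved FAISS index, so as committed the PDF question box has no effect. A minimal retrieval sketch under that reading, using only APIs the file already imports; `answer_from_pdfs` is a hypothetical name, and newer LangChain releases also require `allow_dangerous_deserialization=True` on `FAISS.load_local`:

```python
from langchain_google_genai import GoogleGenerativeAIEmbeddings
from langchain.vectorstores import FAISS

# Hypothetical wiring (not in the commit): answer a question from the
# index that get_vector_store() saved to "faiss_index".
def answer_from_pdfs(question):
    embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
    db = FAISS.load_local("faiss_index", embeddings)
    docs = db.similarity_search(question)        # retrieve relevant chunks
    chain = get_conversational_chain()
    result = chain({"input_documents": docs, "question": question},
                   return_only_outputs=True)
    return result["output_text"]
```

The hunk continues below with the removed `main()` entry point and the new top-level Streamlit page.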
```diff
-… (deleted old lines 57-106 were not captured) …
-if __name__ == "__main__":
-    main()
+# Streamlit App
+st.set_page_config(page_title="Consolidated Application")
+
+st.header("Consolidated Application")
+
+# Gemini LLM Application
+st.subheader("Gemini LLM Application")
+
+# Input for Gemini LLM Application
+gemini_input = st.text_input("Input for Gemini LLM", key="gemini_input")
+
+# File uploader for image
+uploaded_image = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
+
+# Button to generate response for Gemini LLM Application
+if st.button("Generate Response for Gemini LLM"):
+    gemini_response = get_gemini_response(gemini_input, image=uploaded_image)
+    st.subheader("Gemini LLM Response")
+    st.write(gemini_response)
+
+# Chat with PDF using Gemini
+st.subheader("Chat with PDF using Gemini")
+
+# Input for Chat with PDF
+pdf_question = st.text_input("Ask a Question from the PDF Files", key="pdf_question")
+
+# File uploader for PDF
+uploaded_pdf = st.file_uploader("Upload your PDF Files", type="pdf", accept_multiple_files=True)
+
+# Button to process PDF and generate response
+if st.button("Submit & Process PDF"):
+    raw_text = get_pdf_text(uploaded_pdf)
+    text_chunks = get_text_chunks(raw_text)
+    get_vector_store(text_chunks)
+    st.success("PDF Processing Complete")
+
+# Image Description Generation
+st.subheader("Image Description Generation")
+
+# Input for Image Description Generation
+image_input = st.text_input("Input for Image Description Generation", key="image_input")
+
+# File uploader for image
+uploaded_image = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
+
+# Button to generate response for Image Description Generation
+if st.button("Generate Image Description"):
+    image_response = get_gemini_response(image_input, image=uploaded_image)
+    st.subheader("Image Description")
+    st.write(image_response)
```
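One wiring issue in the page above: it creates two `st.file_uploader("Choose an image...")` widgets with no `key` argument. Streamlit derives widget IDs from the label and parameters, so the second uploader raises a `DuplicateWidgetID` error at render time. A sketch of the fix, with illustrative key names:

```python
# Distinct keys keep Streamlit from colliding on the two identical uploaders.
uploaded_image = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"], key="gemini_image")
description_image = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"], key="description_image")
```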
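Finally, `read_sql_query` is defined twice in the new file and never called in the visible diff. If it is intended for a text-to-SQL flow, usage would look like the sketch below; the database file and table name are hypothetical:

```python
# Hypothetical call (the commit defines read_sql_query but never invokes it).
rows = read_sql_query("SELECT * FROM STUDENT;", "student.db")
for row in rows:
    print(row)
```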