Update app.py
Browse files
app.py
CHANGED
@@ -1,7 +1,4 @@
|
|
1 |
-
from dotenv import load_dotenv
|
2 |
import streamlit as st
|
3 |
-
import os
|
4 |
-
import google.generativeai as genai
|
5 |
from PIL import Image
|
6 |
from PyPDF2 import PdfReader
|
7 |
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
@@ -9,6 +6,9 @@ from langchain_google_genai import GoogleGenerativeAIEmbeddings, ChatGoogleGener
|
|
9 |
from langchain.vectorstores import FAISS
|
10 |
from langchain.chains.question_answering import load_qa_chain
|
11 |
from langchain.prompts import PromptTemplate
|
|
|
|
|
|
|
12 |
|
13 |
load_dotenv() # Load all env variables
|
14 |
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
|
@@ -76,13 +76,23 @@ def get_gemini_response(input,image):
|
|
76 |
st.set_page_config(page_title='Combined Streamlit Application')
|
77 |
st.header("Streamlit Application")
|
78 |
|
79 |
-
#
|
80 |
-
|
81 |
-
|
82 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
83 |
|
84 |
-
with st.sidebar:
|
85 |
-
st.title("Menu:")
|
86 |
pdf_docs = st.file_uploader("Upload your PDF Files and Click on the Submit & Process Button", accept_multiple_files=True)
|
87 |
if st.button("Submit & Process"):
|
88 |
with st.spinner("Processing..."):
|
@@ -91,39 +101,51 @@ with st.sidebar:
|
|
91 |
get_vector_store(text_chunks)
|
92 |
st.success("Done")
|
93 |
|
94 |
-
# Image Chat
|
95 |
-
|
96 |
-
|
97 |
-
|
98 |
-
|
99 |
-
|
100 |
-
|
101 |
-
|
102 |
-
|
103 |
-
|
104 |
-
|
105 |
-
|
106 |
-
|
107 |
-
|
108 |
-
|
109 |
-
|
110 |
-
|
111 |
-
|
112 |
-
|
113 |
-
|
114 |
-
|
115 |
-
|
116 |
-
|
117 |
-
|
118 |
-
|
119 |
-
|
120 |
-
|
121 |
-
|
122 |
-
|
123 |
-
|
124 |
-
st.
|
125 |
-
|
126 |
-
|
127 |
-
st.
|
128 |
-
|
129 |
-
st.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
import streamlit as st
|
|
|
|
|
2 |
from PIL import Image
|
3 |
from PyPDF2 import PdfReader
|
4 |
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
|
|
6 |
from langchain.vectorstores import FAISS
|
7 |
from langchain.chains.question_answering import load_qa_chain
|
8 |
from langchain.prompts import PromptTemplate
|
9 |
+
import os
|
10 |
+
import google.generativeai as genai
|
11 |
+
from dotenv import load_dotenv
|
12 |
|
13 |
load_dotenv() # Load all env variables
|
14 |
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
|
|
|
76 |
st.set_page_config(page_title='Combined Streamlit Application')
|
77 |
st.header("Streamlit Application")
|
78 |
|
79 |
+
# Registry of the sub-applications offered in the sidebar menu.
# NOTE(review): the string values are currently unused — dispatch happens
# via a separate label -> function mapping further down the script.
applications = {
    "PDF Chat": "pdf_chat",
    "Image Chat": "image_chat",
    "Q&A Chat": "qa_chat",
}

# Sidebar dropdown; iterating the dict yields its keys in insertion order,
# so the menu order matches the literal above.
selected_app = st.sidebar.selectbox("Select Application", list(applications))
|
88 |
+
|
89 |
+
# Function to display PDF Chat application
|
90 |
+
def pdf_chat():
|
91 |
+
st.header("PDF Chat Application")
|
92 |
+
user_question = st.text_input("Ask a Question from the PDF Files")
|
93 |
+
if user_question:
|
94 |
+
user_input(user_question)
|
95 |
|
|
|
|
|
96 |
pdf_docs = st.file_uploader("Upload your PDF Files and Click on the Submit & Process Button", accept_multiple_files=True)
|
97 |
if st.button("Submit & Process"):
|
98 |
with st.spinner("Processing..."):
|
|
|
101 |
get_vector_store(text_chunks)
|
102 |
st.success("Done")
|
103 |
|
104 |
+
# Function to display Image Chat application
def image_chat():
    """Render the image-chat page: take a prompt plus an uploaded image
    and show the Gemini Pro response.

    Relies on module-level helpers: `st` (streamlit), `Image` (PIL) and
    `get_gemini_response(input, image)` defined elsewhere in this file.
    """
    st.header("Image Chat Application")
    input_text = st.text_input("Input for Gemini Pro:", key="input_gemini")
    uploaded_file = st.file_uploader("Choose an image...", type="jpg")

    # Bug fix: `image` was only assigned inside the upload branch, so
    # pressing the button with no file uploaded raised NameError below.
    image = None
    if uploaded_file is not None:
        image = Image.open(uploaded_file)
        st.image(image, caption="Uploaded Image", use_column_width=True)

    submit_gemini = st.button("Ask Gemini Pro")

    if submit_gemini:
        if image is None:
            # Graceful message instead of crashing the Streamlit script run.
            st.warning("Please upload an image before asking Gemini Pro.")
        else:
            response_gemini = get_gemini_response(input_text, image)
            st.subheader("Gemini Pro Response:")
            st.write(response_gemini)
|
119 |
+
|
120 |
+
# Function to display Q&A Chat application
def qa_chat():
    """Render the conversational Q&A page and keep a running chat history
    in Streamlit session state (which survives script reruns)."""
    st.header("Q&A Chat Application")

    # Seed the history on first visit; `setdefault` returns the stored list,
    # so appending to `history` mutates st.session_state['chat_history'].
    history = st.session_state.setdefault('chat_history', [])

    input_qa = st.text_area("Input for Q&A:", key="input_qa")
    submit_qa = st.button("Ask the question")

    if submit_qa and input_qa:
        # NOTE(review): called with a single argument here, while the hunk
        # header shows get_gemini_response(input, image) taking two —
        # confirm the correct helper/overload is being used.
        response_qa = get_gemini_response(input_qa)
        # Record the user's query, then stream the model's chunks.
        history.append(("You", input_qa))
        st.subheader("Q&A Response:")
        for chunk in response_qa:
            st.write(chunk.text)
            history.append(("Gemini Pro", chunk.text))

    # Always show the accumulated transcript, even with no new question.
    st.subheader("Q&A Chat History:")
    for role, text in history:
        st.write(f"{role}: {text}")
|
142 |
+
|
143 |
+
# Map each menu label to the function that renders that page. Keys must
# stay in sync with the `applications` dict defined above the sidebar.
selected_app_func = {
    "PDF Chat": pdf_chat,
    "Image Chat": image_chat,
    "Q&A Chat": qa_chat,
}

# Dispatch: look up the page the user picked in the sidebar and render it.
selected_app_func[selected_app]()
|