import os

import streamlit as st
from PIL import Image
from PyPDF2 import PdfReader
from dotenv import load_dotenv
import google.generativeai as genai
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_google_genai import GoogleGenerativeAIEmbeddings, ChatGoogleGenerativeAI
from langchain.vectorstores import FAISS
from langchain.chains.question_answering import load_qa_chain
from langchain.prompts import PromptTemplate

load_dotenv()
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
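# NOTE: the key is read from a .env file (GOOGLE_API_KEY=...) alongside this
# script; if it is missing, os.getenv() returns None and every API call fails.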

def get_pdf_text(pdf_docs):
    """Concatenate the extracted text of every page of the uploaded PDFs."""
    text = ""
    for pdf in pdf_docs:
        pdf_reader = PdfReader(pdf)
        for page in pdf_reader.pages:
            # extract_text() returns None for pages with no extractable text,
            # so fall back to an empty string instead of raising a TypeError.
            text += page.extract_text() or ""
    return text

def get_text_chunks(text):
    """Split the raw text into overlapping chunks for embedding."""
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000, chunk_overlap=1000)
    chunks = text_splitter.split_text(text)
    return chunks
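# chunk_size and chunk_overlap above are measured in characters (the
# splitter's default length function), not tokens.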

def get_vector_store(text_chunks):
    """Embed the chunks and persist a FAISS index to local disk."""
    embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
    vector_store = FAISS.from_texts(text_chunks, embedding=embeddings)
    vector_store.save_local("faiss_index")
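# The "faiss_index" folder written above is the same one user_input() reloads,
# so re-running "Submit & Process" simply overwrites the previous index.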

def get_conversational_chain():
    prompt_template = """
    Answer the question as detailed as possible from the provided context and
    make sure to provide all the details. If the answer is not in the provided
    context, just say "answer is not available in the context"; do not provide
    a wrong answer.

    Context:
    {context}

    Question:
    {question}

    Answer:
    """

    model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)
    prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
    chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
    return chain
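# chain_type="stuff" concatenates every retrieved chunk into a single prompt,
# so answers only work while the combined chunks fit in the model's context
# window.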

def user_input(user_question):
    embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
    # The index was pickled locally by get_vector_store(), so opting in to
    # deserialization here is safe.
    new_db = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)
    docs = new_db.similarity_search(user_question)
    chain = get_conversational_chain()
    response = chain({"input_documents": docs, "question": user_question}, return_only_outputs=True)
    st.write("Reply: ", response["output_text"])

# gemini-pro-vision only accepts prompts that include an image, so keep a
# separate text-only model for plain questions.
text_model = genai.GenerativeModel("gemini-pro")
vision_model = genai.GenerativeModel("gemini-pro-vision")


def get_gemini_response(prompt, image=None):
    # GenerativeModel has no is_image_model attribute; route on whether an
    # image was actually supplied instead.
    if image is not None:
        response = vision_model.generate_content([prompt, image])
    else:
        response = text_model.generate_content(prompt)
    return response.text
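# Example usage (hypothetical file name):
#   get_gemini_response("Describe this image", Image.open("photo.jpg"))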

st.set_page_config(page_title="Combined Streamlit Application")
st.header("Streamlit Application")

applications = {
    "PDF Chat": "pdf_chat",
    "Image Chat": "image_chat",
    "Q&A Chat": "qa_chat",
}

selected_app = st.sidebar.selectbox("Select Application", list(applications.keys()))
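# Each page below is a plain function; the dispatch table at the bottom of the
# script invokes whichever one matches the sidebar selection.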

def pdf_chat():
    st.header("PDF Chat Application")
    user_question = st.text_input("Ask a Question from the PDF Files")
    if user_question:
        user_input(user_question)

    pdf_docs = st.file_uploader(
        "Upload your PDF Files and Click on the Submit & Process Button",
        accept_multiple_files=True,
    )
    if st.button("Submit & Process"):
        if not pdf_docs:
            st.warning("Please upload at least one PDF first.")
        else:
            with st.spinner("Processing..."):
                raw_text = get_pdf_text(pdf_docs)
                text_chunks = get_text_chunks(raw_text)
                get_vector_store(text_chunks)
                st.success("Done")
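# NOTE: questions on this page only work after "Submit & Process" has built
# the "faiss_index" folder, because user_input() loads it from disk.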

def image_chat():
    st.header("Image Chat Application")
    input_text = st.text_input("Input for Gemini Pro:", key="input_gemini")
    uploaded_file = st.file_uploader("Choose an image...", type="jpg")

    # Keep image defined even when nothing has been uploaded yet, so the
    # button handler below can check for it safely.
    image = None
    if uploaded_file is not None:
        image = Image.open(uploaded_file)
        st.image(image, caption="Uploaded Image", use_column_width=True)

    submit_gemini = st.button("Ask Gemini Pro")

    if submit_gemini:
        if image is None:
            st.warning("Please upload an image first.")
        else:
            response_gemini = get_gemini_response(input_text, image)
            st.subheader("Gemini Pro Response:")
            st.write(response_gemini)

def qa_chat():
    st.header("Q&A Chat Application")

    if "chat_history" not in st.session_state:
        st.session_state["chat_history"] = []

    input_qa = st.text_area("Input for Q&A:", key="input_qa")
    submit_qa = st.button("Ask the question")

    if submit_qa and input_qa:
        # get_gemini_response() returns the full reply as a single string, so
        # write it once rather than iterating over streamed chunks.
        response_qa = get_gemini_response(input_qa)

        st.session_state["chat_history"].append(("You", input_qa))
        st.subheader("Q&A Response:")
        st.write(response_qa)
        st.session_state["chat_history"].append(("Gemini Pro", response_qa))

    st.subheader("Q&A Chat History:")
    for role, text in st.session_state["chat_history"]:
        st.write(f"{role}: {text}")

selected_app_func = {
    "PDF Chat": pdf_chat,
    "Image Chat": image_chat,
    "Q&A Chat": qa_chat,
}

selected_app_func[selected_app]()