import streamlit as st
from PIL import Image
from PyPDF2 import PdfReader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_google_genai import GoogleGenerativeAIEmbeddings, ChatGoogleGenerativeAI
from langchain.vectorstores import FAISS
from langchain.chains.question_answering import load_qa_chain
from langchain.prompts import PromptTemplate
import os
import google.generativeai as genai
from dotenv import load_dotenv

load_dotenv()  # Load all environment variables from the .env file
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
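# The .env file is expected to define the Gemini key, e.g. (placeholder value):
#   GOOGLE_API_KEY=<your-api-key>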


def get_pdf_text(pdf_docs):
    text = ""
    for pdf in pdf_docs:
        pdf_reader = PdfReader(pdf)
        for page in pdf_reader.pages:
            # extract_text() may return None for image-only pages
            text += page.extract_text() or ""
    return text


def get_text_chunks(text):
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000, chunk_overlap=1000)
    chunks = text_splitter.split_text(text)
    return chunks


def get_vector_store(text_chunks):
    embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
    vector_store = FAISS.from_texts(text_chunks, embedding=embeddings)
    vector_store.save_local("faiss_index")


def get_conversational_chain():
    prompt_template = """
    Answer the question as thoroughly as possible using the provided context,
    making sure to include all relevant details. If the answer is not in the
    provided context, just say "answer is not available in the context";
    do not provide a wrong answer.

    Context:\n{context}\n
    Question:\n{question}\n

    Answer:
    """

    model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)
    prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
    chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)

    return chain


def user_input(user_question):
    embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
    new_db = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)
    docs = new_db.similarity_search(user_question)
    chain = get_conversational_chain()
    response = chain({"input_documents": docs, "question": user_question}, return_only_outputs=True)
    st.write("Reply: ", response["output_text"])


## Load the Gemini Pro Vision model once and reuse it for image responses
vision_model = genai.GenerativeModel("gemini-pro-vision")

def get_gemini_response(input_text, image):
    # Send the prompt together with the image when a prompt is given;
    # otherwise query the model with the image alone
    if input_text:
        response = vision_model.generate_content([input_text, image])
    else:
        response = vision_model.generate_content(image)
    return response.text
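

## The Q&A app below streams its reply in chunks, which matches the
## google.generativeai chat API rather than the one-shot vision call above.
## The original file never defined this helper, so the sketch below is an
## assumption: chat_model, chat, and get_gemini_chat_response are our own names.
chat_model = genai.GenerativeModel("gemini-pro")
chat = chat_model.start_chat(history=[])

def get_gemini_chat_response(question):
    # stream=True returns an iterable of response chunks, each exposing .text
    return chat.send_message(question, stream=True)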


## Initialize our Streamlit app

st.set_page_config(page_title="Combined Streamlit Application")
st.header("Combined Streamlit Application")

# Define the different applications
applications = {
    "PDF Chat": "pdf_chat",
    "Image Chat": "image_chat",
    "Q&A Chat": "qa_chat"
}

# Render the dropdown menu
selected_app = st.sidebar.selectbox("Select Application", list(applications.keys()))

# Function to display PDF Chat application
def pdf_chat():
    st.header("PDF Chat Application")
    user_question = st.text_input("Ask a Question from the PDF Files")
    if user_question:
        user_input(user_question)

    pdf_docs = st.file_uploader("Upload your PDF Files and Click on the Submit & Process Button", accept_multiple_files=True)
    if st.button("Submit & Process"):
        if not pdf_docs:
            st.warning("Please upload at least one PDF file first.")
        else:
            with st.spinner("Processing..."):
                raw_text = get_pdf_text(pdf_docs)
                text_chunks = get_text_chunks(raw_text)
                get_vector_store(text_chunks)
                st.success("Done")

# Function to display Image Chat application
def image_chat():
    st.header("Image Chat Application")
    input_text = st.text_input("Input for Gemini Pro:", key="input_gemini")
    uploaded_file = st.file_uploader("Choose an image...", type="jpg")
    image = None
    if uploaded_file is not None:
        image = Image.open(uploaded_file)
        st.image(image, caption="Uploaded Image", use_column_width=True)

    submit_gemini = st.button("Ask Gemini Pro")

    if submit_gemini:
        if image is None:
            st.warning("Please upload an image first.")
        else:
            response_gemini = get_gemini_response(input_text, image)
            st.subheader("Gemini Pro Response:")
            st.write(response_gemini)

# Function to display Q&A Chat application
def qa_chat():
    st.header("Q&A Chat Application")
    # Initialize session state for chat history if it doesn't exist
    if 'chat_history' not in st.session_state:
        st.session_state['chat_history'] = []

    input_qa = st.text_area("Input for Q&A:", key="input_qa")
    submit_qa = st.button("Ask the question")

    if submit_qa and input_qa:
        response_qa = get_gemini_chat_response(input_qa)
        # Record the user query, then stream the model's reply chunk by chunk
        st.session_state['chat_history'].append(("You", input_qa))
        st.subheader("Q&A Response:")
        for chunk in response_qa:
            st.write(chunk.text)
            st.session_state['chat_history'].append(("Gemini Pro", chunk.text))

    st.subheader("Q&A Chat History:")
    for role, text in st.session_state['chat_history']:
        st.write(f"{role}: {text}")

# Map selected application to corresponding function
selected_app_func = {
    "PDF Chat": pdf_chat,
    "Image Chat": image_chat,
    "Q&A Chat": qa_chat
}

# Run the selected application function
selected_app_func[selected_app]()
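
# To launch the app (the actual filename is not given in the source; "app.py"
# below is a placeholder):
#   streamlit run app.py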