File size: 3,180 Bytes
b99c727
 
f0996a3
 
 
f19506e
a8fbc18
 
 
9cc03e8
4cec1f7
f0996a3
 
9cc03e8
4cec1f7
 
 
 
 
 
 
b99c727
f19506e
 
4cec1f7
 
 
f19506e
b99c727
4cec1f7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f4e899d
 
4cec1f7
 
f4e899d
 
 
4cec1f7
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
import streamlit as st
from PIL import Image
from PyPDF2 import PdfReader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_google_genai import GoogleGenerativeAIEmbeddings, ChatGoogleGenerativeAI
from langchain.vectorstores import FAISS
import os
import google.generativeai as genai
from dotenv import load_dotenv

load_dotenv() # Load all env variables from a local .env file into os.environ
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))  # Gemini auth; None if the key is missing — genai will fail later, not here


def get_pdf_text(pdf_docs):
    """Concatenate the extracted text of every page of every uploaded PDF.

    Args:
        pdf_docs: iterable of file-like objects (e.g. from st.file_uploader).

    Returns:
        A single string containing all page text joined together; empty
        string when no documents or no extractable text.
    """
    text = ""
    for pdf in pdf_docs:
        pdf_reader = PdfReader(pdf)
        for page in pdf_reader.pages:
            # extract_text() can return None for image-only/scanned pages;
            # guard so the concatenation does not raise TypeError.
            text += page.extract_text() or ""
    return text


def get_text_chunks(text):
    """Split *text* into overlapping chunks sized for embedding (10k chars, 1k overlap)."""
    splitter = RecursiveCharacterTextSplitter(chunk_size=10000, chunk_overlap=1000)
    return splitter.split_text(text)


def get_vector_store(text_chunks):
    """Embed the given text chunks with Gemini embeddings and persist a FAISS index to ./faiss_index."""
    gemini_embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
    index = FAISS.from_texts(text_chunks, embedding=gemini_embeddings)
    index.save_local("faiss_index")


## function to load Gemini Pro model and get responses
model = genai.GenerativeModel("gemini-pro-vision")


def get_gemini_response(input, image=None):
    """Send a text prompt (optionally with an image) to Gemini and return its reply.

    Args:
        input: the text prompt. (Name kept for caller compatibility even
            though it shadows the builtin.)
        image: optional PIL image; when provided it is sent alongside the prompt.

    Returns:
        The model's text response.
    """
    # BUG FIX: genai.GenerativeModel has no `is_image_model` attribute, so the
    # original `model.is_image_model` check raised AttributeError whenever an
    # image was supplied. Branching on `image is not None` alone is sufficient.
    if image is not None:
        response = model.generate_content([input, image])
    else:
        response = model.generate_content(input)
    return response.text


## Initialize our Streamlit app

st.set_page_config(page_title='Combined Streamlit Application')  # must be the first st.* call on the page
st.header("Streamlit Application")

# Define the different applications
# (keys are the labels shown in the sidebar; values are currently unused slugs)
applications = {
    "PDF Chat": "pdf_chat",
    "Image Chat": "image_chat",
}

# Render the dropdown menu
selected_app = st.sidebar.selectbox("Select Application", list(applications.keys()))

# Function to display PDF Chat application
def pdf_chat():
    """Render the PDF-chat page: accept PDF uploads, build a FAISS index, take questions."""
    st.header("PDF Chat Application")
    user_question = st.text_input("Ask a Question from the PDF Files")
    if user_question:
        # NOTE(review): `user_input` is not defined anywhere in this file —
        # presumably it lives in another module or was lost in a merge; confirm,
        # otherwise asking a question raises NameError at runtime.
        user_input(user_question)

    pdf_docs = st.file_uploader("Upload your PDF Files and Click on the Submit & Process Button", accept_multiple_files=True)
    if st.button("Submit & Process"):
        # BUG FIX: with no files uploaded, the original passed [] all the way
        # down to FAISS.from_texts([]), which fails; warn and skip instead.
        if not pdf_docs:
            st.warning("Please upload at least one PDF file first.")
        else:
            with st.spinner("Processing..."):
                raw_text = get_pdf_text(pdf_docs)
                text_chunks = get_text_chunks(raw_text)
                get_vector_store(text_chunks)
                st.success("Done")

# Function to display Image Chat application
def image_chat():
    """Render the image-chat page: take a prompt plus an optional image and query Gemini."""
    st.header("Image Chat Application")
    input_text = st.text_input("Input for Gemini Pro:", key="input_gemini")
    uploaded_file = st.file_uploader("Choose an image...", type="jpg")
    # BUG FIX: `image` was only bound inside the upload branch, so clicking the
    # button without uploading a file raised NameError. Initialize it up front;
    # get_gemini_response handles image=None.
    image = None
    if uploaded_file is not None:
        image = Image.open(uploaded_file)
        st.image(image, caption="Uploaded Image", use_column_width=True)

    submit_gemini = st.button("Ask Gemini Pro")

    if submit_gemini:
        response_gemini = get_gemini_response(input_text, image)
        st.subheader("Gemini Pro Response:")
        st.write(response_gemini)

# Map selected application to corresponding function
# (keys must match the `applications` labels used by the sidebar selectbox)
selected_app_func = {
    "PDF Chat": pdf_chat,
    "Image Chat": image_chat,
}

# Run the selected application function
selected_app_func[selected_app]()