from dotenv import load_dotenv
import streamlit as st
import os
import sqlite3
import google.generativeai as genai
from PIL import Image
from PyPDF2 import PdfReader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_google_genai import GoogleGenerativeAIEmbeddings, ChatGoogleGenerativeAI
from langchain.vectorstores import FAISS
from langchain.chains.question_answering import load_qa_chain
from langchain.prompts import PromptTemplate
load_dotenv() # Load all env variables
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
# Function to load a Google Gemini model and generate a response.
# The vision model is used when an image is supplied; the text model otherwise.
def get_gemini_response(question, prompt=None, image=None):
    model = genai.GenerativeModel("gemini-pro-vision" if image else "gemini-pro")
    inputs = [question]
    if prompt:
        inputs.append(prompt[0])
    if image:
        inputs.append(image)  # only append when present; None entries break generate_content
    response = model.generate_content(inputs)
    return response.text
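# Example usage (hypothetical inputs, shown for illustration only):
#   text_reply = get_gemini_response("Summarize FAISS in one sentence")
#   img_reply = get_gemini_response("Describe this image", image=Image.open("photo.jpg"))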
# Function to run a SQL query against a SQLite database and return all rows
def read_sql_query(sql, db):
    conn = sqlite3.connect(db)
    cur = conn.cursor()
    cur.execute(sql)
    rows = cur.fetchall()
    conn.close()
    return rows
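# Example usage (hypothetical; the UI below never calls this helper, and the
# database file and table name are assumptions for illustration):
#   rows = read_sql_query("SELECT * FROM STUDENT;", "student.db")
#   for row in rows:
#       print(row)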
# Function to extract the raw text from a list of uploaded PDF files
def get_pdf_text(pdf_docs):
    text = ""
    for pdf in pdf_docs:
        pdf_reader = PdfReader(pdf)
        for page in pdf_reader.pages:
            text += page.extract_text() or ""  # extract_text() may return None
    return text
# Function to split text into overlapping chunks for embedding
def get_text_chunks(text):
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000, chunk_overlap=1000)
    chunks = text_splitter.split_text(text)
    return chunks
# Function to embed the text chunks and persist a FAISS vector store to disk
def get_vector_store(text_chunks):
    embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
    vector_store = FAISS.from_texts(text_chunks, embedding=embeddings)
    vector_store.save_local("faiss_index")
# Function to build the question-answering chain used for PDF chat
def get_conversational_chain():
    prompt_template = """
    Answer the question as thoroughly as possible from the provided context, making sure to include all relevant details.
    If the answer is not in the provided context, just say "answer is not available in the context"; do not give a wrong answer.

    Context:\n{context}\n
    Question:\n{question}\n
    Answer:
    """
    model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)
    prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
    chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
    return chain
# Streamlit App
st.set_page_config(page_title="Consolidated Application")
st.header("Consolidated Application")

# Gemini LLM Application
st.subheader("Gemini LLM Application")
gemini_input = st.text_input("Input for Gemini LLM", key="gemini_input")
# A unique key keeps this uploader distinct from the one in the image section below
gemini_image = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"], key="gemini_image")

if st.button("Generate Response for Gemini LLM"):
    # Streamlit uploads are file-like objects; open with PIL before sending to Gemini
    image = Image.open(gemini_image) if gemini_image else None
    gemini_response = get_gemini_response(gemini_input, image=image)
    st.subheader("Gemini LLM Response")
    st.write(gemini_response)
# Chat with PDF using Gemini
st.subheader("Chat with PDF using Gemini")
pdf_question = st.text_input("Ask a Question from the PDF Files", key="pdf_question")
uploaded_pdf = st.file_uploader("Upload your PDF Files", type="pdf", accept_multiple_files=True)

if st.button("Submit & Process PDF"):
    raw_text = get_pdf_text(uploaded_pdf)
    text_chunks = get_text_chunks(raw_text)
    get_vector_store(text_chunks)
    st.success("PDF Processing Complete")
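# The original script collects pdf_question but never answers it: the vector store is
# built and saved, yet never queried. A minimal sketch of the missing retrieval step,
# assuming the "faiss_index" directory written by get_vector_store above (newer
# langchain versions may also require allow_dangerous_deserialization=True):
if st.button("Answer PDF Question"):
    embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
    pdf_db = FAISS.load_local("faiss_index", embeddings)
    docs = pdf_db.similarity_search(pdf_question)
    chain = get_conversational_chain()
    response = chain({"input_documents": docs, "question": pdf_question}, return_only_outputs=True)
    st.subheader("PDF Answer")
    st.write(response["output_text"])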
# Image Description Generation
st.subheader("Image Description Generation")
image_input = st.text_input("Input for Image Description Generation", key="image_input")
# A distinct key prevents a DuplicateWidgetID clash with the uploader above
desc_image_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"], key="desc_image")

if st.button("Generate Image Description"):
    desc_image = Image.open(desc_image_file) if desc_image_file else None
    image_response = get_gemini_response(image_input, image=desc_image)
    st.subheader("Image Description")
    st.write(image_response)