import streamlit as st
import os
import google.generativeai as genai
from langchain_google_genai import GoogleGenerativeAIEmbeddings, ChatGoogleGenerativeAI
# from langchain_community.embeddings import HuggingFaceInferenceAPIEmbeddings
# from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
# from langchain_huggingface.embeddings import HuggingFaceEndpointEmbeddings
from langchain_community.document_loaders import PyPDFDirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain.chains import create_retrieval_chain
from langchain_community.vectorstores import FAISS
import time
import asyncio
from dotenv import load_dotenv
load_dotenv()
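# The .env file is expected to provide GOOGLE_API_KEY; HF_TOKEN is only
# needed if the commented-out HuggingFace backend below is re-enabled.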
# Alternative HuggingFace backend (commented out, kept for reference):
# huggingfacehub_api_token = os.getenv("HF_TOKEN")
# # Initialize HuggingFace endpoint and LLM
# repo_id = "meta-llama/Meta-Llama-3-8B-Instruct"
# llm_endpoint = HuggingFaceEndpoint(
#     repo_id=repo_id,
#     max_length=128,
#     temperature=0.7,
#     huggingfacehub_api_token=huggingfacehub_api_token,
# )
# llm = ChatHuggingFace(llm=llm_endpoint)
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))


# Construct the chat model inside asyncio.run() so that an event loop
# exists when the underlying client is created.
async def initialize_llm():
    return ChatGoogleGenerativeAI(model="gemini-1.5-flash", temperature=0.5, verbose=True)


llm = asyncio.run(initialize_llm())
# Build (and cache) the FAISS vector store from the PDFs in ./analysis-pdf
def vector_embedding():
    if "vectors" not in st.session_state:
        st.session_state.embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
        st.session_state.loader = PyPDFDirectoryLoader("./analysis-pdf")
        st.session_state.docs = st.session_state.loader.load()
        st.session_state.text_splitter = RecursiveCharacterTextSplitter(chunk_size=700, chunk_overlap=50)
        # Note: only the first 30 loaded pages are split and embedded (docs[:30])
        st.session_state.final_docs = st.session_state.text_splitter.split_documents(st.session_state.docs[:30])
        st.session_state.vectors = FAISS.from_documents(st.session_state.final_docs, st.session_state.embeddings)
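# Caching in st.session_state means the slow load/split/embed pipeline runs
# at most once per browser session; reruns triggered by widget interaction
# reuse the existing FAISS index instead of rebuilding it.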
st.title("Gemini RAG DEMO")

prompt = ChatPromptTemplate.from_template(
    """
    Answer the questions based on the provided context only.
    Please provide the most accurate response based on the question.
    <context>
    {context}
    </context>
    Question: {input}
    """
)
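# The template constrains answers to the retrieved context: {context} is
# filled in by the stuff-documents chain below, and {input} by the user's
# question.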
question_prompt = st.text_input("Enter Your Question From Documents")

if st.button("Document Embedding"):
    vector_embedding()
    st.write("Vector Store DB is Ready!")
if question_prompt:
    if "vectors" not in st.session_state:
        st.warning("Please click 'Document Embedding' to build the vector store first.")
    else:
        # Stuff the retrieved chunks into the prompt and query the LLM
        document_chain = create_stuff_documents_chain(llm, prompt)
        retriever = st.session_state.vectors.as_retriever()
        retrieval_chain = create_retrieval_chain(retriever, document_chain)
        start_time = time.perf_counter()  # wall-clock time; process_time() would exclude time spent waiting on the API
        response = retrieval_chain.invoke({"input": question_prompt})
        print("Response time :", time.perf_counter() - start_time)
        st.write(response["answer"])
        # Show the retrieved chunks that grounded the answer
        with st.expander("Document Similarity Search"):
            for doc in response["context"]:
                st.write(doc.page_content)
                st.write("---------------------------")