Ilma-Salsabil committed on
Commit d0d05f1
1 Parent(s): 8574296

Upload app.py

Files changed (1)
  1. app.py +72 -17
app.py CHANGED
@@ -1,30 +1,85 @@
- # text generation
- 
- from fastapi import FastAPI
- from transformers import pipeline
- 
- # create a FastAPI instance
- 
- app = FastAPI()
- 
- # initialize the text generation pipeline
- pipe = pipeline("text2text-generation", model="google/flan-t5-small")
- 
- @app.get("/")
- def home():
-     return {"message": "Hello World!"}
- 
- 
- # GET request on '/generate'
- 
- @app.get("/generate")
- def generate(text: str):
-     # use the pipeline to generate text from the given input text
- 
-     output = pipe(text)
- 
-     # return the generated text in a JSON response
- 
-     return {"output": output[0]['generated_text']}
- 
- 
+ import streamlit as st
+ import os
+ import google.generativeai as genai
+ from langchain_google_genai import GoogleGenerativeAIEmbeddings, ChatGoogleGenerativeAI
+ # from langchain_community.embeddings import HuggingFaceInferenceAPIEmbeddings
+ # from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
+ # from langchain_huggingface.embeddings import HuggingFaceEndpointEmbeddings
+ from langchain_community.document_loaders import PyPDFDirectoryLoader
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ from langchain.chains.combine_documents import create_stuff_documents_chain
+ from langchain_core.prompts import ChatPromptTemplate
+ from langchain_core.output_parsers import StrOutputParser
+ from langchain.chains import create_retrieval_chain
+ from langchain_community.vectorstores import FAISS
+ import time
+ import asyncio
+ from dotenv import load_dotenv
+ load_dotenv()
+ 
+ # Load environment variables
+ # huggingfacehub_api_token = os.getenv("HF_TOKEN")
+ 
+ # # Initialize HuggingFace endpoint and LLM
+ # repo_id = "meta-llama/Meta-Llama-3-8B-Instruct"
+ # llm_endpoint = HuggingFaceEndpoint(
+ #     repo_id=repo_id,
+ #     max_length=128,
+ #     temperature=0.7,
+ #     huggingfacehub_api_token=huggingfacehub_api_token
+ # )
+ 
+ # llm = ChatHuggingFace(llm=llm_endpoint)
+ 
+ genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
+ 
+ # Ensure that an event loop exists
+ async def initialize_llm():
+     return ChatGoogleGenerativeAI(model="gemini-1.5-flash", temperature=0.5, verbose=True)
+ 
+ llm = asyncio.run(initialize_llm())
+ 
+ # Function for vector embedding
+ def vector_embedding():
+     if "vectors" not in st.session_state:
+         st.session_state.embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
+         st.session_state.loader = PyPDFDirectoryLoader("./analysis-pdf")
+         st.session_state.docs = st.session_state.loader.load()
+         st.session_state.text_splitter = RecursiveCharacterTextSplitter(chunk_size=700, chunk_overlap=50)
+         st.session_state.final_docs = st.session_state.text_splitter.split_documents(st.session_state.docs[:30])
+         st.session_state.vectors = FAISS.from_documents(st.session_state.final_docs, st.session_state.embeddings)
+ 
+ 
+ st.title("Gemini RAG DEMO")
+ 
+ prompt = ChatPromptTemplate.from_template(
+     """
+     Answer the questions based on the provided context only.
+     Please provide the most accurate response based on the question.
+     <context>
+     {context}
+     <context>
+     Question: {input}
+ 
+     """
+ )
+ 
+ question_prompt = st.text_input("Enter Your Question From Documents")
+ 
+ if st.button("Document Embedding"):
+     vector_embedding()
+     st.write("Vector Store DB is Ready!")
+ 
+ if question_prompt:
+     document_chain = create_stuff_documents_chain(llm, prompt)
+     retriever = st.session_state.vectors.as_retriever()
+     retrieval_chain = create_retrieval_chain(retriever, document_chain)
+     start_time = time.process_time()
+     response = retrieval_chain.invoke({"input": question_prompt})
+     print("Response time :", time.process_time() - start_time)
+     st.write(response['answer'])
+ 
+     with st.expander("Document Similarity Search"):
+         for i, doc in enumerate(response["context"]):
+             st.write(doc.page_content)
+             st.write("---------------------------")