VenkyPas committed on
Commit 7132559
1 Parent(s): 431621a

Working with FAISS

Files changed (1)
  1. app_faiss.py +174 -0
app_faiss.py ADDED
@@ -0,0 +1,174 @@
+ import os
+ import chainlit as cl
+ from dotenv import load_dotenv
+ from operator import itemgetter
+ from langchain_huggingface import HuggingFaceEndpoint, HuggingFaceEndpointEmbeddings
+ from langchain_community.document_loaders import PyMuPDFLoader
+ from langchain_text_splitters import RecursiveCharacterTextSplitter
+ from langchain_community.vectorstores import FAISS
+ from langchain_core.prompts import PromptTemplate
+ from langchain.schema.runnable.config import RunnableConfig
+ from langchain.globals import set_debug
+
+ set_debug(False)
+
+ # GLOBAL SCOPE - ENTIRE APPLICATION HAS ACCESS TO VALUES SET IN THIS SCOPE #
+ # ---- ENV VARIABLES ---- #
+ """
+ This call loads our environment file (.env) if one is present.
+
+ NOTE: Make sure that .env is in your .gitignore file - it is by default, but please ensure it remains there.
+ """
+ load_dotenv()
+
+ """
+ We load our environment variables here.
+ """
+ HF_LLM_ENDPOINT = os.environ["HF_LLM_ENDPOINT"]
+ HF_EMBED_ENDPOINT = os.environ["HF_EMBED_ENDPOINT"]
+ HF_TOKEN = os.environ["HF_TOKEN"]
+
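+ # For reference, a .env for this app might look like the following
+ # (the endpoint URLs and token below are placeholders, not real values):
+ #
+ #   HF_LLM_ENDPOINT=https://<your-llm-endpoint>.endpoints.huggingface.cloud
+ #   HF_EMBED_ENDPOINT=https://<your-embedding-endpoint>.endpoints.huggingface.cloud
+ #   HF_TOKEN=hf_...
+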
+ # ---- GLOBAL DECLARATIONS ---- #
+
+ # -- RETRIEVAL -- #
+ """
+ 1. Load Documents from the PDF File
+ 2. Split Documents into Chunks
+ 3. Load HuggingFace Embeddings (remember to use the URL we set above)
+ 4. Index Files if they do not exist, otherwise load the vectorstore
+ """
+ ### 1. CREATE PDF LOADER AND LOAD DOCUMENTS
+ ### NOTE: PAY ATTENTION TO THE PATH THE FILE IS IN.
+ pdf_loader = PyMuPDFLoader("./data/10Q-AirBnB.pdf")
+ documents = pdf_loader.load()
+
+ ### 2. CREATE TEXT SPLITTER AND SPLIT DOCUMENTS
+ text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=25)
+ split_documents = text_splitter.split_documents(documents)
+
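+ # Quick sanity check (illustrative only - not required by the app):
+ # PyMuPDFLoader yields one Document per page, so after splitting the chunk
+ # count should comfortably exceed the page count.
+ # print(f"{len(documents)} pages -> {len(split_documents)} chunks")
+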
+ ### 3. LOAD HUGGINGFACE EMBEDDINGS
+ hf_embeddings = HuggingFaceEndpointEmbeddings(
+     model=HF_EMBED_ENDPOINT,
+     task="feature-extraction",
+     huggingfacehub_api_token=HF_TOKEN,
+ )
+ DATA_DIR = "./data"
+ VECTOR_STORE_DIR = os.path.join(DATA_DIR, "vectorstore")
+ VECTOR_STORE_PATH = os.path.join(VECTOR_STORE_DIR, "index.faiss")
+
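+ # NOTE: FAISS.save_local() writes two files into this directory - the raw
+ # index (index.faiss) and a pickled docstore (index.pkl) - which is why we
+ # check for index.faiss to decide whether an index already exists.
+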
+ FAISS_MAX_FETCH_SIZE = 2
+ FAISS_MAX_BATCH_SIZE = 32
+ if os.path.exists(VECTOR_STORE_PATH):
+     vectorstore = FAISS.load_local(
+         VECTOR_STORE_DIR,
+         hf_embeddings,
+         allow_dangerous_deserialization=True,  # required because the docstore is deserialized from a pickled (`.pkl`) file
+     )
+     hf_retriever = vectorstore.as_retriever(search_kwargs={"k": FAISS_MAX_FETCH_SIZE, "fetch_k": FAISS_MAX_FETCH_SIZE})
+     print(f"Loaded Vectorstore at {VECTOR_STORE_DIR}")
+ else:
+     print("Indexing Files")
+     os.makedirs(VECTOR_STORE_DIR, exist_ok=True)
+     ### 4. INDEX FILES
+     ### NOTE: REMEMBER TO BATCH THE DOCUMENTS WITH MAXIMUM BATCH SIZE = 32
+     for i in range(0, len(split_documents), FAISS_MAX_BATCH_SIZE):
+         batch = split_documents[i : i + FAISS_MAX_BATCH_SIZE]
+         if i == 0:
+             vectorstore = FAISS.from_documents(batch, hf_embeddings)
+             continue
+         vectorstore.add_documents(batch)
+     vectorstore.save_local(VECTOR_STORE_DIR)
+
+     hf_retriever = vectorstore.as_retriever(search_kwargs={"k": FAISS_MAX_FETCH_SIZE, "fetch_k": FAISS_MAX_FETCH_SIZE})
+
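+ # Optional smoke test (illustrative only, not part of the app flow):
+ # docs = hf_retriever.invoke("What was Airbnb's revenue this quarter?")
+ # print([d.metadata.get("page") for d in docs])
+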
+ # -- AUGMENTED -- #
+ """
+ 1. Define a String Template
+ 2. Create a Prompt Template from the String Template
+ """
+ ### 1. DEFINE STRING TEMPLATE
+ RAG_PROMPT_TEMPLATE = """\
+ <|start_header_id|>system<|end_header_id|>
+ You are a helpful assistant. You answer user questions based on provided context. If you can't answer the question with the provided context, say you don't know.<|eot_id|>
+
+ <|start_header_id|>user<|end_header_id|>
+ User Query:
+ {query}
+
+ Context:
+ {context}<|eot_id|>
+
+ <|start_header_id|>assistant<|end_header_id|>
+ """
+
+ ### 2. CREATE PROMPT TEMPLATE
+ rag_prompt = PromptTemplate.from_template(RAG_PROMPT_TEMPLATE)
+
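+ # The template exposes two input variables, `query` and `context`; e.g.
+ # rag_prompt.format(query="What is FAISS?", context="<retrieved chunks>")
+ # renders the Llama 3 style chat markup above with both slots filled in.
+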
+ # -- GENERATION -- #
+ """
+ 1. Create a HuggingFaceEndpoint for the LLM
+ """
+ ### 1. CREATE HUGGINGFACE ENDPOINT FOR LLM
+ hf_llm = HuggingFaceEndpoint(
+     endpoint_url=HF_LLM_ENDPOINT,
+     max_new_tokens=512,
+     top_k=10,
+     top_p=0.95,
+     temperature=0.3,
+     repetition_penalty=1.15,
+     huggingfacehub_api_token=HF_TOKEN,
+ )
+
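+ # Decoding settings, briefly: a low temperature (0.3) keeps answers close to
+ # the retrieved context, top_k/top_p bound the sampling pool, and a
+ # repetition_penalty above 1 discourages the model from looping on itself.
+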
+ @cl.author_rename
+ def rename(original_author: str):
+     """
+     This function can be used to rename the 'author' of a message.
+
+     In this case, we're overriding the 'Assistant' author to be 'AirBnB 10-Q Bot'.
+     """
+     rename_dict = {
+         "Assistant": "AirBnB 10-Q Bot"
+     }
+     return rename_dict.get(original_author, original_author)
+
+ @cl.on_chat_start
+ async def start_chat():
+     """
+     This function will be called at the start of every user session.
+
+     We will build our LCEL RAG chain here, and store it in the user session.
+
+     The user session is a dictionary that is unique to each user session, and is stored in the memory of the server.
+     """
+
+     ### BUILD LCEL RAG CHAIN THAT ONLY RETURNS TEXT
+     # The input dict's "query" is piped into the retriever to produce "context",
+     # and is also passed through unchanged; both then fill the prompt template.
+     lcel_rag_chain = (
+         {"context": itemgetter("query") | hf_retriever, "query": itemgetter("query")}
+         | rag_prompt | hf_llm
+     )
+
+     cl.user_session.set("lcel_rag_chain", lcel_rag_chain)
+
+ @cl.on_message
+ async def main(message: cl.Message):
+     """
+     This function will be called every time a message is received from a session.
+
+     We will use the LCEL RAG chain to generate a response to the user query.
+
+     The LCEL RAG chain is stored in the user session, and is unique to each user session - this is why we can access it here.
+     """
+     lcel_rag_chain = cl.user_session.get("lcel_rag_chain")
+
+     msg = cl.Message(content="")
+
+     async for chunk in lcel_rag_chain.astream(
+         {"query": message.content},
+         config=RunnableConfig(callbacks=[cl.LangchainCallbackHandler()]),
+     ):
+         await msg.stream_token(chunk)
+
+     await msg.send()
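+
+ # To try the app locally (assuming Chainlit is installed and .env is set up):
+ #   chainlit run app_faiss.py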