Update app.py
app.py CHANGED
@@ -189,18 +189,25 @@ def optimize_query(
     # Initialize the language model
     #llm = HuggingFacePipeline(pipeline(model=llm_model))

+    print('---- optimize query ----')
     # Create a temporary vector store for query optimization
     temp_vector_store = get_vector_store(vector_store_type, chunks, embedding_model)

+    print('---- optimize query 2 ----')
+
     # Create a retriever with the temporary vector store
     temp_retriever = get_retriever(temp_vector_store, search_type, {"k": top_k})

+    print('---- optimize query 3 ----')
+
     # Initialize MultiQueryRetriever with the temporary retriever and the language model
     multi_query_retriever = MultiQueryRetriever.from_llm(
         retriever=temp_retriever,
         llm=llm
     )

+    print('---- optimize query 4 ----')
+    #print(llm.invoke('Hello'))
     # Limit max time or set a timeout for LLM to avoid endless execution
     optimized_queries = multi_query_retriever.invoke(query, max_time=30)  # Timeout in seconds

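For reference, a minimal, self-contained sketch of the flow this hunk instruments is shown below: build a temporary vector store over the document chunks, wrap it as a retriever, and hand it to LangChain's MultiQueryRetriever. The FAISS store, the HuggingFaceEmbeddings model, and the optimize_query_sketch name are illustrative assumptions standing in for the app's own get_vector_store / get_retriever helpers and llm setup, not the repository's actual implementation.

# Minimal sketch (not the app's actual code) of the optimize_query flow above.
# Assumed stand-ins: FAISS as the temporary vector store and HuggingFaceEmbeddings
# as the embedding model; `chunks` is a list of LangChain Document objects and
# `llm` is any LangChain-compatible language model.
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.retrievers.multi_query import MultiQueryRetriever

def optimize_query_sketch(query, chunks, llm, top_k=3):
    # Build a temporary vector store over the document chunks
    embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
    temp_vector_store = FAISS.from_documents(chunks, embeddings)

    # Expose it as a similarity retriever that returns the top_k chunks
    temp_retriever = temp_vector_store.as_retriever(
        search_type="similarity", search_kwargs={"k": top_k}
    )

    # MultiQueryRetriever asks the LLM for several rephrasings of the query,
    # runs each one against the retriever, and merges the unique results
    multi_query_retriever = MultiQueryRetriever.from_llm(
        retriever=temp_retriever, llm=llm
    )
    return multi_query_retriever.invoke(query)

MultiQueryRetriever.from_llm(retriever=..., llm=...) matches LangChain's documented classmethod, while the max_time keyword passed to invoke in the diff does not appear to be a documented retriever parameter, so the sketch omits it and calls invoke with the query alone.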