# Lex / retrieve.py
import os

from dotenv import load_dotenv
from langchain_community.vectorstores import Qdrant
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from langchain.prompts import PromptTemplate
from langfuse.callback import CallbackHandler

from utils import (
    setup_openai_embeddings,
    setup_qdrant_client,
    openai_llm,
    format_document_metadata,
)

# Load the .env file and expose the Langfuse credentials the SDK expects.
load_dotenv()
os.environ["LANGFUSE_PUBLIC_KEY"] = os.getenv("LANGFUSE_PUBLIC_KEY")
os.environ["LANGFUSE_SECRET_KEY"] = os.getenv("LANGFUSE_SECRET_KEY")
os.environ["LANGFUSE_HOST"] = os.getenv("LANGFUSE_HOST")

# One Langfuse callback handler, reused to trace every chain invocation below.
langfuse_handler = CallbackHandler()


def retrieve_documents(query, api_key, qdrant_url, qdrant_api_key):
    """Answer the query with a RAG chain over the default "Lex-v1" Qdrant collection."""
    embeddings_model = setup_openai_embeddings(api_key)
    qdrant_client = setup_qdrant_client(qdrant_url, qdrant_api_key)
    qdrant = Qdrant(client=qdrant_client, collection_name="Lex-v1", embeddings=embeddings_model)
    retriever = qdrant.as_retriever(search_kwargs={"k": 5})
    prompt = PromptTemplate(
        template="""
# Your role
You are an expert at understanding the intent of the questioner and the crux of the question, and at providing the most relevant answer from the documents you are given.
# Instruction
Your task is to answer the question using the pieces of retrieved context delimited by XML tags.
<retrieved context>
{context}
</retrieved context>
# Constraints
1. Think carefully about the user's question:
{question}
Understand the intent behind it and provide the most appropriate answer.
- Ask yourself why the questioner asked it, reflect on that, and respond based on what you understand.
2. Choose the most relevant content (the key content that directly relates to the question) from the retrieved context and use it to generate the answer.
3. Generate a concise, logical answer. Do not just list the selected passages; rearrange them so they read as paragraphs with a natural flow.
4. If there is no retrieved context for the question, or the retrieved documents are irrelevant to it, answer 'I can't find the answer to that question in the material I have'.
5. Use five sentences at most. Keep the answer concise but logical, natural, and in-depth.
6. At the end of the response, list the metadata of the relevant documents, for example: "Metadata: page: 19, source: /content/OCR_RSCA/Analyse docs JVB + mails et convention FOOT INNOVATION.pdf". Return just the page and source, and format all metadata found in the relevant content as bullets.
# Question:
{question}""",
        input_variables=["context", "question"],
    )
    llm = openai_llm(api_key=api_key)
    # Retrieve, format the documents with their metadata, fill the prompt, and parse the LLM output.
    rag_chain = (
        {"context": retriever | format_document_metadata, "question": RunnablePassthrough()}
        | prompt
        | llm
        | StrOutputParser()
    )
    return rag_chain.invoke(query, config={"callbacks": [langfuse_handler]})


def retrieve_documents_from_collection(query, api_key, qdrant_url, qdrant_api_key, collection_name):
    """Answer the query with a RAG chain over the specified Qdrant collection."""
    embeddings_model = setup_openai_embeddings(api_key)
    qdrant_client = setup_qdrant_client(qdrant_url, qdrant_api_key)
    qdrant = Qdrant(client=qdrant_client, collection_name=collection_name, embeddings=embeddings_model)
    retriever = qdrant.as_retriever(search_kwargs={"k": 5})
    prompt = PromptTemplate(
        template="""
# Your role
You are an expert at understanding the intent of the questioner and the crux of the question, and at providing the most relevant answer from the documents you are given.
# Instruction
Your task is to answer the question using the pieces of retrieved context delimited by XML tags.
<retrieved context>
{context}
</retrieved context>
# Constraints
1. Think carefully about the user's question:
{question}
Understand the intent behind it and provide the most appropriate answer.
- Ask yourself why the questioner asked it, reflect on that, and respond based on what you understand.
2. Choose the most relevant content (the key content that directly relates to the question) from the retrieved context and use it to generate the answer.
3. Generate a concise, logical answer. Do not just list the selected passages; rearrange them so they read as paragraphs with a natural flow.
4. If there is no retrieved context for the question, or the retrieved documents are irrelevant to it, answer 'I can't find the answer to that question in the material I have'.
5. Use five sentences at most. Keep the answer concise but logical, natural, and in-depth.
6. At the end of the response, list the metadata of the relevant documents, for example: "Metadata: page: 19, source: /content/OCR_RSCA/Analyse docs JVB + mails et convention FOOT INNOVATION.pdf". Return just the page and source, and format all metadata found in the relevant content as bullets.
# Question:
{question}""",
        input_variables=["context", "question"],
    )
    llm = openai_llm(api_key=api_key)
    # Same chain shape as retrieve_documents, parameterized by collection name.
    rag_chain = (
        {"context": retriever | format_document_metadata, "question": RunnablePassthrough()}
        | prompt
        | llm
        | StrOutputParser()
    )
    return rag_chain.invoke(query, config={"callbacks": [langfuse_handler]})


def delete_collection(collection_name, qdrant_url, qdrant_api_key):
    """Delete a Qdrant collection."""
    client = setup_qdrant_client(qdrant_url, qdrant_api_key)
    try:
        client.delete_collection(collection_name=collection_name)
    except Exception as e:
        print("Failed to delete collection:", e)


def is_document_embedded(filename):
    """Check whether a document is already embedded. Placeholder: actual implementation needed."""
    return False
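

# --- Sketch: one possible way to implement is_document_embedded ---
# A minimal sketch, not the project's actual logic. It assumes documents were
# ingested through LangChain's Qdrant wrapper with its default payload layout,
# i.e. per-chunk metadata stored under the "metadata" payload key with a
# "source" field holding the original filename/path. The function name and the
# extra connection arguments are illustrative placeholders, not part of the app.
def is_document_embedded_sketch(filename, qdrant_url, qdrant_api_key, collection_name):
    """Return True if at least one point in `collection_name` references `filename`."""
    from qdrant_client.http import models as qmodels

    client = setup_qdrant_client(qdrant_url, qdrant_api_key)
    result = client.count(
        collection_name=collection_name,
        count_filter=qmodels.Filter(
            must=[
                qmodels.FieldCondition(
                    key="metadata.source",
                    match=qmodels.MatchValue(value=filename),
                )
            ]
        ),
        exact=True,
    )
    return result.count > 0


# Example usage of the retrieval entry point (a sketch: the question and the
# environment-variable names below are placeholders, not part of the app).
if __name__ == "__main__":
    answer = retrieve_documents(
        "What does the convention say about sponsorship?",
        api_key=os.getenv("OPENAI_API_KEY"),
        qdrant_url=os.getenv("QDRANT_URL"),
        qdrant_api_key=os.getenv("QDRANT_API_KEY"),
    )
    print(answer)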