import os

from dotenv import load_dotenv
from langchain_community.document_loaders import PyPDFLoader, UnstructuredAPIFileLoader
from langchain.chains.summarize import load_summarize_chain
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import Qdrant
from qdrant_client import QdrantClient
from langchain_openai import OpenAIEmbeddings, ChatOpenAI
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from langchain.prompts import PromptTemplate

load_dotenv()


def setup_openai_embeddings(api_key):
    """Set up OpenAI embeddings."""
    return OpenAIEmbeddings(model='text-embedding-3-small', openai_api_key=api_key)


def setup_qdrant_client(url, api_key):
    """Set up a Qdrant client."""
    return QdrantClient(location=url, api_key=api_key)


def format_document_metadata(docs):
    """Format each retrieved document as its page content followed by its metadata."""
    formatted_docs = []
    for doc in docs:
        metadata_str = ', '.join(f"{key}: {value}" for key, value in doc.metadata.items())
        doc_str = f"{doc.page_content}\nMetadata: {metadata_str}"
        formatted_docs.append(doc_str)
    return "\n\n".join(formatted_docs)


def openai_llm(model_name: str, api_key: str):
    """Get a configured OpenAI chat model."""
    return ChatOpenAI(model_name=model_name, temperature=0, openai_api_key=api_key)


def load_documents_OCR(file_path, unstructured_api):
    """Load documents that require OCR via the Unstructured API."""
    loader = UnstructuredAPIFileLoader(
        file_path=file_path,
        api_key=unstructured_api,
    )
    documents = loader.load()
    return documents


def load_documents(file_path):
    """Load a PDF using LangChain's PyPDFLoader."""
    loader = PyPDFLoader(file_path)
    documents = loader.load()
    return documents


def split_documents(documents):
    """Split documents using LangChain's recursive character splitter."""
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=500)
    split_docs = text_splitter.split_documents(documents)
    return split_docs


def load_and_split_documents(file_path):
    """Load and split documents from the specified file path."""
    loader = PyPDFLoader(file_path)
    documents = loader.load()
    if not documents:
        print("No documents loaded from file:", file_path)
        return []
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=500)
    split_docs = text_splitter.split_documents(documents)
    if not split_docs:
        print("Document splitting resulted in no output for file:", file_path)
    return split_docs


def update_metadata(documents, original_name):
    """Set the 'source' metadata field of each document to the original file name."""
    updated_documents = []
    for doc in documents:
        doc.metadata['source'] = original_name
        updated_documents.append(doc)
    return updated_documents


def setup_summary_chain(api_key, model_name):
    """Set up a map-reduce summarization chain with the specified LLM."""
    llm = openai_llm(model_name=model_name, api_key=api_key)
    return load_summarize_chain(llm=llm, chain_type='map_reduce')


def summarize_documents(model_name, documents, api_key):
    """Generate a summary of the provided documents."""
    summary_chain = setup_summary_chain(api_key, model_name)
    return summary_chain.run(documents)


def embed_documents_into_qdrant(documents, api_key, qdrant_url, qdrant_api_key, collection_name="Lex-v1"):
    """Embed documents into a Qdrant collection."""
    embeddings_model = setup_openai_embeddings(api_key)
    client = setup_qdrant_client(qdrant_url, qdrant_api_key)
    qdrant = Qdrant(client=client, collection_name=collection_name, embeddings=embeddings_model)
    try:
        qdrant.add_documents(documents)
    except Exception as e:
        print("Failed to embed documents:", e)


def retrieve_documents(query, api_key, qdrant_url, qdrant_api_key, model_name):
    """Answer a query using documents retrieved from the Qdrant collection (RAG)."""
    embeddings_model = setup_openai_embeddings(api_key)
    qdrant_client = setup_qdrant_client(qdrant_url, qdrant_api_key)
    qdrant = Qdrant(client=qdrant_client, collection_name="Lex-v1", embeddings=embeddings_model)
    retriever = qdrant.as_retriever(search_kwargs={"k": 5})
    prompt = PromptTemplate(
        template="""
# Your role
You are a brilliant expert at understanding the intent of the questioner and the crux of the question, and providing the most optimal answer to the questioner's needs from the documents you are given.

# Instruction
Your task is to answer the question using the following pieces of retrieved context.
Retrieved Context: {context}

# Constraints
1. Think deeply and multiple times about the user's question.
User's question:
{question}
You must understand the intent of the question and provide the most appropriate answer.
- Ask yourself why the questioner asked it, reflect on the context of the question, and respond based on what you understand.
2. Choose the most relevant content (the key content that directly relates to the question) from the retrieved context and use it to generate an answer.
3. Generate a concise, logical answer. Do not just list your selections; rearrange them in context so that they form paragraphs with a natural flow.
4. If you have no retrieved context for the question, or the retrieved documents are irrelevant to it, answer 'I can't find the answer to that question in the material I have'.
5. Use five sentences maximum. Keep the answer concise but logical/natural/in-depth.
6. At the end of the response, provide the metadata of the relevant documents, for example: "Metadata: page: 19, source: /content/OCR_RSCA/Analyse docs JVB + mails et convention FOOT INNOVATION.pdf". Return just the page and source, and list all metadata found in the relevant content as bullets.

# Question:
{question}""",
        input_variables=["context", "question"],
    )
    llm = openai_llm(model_name=model_name, api_key=api_key)
    rag_chain = (
        {"context": retriever | format_document_metadata, "question": RunnablePassthrough()}
        | prompt
        | llm
        | StrOutputParser()
    )
    return rag_chain.invoke(query)


def is_document_embedded(filename):
    """Check whether a document is already embedded. Actual implementation needed."""
    return False
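

# Minimal usage sketch showing how these helpers can be wired together
# (index a PDF, then query it). Assumptions not defined elsewhere in this
# module: the environment variable names below, the example PDF path, the
# model name, and the query string are all illustrative; the "Lex-v1"
# collection is assumed to already exist in Qdrant.
if __name__ == "__main__":
    openai_key = os.getenv("OPENAI_API_KEY")   # assumed env var name
    qdrant_url = os.getenv("QDRANT_URL")       # assumed env var name
    qdrant_key = os.getenv("QDRANT_API_KEY")   # assumed env var name
    pdf_path = "example.pdf"                   # hypothetical input file

    # Load, split, and tag the PDF, then embed it unless it is already indexed.
    if not is_document_embedded(pdf_path):
        docs = load_and_split_documents(pdf_path)
        docs = update_metadata(docs, original_name=pdf_path)
        embed_documents_into_qdrant(docs, openai_key, qdrant_url, qdrant_key)

    # Ask a question against the embedded collection.
    answer = retrieve_documents(
        query="What does the document say about payment terms?",  # example query
        api_key=openai_key,
        qdrant_url=qdrant_url,
        qdrant_api_key=qdrant_key,
        model_name="gpt-4o-mini",  # assumed model choice
    )
    print(answer)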