import os

import chromadb
from dotenv import load_dotenv
from langchain_chroma import Chroma
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.prompts import SystemMessagePromptTemplate
from langchain_core.prompts import HumanMessagePromptTemplate
from langchain_core.documents import Document

from lc_frame.lc_models.api import llm_api
from lc_frame.utils.log import logger
from chromadb import HttpClient
#from lc_frame.utils.utils import PDF_Agent

import warnings

warnings.filterwarnings('ignore')

# Resolve all paths relative to the project root (two directory levels above this file).
current_path = os.path.abspath(__file__)
root_path = os.path.dirname(os.path.dirname(current_path))

# Source PDFs and the persisted Chroma vector store both live under datas/.
pdfs_dict_path = os.path.join(root_path, "datas", "pdfs")
vector_db_path = os.path.join(root_path, "datas", "vector_db", "chroma")

# Load the API key(s) from the Qwen dotenv file.
dotenv_path = os.path.join(root_path, "lc_models", "api", ".qwen")
load_dotenv(dotenv_path=dotenv_path)


def lc_rag(question, k=6, top_n=3):
    """Answer *question* via retrieval-augmented generation over the local Chroma store.

    Pipeline: dense retrieval (top *k* candidates) -> cross-encoder rerank
    (keep *top_n*) -> prompt the Qwen chat model with the reranked context.

    Args:
        question: The user's natural-language question.
        k: Number of candidate documents fetched by the vector retriever.
        top_n: Number of documents kept after cross-encoder reranking.

    Returns:
        The model's answer as a plain string.

    Raises:
        RuntimeError: If the persisted "langchain" collection does not exist.
    """
    # Only the chat model is needed here; the embeddings come from BGE below.
    chat, _, _ = llm_api.get_qwen()
    embeddings = llm_api.get_bge_embeddings()

    client = chromadb.PersistentClient(path=vector_db_path)
    # Fail fast with a clear message if the vector store was never built.
    # (The original guard left `vectorstore` as None and crashed later with
    # an opaque AttributeError; chromadb's get_collection raises rather than
    # returning None when the collection is missing.)
    try:
        client.get_collection(name="langchain")
    except Exception as err:
        raise RuntimeError(
            f"Chroma collection 'langchain' not found under {vector_db_path}; "
            "build the vector store before querying."
        ) from err

    vectorstore = Chroma(embedding_function=embeddings, client=client)
    logger.info("langchain 已存在，将使用 Chroma 进行文本搜索")

    retriever = vectorstore.as_retriever(search_kwargs={'k': k})

    # Local imports: the reranking dependencies are only needed on this path.
    from langchain.retrievers import ContextualCompressionRetriever
    from langchain.retrievers.document_compressors import CrossEncoderReranker
    from langchain_community.cross_encoders import HuggingFaceCrossEncoder

    # Cross-encoder rerank narrows the k retrieved candidates down to top_n.
    model = HuggingFaceCrossEncoder(model_name="../lc_models/llms/bge-reranker-large")
    compressor = CrossEncoderReranker(model=model, top_n=top_n)
    compression_retriever = ContextualCompressionRetriever(
        base_compressor=compressor, base_retriever=retriever
    )
    compressed_docs = compression_retriever.invoke(input=question)
    pretty_print_docs(compressed_docs)

    # Prompt template (the "A"ugmentation step of RAG).
    sys_message = """
    """
    human_message = """
        请根据用户从私有知识库检索出来的上下文来回答用户的问题！
        请注意：
            1，如果用户的问题不在上下文中，请直接回答不知道！
            2，不要做任何解释，直接输出最终的结果即可！
        检索出的上下文为：
        {context}
        用户的问题为：
        {question}
        答案为：
    """

    sys_message_template = SystemMessagePromptTemplate.from_template(template=sys_message)
    human_message_template = HumanMessagePromptTemplate.from_template(template=human_message)
    prompt = ChatPromptTemplate.from_messages([sys_message_template, human_message_template])

    rag_chain = (
            prompt
            | chat
            | StrOutputParser()
    )

    # BUG FIX: the original passed the retriever OBJECT as {context}, so the
    # prompt contained the retriever's repr instead of the retrieved text.
    # Feed the reranked documents' contents into the template instead.
    context_text = "\n\n".join(doc.page_content for doc in compressed_docs)
    result = rag_chain.invoke(input={"context": context_text, "question": question})
    logger.info(result)
    return result


def pretty_print_docs(docs):
    """Print each document's page content, each followed by a dashed separator rule."""
    rule = "-" * 100
    rendered = []
    for doc in docs:
        rendered.append(f"Document:\n{doc.page_content}\n{rule}")
    print("\n".join(rendered))


if __name__ == "__main__":
    question = "深圳市超频三科技股份有限公司目前主要产品是什么？什么是公司未来业务增长的重点产品？"
    lc_rag(question)
