import os

import chromadb
from dotenv import load_dotenv
from langchain_chroma import Chroma
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.prompts import SystemMessagePromptTemplate
from langchain_core.prompts import HumanMessagePromptTemplate
from langchain_core.documents import Document
from langchain_core.messages import SystemMessage, HumanMessage
from lc_frame.lc_models.api import llm_api
from lc_frame.utils.log import logger
from chromadb import HttpClient
from langgraph.prebuilt import create_react_agent
from langchain.agents.agent_toolkits.vectorstore.toolkit import VectorStoreRouterToolkit
#from lc_frame.utils.utils import PDF_Agent

import warnings

warnings.filterwarnings('ignore')

# Absolute path of this module; root_path is two directory levels up
# (the package root that contains lc_models/, datas/, etc.).
current_path = os.path.abspath(__file__)
root_path = os.path.dirname(os.path.dirname(current_path))

# pdfs_dict_path = os.path.join(root_path, "datas", "pdfs")
# vector_db_path = os.path.join(root_path, "datas", "vector_db")

# Load the API key(s) for the Qwen models from lc_models/api/qwen.txt.
dotenv_path = os.path.join(root_path, "lc_models", "api", "qwen.txt")
load_dotenv(dotenv_path=dotenv_path)

def get_retrieve_result(question, vectorstore):
    """Coarse-retrieve documents for *question* and join them into one context string.

    Runs a similarity search against *vectorstore* (up to 100 hits with a
    relevance score of at least 0.1), drops the scores, and concatenates the
    document bodies with blank-line separators.

    Args:
        question: The user's query string.
        vectorstore: Any store exposing ``similarity_search_with_relevance_scores``.

    Returns:
        Tuple ``(context, docs)``: ``context`` is the joined page contents and
        ``docs`` is the list of retrieved documents (scores stripped).
    """
    # 1. Coarse ranking: pull up to 100 loosely relevant candidates.
    raw_docs = vectorstore.similarity_search_with_relevance_scores(
        query=question, k=100, score_threshold=0.1)
    # 2. Keep only the documents, dropping the (doc, score) pairing.
    #    (Permission checks / keyword filtering could be inserted here.)
    my_docs = [doc for doc, score in raw_docs]
    # TODO: fine ranking — rerank docs against the question (e.g. with a
    # cross-encoder) and keep only the best few before joining.
    # 3. Concatenate the document bodies into a single context block.
    context = "\n\n".join(doc.page_content for doc in my_docs)
    # 4. Return both the joined context and the document list.
    return context, my_docs


def lc_rag(question,
           host="localhost",
           port=8001,
           collection_name="langchain",
           reranker_model_path="/gemini/code/arua_completion_project/lc_frame/lc_models/llms/bge-reranker-large",
           top_n=3):
    """Answer *question* with a RAG pipeline: Chroma retrieval, cross-encoder
    rerank, then a Qwen chat completion using the top passages as context.

    Args:
        question: The user's natural-language question.
        host: Hostname of the Chroma HTTP server.
        port: Port of the Chroma HTTP server.
        collection_name: Chroma collection to search.
        reranker_model_path: Local path of the HuggingFace cross-encoder
            used for fine-grained reranking.
        top_n: Number of passages kept after reranking.

    Returns:
        The model's answer as a plain string.
    """
    # Imported lazily so merely importing this module does not pull in the
    # reranker stack (which loads a local HF model).
    from langchain.retrievers import ContextualCompressionRetriever
    from langchain.retrievers.document_compressors import CrossEncoderReranker
    from langchain_community.cross_encoders import HuggingFaceCrossEncoder

    chat, llm, embed = llm_api.get_qwen()

    # Connect to the remote Chroma vector store.
    client = HttpClient(host=host, port=port)
    vectorstore = Chroma(embedding_function=embed, client=client,
                         collection_name=collection_name)

    # Coarse retrieval: top 10 candidates by embedding similarity.
    retriever = vectorstore.as_retriever(search_kwargs={'k': 10})

    # Fine ranking: a cross-encoder rerank narrows the candidates to top_n.
    model = HuggingFaceCrossEncoder(model_name=reranker_model_path)
    compressor = CrossEncoderReranker(model=model, top_n=top_n)
    compression_retriever = ContextualCompressionRetriever(
        base_compressor=compressor, base_retriever=retriever
    )
    compressed_docs = compression_retriever.invoke(input=question)

    # Classic RAG prompt: inject the retrieved context plus the question
    # (the "Augmented" step). The human template text is user-facing and is
    # kept verbatim.
    sys_message = """
    """
    human_message = """
        请根据用户从私有知识库检索出来的上下文来回答用户的问题！
        请注意：
            1，如果用户的问题不在上下文中，请使用自有知识回答！
            2，不要做任何解释，直接输出最终的结果即可！
        检索出的上下文为：
        {context}
        用户的问题为：
        {question}
        答案为：
    """

    sys_message_template = SystemMessagePromptTemplate.from_template(template=sys_message)
    human_message_template = HumanMessagePromptTemplate.from_template(template=human_message)
    prompt = ChatPromptTemplate.from_messages([sys_message_template, human_message_template])

    # Prompt -> chat model -> plain-string answer.
    rag_chain = (
            prompt
            | chat
            | StrOutputParser()
    )
    result = rag_chain.invoke(input={"context": compressed_docs, "question": question})
    logger.info(result)
    return result


# if __name__ == "__main__":
#     question = "在研发方面，上海真兰仪表科技股份有限公司利用哪三地的人才优势，成立研发中心，组建了“三位一体”的研发体系？"
#     lc_rag(question)
