
from fastapi import  HTTPException
from langchain_openai import OpenAIEmbeddings, ChatOpenAI
from langchain_community.tools.tavily_search import TavilySearchResults

from langchain.chains import RetrievalQA
from langchain import hub
import json
import pathlib
from typing_extensions import Annotated
from typing_extensions import TypedDict
from langgraph.graph import StateGraph, START, END
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode, tools_condition
from langgraph.checkpoint.memory import MemorySaver
from langchain_deepseek import ChatDeepSeek
from createVectorDB import load_vector_store
from siliconflow_embeddings import SiliconFlowEmbeddings

# from langsmith import traceable

# 读取环境变量
from dotenv import load_dotenv

import ctypes
# SECURITY(review): hardcoded OpenAI API key committed to source — this key is
# exposed and must be rotated immediately. Load it from the environment instead
# (note: load_dotenv() runs below at module level, so an env-based lookup here
# would have to happen after that call). This variable is not referenced in the
# visible code of this file — confirm it is used elsewhere before removing.
api_key="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"
# base_url="https://api.openai.com/v1"
class KBDict:
    """Metadata for one vector-database knowledge-base collection.

    Attributes:
        description: Human-readable description of the collection; used as the
            description of the retriever tool handed to the agent.
        collection_name: Name of the collection inside the vector store.
        vector_db_path: Filesystem path of the vector database directory.
    """

    def __init__(self, description: str, collection_name: str, vector_db_path: str):
        self.description = description
        self.collection_name = collection_name
        self.vector_db_path = vector_db_path

    def __repr__(self) -> str:
        # Helpful when printing the list of discovered knowledge bases.
        return (f"{type(self).__name__}(description={self.description!r}, "
                f"collection_name={self.collection_name!r}, "
                f"vector_db_path={self.vector_db_path!r})")

def get_short_path(path: str) -> str:
    """Convert *path* to its Windows 8.3 short form via GetShortPathNameW.

    Windows-only (uses ctypes.windll). Returns the original path unchanged if
    the conversion fails or the result does not fit the buffer — the original
    implementation silently returned an empty/truncated string in those cases.
    """
    buffer = ctypes.create_unicode_buffer(512)
    # GetShortPathNameW returns the number of characters copied on success,
    # 0 on failure, or the required buffer size if the buffer is too small.
    copied = ctypes.windll.kernel32.GetShortPathNameW(path, buffer, len(buffer))
    if copied == 0 or copied >= len(buffer):
        return path
    return buffer.value
# Load environment variables from a local .env file at import time
# (presumably the API keys for the services used below — verify against .env).
load_dotenv()

# @traceable
async def ask_agent(message: str, chat_history: list, user_id: str, ifUseKnowledgeBase: bool, ifUseWeb: bool, embeddings) -> str:
    """Run one turn of a tool-augmented conversation through a langgraph agent.

    Args:
        message: The user's current message.
        chat_history: Prior turns; each item exposes ``.sender`` ("user"/"ai")
            and ``.content``. The last entry is assumed to be the current
            message and is skipped when replaying history.
        user_id: Folder name of the user; used to discover personal knowledge
            bases (the shared "public" folder is always searched too).
        ifUseKnowledgeBase: When True, attach retriever tools built from the
            user's vector stores.
        ifUseWeb: When True, attach a Tavily web-search tool.
        embeddings: Embedding model used to open the vector stores.

    Returns:
        The agent's final reply text.

    Raises:
        HTTPException: 400 for an invalid message, 500 for internal failures.
    """
    import os  # local import: only needed for the API-key lookup below
    from langchain.tools.retriever import create_retriever_tool

    if not message or not isinstance(message, str):
        # Fix: invalid client input used to be raised as ValueError and
        # re-wrapped into a 500; report it as a 400 instead.
        raise HTTPException(status_code=400, detail="Invalid message input")
    try:
        llm = ChatDeepSeek(
            model="deepseek-chat",
            temperature=0,
            max_tokens=None,
            timeout=None,
            max_retries=2,
            # SECURITY: prefer the environment; the literal fallback preserves
            # previous behavior, but this key is committed and must be rotated.
            api_key=os.getenv("DEEPSEEK_API_KEY", "sk-c29b014fa48f4a95bca0b44455f55ea9"),
        )

        tools = []
        if ifUseWeb:
            tools.append(TavilySearchResults(max_results=2))

        if user_id and ifUseKnowledgeBase:
            # Personal knowledge bases plus the shared "public" ones.
            kb_list = get_user_vectorDB(user_id) + get_user_vectorDB('public')
            # Tool names stay numbered from 1 even for KBs that fail to load,
            # matching the original counter behavior.
            for i, kb in enumerate(kb_list, start=1):
                vectorstore = load_vector_store(
                    vector_db_path=kb.vector_db_path,
                    collection_name=kb.collection_name,
                    embeddings=embeddings,
                )
                print("Loading vectorstore from:", kb.collection_name)
                print('描述：' + kb.description)
                if vectorstore is None:
                    print(f"❌ 无法加载向量库: {kb.collection_name}")
                    continue
                retriever = vectorstore.as_retriever(search_kwargs={"k": 10})
                tools.append(create_retriever_tool(
                    retriever=retriever,
                    name='retriever' + str(i),
                    description=kb.description,
                ))

        memory = MemorySaver()

        class State(TypedDict):
            messages: Annotated[list, add_messages]

        graph_builder = StateGraph(State)
        llm_with_tools = llm.bind_tools(tools)

        def chatbot(state: State):
            return {"messages": [llm_with_tools.invoke(state["messages"])]}

        graph_builder.add_node("chatbot", chatbot)
        graph_builder.add_node("tools", ToolNode(tools=tools))
        # After every tool call, return to the chatbot so it can decide the
        # next step (answer or call another tool).
        graph_builder.add_conditional_edges("chatbot", tools_condition)
        graph_builder.add_edge("tools", "chatbot")
        graph_builder.add_edge(START, "chatbot")
        graph = graph_builder.compile(checkpointer=memory)

        # MemorySaver is created fresh per call, so the fixed thread id only
        # scopes checkpoints within this single request.
        config = {"configurable": {"thread_id": "1"}}

        chat_history_messages = []
        if chat_history:
            # Skip the last entry: it is the current message, appended below.
            for chat in chat_history[:-1]:
                if chat.sender == "user" and chat.content:
                    chat_history_messages.append({"role": "user", "content": chat.content})
                elif chat.sender == "ai" and chat.content:
                    chat_history_messages.append({"role": "ai", "content": chat.content})
        print("Chat History:", chat_history_messages)
        chat_history_messages.append({"role": "user", "content": message})

        responses = await graph.ainvoke({"messages": chat_history_messages}, config)
        answer = responses["messages"][-1].content
        print(answer)
        return answer
    except HTTPException:
        raise  # already carries the intended status/detail
    except Exception as e:
        # Fix: the original log line named the wrong function.
        print("Error in ask_agent:", e)
        raise HTTPException(status_code=500, detail="Error processing request")


# 流式输出
async def ask_agent_stream(
        message: str,
        chat_history: list,
        user_id: str,
        ifUseKnowledgeBase: bool,
        ifUseWeb: bool,
        systemPrompt: str,
        embeddings
        ):
    """Stream a tool-augmented conversation turn through a langgraph agent.

    Same pipeline as ``ask_agent``, but prepends *systemPrompt* as a system
    message and returns the async stream produced by ``graph.astream`` rather
    than the final answer (the original ``-> str`` annotation was wrong).

    Returns:
        The async iterator from ``graph.astream(..., stream_mode="messages")``.

    Raises:
        HTTPException: 400 for an invalid message, 500 for failures while
            building the agent. NOTE: ``astream`` is lazy — errors raised while
            the caller consumes the stream are NOT caught here.
    """
    import os  # local import: only needed for the API-key lookup below
    from langchain.tools.retriever import create_retriever_tool

    if not message or not isinstance(message, str):
        # Fix: invalid client input used to be raised as ValueError and
        # re-wrapped into a 500; report it as a 400 instead.
        raise HTTPException(status_code=400, detail="Invalid message input")
    try:
        llm = ChatDeepSeek(
            model="deepseek-chat",
            temperature=0,
            max_tokens=None,
            timeout=None,
            max_retries=2,
            # SECURITY: prefer the environment; the literal fallback preserves
            # previous behavior, but this key is committed and must be rotated.
            api_key=os.getenv("DEEPSEEK_API_KEY", "sk-c29b014fa48f4a95bca0b44455f55ea9"),
        )

        tools = []
        if ifUseWeb:
            tools.append(TavilySearchResults(max_results=2))

        if user_id and ifUseKnowledgeBase:
            # Personal knowledge bases plus the shared "public" ones.
            kb_list = get_user_vectorDB(user_id) + get_user_vectorDB('public')
            # Tool names stay numbered from 1 even for KBs that fail to load,
            # matching the original counter behavior.
            for i, kb in enumerate(kb_list, start=1):
                vectorstore = load_vector_store(
                    vector_db_path=kb.vector_db_path,
                    collection_name=kb.collection_name,
                    embeddings=embeddings,
                )
                print("Loading vectorstore from:", kb.collection_name)
                print('描述：' + kb.description)
                if vectorstore is None:
                    print(f"❌ 无法加载向量库: {kb.collection_name}")
                    continue
                retriever = vectorstore.as_retriever(search_kwargs={"k": 10})
                tools.append(create_retriever_tool(
                    retriever=retriever,
                    name='retriever' + str(i),
                    description=kb.description,
                ))

        memory = MemorySaver()

        class State(TypedDict):
            messages: Annotated[list, add_messages]

        graph_builder = StateGraph(State)
        llm_with_tools = llm.bind_tools(tools)

        def chatbot(state: State):
            return {"messages": [llm_with_tools.invoke(state["messages"])]}

        graph_builder.add_node("chatbot", chatbot)
        graph_builder.add_node("tools", ToolNode(tools=tools))
        # After every tool call, return to the chatbot so it can decide the
        # next step (answer or call another tool).
        graph_builder.add_conditional_edges("chatbot", tools_condition)
        graph_builder.add_edge("tools", "chatbot")
        graph_builder.add_edge(START, "chatbot")
        graph = graph_builder.compile(checkpointer=memory)

        # MemorySaver is created fresh per call, so the fixed thread id only
        # scopes checkpoints within this single request.
        config = {"configurable": {"thread_id": "1"}}

        chat_history_messages = [{"role": "system", "content": systemPrompt}]
        if chat_history:
            # Skip the last entry: it is the current message, appended below.
            for chat in chat_history[:-1]:
                if chat.sender == "user" and chat.content:
                    chat_history_messages.append({"role": "user", "content": chat.content})
                elif chat.sender == "ai" and chat.content:
                    chat_history_messages.append({"role": "ai", "content": chat.content})
        print("Chat History:", chat_history_messages)
        chat_history_messages.append({"role": "user", "content": message})

        return graph.astream(
            {"messages": chat_history_messages},
            config,
            stream_mode="messages",
        )
    except HTTPException:
        raise  # already carries the intended status/detail
    except Exception as e:
        # Fix: the original log line named the wrong function.
        print("Error in ask_agent_stream:", e)
        raise HTTPException(status_code=500, detail="Error processing request")
    
async def ask_knowledge_base(message: str, vectorstore) -> dict:
    """Answer *message* with RAG over *vectorstore* and return cited sources.

    Args:
        message: The user's question.
        vectorstore: A vector store exposing ``as_retriever``.

    Returns:
        ``{"message": <answer text>, "references": [{"source", "content"}, ...]}``
        (the original ``-> str`` annotation was wrong).

    Raises:
        HTTPException: 400 for an invalid message, 500 for internal failures.
    """
    if not message or not isinstance(message, str):
        # Fix: invalid client input used to be raised as ValueError and
        # re-wrapped into a 500; report it as a 400 instead.
        raise HTTPException(status_code=400, detail="Invalid message input")
    try:
        model = ChatOpenAI(
            model="gpt-4o-mini",
            temperature=0,
        )
        retriever = vectorstore.as_retriever(search_kwargs={"k": 5})
        # NOTE(review): this retrieval only builds the reference list;
        # RetrievalQA below runs its own retrieval with the same retriever,
        # so the vector search executes twice per request.
        docs = retriever.get_relevant_documents(message)
        references = [
            # Fix: .get avoids a KeyError (-> HTTP 500) for docs without a
            # "source" metadata entry.
            {"source": doc.metadata.get("source"), "content": doc.page_content}
            for doc in docs
        ]
        print("检索到的参考文献：", references)
        prompt = hub.pull("rlm/rag-prompt")
        chain = RetrievalQA.from_llm(llm=model, retriever=retriever, prompt=prompt)
        response = await chain.ainvoke(message)
        print(response)
        return {"message": response["result"], "references": references}
    except HTTPException:
        raise  # already carries the intended status/detail
    except Exception as e:
        # Fix: the original log line named the wrong function.
        print("Error in ask_knowledge_base:", e)
        raise HTTPException(status_code=500, detail="Error processing request")

#获取user_id下的所有子文件夹，并打开文件夹下的config.json文件，获取detail
def get_user_vectorDB(user_id: str) -> list[KBDict]:
    """List the knowledge bases stored under the *user_id* folder.

    Each immediate sub-directory of ``user_id`` containing a ``config.json``
    with ``detail`` and ``collection_name`` keys is one knowledge base. The
    vector database itself is expected under ``<user_id>/vector_db``.

    Args:
        user_id: Folder name (also used directly as a filesystem path).

    Returns:
        A list of KBDict entries; empty when *user_id* is falsy or the folder
        does not exist.

    Raises:
        HTTPException: 500 on unexpected errors (e.g. unreadable JSON).
    """
    # NOTE(review): user_id is used directly as a filesystem path — confirm it
    # is validated upstream (path-traversal risk if it comes from the request).
    if not user_id:
        return []
    try:
        folder_path = pathlib.Path(user_id)
        if not folder_path.exists():
            return []
        # All of this user's collections share one vector_db directory.
        vector_db_path = user_id + "/" + "vector_db"
        user_knowledge_collections: list[KBDict] = []
        for entry in folder_path.iterdir():
            if not entry.is_dir():
                continue  # knowledge bases are sub-folders of the user folder
            config_file = entry / "config.json"
            if not config_file.exists():
                continue  # not a knowledge-base folder
            with open(config_file, "r", encoding='utf-8') as file:
                config = json.load(file)
            detail = config.get("detail")
            collection_name = config.get("collection_name")
            if detail is None or collection_name is None:
                # Fix: a malformed config.json used to abort the whole listing
                # with a KeyError (-> HTTP 500); skip just this entry instead.
                print(f"Skipping knowledge base with incomplete config: {config_file}")
                continue
            user_knowledge_collections.append(
                KBDict(
                    description=detail,
                    collection_name=collection_name,
                    vector_db_path=vector_db_path,
                )
            )
        return user_knowledge_collections
    except Exception as e:
        # Fix: the original log line named the wrong function.
        print("Error in get_user_vectorDB:", e)
        raise HTTPException(status_code=500, detail="Error processing request")




    

