# -*- coding: utf-8 -*-
from langgraph.prebuilt.chat_agent_executor import AgentState
from langchain_core.messages.utils import trim_messages, count_tokens_approximately, AnyMessage
from langgraph.checkpoint.redis import RedisSaver
from langgraph.graph import StateGraph, START, END, MessagesState, add_messages
from typing import TypedDict, Annotated, Any, Optional, List
from pydantic import BaseModel, Field
from langgraph.checkpoint.memory import InMemorySaver
from langchain_core.messages import HumanMessage, SystemMessage, BaseMessage, ToolMessage
from langgraph.managed.is_last_step import RemainingSteps
from langgraph.prebuilt import create_react_agent

from app.utils.custom_exc import CustomException
from .models_manage import get_model, get_embeddings
from .prompts import CHAT_PROMPT, RAG_GENERATE_PROMPT, RAG_DECIDE_PROMPT
from .sentence_utils.sentence_sorted import docs_scores_sorted
from .tools import baidu_search_tool
from . import settings
from langchain_milvus import Milvus
from langchain_core.documents import Document
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser


class State(TypedDict):
    # Conversation state for the simple chat graph built in ChatManage.
    # Running chat history; the add_messages reducer merges/appends by message id.
    messages: Annotated[list[AnyMessage], add_messages]
    # Managed value injected by LangGraph to bound the number of agent steps left.
    remaining_steps: RemainingSteps
    # Identifier of the end user owning this conversation thread.
    user_id: str
    # NOTE(review): flag presumably toggles generation; not read by any node in
    # this file — confirm against callers before relying on it.
    generate: bool


def pre_model_hook(state: AgentState):
    """Shrink the history handed to the LLM to a recent window.

    With ``token_counter=len`` each message counts as one "token", so
    ``max_tokens=20`` effectively keeps at most the last 20 messages.
    The window starts on a human message and ends on a human or tool
    message, and messages are never split (``allow_partial=False``).
    """
    window = trim_messages(
        state["messages"],
        max_tokens=20,
        strategy="last",
        token_counter=len,
        start_on="human",
        end_on=("human", "tool"),
        allow_partial=False,
    )
    return {"llm_input_messages": window}


def get_checkpoint():
    """Build and initialise a Redis-backed LangGraph checkpointer.

    Saved checkpoints expire after ``settings.CHECKPOINT_TTL``; reading a
    checkpoint does not refresh its TTL.
    """
    saver = RedisSaver(
        redis_url=settings.CHECKPOINT_REDIS_URL,
        ttl={'default_ttl': settings.CHECKPOINT_TTL, 'refresh_on_read': False},
    )
    saver.setup()
    return saver


class ChatManage:
    """Chat agent factory.

    Offers two graph builders sharing the same model/tools/checkpointer:
    ``angent_build`` (prebuilt ReAct agent with history trimming) and
    ``graph_build`` (a minimal single-node chat graph).
    """

    def __init__(self, model_name='deepseek-chat'):
        """Initialise checkpointer, prompt, tools and chat model.

        Raises:
            CustomException: if the Redis checkpointer cannot be created.
        """
        try:
            self.checkpoint = get_checkpoint()
        except Exception as e:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # pass through; chain the cause so the real error stays visible.
            raise CustomException('system error') from e

        self.prompt = CHAT_PROMPT
        self.tools = [baidu_search_tool]

        model_conf = settings.MODELS_TYPE_DICT[model_name]
        self.model = get_model(api_key=model_conf['apikey'], api_base=model_conf['api_base'], model=model_name)
        self.graph = None

    # NOTE: method name keeps the original "angent" typo — callers depend on it.
    async def angent_build(self):
        """Build a prebuilt ReAct agent graph into ``self.graph``."""
        # create_react_agent binds `tools` to the model itself; the previous
        # discarded `self.model.bind_tools(...)` call was a no-op and is removed.
        self.graph = create_react_agent(
            self.model,
            tools=self.tools,
            checkpointer=self.checkpoint,
            prompt=self.prompt,
            pre_model_hook=pre_model_hook,
        )

    async def graph_build(self):
        """Build a minimal chat graph (START -> chat_agent -> END) into ``self.graph``."""

        def chat_agent(state: State):
            # BUG FIX: bind_tools returns a NEW runnable — the original call
            # discarded the result, so the invoked model had no tools bound.
            model_with_tools = self.model.bind_tools(self.tools)
            response = model_with_tools.invoke(state["messages"])
            return {"messages": [response]}

        workflow = StateGraph(State)
        workflow.add_node("chat_agent", chat_agent)
        workflow.add_edge(START, "chat_agent")  # entry point
        workflow.add_edge("chat_agent", END)    # exit point

        self.graph = workflow.compile(checkpointer=self.checkpoint)


class GraphState(TypedDict):
    """Shared state flowing between the RAG graph nodes.
    """
    # The user's question driving retrieval, grading and generation.
    question: str
    # Final answer produced by the `generate` node.
    generation: str
    # NOTE(review): not read by any node in this file; routing to web_search is
    # decided via the conditional edge instead — confirm whether this is dead.
    web_search: bool
    # Number of web searches performed so far (capped in decide_to_generate).
    search_count: int
    # Retrieved context. NOTE(review): annotated List[str], but the nodes store
    # langchain Document objects — annotation looks stale.
    documents: List[str]
    # Milvus collection names to retrieve from.
    collection_list: List[str]
    # Identifier of the end user owning this thread.
    user_id: str
    # Chat history; the add_messages reducer merges/appends new messages.
    messages: Annotated[list[AnyMessage], add_messages]


class GradeAnswer(BaseModel):
    """Structured-output schema for the relevance grader: does the retrieved
    context answer the user's question?"""

    binary_score: bool = Field(
        description="Answer addresses the question, 'True' or 'False'"
    )


class RAGAgentManage:
    """Corrective-RAG agent.

    Flow: retrieve from Milvus -> rerank/grade the documents -> an LLM grader
    decides whether the context answers the question; if not, augment the
    context via web search (at most 5 attempts) and re-grade -> generate.
    """

    def __init__(self, model_name='deepseek-chat'):
        """Initialise checkpointer, chat model and embedding function.

        Raises:
            CustomException: if the Redis checkpointer cannot be created.
        """
        try:
            self.checkpoint = get_checkpoint()
        except Exception as e:
            # Narrowed from a bare `except:`; chain the cause for debugging.
            raise CustomException('system error') from e

        model_conf = settings.MODELS_TYPE_DICT[model_name]
        self.model = get_model(api_key=model_conf['apikey'], api_base=model_conf['api_base'], model=model_name)
        self.graph = None
        self.embeddings = get_embeddings()

    async def graph_build(self):
        """Compile the RAG workflow into ``self.graph`` (checkpointed)."""

        model = self.model

        def retrieve(state: GraphState):
            """Retrieve the top-5 documents from every requested collection."""
            collection_list = state['collection_list']
            question = state['question']

            documents = []
            for collection_name in collection_list:
                retriever = Milvus(
                    embedding_function=self.embeddings,
                    collection_name=collection_name,
                    connection_args={"host": settings.CNF_DATA['milvus']['host'], "port": settings.CNF_DATA['milvus']['port'], 'user': settings.CNF_DATA['milvus']['user'], 'password': settings.CNF_DATA['milvus']['password']},
                ).as_retriever(
                    search_kwargs={"k": 5},
                )
                documents.extend(retriever.invoke(question))

            return {"documents": documents, "question": question}

        def grade_documents(state: GraphState):
            """Rerank the retrieved texts against the question; keep the top 3.

            Note: documents are rebuilt from page_content only, so any
            metadata from retrieval is dropped here.
            """
            question = state['question']
            texts = [doc.page_content for doc in state['documents']]
            ranked = docs_scores_sorted(texts, question)
            top_docs = [Document(page_content=text) for text in ranked[:3]]

            return {"documents": top_docs, "question": question}

        def generate(state: GraphState):
            """Answer the question from the graded context, with chat history."""
            question = state['question']
            documents = state['documents']

            # History minus the latest message (the current question), framed
            # by the RAG system prompt and a context-injection template.
            history = state['messages'][:-1]
            history.insert(0, ("system", RAG_GENERATE_PROMPT))
            history.append(("human", "参考上下文: \n\n {document} \n\n 用户问题: {question}"))

            rag_prompt = ChatPromptTemplate.from_messages(history)
            generation_chain = rag_prompt | model | StrOutputParser()
            documents_str = "\n\n".join(doc.page_content for doc in documents)
            generation = generation_chain.invoke({"document": documents_str, "question": question})

            return {"documents": documents, "question": question, "generation": generation, 'messages': [generation]}

        def web_search(state: GraphState):
            """Append a web-search result document and bump the search counter."""
            question = state['question']
            documents = state['documents']
            # Default to 0 so a first pass without the key cannot KeyError.
            search_count = state.get('search_count', 0)

            result = baidu_search_tool.invoke({'query': question})
            data = f"# 联网搜索：{question}\n\n{result}"
            web_results = Document(page_content=data)
            if documents:
                documents.append(web_results)
            else:
                documents = [web_results]

            return {"documents": documents, "question": question, 'search_count': search_count + 1}

        def decide_to_generate(state: GraphState):
            """Route to 'generate' if the context answers the question (or the
            web-search budget is spent), otherwise to 'web_search'."""
            question = state['question']
            documents = state['documents']
            search_count = state.get('search_count', 0)

            structured_llm_grader = model.with_structured_output(GradeAnswer)
            history = state['messages'][:-1]
            history.insert(0, ("system", RAG_DECIDE_PROMPT))
            history.append(("human", "检索到的文档: \n\n {document} \n\n 用户问题: {question}"))

            grade_prompt = ChatPromptTemplate.from_messages(history)
            retrieval_grader = grade_prompt | structured_llm_grader
            documents_str = "\n\n".join(doc.page_content for doc in documents)
            score = retrieval_grader.invoke({"question": question, "document": documents_str})

            # Stop searching after 5 attempts even if the grader is unhappy.
            if score.binary_score or search_count > 4:
                return 'generate'
            return 'web_search'

        workflow = StateGraph(GraphState)

        # Nodes
        workflow.add_node('retrieve', retrieve)
        workflow.add_node('grade_documents', grade_documents)
        workflow.add_node('generate', generate)
        workflow.add_node('web_search', web_search)

        # Edges
        workflow.add_edge(START, "retrieve")  # entry point
        workflow.add_edge("retrieve", "grade_documents")
        workflow.add_edge('web_search', 'grade_documents')  # re-grade after searching

        workflow.add_conditional_edges(
            "grade_documents",
            decide_to_generate,
            {
                'web_search': 'web_search',
                'generate': 'generate',
            },
        )

        workflow.add_edge('generate', END)  # exit point

        self.graph = workflow.compile(checkpointer=self.checkpoint)


