import logging
from typing import List, Sequence, Optional, Any

import tiktoken
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.llms import LLM
from llama_index.core.memory import ChatSummaryMemoryBuffer
from llama_index.core.tools import BaseTool, FunctionTool
from llama_index.core.tools.types import DefaultToolFnSchema, ToolMetadata
from pydantic.v1 import BaseModel

from app.models.log import FuncallLog, FuncallLogType
from app.services.knowledge_services import knowledge_services_query_by_kb_id
from app.services.log_services import add_funcall_log
from sks.agent.settings import Settings
from sks.tools import none_fun

logger = logging.getLogger(__name__)


def isAgentTool(name):
    """Return True when *name* is the tool name of a wrapped sub-agent (``sksAgent<id>``)."""
    return name[:len('sksAgent')] == 'sksAgent'


class AgentMeta(BaseModel):
    """Metadata describing an agent when it is exposed as a callable tool."""
    id: Optional[Any] = None  # id of this assistant, used when running its tools
    description: str = ''  # human-readable name/description of the assistant
    tool_desc: str = ''  # description shown when the agent acts as a tool; empty -> fall back to ``description``
    kb_id: Optional[int] = None  # id of the knowledge base linked to this agent

    def tool_name(self):
        """Tool name under which this agent is registered (``sksAgent<id>``)."""
        return 'sksAgent' + str(self.id)

    def tool_description(self):
        """Prefer the explicit tool description; fall back to ``description`` when empty."""
        return self.tool_desc or self.description


class BaseIntelligentAgent:
    """Tool-calling chat agent built on top of a llama-index ``LLM``.

    The agent keeps its conversation in a ``ChatSummaryMemoryBuffer``,
    can expose other agents as tools (see ``merge_agents_to_tools``) and
    can prepend knowledge-base context to the user's question.
    """

    chat_id: int = None  # chat session id, used when writing logs
    metadata: AgentMeta = None  # metadata used when this agent acts as a tool
    agents: List["BaseIntelligentAgent"] = None  # sub-agents exposed as tools
    llm: LLM = None  # main chat model (required)
    system_prompt: str = "你是一个勤勤恳恳的助手,能耐心的帮助用户"
    tools: List[BaseTool] = None  # plain tools plus wrapped sub-agents
    # A caller-supplied history list is aliased (not copied), so messages
    # appended by the agent remain visible to the caller.
    chat_history: List[ChatMessage] = None
    summarizer_llm: LLM = None  # model used by the summarizing memory buffer

    def __init__(self, **kwargs):
        """Build the agent from keyword arguments.

        Keyword Args:
            llm (LLM): chat model; required.
            system_prompt (str): optional system prompt override.
            chat_history (List[ChatMessage]): existing history; the list is
                aliased, not copied, so the agent's appends stay visible.
            summarizer_llm (LLM): model for history summarization; defaults
                to ``llm``.
            metadata (AgentMeta): tool metadata for this agent.
            tools (List[BaseTool]): plain tools.
            agents (List[BaseIntelligentAgent]): sub-agents exposed as tools.
            chat_id (int): chat session id used for logging.

        Raises:
            Exception: when ``llm`` is missing.
        """
        self.system_prompt = kwargs.get('system_prompt', self.system_prompt)
        if 'llm' not in kwargs:
            raise Exception("llm不能为空")
        self.llm = kwargs['llm']
        # Create fresh mutable defaults per instance: the previous
        # class-level ``[]`` / ``AgentMeta()`` defaults were shared by every
        # instance that did not pass its own value.
        chat_history = kwargs.get('chat_history')
        self.chat_history = chat_history if chat_history is not None else []
        self.summarizer_llm = kwargs.get('summarizer_llm', self.llm)
        self.metadata = kwargs.get('metadata') or AgentMeta()
        self.tools = kwargs.get('tools')
        agents = kwargs.get('agents')
        self.agents = agents if agents is not None else []
        self.merge_agents_to_tools()
        self.chat_id = kwargs.get('chat_id', self.chat_id)
        tokenizer_model = "gpt-4-0125-preview"
        tokenizer_fn = tiktoken.encoding_for_model(tokenizer_model).encode
        self.memory = ChatSummaryMemoryBuffer.from_defaults(
            chat_history=self.chat_history,
            llm=self.summarizer_llm,
            token_limit=256,  # history beyond this many tokens gets summarized
            summarize_prompt="以下是用户和助手之间的对话。就这次对话的内容写一个简洁的总结。字数限制在2000字之内",
            tokenizer_fn=tokenizer_fn,
        )

    def merge_agents_to_tools(self):
        """Wrap each sub-agent as a ``FunctionTool`` and append it to ``tools``.

        Must run exactly once, at construction time, or agent tools would be
        registered multiple times.
        """
        if self.tools is None:
            self.tools = []
        if self.agents:
            for agent in self.agents:
                self.tools.append(FunctionTool(
                    none_fun,
                    metadata=ToolMetadata(
                        name=agent.metadata.tool_name(),
                        description=agent.metadata.tool_description(),
                        fn_schema=DefaultToolFnSchema,
                    ),
                ))

    def printSystemPrompt(self):
        """Print the current system prompt (debug helper)."""
        print(self.system_prompt)

    def add_chat_history(self, chat_msg: ChatMessage):
        """Append one message to the shared history list.

        The current question/answer pair is appended automatically by
        ``chat``/``chat_with_tools``; callers only need to preload history.
        """
        self.chat_history.append(chat_msg)

    def _dispatch_tool_call(self, tool_call, chat_history):
        """Execute one tool call and append the tool result to ``chat_history``.

        ``sksAgent*`` names are routed to the matching sub-agent (and the
        call is written to the funcall log); anything else is matched
        against the plain tools by name.
        """
        tool_name = tool_call.tool_name
        tool_kwargs = tool_call.tool_kwargs
        print(f"Calling {tool_name} with {tool_kwargs}")
        if isAgentTool(tool_name):
            for agent in self.agents:
                if agent.metadata.tool_name() == tool_name:
                    user_msg = tool_kwargs.get('input')
                    agent_resp = agent.chat_with_tools(user_msg, self.chat_id)
                    content = str(agent_resp)
                    funcallInfo = FuncallLog(description=agent.metadata.description,
                                             type=FuncallLogType.AGENT.dictValue, req_args=user_msg,
                                             log_text=content)
                    add_funcall_log(funcallInfo)
                    chat_history.append(
                        ChatMessage(
                            role="tool",
                            content=content,
                            # most LLMs like OpenAI need to know the tool call id
                            additional_kwargs={"tool_call_id": tool_call.tool_id},
                        )
                    )
        else:
            for tool in self.tools:
                if tool.metadata.name == tool_name:
                    tool_output = tool(**tool_kwargs)
                    chat_history.append(
                        ChatMessage(
                            role="tool",
                            content=str(tool_output),
                            # most LLMs like OpenAI need to know the tool call id
                            additional_kwargs={"tool_call_id": tool_call.tool_id},
                        )
                    )

    def chat_and_call_tool(self, chat_history):
        """Run one chat turn, executing requested tool calls in a loop until
        the LLM answers without further tool calls.

        Returns the final ChatResponse.
        """
        llm = self.llm
        # BUGFIX: the original called ``stream_chat`` here but then used
        # ``resp.message`` / ``get_tool_calls_from_response(resp)`` on the
        # resulting generator. Use ``chat_with_tools`` (as the loop below
        # already did) so ``resp`` is a complete ChatResponse.
        resp = llm.chat_with_tools(self.tools, chat_history=chat_history)
        tool_calls = llm.get_tool_calls_from_response(
            resp, error_on_no_tool_call=False
        )

        while tool_calls:
            # add the LLM's tool-call response to the chat history
            chat_history.append(resp.message)
            for tool_call in tool_calls:
                self._dispatch_tool_call(tool_call, chat_history)
            resp = llm.chat_with_tools(self.tools, chat_history=chat_history)
            tool_calls = llm.get_tool_calls_from_response(
                resp, error_on_no_tool_call=False
            )

        return resp

    def stream_chat_and_call_tool(self, chat_history):
        """Like ``chat_and_call_tool`` but streams the first response,
        printing deltas as they arrive. Returns the final ChatResponse.
        """
        llm = self.llm
        stream = llm.stream_chat(tools=self.tools, messages=chat_history)
        # BUGFIX: the original accumulated the delta contents into a plain
        # string and then called ``resp.message`` on it (AttributeError).
        # Each streamed item is a cumulative ChatResponse, so keeping the
        # last one yields the complete response object.
        resp = None
        for delta in stream:
            print(delta.delta, end="")
            resp = delta

        tool_calls = llm.get_tool_calls_from_response(
            resp, error_on_no_tool_call=False
        )

        while tool_calls:
            # add the LLM's tool-call response to the chat history
            chat_history.append(resp.message)
            for tool_call in tool_calls:
                self._dispatch_tool_call(tool_call, chat_history)
            resp = llm.chat_with_tools(self.tools, chat_history=chat_history)
            tool_calls = llm.get_tool_calls_from_response(
                resp, error_on_no_tool_call=False
            )

        return resp

    def format_message_has_knowledge(self, user_msg):
        """Format the user's question, prepending knowledge-base context
        when this agent is linked to a knowledge base (``kb_id``).
        """
        if self.metadata.kb_id is None:
            return user_msg
        kn_out = knowledge_services_query_by_kb_id(kb_id=self.metadata.kb_id, msg=user_msg)
        return f"""
        --------------知识库相关信息--------------
        {str(kn_out)}
        --------------用户问题--------------
        {user_msg}
        """

    def chat_with_tools(self, user_msg: str, chat_id: int = None) -> str:
        """Answer ``user_msg`` with tool calling enabled.

        Builds [system prompt, summarized memory, user message], runs the
        tool-call loop, logs request/response via the callback handler and
        records the turn in history. Returns the assistant's final text.
        """
        _user_msg = self.format_message_has_knowledge(user_msg)
        logger.info(f"chat_with_tools: {_user_msg}")
        user_cm = ChatMessage(role=MessageRole.USER, content=_user_msg)
        messages: List[ChatMessage] = [
            ChatMessage(role=MessageRole.SYSTEM, content=self.system_prompt),
        ]
        if self.chat_history:
            # Use the (possibly summarized) memory instead of the raw history.
            messages.extend(self.memory.get())

        messages.append(user_cm)
        # NOTE(review): 'on_reqeust_event' is the external handler's
        # (misspelled) API name — keep as-is.
        openai_log_id = Settings.openai_callback_handler.on_reqeust_event(body=messages, url="", chat_id=chat_id)
        response = self.chat_and_call_tool(messages)

        Settings.openai_callback_handler.on_response_event(body=response.__dict__, id=openai_log_id)
        _message = response.message.content

        self.add_chat_history(user_cm)
        self.add_chat_history(ChatMessage(role=MessageRole.ASSISTANT, content=_message))
        return _message

    def chat(self, user_msg: str, chat_id: int = None) -> str:
        """Answer ``user_msg`` without tool calling; otherwise mirrors
        ``chat_with_tools`` (memory, logging, history). Returns the text.
        """
        _user_msg = self.format_message_has_knowledge(user_msg)
        user_cm = ChatMessage(role=MessageRole.USER, content=_user_msg)
        messages: List[ChatMessage] = [
            ChatMessage(role=MessageRole.SYSTEM, content=self.system_prompt),
        ]
        if self.chat_history:
            messages.extend(self.memory.get())

        messages.append(user_cm)
        openai_log_id = Settings.openai_callback_handler.on_reqeust_event(body=messages, url="", chat_id=chat_id)
        response = self.llm.chat(messages=messages)
        Settings.openai_callback_handler.on_response_event(body=response.__dict__, id=openai_log_id)
        _message = response.message.content
        self.add_chat_history(user_cm)
        self.add_chat_history(ChatMessage(role=MessageRole.ASSISTANT, content=_message))
        return _message


class IntelligentAgent(BaseIntelligentAgent):
    """Concrete agent; currently adds no behavior beyond the base class."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)


if __name__ == '__main__':
    # NOTE(review): BaseIntelligentAgent.__init__ raises ("llm不能为空") when
    # no 'llm' kwarg is given, so both constructions below fail at runtime —
    # this demo looks stale; confirm intended usage.
    agent = IntelligentAgent()
    agent.printSystemPrompt()
    agent = IntelligentAgent(system_prompt='hello')
    agent.printSystemPrompt()
