'''
langchain 包装的推理类

基于 from algorithms.infer import get_llm_infer
'''
import time

# pip install langchain==0.2.17 -i https://pypi.tuna.tsinghua.edu.cn/simple

from langchain.llms.base import LLM  #
from typing import Any, List, Callable, Optional, Union, Generator, Sequence, Dict, Type, Iterator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain_core.utils.function_calling import convert_to_openai_tool
from pydantic import (
    BaseModel,
    ConfigDict,
    Field,
    SecretStr,
)
from langchain_core.tools import BaseTool
from langchain_core.runnables import Runnable, RunnableMap, RunnablePassthrough
from langchain_core.language_models import LanguageModelInput
from langchain_core.messages import BaseMessage
# 获取当前文件所在目录的绝对路径
import os
import sys

current_dir = os.path.dirname(os.path.abspath(__file__))
# Compute the parent directory's absolute path and add it to sys.path
sys.path.append(os.path.dirname(current_dir))
from algorithms.infer import get_llm_infer, get_remote_llm_infer  # QWen0.5
import traceback
import json
import re


class DeepSeek_R1_Distill_Qwen_LLM(LLM):
    # Custom completion-style LLM wrapping a locally loaded
    # DeepSeek-R1-Distill-Qwen model (plain text in/out, no chat messages).
    # Inference callable; assigned in __init__ (declared here as a pydantic field).
    llm_infer: Callable = None

    def __init__(self, mode_name_or_path: str):
        """Load the local model and keep its inference callable.

        Args:
            mode_name_or_path: filesystem path to the model weights.
        """
        super().__init__()

        print("正在从本地加载模型...")
        self.llm_infer = get_llm_infer(lora_path=None, is_rag=False, model_path=mode_name_or_path)
        print("完成本地模型的加载")

    def _call(self, prompt: str, stop: Optional[List[str]] = None,
              run_manager: Optional[CallbackManagerForLLMRun] = None,
              **kwargs: Any) -> str:
        """Run one completion and return the text after the </think> marker.

        Recognized kwargs: max_new_tokens (default 1024), is_streamer
        (default False; passed through to the inference callable).
        `stop` is accepted for interface compatibility but not forwarded.
        """
        max_new_tokens = kwargs.get('max_new_tokens', 1024)
        is_streamer = kwargs.get('is_streamer', False)
        print(f'prompt {prompt}')
        response: Union[str, Generator] = \
            self.llm_infer(prompt, is_streamer=is_streamer, max_new_tokens=max_new_tokens)[0]
        # Strip the DeepSeek-R1 reasoning prefix; keep only the final answer.
        response = response.split('</think>')[-1]
        return response  # str

    @property
    def _llm_type(self) -> str:
        return "DeepSeek_R1_Distill_Qwen_LLM"

    def bind_tools(
            self,
            tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]],
            **kwargs: Any,
    ) -> Runnable[LanguageModelInput, BaseMessage]:
        """Bind tool-like objects to this chat model.

        Args:
            tools: A list of tool definitions to bind to this chat model.
                Can be  a dictionary, pydantic model, callable, or BaseTool. Pydantic
                models, callables, and BaseTools will be automatically converted to
                their schema dictionary representation.
            **kwargs: Any additional parameters to pass to the
                :class:`~langchain.runnable.Runnable` constructor.
        """

        formatted_tools = [convert_to_openai_tool(tool) for tool in tools]
        return super().bind(tools=formatted_tools, **kwargs)  # downstream calls see the tool schemas; model may emit tool_calls


# Role of the output parser:
#  1. describe the expected model output format (injected into the prompt);
#  2. convert the model's raw output sequence into a dict.

from pydantic import BaseModel, Field, model_validator
from langchain_core.output_parsers import PydanticOutputParser


# Define your desired data structure.
class Joke(BaseModel):
    """Schema for a single tool invocation emitted by the model.

    NOTE(review): the class name "Joke" is a leftover from the LangChain
    output-parser documentation example; what it actually models is a tool
    call (tool name + argument dict). Renaming would touch the module-level
    parser, so the name is kept for compatibility.
    """

    name: str = Field(description="所调用工具的名称")
    args: dict = Field(description="所调用工具的参数", default_factory=dict)


# Module-level parser: get_format_instructions() renders the Joke schema for
# prompts, and invoke() turns raw model text back into a Joke instance.
parser = PydanticOutputParser(pydantic_object=Joke)

from langchain_core.language_models import BaseChatModel
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, AIMessageChunk


class Custom_Langchain_ChatLLM(BaseChatModel):  # supports bind_tools
    """Custom LangChain chat model wrapping a local Qwen-style inference callable.

    The wrapped callable accepts ``messages`` (list of ``{'role', 'content'}``
    dicts), ``is_streamer``, ``max_new_tokens`` and ``tools_schema`` and
    returns either ``[text]`` (non-streaming) or a token generator
    (streaming) — TODO confirm against algorithms.infer.get_llm_infer.
    Tool calls are emitted by the model as ``<tool_call>{...}</tool_call>``
    spans and converted to LangChain's ``tool_calls`` format here.
    """

    # Inference callable; assigned in __init__ (declared as a pydantic field).
    llm_infer: Callable = None
    # Kept for backward compatibility; not used by the current implementation.
    n: int = 300

    def __init__(self, mode_name_or_path):
        """Build the model from a local path, or adopt an existing callable.

        Args:
            mode_name_or_path: model directory path (str) or an already
                constructed inference callable.

        Raises:
            Exception: if the argument is neither a str nor a callable.
        """
        super().__init__()

        print("正在从本地加载模型...")
        if isinstance(mode_name_or_path, str):
            self.llm_infer = get_llm_infer(lora_path=None, is_rag=False, model_path=mode_name_or_path)
        elif isinstance(mode_name_or_path, Callable):
            self.llm_infer = mode_name_or_path
        else:
            raise Exception(f'mode_name_or_path错误 {mode_name_or_path}')
        print("完成本地模型的加载")

    @staticmethod
    def _to_qwen_messages(messages: List[BaseMessage]) -> List[Dict[str, str]]:
        # Convert LangChain messages into the role/content dicts the Qwen
        # inference callable expects. Shared by _generate and _stream
        # (previously duplicated in both).
        l2q = {
            'ai': 'assistant',
            'human': 'user',
            'system': 'system',
            'tool': 'tool',
        }
        return [{'role': l2q[m.type], 'content': m.content} for m in messages]

    def _generate(
            self,
            messages: List[BaseMessage],
            stop: Optional[List[str]] = None,
            run_manager: Optional[CallbackManagerForLLMRun] = None,
            **kwargs: Any,  # carries the bound tool schemas
    ) -> ChatResult:
        """Run one non-streaming chat completion.

        Recognized kwargs: max_new_tokens (default 1024), is_streamer
        (default False), tools (OpenAI-format tool schemas injected by
        bind_tools). ``stop`` is accepted for interface compatibility but
        not forwarded to the inference callable.
        """
        max_new_tokens = kwargs.get('max_new_tokens', 1024)
        is_streamer = kwargs.get('is_streamer', False)
        tools_schema = kwargs.get('tools', [])  # set by bind_tools

        qwen_messages = self._to_qwen_messages(messages)
        # Inference; tool schemas are serialized into the prompt downstream.
        res_seq = self.llm_infer(messages=qwen_messages, is_streamer=is_streamer,
                                 max_new_tokens=max_new_tokens,
                                 tools_schema=tools_schema)[0]
        print(res_seq)

        tool_calls = []
        if len(tools_schema) > 0:
            # Reuse the shared parser instead of duplicating the regex/json
            # extraction logic inline (the previous copy here was identical).
            tool_calls = self.qwen_seq_2_lc_tool_calls(res_seq, is_tool_call_chunks=False)
        message = AIMessage(
            content=res_seq,
            tool_calls=tool_calls,  # LangChain tool_calls format
            additional_kwargs={},  # Used to add additional payload (e.g., function calling request)
            response_metadata={  # Use for response metadata
                "time_in_seconds": 3,
            },
        )
        generation = ChatGeneration(message=message)
        return ChatResult(generations=[generation])

    def _stream(
            self,
            messages: List[BaseMessage],
            stop: Optional[List[str]] = None,
            run_manager: Optional[CallbackManagerForLLMRun] = None,
            **kwargs: Any,
    ) -> Iterator[ChatGenerationChunk]:
        """Stream the chat completion token by token.

        Yields one ChatGenerationChunk per generated token, then a final
        empty-content chunk carrying any parsed tool_calls and response
        metadata (see https://www.langchain.com.cn/docs/how_to/tool_streaming/).
        """
        max_new_tokens = kwargs.get('max_new_tokens', 1024)
        tools_schema = kwargs.get('tools', [])  # set by bind_tools

        qwen_messages = self._to_qwen_messages(messages)
        # Always stream here; _stream is only invoked for streaming requests.
        res_gen = self.llm_infer(messages=qwen_messages, is_streamer=True,
                                 max_new_tokens=max_new_tokens,
                                 tools_schema=tools_schema)

        accumulated = ChatGenerationChunk(message=AIMessageChunk(content=''))
        for token in res_gen:
            chunk = ChatGenerationChunk(message=AIMessageChunk(content=token))
            if run_manager:
                # Optional in newer LangChain versions (called automatically).
                run_manager.on_llm_new_token(token, chunk=chunk)
            accumulated += chunk  # merge: accumulated.message.content grows
            yield chunk

        # After all tokens: parse tool calls from the full generated text.
        whole_res_seq = accumulated.message.content
        tool_call_chunks = []
        if len(tools_schema) > 0:
            tool_call_chunks = self.qwen_seq_2_lc_tool_calls(whole_res_seq,
                                                             is_tool_call_chunks=False)
        final_chunk = ChatGenerationChunk(
            message=AIMessageChunk(content="", tool_calls=tool_call_chunks,
                                   response_metadata={"time_in_sec": 3})
        )
        if run_manager:
            # BUG FIX: previously this passed the stale loop variable `token`
            # (NameError on an empty stream, wrong token otherwise); the final
            # metadata chunk carries no new token text.
            run_manager.on_llm_new_token("", chunk=final_chunk)
        yield final_chunk

    @property
    def _llm_type(self) -> str:
        return "DeepSeek_R1_Distill_Qwen_ChatLLM"

    def bind_tools(
            self,
            tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]],
            **kwargs: Any,
    ) -> Runnable[LanguageModelInput, BaseMessage]:
        """Bind tools: convert each to an OpenAI tool schema and stash it in
        the bound kwargs so _generate/_stream receive it via kwargs['tools']."""
        formatted_tools = [convert_to_openai_tool(tool) for tool in tools]
        return super().bind(tools=formatted_tools, **kwargs)

    @staticmethod
    def get_ymdhmss(id=0):
        """Return a millisecond-resolution local timestamp string
        (``YYYYmmdd_HHMMSS_mmm``), used as a tool-call id base.

        Args:
            id: seconds offset added to the current time so successive
                calls can be forced to differ. (Parameter name kept for
                backward compatibility although it shadows the builtin.)
        """
        ct = time.time() + id
        local_time = time.localtime(ct)
        data_head = time.strftime("%Y%m%d_%H%M%S", local_time)
        data_secs = (ct - int(ct)) * 1000
        time_stamp = "%s_%03d" % (data_head, data_secs)
        return time_stamp

    @staticmethod
    def qwen_seq_2_lc_tool_calls(res_seq, is_tool_call_chunks=False):
        """Convert Qwen ``<tool_call>{...}</tool_call>`` spans in the output
        text into LangChain tool_calls dicts.

        Args:
            res_seq: raw model output text.
            is_tool_call_chunks: if True, stringify ``args`` (the chunk
                format carries args as a string).

        Returns:
            List of dicts with ``name``/``args``/``id`` keys; ``[]`` when
            parsing fails (best-effort by design — errors are logged).
        """
        try:
            pattern = r'<tool_call>\s*({.*?})\s*</tool_call>'
            matches = re.findall(pattern, res_seq, re.DOTALL)
            tool_calls = [json.loads(match) for match in matches]  # n calls
            base_id = Custom_Langchain_ChatLLM.get_ymdhmss()
            for ind, tool_call_dict in enumerate(tool_calls):
                tool_call_dict['id'] = f'{base_id}_{ind}'
                # Qwen emits 'arguments'; LangChain expects 'args'.
                if 'arguments' in tool_call_dict.keys():
                    tool_call_dict['args'] = tool_call_dict.pop('arguments')
                    if is_tool_call_chunks:
                        tool_call_dict['args'] = str(tool_call_dict['args'])
            print(f'tool_calls {tool_calls}')
            return tool_calls
        except Exception:  # BUG FIX: bare except also caught SystemExit/KeyboardInterrupt
            print('结构化res_seq错误')
            print(traceback.format_exc())
            return []


def test_DeepSeek_R1_Distill_Qwen_LLM():
    """Smoke test: load the local model, ask a question, and split the
    response into its <think> reasoning and final answer parts."""
    import re

    def split_text(text):
        # Match "<think>reasoning</think>answer"; DOTALL so newlines match.
        pattern = re.compile(r'<think>(.*?)</think>(.*)', re.DOTALL)
        match = pattern.search(text)

        if match:
            think_content = match.group(1).strip()
            answer_content = match.group(2).strip()
        else:
            # No reasoning block: the whole text is the answer.
            think_content = ""
            answer_content = text.strip()

        return think_content, answer_content

    model_path = r'/home/ps/zhangxiancai/llm_deploy/bigfiles/models/DeepSeek-R1-Distill-Qwen-7B'
    llm = DeepSeek_R1_Distill_Qwen_LLM(mode_name_or_path=model_path)

    # BUG FIX: calling the LLM object directly (llm(...)) relies on the
    # deprecated __call__ API; .invoke() is the supported entry point.
    response = llm.invoke('你是谁')
    think, answer = split_text(response)
    print("***思考***")
    print(think)
    print("***回答***")
    print(answer)


def test_in_langchain():
    """Smoke test: run the custom LLM behind a ChatPromptTemplate."""
    from langchain_core.prompts import ChatPromptTemplate

    prompt_template = ChatPromptTemplate.from_messages(
        [("system", "以海盗口味回答问题"), ("human", "{input}")]
    )

    model_path = r'D:\code\other\LLMs\models\DeepSeek-R1-Distill-Qwen-1.5B'
    llm = DeepSeek_R1_Distill_Qwen_LLM(mode_name_or_path=model_path)
    rendered_prompt = prompt_template.invoke({'input': '你是谁'})
    print(llm.invoke(rendered_prompt))


def test_3():
    """Smoke test: bind a tool to the completion-style LLM and invoke it."""
    from lc_tools import open_pycharm

    model_path = r'D:\code\other\LLMs\models\DeepSeek-R1-Distill-Qwen-1.5B'
    llm = DeepSeek_R1_Distill_Qwen_LLM(mode_name_or_path=model_path)
    # The bound runnable's input/output flow is tool-aware.
    llm_with_tools = llm.bind_tools([open_pycharm])
    print(llm_with_tools.invoke('现在请打开pycharm编程软件'))


def test_DeepSeek_R1_Distill_Qwen_ChatLLM():
    """Smoke test: stream a tool-call conversation through the chat model."""
    from my_langchain.lc_tools import get_tool_open_pycharm

    model_path = r'/home/ps/zhangxiancai/llm_deploy/bigfiles/models/Qwen2.5-14B-Instruct'
    chatllm = Custom_Langchain_ChatLLM(mode_name_or_path=model_path)
    chatllm = chatllm.bind_tools([get_tool_open_pycharm()])

    conversation = [
        AIMessage(content="Hi there human!"),
        HumanMessage(content='打开pycharm编程软件'),
    ]
    for piece in chatllm.stream(conversation):
        print(piece)


if __name__ == '__main__':
    # Manual smoke tests; enable the one matching your local environment.
    # test_DeepSeek_R1_Distill_Qwen_LLM()
    # test_in_langchain()
    # test_3()
    test_DeepSeek_R1_Distill_Qwen_ChatLLM()
