import time
from itertools import tee
from typing import Union, Generator

import util
from script import common_client, merge_thread_local
from script.common_client import LlmRes
from util import LlmMessage

import inspect
import json
import types
from functools import wraps


class LlmToolRes(LlmRes):
    """LLM response extended with parsed ReAct tool-call information.

    Wraps a base ``LlmRes`` and adds the thought / tool name / tool arguments /
    direct user reply extracted from the model output.
    """

    def __init__(self, llm_res: LlmRes = None, thought: str = None, tool_name: str = None, tool_args: dict = None, reply_user: str = None):
        # NOTE: llm_res is required in practice; passing None raises AttributeError below.
        self.status = llm_res.status
        self.result = llm_res.result
        self.ext_info = llm_res.ext_info
        self.message = llm_res.message
        self.cost = llm_res.cost
        self.messages = llm_res.messages
        self.finished = llm_res.finished
        # An explicitly supplied thought overrides the one carried by the base response.
        self.thought = thought if thought else llm_res.thought
        self.tool_name = tool_name
        self.tool_args = tool_args
        self.reply_user = reply_user

    def is_only_thought(self):
        """True when the model produced a thought but no tool args and no user reply yet."""
        return self.tool_args is None and self.reply_user is None and self.thought is not None

    def is_complete_tool(self):
        """True when both the tool name and its arguments have been parsed."""
        return self.tool_name is not None and self.tool_args is not None

    def is_reply_user(self):
        """True when the model produced a direct reply for the end user."""
        return self.reply_user is not None

    def to_dict(self):
        """Serialize: the base-class dict plus the tool-call fields."""
        # Renamed local from `dict` — never shadow the builtin.
        data = super().to_dict()
        data.update({
            'tool_name': self.tool_name,
            'tool_args': self.tool_args,
            'reply_user': self.reply_user
        })
        return data


# 回复用户
# A chunk of user-facing reply emitted by a tool or by the model itself.
class ReplyUser():
    def __init__(self, content: str = None, finished: bool = True, thought: str = None):
        self.content = content
        self.thought = thought
        self.finished = finished

    def to_dict(self):
        """Plain-dict form (key order: content, thought, finished)."""
        return dict(
            content=self.content,
            thought=self.thought,
            finished=self.finished,
        )

    def to_json(self):
        """JSON string form; non-ASCII kept as-is, odd values stringified."""
        return json.dumps(self.to_dict(), ensure_ascii=False, default=str)

    def __repr__(self):
        # Debug representation is simply the JSON serialization.
        return self.to_json()


class LlmTool():
    """Descriptor of a callable tool exposed to the LLM in ReAct mode."""

    def __init__(self, name: str = None, description: str = None, parameters: dict = None, required: list[str] = None, func=None):
        self.name = name
        self.description = description
        self.parameters = parameters  # property-name -> schema spec (flat, no 'type: object' wrapper)
        self.required = required      # names of mandatory parameters
        self.func = func              # implementation, called as func(args: dict, req: ReactReq)

    @classmethod
    def from_dict(cls, func=None, data: dict = None):
        """Build a tool from a schema dict plus its implementation.

        Accepts either the flat form ``{'parameters': {...props...}, 'required': [...]}``
        or the nested object form
        ``{'parameters': {'type': 'object', 'properties': {...}, 'required': [...]}}``.

        :raises ValueError: when func is missing or name/description is empty.
        """
        if func is None:
            raise ValueError('function 不可为空')
        parameters = data.get('parameters')
        required = data.get('required')
        if parameters and parameters.get('type') == 'object':
            # BUG FIX: read 'required' from the object-level schema BEFORE
            # replacing `parameters` with its 'properties' sub-dict. The old
            # order looked 'required' up inside 'properties', where it never
            # lives, so required was silently dropped for object schemas.
            required = parameters.get('required')
            parameters = parameters.get('properties')
        name = data.get('name')
        description = data.get('description')
        if not name or not description:
            raise ValueError('工具名称及描述不可为空')
        return cls(name=name, description=description, parameters=parameters, required=required, func=func)

    @classmethod
    def from_func(cls, func=None):
        """Build a tool from a function decorated with @stool (reads the attached metadata)."""
        if func is None:
            raise ValueError('function 不可为空')
        name = getattr(func, 'name', None)
        description = getattr(func, 'description', None)
        if not name or not description:
            raise ValueError('工具名称及描述不可为空')
        parameters = getattr(func, 'parameters', None)
        required = getattr(func, 'required', None)
        return cls(name=name, description=description, parameters=parameters, required=required, func=func)

    @classmethod
    def from_module(cls, module):
        """Collect every @stool-decorated function in a module into LlmTool objects."""
        if not isinstance(module, types.ModuleType):
            raise ValueError('py should be a python module')

        # All functions defined (or imported) in the module.
        funcs = [member for member in inspect.getmembers(module) if inspect.isfunction(member[1])]

        # Keep only those carrying the full set of @stool metadata attributes.
        tool_funcs = [func for name, func in funcs if hasattr(func, 'name') and hasattr(func, 'description') and hasattr(func, 'parameters') and hasattr(func, 'required')]

        return [cls.from_func(func) for func in tool_funcs]

    def to_dict(self):
        """Serialize to the nested object-schema form (inverse of from_dict's nested input)."""
        return {
            "name": self.name,
            "description": self.description,
            "parameters": {
                "type": "object",
                "properties": self.parameters,
                "required": self.required
            }
        }

    def to_json(self):
        """JSON string form; non-ASCII kept as-is, odd values stringified."""
        return json.dumps(self.to_dict(), ensure_ascii=False, default=str)

    def __repr__(self):
        return self.to_json()


class ReactReq():
    """Request payload for one ReAct run."""

    def __init__(self, model_params: dict = None, tools: list[Union[LlmTool, dict]] = None, messages: list[dict] = None, react_id: str = None, params: dict = None):
        self.model_params = model_params  # kwargs forwarded to the LLM client
        self.tools = tools                # available tools (LlmTool objects or plain dicts)
        self.messages = messages          # conversation history
        self.params = params              # extra prompt-template parameters
        self.react_id = react_id          # identifier of this ReAct session

    def to_dict(self):
        """Plain-dict form (key order: model_params, messages, tools, params, react_id)."""
        return dict(
            model_params=self.model_params,
            messages=self.messages,
            tools=self.tools,
            params=self.params,
            react_id=self.react_id,
        )

    def to_json(self):
        """JSON string form; non-ASCII kept as-is, odd values stringified."""
        return json.dumps(self.to_dict(), ensure_ascii=False, default=str)

    def __repr__(self):
        # Debug representation is simply the JSON serialization.
        return self.to_json()


def stool(name: str = None, description: str = None, parameters: dict = None, required: list[str] = None):
    '''
    Decorator that turns a plain function into an LLM tool with attached metadata.

    :param name: tool name; defaults to the function name
    :param description: tool description
    :param parameters: parameter schema, e.g. {
        "numa": {
            "type": "int",
            "description": "first number to add"
        }
    }
    :param required: list of mandatory parameter names, e.g. ["numa"]
    :return: the decorated tool may return a ReplyUser (sent straight to the
        frontend), a generator of ReplyUser (streamed to the user), or any
        other object (preferably text), which is fed back into the ReAct loop
        as the observation for further reasoning.
    '''

    def decorator(func):
        @wraps(func)
        def wrapper(args: dict, req: ReactReq):
            # This wrapper contains `yield`, so it is ALWAYS a generator —
            # results must be yielded, never returned: a `return value` inside
            # a generator only sets StopIteration.value and is invisible to
            # callers that iterate the wrapper.
            link = util.LinkInfo('execute_tool', name if name else func.__name__, {
                'args': args,
                'req': req
            })
            result = None
            failed = False
            try:
                link.step_in()
                if not isinstance(req, ReactReq):
                    raise ValueError("ReactReq must not be null")
                # Validate the arguments before invoking the tool.
                if args is None:
                    args = {}
                elif not isinstance(args, dict):
                    raise ValueError("tool args must be dict")
                if required:
                    for key in required:
                        if key not in args:
                            raise ValueError(f"Parameter {key} is required but not provided.")
                tool_res = func(args, req)
                if inspect.isgenerator(tool_res):
                    for tool_res_i in tool_res:
                        result = tool_res_i
                        yield result
                else:
                    # BUG FIX: was `return tool_res`, which silently discarded
                    # non-generator tool results for iterating callers.
                    result = tool_res
                    yield result
            except Exception as e:
                failed = True
                link.step_out(error=e)
                raise
            finally:
                # BUG FIX: previously step_out ran twice on failure (once with
                # error=..., once here with result=...). Record success only
                # when no exception was raised.
                if not failed:
                    link.step_out(result=result)
                link.digest_log()

        # Attach tool metadata so LlmTool.from_func / from_module can discover it.
        wrapper.name = name if name else func.__name__
        wrapper.description = description
        wrapper.parameters = parameters
        wrapper.required = required
        return wrapper

    return decorator


def tool_predict_stream(tools: list[Union[LlmTool, dict]] = None, msg: Union[str, dict, list[LlmMessage], list[dict]] = None, params: dict = None, observation: str = None, **kwargs) -> Generator[LlmToolRes, None, None]:
    """Run one streamed ReAct-mode LLM prediction and yield parsed LlmToolRes chunks.

    :param tools: available tools (LlmTool objects or ready-made dicts); must be non-empty
    :param msg: conversation as a string, a single message, or a list of messages
    :param params: extra prompt-template parameters
    :param observation: observation text from the previous tool execution, if any
    :param kwargs: forwarded to common_client.llm_predict_stream (e.g. model)
    """
    # BUG FIX: validate BEFORE normalizing — the old code ran the list
    # comprehension first, so tools=None crashed with TypeError instead of
    # yielding the invalid_param response.
    if not tools:
        yield LlmToolRes(LlmRes(status='invalid_param', message='react模式工具列表不可为空'))
        return
    tools = [item if isinstance(item, dict) else item.to_dict() for item in tools]
    # Normalize the messages into a list of dicts (copies — callers' dicts untouched).
    if isinstance(msg, str):
        msg = [{
            'role': 'user',
            'content': msg
        }]
    elif isinstance(msg, dict):
        msg = [msg.copy()]
    else:
        msg = [item.copy() if isinstance(item, dict) else item.to_dict() for item in msg]
    if msg[0].get('role') == 'system':
        yield LlmToolRes(LlmRes(status='invalid_param', message='react模式不支持system role，请使用sys_pre'))
        return

    tools_name, tools_desc = util.parse_llm_tools(tools)
    # `or ''` guards a missing 'model' kwarg ('gpt' in None raises TypeError).
    util.merge_react_messages(messages=msg, params=params, tools_name=tools_name, tools_desc=tools_desc, observation=observation, support_assistant_predict='gpt' in (kwargs.get('model') or ''))

    result = None
    link = util.LinkInfo('tool_client', 'tool_predict_stream', {
        'tools_name': tools_name
    })
    try:
        link.step_in()
        # Callers may not override stop: ReAct relies on stopping at "Observation:".
        kwargs.pop('stop', None)
        attempt = 0
        while attempt < 3:
            attempt += 1
            retry = False
            llm_res_gen = common_client.llm_predict_stream(msg=msg, params=params, stop=['Observation:', '\nObservation:'], **kwargs)
            for llm_res in llm_res_gen:
                if not llm_res.is_success():
                    result = LlmToolRes(llm_res)
                    yield result
                    return
                try:
                    thought, tool_name, tool_args, reply_user = util.parse_react_result(llm_res.result, llm_res.finished, tools_name)
                except Exception as e:
                    if attempt < 3:
                        # Abandon this stream and ask the model again.
                        retry = True
                        break
                    yield LlmToolRes(LlmRes(status='parse_react_result_error', message=str(e)))
                    return
                result = LlmToolRes(llm_res, thought, tool_name, tool_args, reply_user)
                yield result
                if llm_res.finished:
                    return
            # BUG FIX: the old code set attempt = 3 unconditionally here, which
            # also ran after the parse-failure `break` above and cancelled the
            # intended retry. Stop only when no retry was requested (i.e. the
            # stream was exhausted without a parse error).
            if not retry:
                break
    except Exception as e:
        link.step_out(error=e)
        yield LlmToolRes(LlmRes(status='code_error', message=str(e)))
    finally:
        link.step_out(result=result.to_dict() if result else None)
        link.digest_log()


def react_predict_stream(req: ReactReq) -> Generator[LlmRes, None, None]:
    """Drive a full ReAct loop (think -> pick tool -> execute -> observe), streaming LlmRes.

    Yields interim "thinking"/progress results while the model works, runs the
    tool it selects, and stops once a finished user-facing reply is produced.
    At most 5 tool rounds are attempted; any exception is converted into a
    final 'react_error' result instead of propagating to the caller.
    """
    msg = req.messages
    tools = req.tools
    params = req.params
    # Normalize the incoming messages into a list of dicts.
    if isinstance(msg, str):
        msg = [{
            'role': 'user',
            'content': msg
        }]
    elif isinstance(msg, dict):
        msg = [msg]
    else:
        msg = [item if isinstance(item, dict) else item.to_dict() for item in msg]

    # Look up a tool by name.
    # NOTE(review): assumes every entry in `tools` is LlmTool-like with a
    # `.name` attribute; a plain dict tool would raise AttributeError here —
    # confirm what callers actually pass in req.tools.
    def find(tools, tool_name):
        for item in tools:
            if item.name == tool_name:
                return item

    start_time = time.time()
    old_msg = msg
    # Work on shallow copies so the caller's message dicts are never mutated.
    current_msg = [item.copy() for item in msg]
    loop_num = 0
    try:
        # At most 5 think/act rounds.
        observation = None
        while loop_num < 5:
            loop_num += 1
            yield LlmRes(status=True, result=None, messages=old_msg, cost=round(time.time() - start_time, 1), finished=False, ext_info=getattr(merge_thread_local, 'merge_data', None), thought="思考一下...")
            thought = None
            tool_name = None
            tool_args = None
            llm_tool_res_gen = tool_predict_stream(tools=tools, msg=current_msg, params=params, observation=observation, **req.model_params)
            for llm_tool_res in llm_tool_res_gen:
                llm_tool_res.check_success()
                # Still thinking: forward the thought only.
                if llm_tool_res.is_only_thought():
                    yield LlmRes(status=llm_tool_res.status, result=None, messages=old_msg, cost=round(time.time() - start_time, 1), finished=llm_tool_res.finished, ext_info=getattr(merge_thread_local, 'merge_data', None), thought=llm_tool_res.thought)
                    continue
                # Direct reply to the end user.
                if llm_tool_res.is_reply_user():
                    yield LlmRes(status=llm_tool_res.status, result=llm_tool_res.reply_user, messages=old_msg, cost=round(time.time() - start_time, 1), finished=llm_tool_res.finished, ext_info=getattr(merge_thread_local, 'merge_data', None), thought=llm_tool_res.thought)
                    if llm_tool_res.finished:
                        return
                    else:
                        continue
                # Tool call fully parsed: remember it and keep draining the stream.
                if llm_tool_res.is_complete_tool():
                    tool_name = llm_tool_res.tool_name
                    tool_args = llm_tool_res.tool_args
                    thought = llm_tool_res.thought
                elif llm_tool_res.finished:
                    raise ValueError('结束了，未找到reply和tool信息')
                elif llm_tool_res.tool_name:
                    # Tool name known but arguments still streaming in.
                    yield LlmRes(status=llm_tool_res.status, result='', messages=old_msg, cost=round(time.time() - start_time, 1), finished=llm_tool_res.finished, ext_info=getattr(merge_thread_local, 'merge_data', None),
                                 thought=llm_tool_res.thought + f'\n正在准备{llm_tool_res.tool_name}的参数...' if llm_tool_res.thought else f'正在准备{llm_tool_res.tool_name}的参数...')
                    continue

            # Execute the selected tool.
            # NOTE(review): if the stream ended without a complete tool call,
            # tool_name is None and this raises '工具不存在' — the message is
            # slightly misleading for that case but is caught below anyway.
            tool: LlmTool = find(tools, tool_name)
            if not tool:
                raise ValueError('工具不存在')
            # Tell the user which tool is being executed.
            yield LlmRes(status=True, result=None, messages=old_msg, cost=round(time.time() - start_time, 1), finished=False, ext_info=getattr(merge_thread_local, 'merge_data', None), thought=f"正在执行:{tool_name},参数:{json.dumps(tool_args, ensure_ascii=False)}")
            observation = ''
            # tool.func is expected to be iterable — the @stool wrapper is
            # always a generator. NOTE(review): a manually-built tool whose
            # func returns a plain value would fail in the loop below.
            tool_res = tool.func(tool_args, req)
            for tool_res_i in tool_res:
                if tool_res_i is None:
                    util.log('tool_generator_is_null')
                    continue
                # Non-ReplyUser items become the observation; only the last one is kept.
                if not isinstance(tool_res_i, ReplyUser):
                    observation = str(tool_res_i)
                    continue
                yield LlmRes(status=True, result=tool_res_i.content, messages=old_msg, cost=round(time.time() - start_time, 1), finished=tool_res_i.finished, ext_info=getattr(merge_thread_local, 'merge_data', None), thought=tool_res_i.thought)
                if tool_res_i.finished:
                    return
            # Observation path: record the tool call in the history so the next
            # round can reason over it (the observation itself is passed via
            # the `observation` argument above).
            # If the last message is from the user, append an assistant turn.
            if current_msg[len(current_msg) - 1].get('role') == 'user':
                current_msg.append({
                    'role': 'assistant',
                    'content': f'Thought: {thought if thought else ""}\nAction: {tool_name}\nAction Input: {tool_args}'
                })

    except Exception as e:
        yield LlmRes(status='react_error', message=str(e), messages=old_msg, cost=round(time.time() - start_time, 1), finished=True, ext_info=getattr(merge_thread_local, 'merge_data', None))


def react_ops(req: ReactReq) -> Generator[LlmRes, None, None]:
    """Single-shot ReAct for chats where tool calls are simulated in the history.

    History turns whose content starts with the mock-tool marker are folded
    back into Thought/Action/Action Input assistant content (the user message
    that follows each marker serves as the Observation), then exactly ONE
    streamed prediction is made. When the model requests a tool, a mock-tool
    prompt is yielded for the operator to answer instead of executing anything.
    """
    msg = req.messages
    tools = req.tools
    params = req.params
    # Normalize the incoming messages into a list of dicts.
    if isinstance(msg, str):
        msg = [{
            'role': 'user',
            'content': msg
        }]
    elif isinstance(msg, dict):
        msg = [msg]
    else:
        msg = [item if isinstance(item, dict) else item.to_dict() for item in msg]
    # Keep only recent history (last 11 messages).
    # NOTE(review): the original comment said "5 rounds"; 11 messages is
    # presumably 5 assistant/user pairs plus the latest user turn — confirm.
    msg = msg[-11:]
    start_time = time.time()
    old_msg = msg
    # Shallow copies so the caller's message dicts are never mutated.
    current_msg = [item.copy() for item in msg]
    try:
        mock_tool_keywords = "$模拟工具调用："
        # Fold the trailing run of mock-tool exchanges back into ReAct format.
        first_observation = None
        last_assistant_contents = []
        # NOTE(review): assumes message 'content' is always a str — a None
        # content at position -2 would raise AttributeError on startswith.
        while len(current_msg) >= 2 and current_msg[-2].get('content').startswith(mock_tool_keywords):
            tool_content = current_msg[-2].get('content')
            # Expected layout (built in the is_complete_tool branch below):
            # $模拟工具调用：<tool_name>\n思考：<thought>\n参数：<args>\n请注意：…
            # Extract each field by splitting on the literal markers.
            parts = tool_content.split(mock_tool_keywords)
            tool_name = parts[1].split("\n思考：")[0] if len(parts) > 1 else None

            parts = tool_content.split("\n思考：")
            tool_thought = parts[1].split("\n参数：")[0] if len(parts) > 1 else None

            parts = tool_content.split("\n参数：")
            tool_args = parts[1].split("\n请注意：你的回答将被视为本次工具调用的结果。在你回答之后，后面的对话不会包含工具模拟的记录。")[0] if len(parts) > 1 else None

            # The user's answer to the mock prompt is the observation.
            observation = current_msg[-1].get('content') if current_msg[-1].get('content') else ''
            if first_observation is None:
                # Most recent exchange: its observation is passed separately
                # to tool_predict_stream below, so omit it from the content.
                first_observation = observation
                last_assistant_contents.append(f'Thought: {tool_thought}\nAction: {tool_name}\nAction Input: {tool_args}')
            else:
                last_assistant_contents.append(f'Thought: {tool_thought}\nAction: {tool_name}\nAction Input: {tool_args}\nObservation: {observation}')
            current_msg = current_msg[:-2]
        # History ended with tool activity: append it as one assistant turn.
        if last_assistant_contents:
            # Collected newest-first above; restore chronological order.
            last_assistant_contents.reverse()
            current_msg.append({
                'role': 'assistant',
                'content': '\n'.join(last_assistant_contents)
            })

        # Drop any mock-tool assistant turn (plus the user answer after it)
        # that remains in the middle of the history.
        i = 0
        while i < len(current_msg):
            if current_msg[i]['role'] == 'assistant' and current_msg[i]['content'].startswith(mock_tool_keywords):
                # remove this message and the following user message
                del current_msg[i:i + 2]
            else:
                i += 1

        yield LlmRes(status=True, result=None, messages=old_msg, cost=round(time.time() - start_time, 1), finished=False, ext_info=getattr(merge_thread_local, 'merge_data', None), thought="思考一下...")
        llm_tool_res_gen = tool_predict_stream(tools=tools, msg=current_msg, params=params, observation=first_observation, **req.model_params)

        for llm_tool_res in llm_tool_res_gen:
            llm_tool_res.check_success()
            # Still thinking: forward the thought only.
            if llm_tool_res.is_only_thought():
                yield LlmRes(status=llm_tool_res.status, result=None, messages=old_msg, cost=round(time.time() - start_time, 1), finished=llm_tool_res.finished, ext_info=llm_tool_res.ext_info, thought=llm_tool_res.thought)
                continue
            # Direct reply to the end user.
            if llm_tool_res.is_reply_user():
                yield LlmRes(status=llm_tool_res.status, result=llm_tool_res.reply_user, messages=old_msg, cost=round(time.time() - start_time, 1), finished=llm_tool_res.finished, ext_info=llm_tool_res.ext_info, thought=llm_tool_res.thought)
                if llm_tool_res.finished:
                    return
                else:
                    continue
            # Full tool call parsed: emit the mock-tool prompt (parsed again by
            # the while-loop above on the next call) and stop — the tool is
            # never executed here.
            if llm_tool_res.is_complete_tool():
                tool_name = llm_tool_res.tool_name
                tool_args = llm_tool_res.tool_args
                yield LlmRes(status=llm_tool_res.status, result=f'{mock_tool_keywords}{tool_name}\n思考：{llm_tool_res.thought}\n参数：{json.dumps(tool_args, ensure_ascii=False)}\n请注意：你的回答将被视为本次工具调用的结果。在你回答之后，后面的对话不会包含工具模拟的记录。', messages=old_msg,
                             cost=round(time.time() - start_time, 1), finished=llm_tool_res.finished, ext_info=llm_tool_res.ext_info, thought=llm_tool_res.thought)
                return
            if llm_tool_res.finished:
                raise ValueError('结束了，未找到reply和tool信息')
            # Tool name known but arguments still streaming in.
            if llm_tool_res.tool_name:
                yield LlmRes(status=llm_tool_res.status, result='', messages=old_msg, cost=round(time.time() - start_time, 1), finished=llm_tool_res.finished, ext_info=llm_tool_res.ext_info,
                             thought=llm_tool_res.thought + f'\n正在准备{llm_tool_res.tool_name}的参数...' if llm_tool_res.thought else f'正在准备{llm_tool_res.tool_name}的参数...')
                continue
    except Exception as e:
        yield LlmRes(status='react_error', message=str(e), messages=old_msg, cost=round(time.time() - start_time, 1), finished=True, ext_info=None)
