import requests
import json
import logging
import copy
import os
import json
import configparser
from abc import ABC
from typing import Dict, Iterator, List, Literal, Optional, Union

from typing import Dict, Optional,List,Iterator
from pprint import pformat
from utils.actions.llm.base import LLM_REGISTRY
from utils.actions.tools.openapi_plugin import OpenAPIPluginTool
from utils.model_tools import req_unicom_llm_chat
from utils.actions.llm.base import BaseChatModel,register_llm,ModelServiceError
from utils.actions.llm.schema import ASSISTANT, FUNCTION, SYSTEM, USER, ContentItem, FunctionCall, Message
from utils.actions.llm.schema import DEFAULT_SYSTEM_MESSAGE, SYSTEM, USER, Message
from utils.actions.log import logger
from utils.actions.settings import DEFAULT_MAX_INPUT_TOKENS
from utils.actions.utils.tokenization_qwen import tokenizer
from utils.actions.llm.function_calling import BaseFnCallModel
from utils.actions.utils.utils import (extract_text_from_message, format_as_multimodal_message, has_chinese_messages,
                                    merge_generate_cfgs, print_traceback)

from utils.actions.llm.base import BaseChatModel
from utils.actions.llm.schema import ASSISTANT, FUNCTION, SYSTEM, USER, ContentItem, FunctionCall, Message

# Service credentials. Environment variables take precedence; the literals
# below are legacy defaults kept only for backward compatibility.
# WARNING(security): these fallback secrets are committed to source control —
# they should be rotated and removed, and supplied via the environment or a
# secrets manager instead.
APP_ID = os.getenv('UNICOM_APP_ID', '1ca9025978c748279e7a5b40a9662d9b')
API_KEY = os.getenv('UNICOM_API_KEY', '750aa9b2ba714c1a835f0e1a2edac7ca')
SECRET_KEY = os.getenv('UNICOM_SECRET_KEY', 'd48b30516cde43309ae71ca6c162402c')

# NOTE(review): this deliberately rebinds the `logger` imported from
# utils.actions.log above to this module's own stdlib logger; the rest of the
# module logs through the standard logging machinery.
logger = logging.getLogger(__name__)



# Model selection: config.ini supplies the defaults and environment variables
# override them.
# NOTE(review): ConfigParser.read() silently ignores a missing config.ini, in
# which case the ["MODELS"] lookups below raise KeyError at import time —
# confirm config.ini always ships alongside this module.
config = configparser.ConfigParser()
config.read('config.ini')
MODEL_NAME_CONFIG = config["MODELS"]["default_llm"]
MODEL_NAME = os.getenv('CUAI_DEFAULT_LLM_MODEL_ID', MODEL_NAME_CONFIG)

MODEL_URL_CONFIG = config["MODELS"]["model_url"]
MODEL_URL = os.getenv('CUAI_DEFAULT_LLM_MODEL_URL', MODEL_URL_CONFIG)

@register_llm('unicomllm')
class UnicomLLM(BaseChatModel):
    """Chat-model backend for the Unicom LLM service.

    Sends conversations through ``req_unicom_llm_chat`` and implements
    text-template function calling: tool invocations are serialised into the
    assistant text using the ✿FUNCTION✿ / ✿ARGS✿ / ✿RESULT✿ / ✿RETURN✿
    markers (``FN_NAME``/``FN_ARGS``/``FN_RESULT``/``FN_EXIT``, defined later
    in this module) and parsed back out of the model output into structured
    ``function_call`` messages.
    """

    def _chat_no_stream(self, messages: List[Dict],model_name,model_url, **kwargs):
        """Single non-streaming chat request.

        Args:
            messages: Conversation messages. NOTE(review): despite the
                ``List[Dict]`` annotation these are pydantic ``Message``
                objects — they are ``model_dump()``-ed below.
            model_name: Model identifier forwarded to the service.
            model_url: Service endpoint URL.

        Returns:
            A one-element list containing the assistant reply.

        Raises:
            ModelServiceError: wrapping any exception raised by the request.
        """
        messages = [msg.model_dump() for msg in messages]
        # logger.info(f"Prompt sent to the LLM: {messages}")
        # logger.debug(f'*{pformat(messages, indent=2)}*')
        try:
            response = req_unicom_llm_chat(messages=messages,  model_name=model_name, model_url = model_url,stream=False,do_sample=False)
            # logger.info(f"LLM response: {response.text}")
            # content = response.json()['data']['choices'][0]['message']['content']
            # The raw response body is used directly as the reply content.
            content = response.content
            return [Message(ASSISTANT, content)]
        except Exception as ex:
            raise ModelServiceError(exception=ex)

    def _chat_stream(
        self,
        messages: List[Message],
        model_name,
        model_url,
        delta_stream: bool,
        generate_cfg: dict,
    ) -> Iterator[List[Message]]:
        """Streaming chat request; yields one assistant message per chunk.

        Note: ``delta_stream`` and ``generate_cfg`` are currently ignored by
        this implementation — chunks are always yielded as they arrive from
        the service (the dead string block below shows a previous approach).

        Raises:
            ModelServiceError: wrapping any exception raised by the request.
        """
        messages = [msg.model_dump() for msg in messages]
        # logger.info(f"\n\nPrompt sent to the LLM: {messages}\n\n")
        # print(f"\n\nPrompt sent to the LLM: {messages}\n\n")
        # logger.debug(f'*{pformat(messages, indent=2)}*')
        try:
            response = req_unicom_llm_chat(messages=messages,model_name=model_name, model_url = model_url,stream=True,do_sample=False)
            # logger.info(f"\n\nLLM response: {response.text}\n\n")
            # for chunk in response.iter_lines():
            #    yield [Message(ASSISTANT, chunk)]
            
            for chunk in response:
                chunk = chunk.content
                # logger.info(f"test_chunk:{chunk}")
                yield [Message(ASSISTANT, chunk)]
            '''
            if delta_stream:
                for chunk in response:
                    if hasattr(chunk.choices[0].delta, 'content') and chunk.choices[0].delta.content:
                        yield [Message(ASSISTANT, chunk.choices[0].delta.content)]
            else:
                full_response = ''
                for chunk in response.iter_lines(decode_unicode=True):
                    if hasattr(chunk.choices[0].delta, 'content') and chunk.choices[0].delta.content:
                        full_response += chunk.choices[0].delta.content
                        yield [Message(ASSISTANT, full_response)]
            '''
        except Exception as ex:
            raise ModelServiceError(exception=ex)

    def _preprocess_messages(self, messages: List[Message], lang: Literal['en', 'zh'],
                             generate_cfg: dict) -> List[Message]:
        """Run base preprocessing, then apply function-call handling.

        With ``function_choice='none'`` all function-call traffic is folded
        into plain user text; otherwise messages are rewritten into the
        ✿FUNCTION✿ text template.
        """
        messages = super()._preprocess_messages(messages, lang=lang, generate_cfg=generate_cfg)
        if generate_cfg.get('function_choice', 'auto') == 'none':
            messages = self._remove_fncall_messages(messages, lang=lang)
        else:
            messages = self._preprocess_fncall_messages(messages)
        return messages

    def _remove_fncall_messages(self, messages: List[Message], lang: Literal['en', 'zh']) -> List[Message]:
        # Change function calls into user messages so that the model won't try
        # to generate function calls when given functions and function_choice="none".
        new_messages = []
        for msg in messages:
            if (msg.role == FUNCTION) or msg.function_call:
                # Fold both the call and its result into the latest user turn,
                # creating one if necessary.
                if (not new_messages) or (new_messages[-1].role != USER):
                    new_messages.append(Message(role=USER, content=[]))
                if msg.function_call:
                    tool_name = msg.function_call.name
                    tool_args = msg.function_call.arguments
                    if lang == 'zh':
                        tool_text = f'\n\n工具"{tool_name}"被调用时使用了以下参数：\n{tool_args}'
                    else:
                        tool_text = f'\n\nThe tool "{tool_name}" was called with these arguments:\n{tool_args}'
                else:
                    assert msg.role == FUNCTION
                    if msg.content:
                        assert len(msg.content) == 1
                        assert isinstance(msg.content[0], ContentItem)
                        assert isinstance(msg.content[0].text, str)
                        tool_result = msg.content[0].text
                    else:
                        tool_result = 'No result.'
                    if lang == 'zh':
                        tool_text = f'\n\n该工具返回了以下结果：\n{tool_result}'
                    else:
                        tool_text = f'\n\nThe tool has returned the following result:\n{tool_result}'
                new_messages[-1].content.append(ContentItem(text=tool_text))
            else:
                if (msg.role == USER) and new_messages and (new_messages[-1].role == USER):
                    # Separate two user messages with an assistant message to make the bot focus on the latter:
                    new_messages.append(Message(role=ASSISTANT, content=[ContentItem(text='...')]))
                new_messages.append(msg)
        return new_messages

    def _preprocess_fncall_messages(self, messages: List[Message]) -> List[Message]:
        """Convert messages with function_call key and function role to assistant's content, which is
            for chat interface or text_completion interface that do not support functions.

        Assistant function calls become '✿FUNCTION✿: name / ✿ARGS✿: args'
        text; function results become '✿RESULT✿: ... ✿RETURN✿: ' text
        appended to the preceding assistant message. Consecutive assistant
        messages are merged.

        Raises:
            ValueError: via validate_num_fncall_results when trailing
                function results don't match the preceding function calls.
            TypeError: on an unknown message role.
        """
        validate_num_fncall_results(messages)
        new_messages = []
        for msg in copy.deepcopy(messages):
            role, content = msg.role, msg.content
            if role in (SYSTEM, USER):
                new_messages.append(msg)
            elif role == ASSISTANT:
                content = (content or [])
                fn_call = msg.function_call
                if fn_call:
                    f_name = fn_call.name
                    f_args = fn_call.arguments
                    if f_args.startswith('```'):  # if code snippet
                        f_args = '\n' + f_args  # for markdown rendering
                    # Separate from the previous assistant text with a newline
                    # when this call will be merged into it.
                    func_content = '\n' if new_messages[-1].role == ASSISTANT else ''
                    func_content += f'{FN_NAME}: {f_name}'
                    func_content += f'\n{FN_ARGS}: {f_args}'
                    content.append(ContentItem(text=func_content))
                if new_messages[-1].role == ASSISTANT:
                    new_messages[-1].content += content
                else:
                    new_messages.append(Message(role=role, content=content))
            elif role == FUNCTION:
                # A function result must directly follow the assistant message
                # that issued the call.
                assert new_messages[-1].role == ASSISTANT
                assert isinstance(content, list)
                if content:
                    assert len(content) == 1
                    assert isinstance(content[0], ContentItem)
                    f_result = content[0].text
                    assert f_result is not None
                else:
                    f_result = ''
                f_exit = f'\n{FN_EXIT}: '
                last_text_content = new_messages[-1].content[-1].text
                if last_text_content.endswith(f_exit):
                    # Drop the previous result's dangling '✿RETURN✿: ' before
                    # appending the next result (parallel calls).
                    new_messages[-1].content[-1].text = last_text_content[:-len(f_exit)]
                new_messages[-1].content += [ContentItem(text=f'\n{FN_RESULT}: {f_result}{f_exit}')]
            else:
                raise TypeError

        # Remove ': ' for continued generation of function calling,
        # because ': ' may form a single token with its following words
        if new_messages[-1].role == ASSISTANT:
            last_msg = new_messages[-1].content
            for i in range(len(last_msg) - 1, -1, -1):
                item_type, item_text = last_msg[i].get_type_and_value()
                if item_type == 'text':
                    if item_text.endswith(f'{FN_EXIT}: '):
                        last_msg[i].text = item_text[:-2]
                    break
        return new_messages

    def _chat_with_functions(
            self,
            messages: List[Message],
            functions: List[Dict],
            function_calls_list: List[Dict],
            model_name,
            model_url,
            stream: bool,
            delta_stream: bool,
            generate_cfg: dict,
            lang: Literal['en', 'zh'],
    ) -> Union[List[Message], Iterator[List[Message]]]:
        """Chat with tool support via the text-based function-call template.

        NOTE(review): ``functions`` and ``function_calls_list`` are currently
        unused here — the system-prompt injection of tool descriptions
        (_prepend_fncall_system) is commented out below; presumably the
        service side already knows the tools. Confirm against the caller.

        Raises:
            NotImplementedError: if ``delta_stream`` is True.
        """
        if delta_stream:
            raise NotImplementedError('Please use stream=True with delta_stream=False, because delta_stream=True'
                                      ' is not implemented for function calling due to some technical reasons.')
        parallel_function_calls = generate_cfg.get('parallel_function_calls', False)
        # print(f"Original messages: {messages}")
        # messages = self._prepend_fncall_system(
        #     messages=messages,
        #     functions=functions,
        #     lang=lang,
        #     parallel_function_calls=parallel_function_calls,
        # )
        # print(f"Messages after configuration: {messages}")


        # When a specific function is forced, seed (or continue) a trailing
        # assistant message ending with '✿FUNCTION✿: <choice>' so the model
        # only has to complete the arguments.
        fn_choice = generate_cfg.get('function_choice', 'auto')
        if fn_choice not in ('auto', 'none'):
            if messages[-1].role == ASSISTANT:
                msg_to_cont = copy.deepcopy(messages[-1])
                # NOTE(review): assumes string content here (endswith/+=);
                # a list-of-ContentItem tail message would break — confirm.
                if msg_to_cont.content.endswith(FN_EXIT):
                    msg_to_cont.content += ': '
                msg_to_cont.content += '\n'
                messages = messages[:-1]
            else:
                msg_to_cont = Message(role=ASSISTANT, content='')
            msg_to_cont.content += f'{FN_NAME}: {fn_choice}'
            messages = messages + [msg_to_cont]


        # Strip the fncall-only knobs before handing the config downstream.
        generate_cfg = copy.deepcopy(generate_cfg)
        for k in ['parallel_function_calls', 'function_choice']:
            if k in generate_cfg:
                del generate_cfg[k]

        return self._continue_assistant_response(messages, model_name = model_name,model_url = model_url,generate_cfg=generate_cfg, stream=stream)

    '''
    def _prepend_fncall_system(
            self,
            messages: List[Message],
            functions: List[Dict],
            lang: Literal['en', 'zh'],
            parallel_function_calls: bool = False,
    ) -> List[Message]:
        tool_desc_template = FN_CALL_TEMPLATE[lang + ('_parallel' if parallel_function_calls else '')]
        tool_descs = '\n\n'.join(get_function_description(function, lang=lang) for function in functions)
        tool_names = ','.join(function.get('name', function.get('name_for_model', '')) for function in functions)
        tool_system = tool_desc_template.format(tool_descs=tool_descs, tool_names=tool_names)

        assert messages[0].role == SYSTEM
        messages = copy.deepcopy(messages[:1]) + messages[1:]
        if isinstance(messages[0].content, str):
            messages[0].content += '\n\n' + tool_system
        else:
            messages[0].content.append(ContentItem(text='\n\n' + tool_system))

        return messages
    '''


    def _continue_assistant_response(
            self,
            messages: List[Message],
            model_name,
            model_url ,
            generate_cfg: dict,
            stream: bool,
    ) -> Iterator[List[Message]]:
        """Continue a partial assistant reply by simulating text completion.

        If the conversation ends with an assistant message, it is merged into
        the preceding user message (separated by a blank line) so a plain
        chat-completion endpoint effectively continues the assistant text.
        """
        # Simulate text completion with chat completion
        if messages and messages[-1].role == ASSISTANT:
            assert len(messages) > 1 and messages[-2].role == USER
            assert messages[-1].function_call is None
            usr = messages[-2].content
            bot = messages[-1].content
            sep = '\n\n'
            if isinstance(usr, str) and isinstance(bot, str):
                usr = usr + sep + bot
            elif isinstance(usr, list) and isinstance(bot, list):
                usr = usr + [ContentItem(text=sep)] + bot
            else:
                raise NotImplementedError
            text_to_complete = copy.deepcopy(messages[-2])
            text_to_complete.content = usr
            messages = messages[:-2] + [text_to_complete]
        return self._chat(messages, model_name = model_name,model_url = model_url,stream=stream, delta_stream=False, generate_cfg=generate_cfg)

    def _postprocess_messages(
            self,
            messages: List[Message],
            fncall_mode: bool,
            generate_cfg: dict,
    ) -> List[Message]:
        """Run base postprocessing, then parse template function calls.

        When a specific function_choice was forced, its '✿FUNCTION✿: name'
        prefix (which the model never emitted itself) is re-attached before
        parsing, but only if the model actually completed the arguments.
        """
        messages = super()._postprocess_messages(messages, fncall_mode=fncall_mode, generate_cfg=generate_cfg)
        if fncall_mode:
            fn_choice = generate_cfg.get('function_choice', 'auto')
            if fn_choice not in ('auto', 'none'):
                messages = copy.deepcopy(messages)
                output = messages[0].content[0].text
                if output.lstrip().startswith(FN_ARGS):
                    # Prepend this fn_choice prefix only if the model correctly completes it
                    output = f'{FN_NAME}: {fn_choice}\n' + output
                messages[0].content[0].text = output
            messages = self._postprocess_fncall_messages(messages)
        return messages

    def _postprocess_fncall_messages(self, messages: List[Message]) -> List[Message]:
        """
        If the model calls function by built-in function call template,
        convert and display it in function_call format.

        Text before the first '✿FUNCTION✿:' marker is kept as a plain
        assistant message; each 'name / ✿ARGS✿: args' pair becomes an
        assistant message carrying a FunctionCall; text after the calls is
        discarded.
        """

        # Remove ': ' brought by continued generation of function calling
        last_msg = messages[-1].content
        for i in range(len(last_msg)):
            item_type, item_text = last_msg[i].get_type_and_value()
            if item_type == 'text':
                if item_text.startswith(': '):
                    last_msg[i].text = item_text[2:]
                elif item_text.startswith(':'):
                    last_msg[i].text = item_text[1:]
                break

        new_messages = []
        for msg in messages:
            role, content = msg.role, msg.content
            assert isinstance(content, list)

            if role in (SYSTEM, USER):
                new_messages.append(Message(role=role, content=content))
                continue

            new_content = []
            for item in content:
                item_type, item_text = item.get_type_and_value()

                if item_type != 'text':  # multimodal
                    new_content.append(item)
                    continue

                for stop_word in [FN_RESULT, FN_EXIT]:
                    assert stop_word in FN_STOP_WORDS
                    assert stop_word not in item_text, 'Something wrong, stop words are expected to be excluded.'

                i = item_text.find(f'{FN_NAME}:')

                # If no function call:
                if i < 0:
                    show_text = remove_incomplete_special_tokens(item_text)
                    if show_text:
                        new_content.append(ContentItem(text=show_text))
                    continue

                # If it says something before function call:
                if i > 0:
                    answer = item_text[:i].lstrip('\n').rstrip()
                    if answer.endswith('\n'):
                        answer = answer[:-1]
                    show_text = remove_incomplete_special_tokens(answer)
                    if show_text:
                        new_content.append(ContentItem(text=show_text))
                    if new_content:
                        new_messages.append(Message(
                            role=role,
                            content=new_content,
                        ))  # split thought and function call
                        new_content = []
                    item_text = item_text[i:]

                # If has function call:
                for part in item_text.split(f'{FN_NAME}:'):
                    if not part:
                        continue
                    if part.endswith('\n'):
                        part = part[:-1]

                    arg_sep = f'{FN_ARGS}:'
                    i = part.find(arg_sep)
                    if i < 0:
                        fn_name = part.strip()
                        list_of_fn_args = ['']
                    else:
                        fn_name = part[:i].strip()
                        # Multiple '✿ARGS✿:' after one name = parallel calls
                        # of the same function with different arguments.
                        list_of_fn_args = [_.strip() for _ in part[i + len(arg_sep):].split(arg_sep)]
                    fn_name = remove_incomplete_special_tokens(fn_name)
                    for fn_args in list_of_fn_args:
                        fn_args = remove_incomplete_special_tokens(fn_args)
                        fn_args = remove_trailing_comment_of_fn_args(fn_args)
                        new_messages.append(
                            Message(
                                role=ASSISTANT,
                                content=[],
                                function_call=FunctionCall(
                                    name=fn_name,
                                    arguments=fn_args,
                                ),
                            ))
                # Break here and discard the text after function call
                return new_messages

            if new_content:
                new_messages.append(Message(role=role, content=new_content))
        return new_messages

def validate_num_fncall_results(messages: List[Message]):
    """Check that the trailing function-result messages match the
    function_call messages immediately preceding them.

    Walks backwards over the conversation: first the run of role="function"
    results, then the run of assistant messages carrying function_call.
    Raises ValueError when counts differ or names are out of order.
    """
    idx = len(messages) - 1

    # Names of the trailing role="function" results, oldest first.
    fn_results = []
    while messages[idx].role == FUNCTION:
        fn_results.insert(0, messages[idx].name)
        idx -= 1

    # Names of the function_call messages directly before those results.
    fn_calls = []
    while messages[idx].function_call:
        fn_calls.insert(0, messages[idx].function_call.name)
        idx -= 1

    if len(fn_calls) != len(fn_results):
        raise ValueError(f'Expecting {len(fn_calls)} function results (i.e., messages with role="function") '
                         f'but received {len(fn_results)} function results. '
                         'The number of function results must match that of the function_call messages.')

    for fc_name, fr_name in zip(fn_calls, fn_results):
        if fr_name and (fc_name != fr_name):
            raise ValueError('The function results (i.e., the messages with role="function" ) must be '
                             'put in the same order as the function_call messages. And the function names must match.'
                             f'The function results are currently {fn_results}. But {fn_calls} are expected.')



# Sentinel markers used to serialise function calls into plain assistant
# text, and to parse them back out of model output.
FN_NAME = '✿FUNCTION✿'
FN_ARGS = '✿ARGS✿'
FN_RESULT = '✿RESULT✿'
FN_EXIT = '✿RETURN✿'
# Generation stop words: the model must stop before inventing tool results.
FN_STOP_WORDS = [FN_RESULT, FN_EXIT]

# System-prompt fragments listing the available tools (zh/en).
FN_CALL_TEMPLATE_INFO_ZH = """# 工具

## 你拥有如下工具：

{tool_descs}"""

FN_CALL_TEMPLATE_INFO_EN = """# Tools

## You have access to the following tools:

{tool_descs}"""

# Usage instructions for single (sequential) tool calls.
FN_CALL_TEMPLATE_FMT_ZH = """## 你可以在回复中插入零次、一次或多次以下命令以调用工具：

%s: 工具名称，必须是[{tool_names}]之一。
%s: 工具输入
%s: 工具结果
%s: 根据工具结果进行回复，需将图片用![](url)渲染出来""" % (
    FN_NAME,
    FN_ARGS,
    FN_RESULT,
    FN_EXIT,
)

FN_CALL_TEMPLATE_FMT_EN = """## When you need to call a tool, please insert the following command in your reply, which can be called zero or multiple times according to your needs:

%s: The tool to use, should be one of [{tool_names}]
%s: The input of the tool
%s: Tool results
%s: Reply based on tool results. Images need to be rendered as ![](url)""" % (
    FN_NAME,
    FN_ARGS,
    FN_RESULT,
    FN_EXIT,
)

# Usage instructions for parallel tool calls (N name/args pairs first, then
# N results).
FN_CALL_TEMPLATE_FMT_PARA_ZH = """## 你可以在回复中插入以下命令以并行调用N个工具：

%s: 工具1的名称，必须是[{tool_names}]之一
%s: 工具1的输入
%s: 工具2的名称
%s: 工具2的输入
...
%s: 工具N的名称
%s: 工具N的输入
%s: 工具1的结果
%s: 工具2的结果
...
%s: 工具N的结果
%s: 根据工具结果进行回复，需将图片用![](url)渲染出来""" % (
    FN_NAME,
    FN_ARGS,
    FN_NAME,
    FN_ARGS,
    FN_NAME,
    FN_ARGS,
    FN_RESULT,
    FN_RESULT,
    FN_RESULT,
    FN_EXIT,
)

FN_CALL_TEMPLATE_FMT_PARA_EN = """## Insert the following command in your reply when you need to call N tools in parallel:

%s: The name of tool 1, should be one of [{tool_names}]
%s: The input of tool 1
%s: The name of tool 2
%s: The input of tool 2
...
%s: The name of tool N
%s: The input of tool N
%s: The result of tool 1
%s: The result of tool 2
...
%s: The result of tool N
%s: Reply based on tool results. Images need to be rendered as ![](url)""" % (
    FN_NAME,
    FN_ARGS,
    FN_NAME,
    FN_ARGS,
    FN_NAME,
    FN_ARGS,
    FN_RESULT,
    FN_RESULT,
    FN_RESULT,
    FN_EXIT,
)

# Complete system-prompt templates keyed by language, with '_parallel'
# variants for parallel function calling. Both expect {tool_descs} and
# {tool_names} to be filled in by the caller.
FN_CALL_TEMPLATE = {
    'zh': FN_CALL_TEMPLATE_INFO_ZH + '\n\n' + FN_CALL_TEMPLATE_FMT_ZH,
    'en': FN_CALL_TEMPLATE_INFO_EN + '\n\n' + FN_CALL_TEMPLATE_FMT_EN,
    'zh_parallel': FN_CALL_TEMPLATE_INFO_ZH + '\n\n' + FN_CALL_TEMPLATE_FMT_PARA_ZH,
    'en_parallel': FN_CALL_TEMPLATE_INFO_EN + '\n\n' + FN_CALL_TEMPLATE_FMT_PARA_EN,
}


def get_function_description(function: Dict, lang: Literal['en', 'zh']) -> str:
    """
    Text description of function

    Renders one tool/function spec as the human-readable block inserted into
    the function-calling system prompt.

    Args:
        function: Tool spec. Must provide a name via 'name',
            'name_for_human' or 'name_for_model'; 'description' (or
            'description_for_model'), 'parameters' and 'args_format' are
            optional.
        lang: Output language, 'en' or 'zh'.

    Returns:
        The formatted description with trailing whitespace stripped.

    Raises:
        AssertionError: if no usable name is present.
    """
    tool_desc_template = {
        'zh': '### {name_for_human}\n\n{name_for_model}: {description_for_model} 输入参数：{parameters} {args_format}',
        'en': '### {name_for_human}\n\n{name_for_model}: {description_for_model} Parameters: {parameters} {args_format}'
    }
    tool_desc = tool_desc_template[lang]
    name = function.get('name', None)
    name_for_human = function.get('name_for_human', name)
    name_for_model = function.get('name_for_model', name)
    assert name_for_human and name_for_model

    if name_for_model == 'code_interpreter':
        args_format = {
            'zh': '此工具的输入应为Markdown代码块。',
            'en': 'Enclose the code within triple backticks (`) at the beginning and end of the code.',
        }
    else:
        args_format = {
            'zh': '此工具的输入应为JSON对象。',
            'en': 'Format the arguments as a JSON object.',
        }
    args_format = function.get('args_format', args_format[lang])

    # Use .get() so tools that omit 'description' or 'parameters' render
    # gracefully instead of raising KeyError at prompt-build time.
    description = function.get('description', function.get('description_for_model', ''))
    parameters = function.get('parameters', {})
    return tool_desc.format(name_for_human=name_for_human,
                            name_for_model=name_for_model,
                            description_for_model=description,
                            parameters=json.dumps(parameters, ensure_ascii=False),
                            args_format=args_format).rstrip()


# Mainly for removing incomplete trailing special tokens when streaming the output
def remove_incomplete_special_tokens(text: str) -> str:
    """Strip a trailing special token — complete or truncated — from *text*.

    During streaming a ✿…✿ marker may arrive cut in half; anything from the
    last '✿' onwards that prefixes a known token is removed, as is any
    complete token sitting at the end of the text.
    """
    special_tokens = (FN_NAME, FN_ARGS, FN_RESULT, FN_EXIT)
    text = text.rstrip()
    if text.endswith(special_tokens):
        # A complete marker at the tail: drop exactly that token.
        for token in special_tokens:
            if text.endswith(token):
                text = text[:-len(token)]
                break
    else:
        # Possibly a half-streamed marker: take everything from the last
        # marker character and drop it if it prefixes any special token.
        pos = text.rfind('✿')
        tail = text[pos:]
        for token in special_tokens:
            if token.startswith(tail):
                text = text[:pos]
                break
    return text.lstrip('\n').rstrip()


# For hotfix badcases such as `{"arg1": "value1"} <!-- this is an example comment -->`.
def remove_trailing_comment_of_fn_args(fn_args: str):
    """Trim trailing free-text after a JSON object or fenced code block.

    Some models append commentary after the function arguments; everything
    past the final '}' (for JSON) or closing ``` fence (for code) is cut.
    Other inputs are returned stripped but otherwise unchanged.
    """
    args = fn_args.strip()

    if args.startswith('{'):
        end = args.rfind('}')
        return args[:end + 1] if end > 0 else args

    if args.startswith('```'):
        fence = args.rfind('\n```')
        return args[:fence + 4] if fence > 0 else args

    return args








