import multiprocessing
from tqdm import tqdm
from typing import Any, List, Mapping, Optional, Sequence
from functools import partial
from concurrent.futures import ThreadPoolExecutor


def _string_to_dict(to_convert):
    # r"""Converts a string with equal signs to dictionary. E.g.
    # >>> string_to_dict(" name=user university=stanford")
    # {'name': 'user', 'university': 'stanford'}
    # """
    return {s.split("=", 1)[0]: s.split("=", 1)[1] for s in to_convert.split(" ") if len(s) > 0}


class DalchemyLLM(object):
    """Base class for LLM wrappers.

    Subclasses must implement :meth:`__call__` (one prompt in, completion
    text out) and :meth:`set_config` (parameter setup).
    """

    # Cumulative money spent by this process across calls, in RMB.
    # NOTE(review): class attribute, so it is shared by all instances unless
    # a subclass shadows it on the instance — confirm that is intended.
    cost_rmb: int = 0

    def __init__(self, **kwargs):
        super().__init__()
        # Initialize attributes from keyword arguments. Keys that do not
        # match an already-declared attribute are silently ignored.
        for key, value in kwargs.items():
            if hasattr(self, key):
                setattr(self, key, value)
        self.set_config()

    def __call__(self, prompt: str, is_chat: bool) -> str:
        """Return the completion text for ``prompt``. Must be overridden."""
        # Fixed: ``raise NotImplemented`` is a bug — NotImplemented is not an
        # exception, so raising it produces a confusing TypeError.
        raise NotImplementedError

    def set_config(self):
        """Configure subclass-specific parameters. Must be overridden."""
        raise NotImplementedError

    # TODO: decide whether chat history should be passed through here.
    def generate(self, prompts: List[str], is_chat: bool = False, num_procs: int = 1) -> List[str]:
        """Generate one completion per prompt.

        Args:
            prompts: batch of prompt strings.
            is_chat: forwarded to :meth:`__call__`.
            num_procs: when > 1, run concurrently via
                :meth:`_parallel_generate`; otherwise process sequentially.

        Returns:
            Completions in the same order as ``prompts``.
        """
        if num_procs > 1:
            results = self._parallel_generate(prompts, is_chat=is_chat, num_procs=num_procs)
        else:
            results = [self.__call__(prompt, is_chat) for prompt in tqdm(prompts)]
        return results

    def _parallel_generate_v1(self, prompt_batch: List[str],
                              is_chat: bool = False,
                              num_procs: Optional[int] = None,
                              ) -> List[str]:
        """Generate results for a batch of prompts using a process pool.

        NOTE(review): ``partial(self.__call__, ...)`` must be picklable for
        ``multiprocessing.Pool`` to work; the thread-based
        :meth:`_parallel_generate` is the variant :meth:`generate` uses.
        """
        # freeze_support() is a no-op except in frozen Windows executables.
        multiprocessing.freeze_support()
        if num_procs is None:
            num_procs = multiprocessing.cpu_count() // 2
        batch_size = len(prompt_batch)

        with multiprocessing.Pool(num_procs) as p:
            call_fn = partial(self.__call__, is_chat=is_chat)
            # imap preserves input order while streaming results to tqdm.
            batch_results = list(
                tqdm(
                    p.imap(call_fn, prompt_batch),
                    desc="Performing batch processing...",
                    total=batch_size,
                )
            )
        return batch_results

    def _parallel_generate(self, prompt_batch: List[str],
                           is_chat: bool = False,
                           num_procs: Optional[int] = None,
                           ) -> List[str]:
        """Generate results for a batch of prompts concurrently with threads.

        Suited to I/O-bound calls (e.g. remote LLM APIs), where the GIL is
        released during the wait. ``executor.map`` preserves input order.
        """
        multiprocessing.freeze_support()
        if num_procs is None:
            num_procs = multiprocessing.cpu_count()
        batch_size = len(prompt_batch)

        call_fn = partial(self.__call__, is_chat=is_chat)
        with ThreadPoolExecutor(max_workers=num_procs) as executor:
            batch_results = list(
                tqdm(
                    executor.map(call_fn, prompt_batch),
                    desc="Performing batch processing...",
                    total=batch_size,
                )
            )

        return batch_results

    @classmethod
    def prompt_to_chatml(cls, prompt: str, start_token: str = "<|im_start|>", end_token: str = "<|im_end|>"):
        r"""Convert a ChatML-formatted text prompt to a list of message dicts.

        Each ``{start_token}role\ncontent{end_token}`` segment becomes
        ``{"content": ..., "role": ...}``. A role of the form
        ``system key=value ...`` is normalized to role "system" with the
        extra key/value pairs merged into the message dict.
        """
        prompt = prompt.strip()
        assert prompt.startswith(start_token)
        assert prompt.endswith(end_token)

        message = []
        # Fixed: split on the start_token parameter instead of the
        # hard-coded "<|im_start|>" literal, so custom tokens work.
        for p in prompt.split(start_token)[1:]:
            newline_splitted = p.split("\n", 1)
            role = newline_splitted[0].strip()
            content = newline_splitted[1].split(end_token, 1)[0].strip()

            if role.startswith("system") and role != "system":
                # Per the OpenAI cookbook (How_to_format_inputs_to_ChatGPT_models)
                # and openai-python chatml.md, the system line may carry a
                # dictionary of extra "key=value" args after the role name.
                other_params = _string_to_dict(role.split("system", 1)[-1])
                role = "system"
            else:
                other_params = dict()

            message.append(dict(content=content, role=role, **other_params))

        return message

    @classmethod
    def encode_chat_history(cls, chat_history, start_token="<|im_start|>", end_token="<|im_end|>"):
        """Serialize a list of ``{"role", "content"}`` dicts back to ChatML text."""
        conversation = ""
        for msg in chat_history:
            role = msg["role"]
            content = msg["content"]
            conversation += f"{start_token}{role}\n{content}\n{end_token}\n"
        return conversation
