import random
import traceback
from typing import Any, AsyncGenerator, Dict, List, Optional, TypedDict, Union

from chernc.constants import DEFAULT_LOGGER_DIR, DEFAULT_LOGGER_FILE_NAME
from chernc.llm.lm_inference import AsyncLanguageModelPredictor, LanguageModelPredictor
from chernc.llm.uni_tokenizer import UniTokenizer
from chernc.logging import build_logger

logger = build_logger(__name__ + ".apipool", DEFAULT_LOGGER_FILE_NAME, logger_dir=DEFAULT_LOGGER_DIR)


class ModelInfo(TypedDict):
    """A pool registry entry: one predictor plus its feature dictionary."""

    # Sync or async predictor instance. (Key name "predicator" is kept as-is
    # for backward compatibility; presumably a misspelling of "predictor".)
    predicator: Union[LanguageModelPredictor, AsyncLanguageModelPredictor]
    # Free-form properties consumed by ModelScorer subclasses, e.g.
    # "labels", "max_tokens", "min_tokens".
    model_features: Dict[str, Any]


class ModelScorer:
    """Base class for model-scoring strategies.

    Subclasses rate how suitable a model is for a given message list; a
    negative score vetoes the model in the pool's selection logic.
    """

    def __call__(self, tokenizer: UniTokenizer, model_features: Dict[str, Any], messages: List[Dict[str, str]]) -> float:
        """Return a neutral score of 0.0; subclasses override this."""
        return 0.0


def same_label_count(labels: List[str], model_labels: List[str]) -> int:
    """Count case- and whitespace-insensitive matches between two label lists.

    Every matching (label, model_label) pair contributes 1, so duplicates on
    either side are counted multiple times (pairwise semantics, preserved
    from the original nested-loop implementation).

    :param labels: labels to look for.
    :param model_labels: labels attached to a model.
    :return: number of matching pairs.
    """
    # Normalize the model labels once instead of re-normalizing them for
    # every label in the inner loop.
    normalized_model_labels = [mlabel.strip().lower() for mlabel in model_labels]
    return sum(normalized_model_labels.count(label.strip().lower()) for label in labels)


class LLMAPIPoolBase:
    """Registry of named LLM predictors with scorer-based model selection.

    Each registered model carries a feature dict that ModelScorer instances
    inspect; a negative score from any scorer vetoes the model.  A per-model
    consecutive-error counter is also kept so callers can prefer models that
    have not been failing recently.
    """

    def __init__(self):
        # Registered models: name -> {"predicator": ..., "model_features": ...}.
        self.models: Dict[str, ModelInfo] = {}
        # Consecutive error count per model; reset to 0 on success.
        self.model_states: Dict[str, int] = {}

    def register_model(
        self,
        model_name: str,
        model_predictor: Union[LanguageModelPredictor, AsyncLanguageModelPredictor],
        model_features: Dict[str, Any],
    ):
        """Register (or overwrite) a model in the pool.

        :param model_name: unique key identifying the model.
        :param model_predictor: sync or async predictor instance.
        :param model_features: free-form properties consumed by scorers
            (e.g. "labels", "max_tokens", "min_tokens").
        """
        self.models[model_name] = {
            "predicator": model_predictor,
            "model_features": model_features,
        }

    def _record_error(self, model_name: str):
        """Increment the consecutive-error counter for *model_name*.

        Unknown names (neither registered nor previously tracked) are ignored.
        """
        if model_name in self.models or model_name in self.model_states:
            self.model_states[model_name] = self.model_states.get(model_name, 0) + 1

    def _record_correct(self, model_name: str):
        """Reset the error counter for *model_name* after a successful call."""
        if model_name in self.models or model_name in self.model_states:
            self.model_states[model_name] = 0

    def _score_model(
        self,
        model_info: ModelInfo,
        selectors: List[ModelScorer],
        messages: List[Dict[str, str]],
    ) -> List[float]:
        """Apply every selector to one model and return the individual scores.

        A model whose predictor has no tokenizer yields an empty score list,
        so it is neither scored nor vetoed (it sums to 0).
        """
        scores: List[float] = []
        for selector in selectors:
            if model_info["predicator"].tokenizer is None:
                continue
            scores.append(
                selector(
                    tokenizer=model_info["predicator"].tokenizer,
                    model_features=model_info["model_features"],
                    messages=messages,
                )
            )
        return scores

    def select_model(self, messages: List[Dict[str, str]], selectors: Union[List[ModelScorer], ModelScorer]) -> str:
        """Pick the single best model for *messages*.

        Each selector scores every registered model; a negative score from
        any selector disqualifies that model.  Among the remaining models,
        the one with the highest summed score wins.

        :param messages: chat messages consumed by scorers (e.g. for token counts).
        :param selectors: one scorer or a list of scorers.
        :return: the name of the best-scoring model.
        :raises ValueError: if no model passes all selectors.
        """
        if isinstance(selectors, ModelScorer):
            selectors = [selectors]

        best_model: Optional[str] = None
        best_score = float("-inf")

        for model_name, model_info in self.models.items():
            scores = self._score_model(model_info, selectors, messages)
            if any(s < 0 for s in scores):
                continue
            score = sum(scores)
            if score > best_score:
                best_score = score
                best_model = model_name

        if best_model is None:
            raise ValueError("No suitable model found for the given conditions.")

        return best_model

    def select_models(self, messages: List[Dict[str, str]], selectors: Union[List[ModelScorer], ModelScorer]) -> List[str]:
        """Return all qualifying model names, best summed score first.

        Same scoring and veto rules as :meth:`select_model`, but every
        non-vetoed model is returned, sorted by score in descending order.
        """
        if isinstance(selectors, ModelScorer):
            selectors = [selectors]

        results = []
        for model_name, model_info in self.models.items():
            scores = self._score_model(model_info, selectors, messages)
            logger.debug(f"Model {model_name}: {scores} (Selectors: {', '.join(type(s).__name__ for s in selectors)})")
            if any(s < 0 for s in scores):
                continue
            results.append((model_name, sum(scores)))

        results.sort(key=lambda item: item[1], reverse=True)
        return [name for name, _ in results]


class LLMAPIPool(LLMAPIPoolBase):
    """Synchronous LLM API pool with failover across registered models."""

    def __init__(self):
        super().__init__()

    def call_chat(self, messages: List[Dict[str, str]], selectors: Union[List[ModelScorer], ModelScorer], *args, **kwargs):
        """Call the chat API, failing over between candidate models.

        Candidate models are chosen via the selectors, shuffled (so equally
        ranked models rotate between calls), then tried in ascending order of
        their recorded error counts until one succeeds.

        Args:
            messages (List[Dict[str, str]]): chat messages, each a dict
                typically containing "role" and "content" fields.
            selectors (Union[List[ModelScorer], ModelScorer]): scorer(s)
                defining which models qualify and how they rank.
            *args: forwarded to the model's ``chat`` method.
            **kwargs: forwarded to the model's ``chat`` method.

        Returns:
            The result from the first model whose chat call succeeds.

        Raises:
            Exception: if every candidate model fails.
        """
        model_names = self.select_models(messages, selectors)
        # Shuffle so that models with equal error counts are tried in a random
        # order — useful when one model repeatedly fails on a given input.
        random.shuffle(model_names)
        # Stable sort by error count ascending; never-tried models (-1) first.
        sorted_model_names = sorted(model_names, key=lambda name: self.model_states.get(name, -1))

        for model_name in sorted_model_names:
            model_predicator = self.models[model_name]["predicator"]
            try:
                results = model_predicator.chat(messages=messages, *args, **kwargs)
            except KeyboardInterrupt:
                raise
            except Exception:
                # Narrowed from a bare ``except:`` so SystemExit/GeneratorExit
                # are no longer swallowed as ordinary API failures.
                self._record_error(model_name)
                logger.warning(f"Failed to call chat API for model {model_name}.")
                logger.debug(traceback.format_exc())
                continue
            self._record_correct(model_name)
            return results
        raise Exception("Failed to call chat API and all endpoints are tried....")

    def call_chat_stream(self, messages: List[Dict[str, str]], selectors: Union[List[ModelScorer], ModelScorer], *args, **kwargs):
        """Select the single best model and invoke its chat method once.

        No failover is attempted here; any error propagates to the caller.

        NOTE(review): despite the name, this calls ``chat`` rather than a
        streaming method — the async twin calls ``chat_stream`` — confirm
        whether LanguageModelPredictor streams through ``chat``.

        Args:
            messages (List[Dict[str, str]]): chat messages, each a dict.
            selectors (Union[List[ModelScorer], ModelScorer]): scorer(s) used
                to pick the model.
            *args: forwarded to the model's ``chat`` method.
            **kwargs: forwarded to the model's ``chat`` method.

        Returns:
            The result returned by the selected model's ``chat`` call.
        """
        model_name = self.select_model(messages, selectors)
        model_predicator = self.models[model_name]["predicator"]
        return model_predicator.chat(messages=messages, *args, **kwargs)


class AsyncLLMAPIPool(LLMAPIPoolBase):
    """Asynchronous LLM API pool with failover across registered models."""

    def __init__(self):
        super().__init__()

    async def call_chat(
        self, messages: List[Dict[str, str]], selectors: Union[List[ModelScorer], ModelScorer], *args, **kwargs
    ) -> Optional[str]:
        """Asynchronously call the chat API, failing over between models.

        Candidate models are chosen via the selectors, shuffled (so equally
        ranked models rotate between calls), then tried in ascending order of
        their recorded error counts until one succeeds.

        :param messages: chat messages, each a dict typically containing
            "role" and "content" fields.
        :type messages: List[Dict[str, str]]
        :param selectors: scorer(s) defining which models qualify.
        :type selectors: Union[List[ModelScorer], ModelScorer]
        :param args: forwarded to the model's ``chat`` method.
        :param kwargs: forwarded to the model's ``chat`` method.
        :return: the chat response from the first model that succeeds.
        :rtype: Optional[str]
        :raises Exception: if no model qualifies, if a selected predictor is
            synchronous, or if every candidate model fails.
        """
        if isinstance(selectors, ModelScorer):
            selectors = [selectors]
        model_names = self.select_models(messages, selectors)
        if len(model_names) == 0:
            raise Exception(
                f"The number of selected models is zero (Selectors: {', '.join(type(s).__name__ for s in selectors)}) , please check the LLM config."
            )
        # Shuffle so that models with equal error counts are tried in a random
        # order — useful when one model repeatedly fails on a given input.
        random.shuffle(model_names)
        # Stable sort by error count ascending; never-tried models (-1) first.
        sorted_model_names = sorted(model_names, key=lambda name: self.model_states.get(name, -1))

        for model_name in sorted_model_names:
            model_predicator = self.models[model_name]["predicator"]
            if isinstance(model_predicator, LanguageModelPredictor):
                raise Exception("The model predictor is sync.")
            try:
                results = await model_predicator.chat(messages=messages, *args, **kwargs)
            except KeyboardInterrupt:
                raise
            except Exception:
                # Narrowed from a bare ``except:`` so SystemExit/GeneratorExit
                # are no longer swallowed as ordinary API failures.
                self._record_error(model_name)
                logger.warning(f"Failed to call chat API for model {model_name}.")
                logger.debug(traceback.format_exc())
                continue
            self._record_correct(model_name)
            return results
        raise Exception("Failed to call chat API and all endpoints are tried....")

    async def call_chat_stream(
        self, messages: List[Dict[str, str]], selectors: Union[List[ModelScorer], ModelScorer], *args, **kwargs
    ) -> AsyncGenerator[Optional[str], None]:
        """Asynchronously stream chat output from the single best model.

        No failover is attempted here; any error propagates to the caller.

        :param messages: chat messages, each a dict.
        :param selectors: scorer(s) used to pick the model.
        :param args: forwarded to the model's ``chat_stream`` method.
        :param kwargs: forwarded to the model's ``chat_stream`` method.
        :yields: streamed chat output chunks from the selected model.
        :raises Exception: if the selected predictor is synchronous.
        """
        model_name = self.select_model(messages, selectors)
        model_predicator = self.models[model_name]["predicator"]
        if isinstance(model_predicator, LanguageModelPredictor):
            raise Exception("The model predictor is sync.")
        async for chunk in model_predicator.chat_stream(messages=messages, *args, **kwargs):
            yield chunk


class PaidModelScorer(ModelScorer):
    """Scores 1.0 for models carrying the "Paid" label, -1.0 (veto) otherwise."""

    def __call__(self, tokenizer: UniTokenizer, model_features: Dict[str, Any], messages: List[Dict[str, str]]) -> float:
        logger.debug(f"[[PaidModelScorer]] Model features:{model_features}")
        labels = model_features.get("labels", [])
        is_paid = same_label_count(["Paid"], labels) == 1
        return 1.0 if is_paid else -1.0


class FreeModelScorer(ModelScorer):
    """Scores 1.0 for models carrying the "Free" label, -1.0 (veto) otherwise."""

    def __call__(self, tokenizer: UniTokenizer, model_features: Dict[str, Any], messages: List[Dict[str, str]]) -> float:
        logger.debug(f"[[FreeModelScorer]] Model features:{model_features}")
        labels = model_features.get("labels", [])
        is_free = same_label_count(["Free"], labels) == 1
        return 1.0 if is_free else -1.0


class InputLengthScorer(ModelScorer):
    """Scores 1.0 when the prompt's token count lies strictly between the
    model's "min_tokens" and "max_tokens" features, -1.0 (veto) otherwise.

    Note both features default to 0, so a model that declares neither is
    always vetoed by this scorer.
    """

    def __call__(self, tokenizer: UniTokenizer, model_features: Dict[str, Any], messages: List[Dict[str, str]]) -> float:
        tokens = tokenizer.token_num(messages)
        logger.debug(f"[[InputLengthScorer]]Tokens: {tokens}; Model features:{model_features}")
        lower_bound = model_features.get("min_tokens", 0)
        upper_bound = model_features.get("max_tokens", 0)
        return 1.0 if lower_bound < tokens < upper_bound else -1.0
