import inspect
import random
from typing import Dict, Optional, Any, Iterator, AsyncIterator

from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models import BaseChatModel, BaseLanguageModel, LanguageModelInput
from langchain_core.messages import BaseMessage
from langchain_core.outputs import ChatResult, ChatGenerationChunk
from langchain_core.runnables import RunnableConfig
from langchain_openai import ChatOpenAI
from langchain_community.chat_models import ChatTongyi, QianfanChatEndpoint, ChatZhipuAI, MoonshotChat
from langchain_ollama.chat_models import ChatOllama
from langchain_deepseek import ChatDeepSeek
from loguru import logger
from openai.resources.chat import AsyncCompletions
from pydantic import Field

from app.api.database.models.llm import LLMServerType, LLMServer, LLMModel, LLMDao, LLMModelType


def _get_openai_params(params: dict, server_config: dict, model_config: dict, model_keys: list[str]) -> dict:
    """Build constructor kwargs for an OpenAI-compatible chat client.

    Only parameters accepted by ``AsyncCompletions.create`` (the call that
    ``ChatOpenAI`` ultimately performs) are forwarded. One API key is picked
    at random from ``model_keys`` to spread load across configured keys.

    Raises:
        Exception: when model / api_key / base_url is missing.
    """
    # ChatOpenAI eventually calls AsyncCompletions.create(), so its signature
    # defines which request parameters are legal to forward.
    sig = inspect.signature(AsyncCompletions.create)
    valid_fields = list(sig.parameters.keys())

    logger.debug(f"ChatOpenAI.model_fields.keys()={ChatOpenAI.model_fields.keys()}")

    filtered = {
        "model_kwargs": {},
        'model': params.get('model'),
        'base_url': (params.get('base_url') or '').rstrip('/'),
        # BUGFIX: the original indexed filtered['api_key'] unconditionally,
        # raising KeyError (instead of the intended message) when no key was
        # configured; pick one key at random only when keys exist.
        'api_key': random.choice(model_keys) if model_keys else None,
    }

    if not filtered['model']:
        raise Exception('openai model is empty')
    if not filtered['api_key']:
        raise Exception('openai api_key is empty')
    if not filtered['base_url']:
        raise Exception('openai base_url is empty')

    # Forward every merged param that the completions API accepts.
    for k in valid_fields:
        v = params.get(k)
        if v is not None:
            filtered[k] = v

    # BUGFIX: the original indexed filtered['extra_body'] before the key could
    # exist (guaranteed KeyError); guard with .get() and run the merge after
    # the valid-field copy so 'extra_body' can actually be present.
    if filtered.get('extra_body') and not filtered.get('model_config'):
        filtered['extra_body'].update(filtered.get('model_config', {}))

    # BUGFIX: iterate over a snapshot of the keys -- popping entries while
    # iterating the dict itself raises RuntimeError.
    for mk in list(filtered['model_kwargs']):
        if mk not in valid_fields:
            filtered['model_kwargs'].pop(mk, None)

    return filtered


def _get_qianfan_params(params: dict, server_config: dict, model_config: dict, model_keys: list[str]):
    """Build constructor kwargs for ``QianfanChatEndpoint``.

    Qianfan credentials are stored as a single ``"<ak>,<sk>"`` string; one
    pair is chosen at random when several are configured.

    Raises:
        Exception: when a configured key is not in ``"ak,sk"`` format.
    """
    filtered = {
        "model_kwargs": {}
    }
    # Only forward params that are declared fields of QianfanChatEndpoint.
    valid_fields = set(QianfanChatEndpoint.model_fields.keys())
    for k in valid_fields:
        v = params.get(k)
        if v is not None:
            filtered[k] = v
    if model_keys:
        # Pick one "<ak>,<sk>" pair at random.
        ak_sk = random.choice(model_keys).split(',')
        # BUGFIX: a malformed key without a comma used to surface as a raw
        # IndexError; fail with an explicit message instead.
        if len(ak_sk) < 2:
            raise Exception('qianfan api key must be in "ak,sk" format')
        filtered['qianfan_ak'] = ak_sk[0]
        filtered['qianfan_sk'] = ak_sk[1]
    return filtered

def _get_tongyi_params(params: dict, server_config: dict, model_config: dict, model_keys: list[str]):
    """Build constructor kwargs for ``ChatTongyi`` (Ali DashScope)."""
    filtered = {
        "model_kwargs": {},
        "model": params['model'],
    }
    # Only forward params that are declared fields of ChatTongyi.
    valid_fields = set(ChatTongyi.model_fields.keys())
    for k in valid_fields:
        v = params.get(k)
        if v is not None:
            filtered[k] = v
    if model_keys:
        # Pick one key at random to spread load across configured keys.
        filtered['dashscope_api_key'] = random.choice(model_keys)
    # BUGFIX: guard with .get() -- neither 'extra_body' nor 'model_config' is
    # guaranteed to be present in filtered, and direct indexing raised
    # KeyError in that case.
    if filtered.get('extra_body') and not filtered.get('model_config'):
        filtered['extra_body'].update(filtered.get('model_config', {}))

    return filtered
def _get_deepseek_params(params: dict, server_config: dict, model_config: dict, model_keys: list[str]):
    """Build constructor kwargs for ``ChatDeepSeek`` from the merged params."""
    result = {
        "model_kwargs": {},
        "model": params['model'],
    }
    # Forward every merged param that matches a declared ChatDeepSeek field.
    for field_name in set(ChatDeepSeek.model_fields.keys()):
        value = params.get(field_name)
        if value is not None:
            result[field_name] = value
    # When several keys are configured, pick one at random.
    if model_keys:
        result['api_key'] = model_keys[random.randint(0, len(model_keys) - 1)]
    result['api_base'] = params['base_url'].rstrip('/')
    return result


# Registry mapping each LLMServerType value to:
#   'client'  - display name of the langchain chat-model class,
#   'clazz'   - the class itself, instantiated in TengitsLLM.__init__,
#   'handler' - the function that converts merged default params into
#               constructor kwargs for that class.
_llm_node_type: Dict = {
    # open-source / OpenAI-compatible inference frameworks
    LLMServerType.OPENAI.value: {'client': 'ChatOpenAI', 'clazz': ChatOpenAI,'handler': _get_openai_params},

    LLMServerType.ALI_BAILIAN.value: {'client': 'ChatTongyi', 'clazz': ChatTongyi, 'handler': _get_tongyi_params},

    LLMServerType.DEEPSEEK.value: {'client': 'ChatDeepSeek', 'clazz': ChatDeepSeek, 'handler': _get_deepseek_params},

    LLMServerType.BAIDU_QIANFAN.value: {'client': 'QianfanChatEndpoint', 'clazz': QianfanChatEndpoint, 'handler': _get_qianfan_params},

    LLMServerType.MOONSHOT.value: {'client': 'MoonshotChat', 'clazz': MoonshotChat, 'handler': _get_openai_params},

    LLMServerType.ZHIPU.value: {'client': 'ChatZhipuAI', 'clazz': ChatZhipuAI, 'handler': _get_openai_params},

    # NOTE(review): Moonshot/Zhipu/Ollama reuse the OpenAI handler -- presumably
    # they expose OpenAI-compatible APIs; verify against the provider configs.
    LLMServerType.OLLAMA.value: {'client': 'ChatOllama', 'clazz': ChatOllama, 'handler': _get_openai_params},

}


class TengitsLLM(BaseChatModel):
    """Chat-model facade over an LLM registered in model management.

    Resolves the model/server configuration by ``model_id``, instantiates the
    matching langchain chat model via ``_llm_node_type`` and delegates all
    generation calls to it.
    """

    model_id: int = Field(description="后端服务保存的model唯一ID")
    model_name: Optional[str] = Field(default='', description="后端服务保存的model名称")
    streaming: bool = Field(default=True, description="是否使用流式输出", alias="stream")
    temperature: float = Field(default=0.3, description="模型生成的温度")
    top_p: float = Field(default=1, description="模型生成的top_p")

    # The real underlying langchain chat model that all calls delegate to.
    llm: Optional[BaseChatModel] = Field(default=None, description="存放真实的llm实例")

    # Business parameters tightly coupled to the backend service.
    model_info: Optional[LLMModel] = Field(default=None)
    server_info: Optional[LLMServer] = Field(default=None)

    def __init__(self, **kwargs):
        """Resolve model/server records by ``model_id`` and build the inner llm.

        Keyword Args:
            model_id (int): required; ID of the configured model.
            ignore_online (bool): skip the "model is online" check.

        Raises:
            Exception: when the model/server configuration is missing, the
                model is not an LLM, the model is offline, or the underlying
                client fails to initialize.
        """
        super().__init__(**kwargs)
        self.model_id = kwargs.get('model_id')
        self.streaming = kwargs.get('streaming', True)
        self.temperature = kwargs.get('temperature', 0.3)
        self.top_p = kwargs.get('top_p', 1)

        # Whether to skip the check that the model is online.
        ignore_online = kwargs.get('ignore_online', False)
        if not self.model_id:
            raise Exception('没有找到llm模型配置')
        model_info = LLMDao.get_model_by_id(self.model_id)
        # BUGFIX: check for a missing record BEFORE touching its attributes;
        # the original logged model_info.model_keys first and raised
        # AttributeError instead of the intended friendly error.
        if not model_info:
            raise Exception('llm模型配置已被删除，请重新配置模型')
        logger.info(f"init llm model,{type(model_info)}, {type(model_info.model_keys)}")
        logger.info(f"init llm model, model_name={model_info.model_name}")
        self.model_name = model_info.model_name
        server_info = LLMDao.get_server_by_id(model_info.server_id)
        logger.info(f"init llm model, {type(server_info)},server_info={server_info}")
        if not server_info:
            raise Exception('服务提供方配置已被删除，请重新配置llm模型')
        if model_info.model_type != LLMModelType.LLM.value:
            raise Exception(f'只支持LLM类型的模型，不支持{model_info.model_type}类型的模型')
        if not ignore_online and not model_info.online:
            raise Exception(f'{server_info.name}下的{model_info.model_name}模型已下线，请联系管理员上线对应的模型')

        self.model_info = model_info
        self.server_info = server_info

        class_object = self._get_llm_class(server_info.server_type)
        params = self._get_llm_params(server_info, model_info, server_info.server_type, **kwargs)

        logger.info(
            f"init llm model, class_object={class_object}, server_typ={server_info.server_type}, params={params}")
        try:
            self.llm = class_object(**params)
        except Exception as e:
            logger.exception('init tengits llm error')
            raise Exception(f'初始化llm失败，请检查配置或联系管理员。错误信息：{e}')

    def _get_llm_params(self, server_info: LLMServer, model_info: LLMModel, server_type: str,
                        **kwargs) -> dict:
        """Merge server/model config with kwargs and run the server-type
        specific handler to produce constructor kwargs for the client class."""
        server_config = self.get_server_info_config()
        model_config = self.get_model_info_config()
        # Merge params; streaming is forced on inside _get_default_params.
        default_params = self._get_default_params(server_config, model_config, **kwargs)

        params_handler = _llm_node_type[server_type]['handler']

        params = params_handler(default_params, server_config, model_config, self.model_info.model_keys)

        # Normalize API params: strip options a provider does not support.
        if server_info.server_type == LLMServerType.DEEPSEEK.value:
            # https://api-docs.deepseek.com/zh-cn/guides/reasoning_model
            params.pop('logprobs', None)
            params.pop('top_logprobs', None)

        return params

    def _get_default_params(self, server_config: dict, model_config: dict, **kwargs) -> dict:
        """Build the merged parameter dict.

        Precedence (low -> high): instance defaults < server_config <
        model_config < kwargs; streaming is always forced on. Dict-valued
        keys in ``merge_keys`` are merged key-by-key instead of replaced
        wholesale.
        """
        logger.debug(f"get default params, server_config={server_config}, model_config={model_config}, kwargs={kwargs}")
        merge_keys = {'extra_body', 'model_kwargs'}
        default_params = {
            'model': self.model_info.model_name,

            'temperature': self.temperature,
            'top_p': self.top_p,
            **{k: v for k, v in server_config.items() if v is not None and k not in merge_keys},
            **{k: v for k, v in model_config.items() if v is not None and k not in merge_keys},
            **kwargs,
            'streaming': True,  # always use the streaming API
            'model_kwargs': {'stream_options': {"include_usage": True}},
        }
        for k in merge_keys:
            if k in server_config:
                # BUGFIX: copy instead of aliasing -- the updates below used
                # to mutate the stored server configuration dict in place.
                default_params[k] = dict(server_config[k])
            if k in model_config:
                # BUGFIX: setdefault avoids a KeyError when the key exists in
                # model_config/kwargs but not in the defaults ('extra_body'
                # has no default entry above).
                default_params.setdefault(k, {}).update(model_config[k])
            if k in kwargs:
                default_params.setdefault(k, {}).update(kwargs[k])

        return default_params

    def get_server_info_config(self):
        """Return the server-level config dict, or {} when unset."""
        if self.server_info.config:
            return self.server_info.config
        return {}

    def get_model_info_config(self):
        """Return the model-level config dict, or {} when unset."""
        if self.model_info.config:
            return self.model_info.config
        return {}

    def _get_llm_class(self, server_type: str) -> type:
        """Look up the langchain chat-model class for ``server_type``.

        Raises:
            Exception: when the server type is not registered.
        """
        # BUGFIX: the original annotation `(BaseLanguageModel, str)` was an
        # invalid tuple annotation -- a single class object is returned.
        if server_type not in _llm_node_type:
            raise Exception(f'not support llm type: {server_type}')
        class_object = _llm_node_type[server_type]['clazz']
        return class_object

    def _generate(
            self,
            messages: list[BaseMessage],
            stop: Optional[list[str]] = None,
            run_manager: Optional[CallbackManagerForLLMRun] = None,
            **kwargs: Any,
    ) -> ChatResult:
        """Delegate synchronous generation to the underlying llm."""
        try:
            ret = self.llm._generate(messages, stop, run_manager, **kwargs)
            logger.debug(f"generate llm,name={self.model_name},ret={ret}")
        except Exception:
            # self._update_model_status(1, str(e))
            raise
        return ret

    def stream(
            self,
            input: LanguageModelInput,
            config: Optional[RunnableConfig] = None,
            *,
            stop: Optional[list[str]] = None,
            **kwargs: Any,
    ) -> Iterator[ChatGenerationChunk]:
        """Delegate synchronous streaming to the underlying llm."""
        try:
            for one in self.llm.stream(input=input, config=config, stop=stop, **kwargs):
                # Consistency fix: log at DEBUG with self.model_name like
                # astream (self.llm.name may be None; INFO per chunk is noisy).
                logger.debug(f"stream llm,name={self.model_name},chunk={one}")
                yield one
            # self._update_model_status(0)
        except Exception:
            # self._update_model_status(1, str(e))
            raise

    async def astream(
            self,
            input: LanguageModelInput,
            config: Optional[RunnableConfig] = None,
            *,
            stop: Optional[list[str]] = None,
            **kwargs: Any,
    ) -> AsyncIterator[ChatGenerationChunk]:
        """Delegate asynchronous streaming to the underlying llm."""
        try:
            async for one in self.llm.astream(input=input, config=config, stop=stop, **kwargs):
                logger.debug(f"astream llm,name={self.model_name},chunk,{type(one)},{one}")
                yield one
        except Exception:
            # self._update_model_status(1, str(e))
            raise

    @property
    def _llm_type(self) -> str:
        # Report the underlying model's type so tracing/callbacks see it.
        return self.llm._llm_type