import random
from typing import Dict, Optional, Any

import numpy as np
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_core.embeddings import Embeddings
from langchain_core.language_models import BaseChatModel
from langchain_openai import OpenAIEmbeddings
from loguru import logger
from pydantic import Field, BaseModel

from app.api.chat_completion_schema import BaseEmbeddingSchema
from app.api.database.models.llm import LLMServerType, LLMServer, LLMModel, LLMDao, LLMModelType


def _get_params(params: dict, server_config: dict, model_config: dict, model_keys: list[str]) -> dict:
    """Build the final keyword arguments for an OpenAI-compatible embedding client.

    Args:
        params: merged default parameters; must carry 'model' and usually 'base_url'.
        server_config: server-level config; 'openai_api_base' / 'base_url' override the
            base_url, 'openai_proxy' is passed through when present.
        model_config: model-level config (currently unused here; kept for interface stability).
        model_keys: candidate API keys; one is picked at random as naive load balancing.

    Returns:
        A dict restricted to 'model', 'base_url', 'api_key' plus any field declared on
        BaseEmbeddingSchema that is present (non-None) in params.

    Raises:
        Exception: when model, api_key or base_url ends up empty.
    """
    # Guard the whole server_config access: the original dereferenced it again
    # outside this check, which crashed on None instead of reporting a clean error.
    if server_config:
        params['base_url'] = server_config.get('openai_api_base') or server_config.get('base_url')
        if server_config.get('openai_proxy'):
            params['openai_proxy'] = server_config.get('openai_proxy')

    valid_fields = set(BaseEmbeddingSchema.model_fields.keys())
    logger.debug(f"valid_fields={valid_fields}")  # was a stray print()

    filtered = {
        'model': params.get('model'),
        # normalize the trailing slash so path joins inside the client are predictable;
        # default to '' so a missing base_url hits the explicit check below, not AttributeError
        'base_url': (params.get('base_url') or '').rstrip('/'),
        # pick one key at random; None (not KeyError) when no keys are configured
        'api_key': random.choice(model_keys) if model_keys else None,
    }

    if not filtered['model']:
        raise Exception('openai model is empty')
    if not filtered['api_key']:
        raise Exception('openai api_key is empty')
    if not filtered['base_url']:
        raise Exception('openai base_url is empty')

    # Pass through any extra schema-declared parameter the caller supplied.
    for k in valid_fields:
        v = params.get(k)
        if v is not None:
            filtered[k] = v

    return filtered


# Maps a server type (LLMServerType value) to the LangChain Embeddings class used
# to talk to that provider. OpenAI-compatible providers share OpenAIEmbeddings.
llm_embedding_node_type: Dict = {

    # Official OpenAI API service
    LLMServerType.OPENAI.value: OpenAIEmbeddings,

    # Alibaba Bailian uses the DashScope-specific client
    LLMServerType.ALI_BAILIAN.value: DashScopeEmbeddings,

    # SiliconFlow exposes an OpenAI-compatible API
    LLMServerType.SILICONFLOW.value: OpenAIEmbeddings

}


class TengitsEmbedding(Embeddings):
    """Embedding wrapper for a model configured in model management.

    Resolves the model/server configuration from the database by ``model_id``,
    instantiates the matching LangChain ``Embeddings`` implementation and
    L2-normalizes every vector it returns.
    """

    # NOTE(review): the class does not inherit from pydantic BaseModel, so these
    # Field() declarations are documentation-only defaults; the real values are
    # assigned in __init__.
    model_id: int = Field(description="后端服务保存的model唯一ID")

    model_name: Optional[str] = Field(default='', description="后端服务保存的model名称")

    embeddings: Optional[Embeddings] = Field(default=None, description="存放真实的Embedding实例")

    # business-specific metadata resolved from the database
    model_info: Optional[LLMModel] = Field(default=None)
    server_info: Optional[LLMServer] = Field(default=None)

    def __init__(self, **kwargs):
        """Load model/server config by ``model_id`` and build the real embedding client.

        Keyword Args:
            model_id: required; id of the model record in model management.
            ignore_online: skip the "model is online" check when truthy.

        Raises:
            Exception: when the model id is missing, the model/server config was
                deleted, the model is not an embedding model, the model is offline,
                or the underlying client fails to initialize.
        """
        logger.debug(f"init tengits embedding llm, kwargs={kwargs}")
        self.model_id = kwargs.get('model_id')

        # whether to skip the model-online check
        ignore_online = kwargs.get('ignore_online', False)

        if not self.model_id:
            raise Exception('没有找到llm模型配置')
        model_info = LLMDao.get_model_by_id(self.model_id)
        if not model_info:
            raise Exception('llm模型配置已被删除，请重新配置模型')
        # Log only after the None-check: the original logged model_info attributes
        # first, raising AttributeError instead of the intended error message.
        logger.info(f"init llm model,{type(model_info)}, {type(model_info.model_keys)}")
        logger.info(f"init llm model, model_name={model_info.model_name}")
        self.model_name = model_info.model_name
        server_info = LLMDao.get_server_by_id(model_info.server_id)
        logger.info(f"init llm model, {type(server_info)},server_info={server_info}")
        if not server_info:
            raise Exception('服务提供方配置已被删除，请重新配置llm模型')
        if model_info.model_type != LLMModelType.EMBEDDING.value:
            raise Exception(f'只支持Embedding类型的模型，不支持{model_info.model_type}类型的模型')
        if not ignore_online and not model_info.online:
            raise Exception(f'{server_info.name}下的{model_info.model_name}模型已下线，请联系管理员上线对应的模型')

        self.model_info = model_info
        self.server_info = server_info

        class_object = self._get_llm_class(server_info.server_type)
        params = self._get_llm_params(server_info, model_info, **kwargs)

        logger.info(f"init llm model, class_object={class_object}, params={params}")
        try:
            self.embeddings = class_object(**params)
        except Exception as e:
            logger.exception('init tengits embedding llm error')
            raise Exception(f'初始化 embedding llm失败，请检查配置或联系管理员。错误信息：{class_object},{e}')

    def _get_llm_params(self, server_info: LLMServer, model_info: LLMModel, **kwargs) -> dict:
        """Assemble the constructor kwargs for the concrete Embeddings class."""
        server_config = self.get_server_info_config()
        model_config = self.get_model_info_config()
        default_params = self._get_default_params(server_config, model_config, **kwargs)

        params = _get_params(default_params, server_config, model_config, self.model_info.model_keys)
        if server_info.server_type == LLMServerType.ALI_BAILIAN.value:
            # DashScopeEmbeddings only accepts model / dashscope_api_key / max_retries.
            # Read max_retries BEFORE narrowing params: the original read it from the
            # already-narrowed dict, so the configured value was always dropped.
            max_retries = params.get('max_retries')
            params = {
                "model": params['model'],
                "dashscope_api_key": params['api_key'],
            }
            if max_retries is not None:
                params['max_retries'] = int(max_retries)

        return params

    def _get_default_params(self, server_config: dict, model_config: dict, **kwargs) -> dict:
        """Merge server-, model- and call-level params; later levels win.

        Keys listed in ``merge_keys`` hold nested dicts that are merged key by key
        instead of being replaced wholesale.
        """
        merge_keys = {'extra_body', 'model_config'}
        default_params = {
            'model': self.model_info.model_name,
            **{k: v for k, v in server_config.items() if v is not None and k not in merge_keys},
            **{k: v for k, v in model_config.items() if v is not None and k not in merge_keys},
        }
        for k in merge_keys:
            if k in server_config or k in model_config or k in kwargs:
                # Build a fresh dict: the original aliased server_config[k] and
                # mutated it in place, and raised KeyError when the key existed only
                # in model_config/kwargs.
                merged = dict(server_config.get(k) or {})
                merged.update(model_config.get(k) or {})
                merged.update(kwargs.get(k) or {})
                default_params[k] = merged

        return default_params

    def get_server_info_config(self) -> dict:
        """Return the server-level config dict, or {} when unset."""
        if self.server_info.config:
            return self.server_info.config
        return {}

    def get_model_info_config(self) -> dict:
        """Return the model-level config dict, or {} when unset."""
        if self.model_info.config:
            return self.model_info.config
        return {}

    def _get_llm_class(self, server_type: str):
        """Resolve the Embeddings implementation class for a server type."""
        if server_type not in llm_embedding_node_type:
            raise Exception(f'not support llm type: {server_type}')
        class_object = llm_embedding_node_type[server_type]
        return class_object

    @staticmethod
    def _normalize(vector) -> list[float]:
        """Return *vector* scaled to unit L2 norm; zero vectors are returned unchanged."""
        arr = np.array(vector)
        norm = np.linalg.norm(arr)
        if norm == 0:
            # avoid division by zero (the original produced NaNs for zero vectors)
            return arr.tolist()
        return (arr / norm).tolist()

    def embed_documents(self, texts: list[str]) -> list[list[float]]:
        """Embed *texts* in batches of 64 and L2-normalize the returned vectors."""
        batch_size = 64
        results = []
        try:
            for i in range(0, len(texts), batch_size):
                batch = texts[i:i + batch_size]
                ret = self.embeddings.embed_documents(batch)
                # probe the first vector to decide whether the provider already normalized
                if ret:
                    norm = np.linalg.norm(ret[0])
                    if norm == 0 or abs(norm - 1.0) > 1e-5:
                        ret = [self._normalize(doc) for doc in ret]
                results.extend(ret)

            return results
        except Exception as e:
            logger.exception('embedding error')
            raise Exception(f'embedding error: {e}')

    def embed_query(self, text: str) -> list[float]:
        """Embed a single query string and L2-normalize the result."""
        try:
            ret = self.embeddings.embed_query(text)
            norm = np.linalg.norm(ret)
            if norm == 0 or abs(norm - 1.0) > 1e-5:
                ret = self._normalize(ret)
            return ret
        except Exception as e:
            logger.exception('embedding error')
            raise Exception(f'embedding组件异常，请检查配置或联系管理员。错误信息：{e}')
