import logging
from typing import Optional, List, Type, cast

from gpt.configs.model_config import get_device
from gpt.model.interface.embeddings import Embeddings, RerankEmbeddings
from gpt.model.parameters import EmbeddingModelParameters, BaseEmbeddingModelParameters, ProxyEmbeddingParameters
from gpt.util.parameter_utils import EnvArgumentParser

logger = logging.getLogger(__name__)


class EmbeddingLoader:
    """Factory that instantiates embedding / rerank backends by model name.

    ``proxy_*`` model names forward requests to a remote API endpoint;
    any other name is treated as a local model loaded from
    ``param.model_path``.
    """

    @staticmethod
    def _build_proxy_kwargs(param: "ProxyEmbeddingParameters") -> dict:
        """Collect non-empty proxy connection settings into constructor kwargs.

        Only truthy values are forwarded so the target embeddings class
        keeps its own defaults for anything left unset.
        """
        kwargs = {}
        if param.proxy_server_url:
            kwargs["api_url"] = param.proxy_server_url
        if param.proxy_api_key:
            kwargs["api_key"] = param.proxy_api_key
        if param.proxy_backend:
            kwargs["model_name"] = param.proxy_backend
        return kwargs

    def load(self, model_name: str, param: "BaseEmbeddingModelParameters") -> "Embeddings":
        """Create an embeddings backend for *model_name*.

        Imports are done lazily inside each branch so only the selected
        backend's dependencies are required at runtime.
        """
        if model_name in ["proxy_openai", "proxy_azure"]:
            from langchain.embeddings import OpenAIEmbeddings
            return OpenAIEmbeddings(**param.build_kwargs())
        elif model_name in ["proxy_http_openapi"]:
            from gpt.model.embedding.open_api_embeddings import OpenAPIEmbeddings

            proxy_param = cast(ProxyEmbeddingParameters, param)
            return OpenAPIEmbeddings(**self._build_proxy_kwargs(proxy_param))
        else:
            # Default: local HuggingFace model from param.model_path.
            from gpt.model.embedding.hugging_face_embeddings import HuggingFaceEmbeddings

            kwargs = param.build_kwargs(model_name=param.model_path)
            return HuggingFaceEmbeddings(**kwargs)

    def load_rerank_model(
            self, model_name: str, param: "BaseEmbeddingModelParameters"
    ) -> "RerankEmbeddings":
        """Create a rerank backend for *model_name*.

        Mirrors :meth:`load`: proxy names hit a remote API, anything else
        loads a local cross-encoder from ``param.model_path``.
        """
        if model_name in ["rerank_proxy_http_openapi"]:
            from gpt.model.embedding.rerank import OpenAPIRerankEmbeddings

            proxy_param = cast(ProxyEmbeddingParameters, param)
            return OpenAPIRerankEmbeddings(**self._build_proxy_kwargs(proxy_param))
        elif model_name in ["rerank_proxy_siliconflow"]:
            from gpt.model.embedding.rerank import SiliconFlowRerankEmbeddings

            proxy_param = cast(ProxyEmbeddingParameters, param)
            return SiliconFlowRerankEmbeddings(**self._build_proxy_kwargs(proxy_param))
        else:
            # Default: local cross-encoder reranker.
            from gpt.model.embedding.rerank import CrossEncoderRerankEmbeddings

            kwargs = param.build_kwargs(model_name=param.model_path)
            return CrossEncoderRerankEmbeddings(**kwargs)


def parse_embedding_params(
        model_name: Optional[str] = None,
        model_path: Optional[str] = None,
        command_args: Optional[List[str]] = None,
        param_cls: Optional[Type] = EmbeddingModelParameters,
        **kwargs,
) -> "BaseEmbeddingModelParameters":
    """Build an embedding parameter dataclass from env vars and CLI args.

    Args:
        model_name: Registered model name; also selects the env-var prefix
            used for lookups.
        model_path: Path or identifier of the model weights.
        command_args: Raw command-line arguments to parse, if any.
        param_cls: Dataclass type to populate; defaults to
            ``EmbeddingModelParameters``.
        **kwargs: Extra field overrides forwarded to the parser.

    Returns:
        The populated parameter object, with ``device`` defaulted via
        ``get_device()`` when it was not configured.
    """
    model_args = EnvArgumentParser()
    env_prefix = EnvArgumentParser.get_env_prefix(model_name)
    model_params: BaseEmbeddingModelParameters = model_args.parse_args_into_dataclass(
        param_cls,
        env_prefixes=[env_prefix],
        command_args=command_args,
        model_name=model_name,
        model_path=model_path,
        **kwargs,
    )
    if not model_params.device:
        # Fall back to the auto-detected device when none was configured.
        model_params.device = get_device()
        logger.info(
            f"[EmbeddingsModelWorker] Parameters of device is None, use {model_params.device}"
        )
    return model_params
