import logging
from abc import abstractmethod
from typing import Optional, Type

from gpt.model.adapter.base import register_model_adapter
from gpt.model.adapter.llm_adapter import LLMModelAdapter
from gpt.model.base import ModelType
from gpt.model.llm.proxy.base import ProxyLLMClient
from gpt.model.llm.proxy.proxy_model import ProxyModel
from gpt.model.parameters import ProxyModelParameters

logger = logging.getLogger(__name__)


class ProxyLLMModelAdapter(LLMModelAdapter):
    """Base adapter for proxy-backed LLM models.

    Concrete subclasses implement :meth:`do_match` to claim a model
    name/path and usually override :meth:`get_llm_client_class` to supply
    their client implementation. A client class configured explicitly via
    ``params.llm_client_class`` always takes precedence.
    """

    def new_adapter(self, **kwargs) -> "LLMModelAdapter":
        """Return a fresh instance of the same concrete adapter class."""
        return self.__class__()

    def model_type(self) -> str:
        """All adapters in this hierarchy are proxy-type models."""
        return ModelType.PROXY

    def match(
            self,
            model_type: str,
            model_name: Optional[str] = None,
            model_path: Optional[str] = None,
    ) -> bool:
        """Return True when either the model name or the path matches.

        Matching is case-insensitive; ``None`` values are forwarded to
        :meth:`do_match` unchanged.
        """
        lowered_name = model_name.lower() if model_name else None
        lowered_path = model_path.lower() if model_path else None
        return self.do_match(lowered_name) or self.do_match(lowered_path)

    @abstractmethod
    def do_match(self, lower_model_name_or_path: Optional[str] = None):
        """Decide whether this adapter handles the given lowercased name/path."""
        raise NotImplementedError()

    def dynamic_llm_client_class(
            self, params: ProxyModelParameters
    ) -> Optional[Type[ProxyLLMClient]]:
        """Resolve the client class named by ``params.llm_client_class``.

        Args:
            params (ProxyModelParameters): proxy model parameters

        Returns:
            Optional[Type[ProxyLLMClient]]: the imported client class, or
                ``None`` when no class is configured on the parameters
        """
        if not params.llm_client_class:
            return None
        # Deferred import keeps module import cheap; the helper validates
        # that the resolved class is a ProxyLLMClient subclass.
        from gpt.util.module_utils import import_from_checked_string

        return import_from_checked_string(params.llm_client_class, ProxyLLMClient)

    def get_llm_client_class(
            self, params: ProxyModelParameters
    ) -> Type[ProxyLLMClient]:
        """Get llm client class; subclasses override to return a concrete client."""
        client_cls = self.dynamic_llm_client_class(params)
        if not client_cls:
            raise NotImplementedError()
        return client_cls

    def load_from_params(self, params: ProxyModelParameters):
        """Instantiate the proxy client and wrap it in a ProxyModel.

        A dynamically configured client class (``params.llm_client_class``)
        takes precedence over whatever the subclass would provide.
        Returns the model twice to satisfy the (model, tokenizer) contract.
        """
        client_cls = self.dynamic_llm_client_class(params) or self.get_llm_client_class(
            params
        )
        logger.info(
            f"Load model from params: {params}, llm client class: {client_cls}"
        )
        proxy_llm_client = client_cls.new_client(params)
        model = ProxyModel(params, proxy_llm_client)
        return model, model


# class OpenAIProxyLLMModelAdapter(ProxyLLMModelAdapter):
#     def support_async(self) -> bool:
#         return True
#
#     def do_match(self, lower_model_name_or_path: Optional[str] = None):
#         return lower_model_name_or_path in ["chatgpt_proxyllm", "proxyllm"]
#
#     def get_llm_client_class(
#         self, params: ProxyModelParameters
#     ) -> Type[ProxyLLMClient]:
#         """Get llm client class"""
#         from gpt.model.llm.proxy.llms.chatgpt import OpenAILLMClient
#
#         return OpenAILLMClient
#
#     def get_async_generate_stream_function(self, model, model_path: str):
#         from gpt.model.llm.proxy.llms.chatgpt import chatgpt_generate_stream
#
#         return chatgpt_generate_stream


class TongyiProxyLLMModelAdapter(ProxyLLMModelAdapter):
    """Adapter for the Tongyi proxy LLM backend."""

    def do_match(self, lower_model_name_or_path: Optional[str] = None):
        # Only the exact proxy identifier is claimed by this adapter.
        return "tongyi_proxyllm" == lower_model_name_or_path

    def get_llm_client_class(
            self, params: ProxyModelParameters
    ) -> Type[ProxyLLMClient]:
        """Return the Tongyi client implementation (imported lazily)."""
        from gpt.model.llm.proxy.llms.tongyi import TongyiLLMClient

        return TongyiLLMClient

    def get_generate_stream_function(self, model, model_path: str):
        """Return the synchronous streaming generation function for Tongyi."""
        from gpt.model.llm.proxy.llms.tongyi import tongyi_generate_stream

        return tongyi_generate_stream


class OllamaLLMModelAdapter(ProxyLLMModelAdapter):
    """Adapter for the Ollama proxy LLM backend."""

    def do_match(self, lower_model_name_or_path: Optional[str] = None):
        # Only the exact proxy identifier is claimed by this adapter.
        return "ollama_proxyllm" == lower_model_name_or_path

    def get_llm_client_class(
            self, params: ProxyModelParameters
    ) -> Type[ProxyLLMClient]:
        """Return the Ollama client implementation (imported lazily)."""
        from gpt.model.llm.proxy.llms.ollama import OllamaLLMClient

        return OllamaLLMClient

    def get_generate_stream_function(self, model, model_path: str):
        """Return the synchronous streaming generation function for Ollama."""
        from gpt.model.llm.proxy.llms.ollama import ollama_generate_stream

        return ollama_generate_stream


# class SiliconFlowProxyLLMModelAdapter(ProxyLLMModelAdapter):
#     """SiliconFlow proxy LLM model adapter.
#
#     See Also: `SiliconFlow Documentation <https://docs.siliconflow.cn/quickstart>`_
#     """
#
#     def support_async(self) -> bool:
#         return True
#
#     def do_match(self, lower_model_name_or_path: Optional[str] = None):
#         return lower_model_name_or_path == "siliconflow_proxyllm"
#
#     def get_llm_client_class(
#             self, params: ProxyModelParameters
#     ) -> Type[ProxyLLMClient]:
#         from gpt.model.llm.proxy.llms.siliconflow import SiliconFlowLLMClient
#
#         return SiliconFlowLLMClient
#
#     def get_async_generate_stream_function(self, model, model_path: str):
#         from gpt.model.llm.proxy.llms.siliconflow import siliconflow_generate_stream
#
#         return siliconflow_generate_stream


# Register the proxy adapters so the adapter registry can match and load
# their model types at startup.
register_model_adapter(TongyiProxyLLMModelAdapter)
register_model_adapter(OllamaLLMModelAdapter)
