import logging
import traceback
from typing import AsyncIterator, Dict, Iterator, List, Optional

from gpt.configs.model_config import get_device
from gpt.model.adapter.llm_adapter import LLMModelAdapter
from gpt.model.adapter.loader import ModelLoader, get_model_real_path
from gpt.model.adapter.model_adapter import get_llm_model_adapter
from gpt.model.cluster.worker_base import ModelWorkerBase
from gpt.model.interface.llm import ModelExtraMetaData, ModelMetadata
from gpt.model.model_out import ModelOutput
from gpt.model.parameters import ModelParameters
from gpt.util.model_utils import _clear_model_cache
from gpt.util.parameter_utils import EnvArgumentParser

logger = logging.getLogger(__name__)


def _try_import_torch():
    """Lazily import torch, recording availability in ``_torch_imported``.

    Deferring the import keeps worker startup cheap when torch is not
    needed. ``_handle_exception`` later reads the module-level
    ``_torch_imported`` flag before touching ``torch.cuda``.
    """
    global torch
    global _torch_imported
    try:
        import torch

        _torch_imported = True
    except ImportError:
        # Bug fix: the flag was previously left undefined when torch was
        # missing, which made _handle_exception raise NameError instead of
        # producing an error ModelOutput. Always define it.
        _torch_imported = False


def _try_to_count_token(prompt, tokenizer, model):
    """Best-effort token count for ``prompt``; returns -1 when counting fails.

    Proxy models count through their own client; anything else is assumed to
    be a huggingface-style tokenizer. Any failure (including an unavailable
    proxy module) is logged and reported as -1 rather than raised.
    """
    try:
        from gpt.model.llm.proxy.proxy_model import ProxyModel

        counter = (
            model.count_token
            if isinstance(model, ProxyModel)
            # Only support huggingface model now
            else (lambda text: len(tokenizer(text).input_ids[0]))
        )
        return counter(prompt)
    except Exception as e:
        logger.warning(f"Count token error, detail: {e}, return -1")
        return -1


class DefaultModelWorker(ModelWorkerBase):
    """Default worker that loads an LLM locally and serves (a)sync generation.

    The worker is configured in two steps: :meth:`load_worker` resolves the
    real model path and selects the matching :class:`LLMModelAdapter`, then
    :meth:`start` parses parameters and loads the model/tokenizer into memory.
    Generation is delegated to the (stream) functions supplied by the adapter.
    """

    def __init__(self) -> None:
        self.model = None
        self.tokenizer = None
        self._model_params = None
        self.llm_adapter: Optional[LLMModelAdapter] = None
        self._support_async = False
        # Bug fix: the attributes below were previously assigned only inside
        # load_worker(), so calling e.g. generate() or get_model_metadata()
        # before load_worker() raised AttributeError. Initialize them to
        # safe defaults here.
        self._support_generate_func = False
        self.model_name: Optional[str] = None
        self.model_path: Optional[str] = None
        self.param_cls = None
        # Default model context length; refined by load_worker()/start().
        self.context_len = 2048

    def load_worker(self, model_name: str, model_path: str, **kwargs) -> None:
        """Resolve the model path and select the adapter for this model.

        Args:
            model_name: Logical name of the model to serve.
            model_path: Filesystem path (or alias) of the model weights; a
                trailing slash is stripped before resolution.
            **kwargs: Optional ``model_type`` to force a specific adapter.
        """
        if model_path.endswith("/"):
            model_path = model_path[:-1]
        model_path = get_model_real_path(model_name, model_path)
        self.model_name = model_name
        self.model_path = model_path

        model_type = kwargs.get("model_type")

        self.llm_adapter = get_llm_model_adapter(
            self.model_name,
            self.model_path,
            model_type=model_type,
        )
        model_type = self.llm_adapter.model_type()
        self.param_cls = self.llm_adapter.model_param_class(model_type)
        self._support_async = self.llm_adapter.support_async()
        self._support_generate_func = self.llm_adapter.support_generate_function()

        logger.info(
            f"model_name: {self.model_name}, model_path: {self.model_path}, model_param_class: {self.param_cls}"
        )

        self.ml: ModelLoader = ModelLoader(
            model_path=self.model_path, model_name=self.model_name
        )
        # Default model context len
        self.context_len = 2048

    def model_param_class(self) -> ModelParameters:
        """Return the dataclass describing this model's startup parameters."""
        return self.param_cls

    def support_async(self) -> bool:
        """Whether the underlying adapter exposes asynchronous generation."""
        return self._support_async

    def parse_parameters(
        self, command_args: Optional[List[str]] = None
    ) -> ModelParameters:
        """Build :class:`ModelParameters` from env vars and CLI arguments.

        Environment variables may use either the model-specific prefix or the
        generic ``LLM_`` prefix. When no device is configured, the auto
        detected device is used.
        """
        param_cls = self.model_param_class()
        model_args = EnvArgumentParser()
        env_prefix = EnvArgumentParser.get_env_prefix(self.model_name)
        model_type = self.llm_adapter.model_type()
        model_params: ModelParameters = model_args.parse_args_into_dataclass(
            param_cls,
            env_prefixes=[env_prefix, "LLM_"],
            command_args=command_args,
            model_name=self.model_name,
            model_path=self.model_path,
            model_type=model_type,
        )
        if hasattr(model_params, "device") and not model_params.device:
            model_params.device = get_device()
            logger.info(
                f"[DefaultModelWorker] Parameters of device is None, use {model_params.device}"
            )
        return model_params

    def start(
        self,
        model_params: Optional[ModelParameters] = None,
        command_args: Optional[List[str]] = None,
    ) -> None:
        """Load the model and tokenizer into memory.

        Also derives ``context_len``: prefer the length parsed from the model
        itself, fall back to ``max_context_size`` from the parameters, else
        keep the default.
        """
        # Lazy load torch so workers that never need it start faster.
        _try_import_torch()
        if not model_params:
            model_params = self.parse_parameters(command_args)
        self._model_params = model_params
        logger.info(f"Begin load model, model params: {model_params}")
        self.model, self.tokenizer = self.ml.loader_with_params(
            model_params, self.llm_adapter
        )
        model_max_length = self.llm_adapter.parse_max_length(
            self.model, self.tokenizer
        )
        if model_max_length:
            logger.info(
                f"Parse model max length {model_max_length} from model {self.model_name}."
            )
            self.context_len = model_max_length
        elif hasattr(model_params, "max_context_size"):
            self.context_len = model_params.max_context_size

    def stop(self) -> None:
        """Release the model and tokenizer and clear the device cache."""
        if not self.model:
            logger.warning("Model has been stopped!!")
            return
        del self.model
        del self.tokenizer
        self.model = None
        self.tokenizer = None
        _clear_model_cache(self._model_params.device)

    def generate_stream(self, params: Dict) -> Iterator[ModelOutput]:
        """Synchronously stream generation results.

        Each yielded :class:`ModelOutput` carries the full accumulated text so
        far; on failure a single error output is yielded instead of raising.
        """
        try:
            (
                params,
                model_context,
                generate_stream_func,
            ) = self._prepare_generate_stream(
                params,
            )

            previous_response = ""

            context_len = params.get("context_len") or self.context_len
            for output in generate_stream_func(
                self.model, self.tokenizer, params, get_device(), context_len
            ):
                (
                    model_output,
                    incremental_output,
                    output_str,
                ) = self._handle_output(
                    output,
                    previous_response,
                    model_context,
                )
                previous_response = output_str
                yield model_output
            print(
                f"\n\nfull stream output:\n{previous_response}\n\nmodel generate_stream params:\n{params}"
            )
        except Exception as e:
            output = self._handle_exception(e)
            yield output

    def generate(self, params: Dict) -> ModelOutput:
        """Generate non stream result.

        Uses the adapter's dedicated generate function when available,
        otherwise drains the streaming path and returns the final output.
        """
        output = None
        if self._support_generate_func:
            (
                params,
                model_context,
                generate_stream_func,
            ) = self._prepare_generate_stream(
                params,
                is_stream=False,
            )
            previous_response = ""

            output = generate_stream_func(
                self.model, self.tokenizer, params, get_device(), self.context_len
            )
            (
                model_output,
                incremental_output,
                output_str,
            ) = self._handle_output(
                output,
                previous_response,
                model_context,
            )
            return model_output
        else:
            for out in self.generate_stream(params):
                output = out
            return output

    def count_token(self, prompt: str) -> int:
        """Count tokens in ``prompt``; returns -1 when counting fails."""
        return _try_to_count_token(prompt, self.tokenizer, self.model)

    async def async_count_token(self, prompt: str) -> int:
        """Asynchronously count tokens; only proxy models are supported.

        Raises:
            NotImplementedError: when the model is not a proxy model with a
                proxy client.
        """
        # TODO if we deploy the model by vllm, it can't work, we should run
        #  transformer _try_to_count_token to async
        from gpt.model.llm.proxy.proxy_model import ProxyModel

        if isinstance(self.model, ProxyModel) and self.model.proxy_llm_client:
            return await self.model.proxy_llm_client.count_token(
                self.model.proxy_llm_client.default_model, prompt
            )
        raise NotImplementedError

    def get_model_metadata(self, params: Dict) -> ModelMetadata:
        """Return metadata (name, context length, prompt roles) for the model."""
        ext_metadata = ModelExtraMetaData(
            prompt_roles=self.llm_adapter.get_prompt_roles(),
        )
        return ModelMetadata(
            model=self.model_name,
            context_length=self.context_len,
            ext_metadata=ext_metadata,
        )

    async def async_get_model_metadata(self, params: Dict) -> ModelMetadata:
        """Async wrapper around :meth:`get_model_metadata`."""
        return self.get_model_metadata(params)

    def embeddings(self, params: Dict) -> List[List[float]]:
        """Embeddings are not supported by this worker."""
        raise NotImplementedError

    # Type-hint fix: this is an async generator, so it yields an
    # AsyncIterator, not an Iterator.
    async def async_generate_stream(
        self, params: Dict
    ) -> AsyncIterator[ModelOutput]:
        """Asynchronously stream generation results (see generate_stream)."""
        try:
            (
                params,
                model_context,
                generate_stream_func,
            ) = self._prepare_generate_stream(
                params,
            )

            previous_response = ""
            context_len = params.get("context_len") or self.context_len

            async for output in generate_stream_func(
                self.model, self.tokenizer, params, get_device(), context_len
            ):
                (
                    model_output,
                    incremental_output,
                    output_str,
                ) = self._handle_output(
                    output,
                    previous_response,
                    model_context,
                )

                previous_response = output_str
                yield model_output
            print(
                f"\n\nfull stream output:\n{previous_response}\n\nmodel generate_stream params:\n{params}"
            )
        except Exception as e:
            output = self._handle_exception(e)
            yield output

    async def async_generate(self, params: Dict) -> ModelOutput:
        """Asynchronously generate a non-stream result (see generate)."""
        if self._support_generate_func:
            (
                params,
                model_context,
                generate_stream_func,
            ) = self._prepare_generate_stream(
                params,
                is_stream=False,
            )
            previous_response = ""

            output = await generate_stream_func(
                self.model, self.tokenizer, params, get_device(), self.context_len
            )
            (
                model_output,
                incremental_output,
                output_str,
            ) = self._handle_output(
                output,
                previous_response,
                model_context,
            )
            return model_output
        else:
            output = None
            async for out in self.async_generate_stream(params):
                output = out
            return output

    def _prepare_generate_stream(self, params: Dict, is_stream=True):
        """Adapt ``params`` and pick the generation function to call.

        Selects sync/async and stream/non-stream variants based on the
        adapter's capabilities and ``is_stream``.

        Returns:
            Tuple of (adapted params, model context, generation callable).
        """
        params, model_context = self.llm_adapter.model_adaptation(
            params,
            self.model_name,
            self.model_path,
            self.tokenizer,
            prompt_template=self.ml.prompt_template,
        )
        if self.support_async():
            if not is_stream and self.llm_adapter.support_generate_function():
                func = self.llm_adapter.get_async_generate_function(
                    self.model, self.model_path
                )
                func_type = "async generate"
                logger.info(
                    "current generate function is asynchronous generate function"
                )
            else:
                func = self.llm_adapter.get_async_generate_stream_function(
                    self.model, self.model_path
                )
                func_type = "async generate stream"
                logger.info(
                    "current generate stream function is asynchronous generate stream function"
                )
        else:
            if not is_stream and self.llm_adapter.support_generate_function():
                func = self.llm_adapter.get_generate_function(
                    self.model, self.model_path
                )
                func_type = "generate"
                logger.info(
                    "current generate function is synchronous generate function"
                )
            else:
                func = self.llm_adapter.get_generate_stream_function(
                    self.model, self.model_path
                )
                func_type = "generate stream"
                logger.info(
                    "current generate stream function is synchronous generate stream function"
                )
        str_prompt = params.get("prompt")
        if not str_prompt:
            str_prompt = params.get("string_prompt")
        print(
            f"llm_adapter: {str(self.llm_adapter)}\n\nmodel prompt: \n\n{str_prompt}\n\n{func_type} output:\n"
        )

        return params, model_context, func

    @staticmethod
    def _handle_output(
        output,
        previous_response,
        model_context,
    ):
        """Normalize one raw generation output into a ModelOutput.

        ``output`` may be a dict (huggingface-style), a ModelOutput, or a
        plain string containing the full text so far.

        Returns:
            Tuple of (ModelOutput, incremental text since the previous
            response, full text so far).
        """
        finish_reason = None
        usage = None
        error_code = 0
        if isinstance(output, dict):
            finish_reason = output.get("finish_reason")
            usage = output.get("usage")
            output = output["text"]
            if finish_reason is not None:
                logger.info(f"finish_reason: {finish_reason}")
        elif isinstance(output, ModelOutput):
            finish_reason = output.finish_reason
            usage = output.usage
            error_code = output.error_code
            output = output.text
        incremental_output = output[len(previous_response):]
        print(incremental_output, end="", flush=True)

        model_output = ModelOutput(
            text=output,
            error_code=error_code,
            model_context=model_context,
            finish_reason=finish_reason,
            usage=usage,
        )
        return model_output, incremental_output, output

    @staticmethod
    def _handle_exception(e):
        """Convert an exception raised during generation to an error output."""
        # Check if the exception is a torch.cuda.CudaError and if torch was imported.
        if _torch_imported and isinstance(e, torch.cuda.CudaError):
            model_output = ModelOutput(
                text="**GPU OutOfMemory, Please Refresh.**", error_code=1
            )
        else:
            msg = traceback.format_exc()
            logger.error(f"Model inference error, detail: {msg}")
            model_output = ModelOutput(
                text=f"**LLMServer Generate Error, Please CheckErrorInfo.**: {e}",
                error_code=1,
            )
        return model_output
