import json
import os
import uuid
import torch
import transformers

from .uni_tokenizer import UniTokenizer
from .lm_inference import LanguageModelPredictor, AsyncLanguageModelPredictor
from typing import Any, Coroutine, Dict, List, Optional, Union, Generator, AsyncGenerator

import logging
from chernc.logging import build_logger
from chernc.constants import DEFAULT_LOGGER_DIR, DEFAULT_LOGGER_FILE_NAME

logger: logging.Logger = build_logger(__name__, logger_filename=DEFAULT_LOGGER_FILE_NAME, logger_dir=DEFAULT_LOGGER_DIR)

try:
    import vllm
    from vllm.lora.request import LoRARequest

except ImportError:
    logger.error("vllm module not found, please install it first `pip install vllm`.")
    # Raise SystemExit explicitly instead of calling the `exit()` builtin:
    # `exit` is injected by the `site` module and is not guaranteed to exist
    # (e.g. when the interpreter runs with -S). SystemExit(1) is what
    # exit(1) raises anyway, so behavior for importers is unchanged.
    raise SystemExit(1)


class LocalInference(LanguageModelPredictor):
    """Synchronous local inference backed by a vLLM engine.

    If ``model_path`` contains no ``config.json`` but does contain an
    ``adapter_config.json``, the path is treated as a LoRA adapter directory:
    the base model named in the adapter config is loaded instead, and the
    adapter is attached to every generation request.
    """

    def __init__(
        self,
        model_path: str,
        device: str = "cuda",
        torch_dtype: Union[str, torch.dtype] = "auto",
        enforce_eager: bool = False,
    ):
        """Load a vLLM engine (and optionally a LoRA adapter) from *model_path*.

        Args:
            model_path: Path to a full model directory, or to a LoRA adapter
                directory containing ``adapter_config.json``.
            device: Device label; stored on the instance only (vLLM manages
                device placement itself).
            torch_dtype: dtype forwarded to vLLM as a string, e.g. ``"auto"``.
            enforce_eager: When True, vLLM skips CUDA graph capture.

        Raises:
            ValueError: If the adapter config has no ``base_model_name_or_path``.
        """
        self.model_path: str = model_path
        self.device: str = device
        self.torch_dtype: Union[str, torch.dtype] = torch_dtype
        self.enforce_eager: bool = enforce_eager
        self.lora_adapter: Optional[LoRARequest] = None

        # No config.json present -> treat the path as a LoRA adapter directory.
        if os.path.exists(model_path) and not os.path.exists(os.path.join(model_path, "config.json")):
            logger.info("No supported config format found in %s, enable lora", model_path)
            # Read the adapter config to resolve the base model path.
            # A context manager guarantees the file handle is closed (the
            # previous json.load(open(...)) leaked it).
            with open(
                os.path.join(model_path, "adapter_config.json"),
                "r",
                encoding="utf-8",
            ) as f:
                adapter_config = json.load(f)
            base_model_path = adapter_config.get("base_model_name_or_path", None)
            if base_model_path is None:
                raise ValueError(f"No base model path found in {model_path}")

            self.lora_adapter = LoRARequest("lora_adapter", 1, model_path)
            self.model_path = base_model_path
            enable_lora = True
        else:
            enable_lora = False
        self.llm = vllm.LLM(
            model=self.model_path,
            trust_remote_code=True,
            enable_prefix_caching=True,
            dtype=str(torch_dtype),
            enable_lora=enable_lora,
            enforce_eager=enforce_eager,
        )
        self.tokenizer = UniTokenizer(self.model_path)

    def token_num(self, text: Union[str, List[Dict[str, str]]]) -> int:
        """Return the token count of *text* (plain string or chat messages)."""
        if self.tokenizer is None:
            raise Exception("The tokenizer is None")
        return self.tokenizer.token_num(text=text)

    def chat(
        self,
        messages: List[Dict[str, str]],
        max_length: int = 2048,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
    ) -> Optional[str]:
        """Generate one chat completion for *messages*.

        Args:
            messages: Chat history as a list of role/content dicts, rendered
                through the tokenizer's chat template.
            max_length: Maximum number of tokens to generate.
            temperature: Sampling temperature; vLLM's default when None.
            top_p: Nucleus-sampling threshold; vLLM's default when None.

        Returns:
            The generated text for the (single) prompt.
        """
        # Guard mirrors token_num/AsyncLocalInference.chat for consistency.
        if self.tokenizer is None:
            raise Exception("The tokenizer is None")
        text = self.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        prompts = [text]
        # Only forward sampling knobs the caller actually set, so vLLM's own
        # defaults apply otherwise.
        gen_params = {}
        if temperature is not None:
            gen_params["temperature"] = temperature
        if top_p is not None:
            gen_params["top_p"] = top_p
        sampling_params = vllm.SamplingParams(max_tokens=max_length, **gen_params)
        if self.lora_adapter is not None:
            outputs = self.llm.generate(prompts, sampling_params, use_tqdm=False, lora_request=self.lora_adapter)  # type: ignore
        else:
            outputs = self.llm.generate(prompts, sampling_params, use_tqdm=False)  # type: ignore

        # One prompt was submitted, so return its first candidate's text.
        return outputs[0].outputs[0].text

    def chat_stream(
        self,
        messages: List[Dict[str, str]],
        max_length: int = 2048,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
    ) -> Generator[Optional[str], None, None]:
        """Streaming is not available on the synchronous engine; use
        AsyncLocalInference.chat_stream instead."""
        raise NotImplementedError("LocalInference does not support chat_stream")


class AsyncLocalInference(AsyncLanguageModelPredictor):
    """Asynchronous local inference backed by a vLLM AsyncLLMEngine.

    Mirrors LocalInference (including LoRA-adapter resolution when
    ``model_path`` lacks a ``config.json``) but exposes awaitable ``chat``
    and an async-generator ``chat_stream``.
    """

    def __init__(
        self,
        model_path: str,
        device: str = "cuda",
        torch_dtype: Union[str, torch.dtype] = "auto",
        enforce_eager: bool = False,
        rate_limit: int = 1024,
    ):
        """Build the async vLLM engine (and optionally a LoRA adapter).

        Args:
            model_path: Path to a full model directory, or to a LoRA adapter
                directory containing ``adapter_config.json``.
            device: Device label; stored on the instance only (vLLM manages
                device placement itself).
            torch_dtype: dtype forwarded to vLLM as a string, e.g. ``"auto"``.
            enforce_eager: When True, vLLM skips CUDA graph capture.
            rate_limit: Stored for callers to throttle concurrent requests;
                not consumed inside this class.

        Raises:
            ValueError: If the adapter config has no ``base_model_name_or_path``.
        """
        self.model_path: str = model_path
        self.device: str = device
        self.torch_dtype: Union[str, torch.dtype] = torch_dtype
        self.enforce_eager: bool = enforce_eager
        self.lora_adapter: Optional[LoRARequest] = None

        # No config.json present -> treat the path as a LoRA adapter directory.
        if os.path.exists(model_path) and not os.path.exists(os.path.join(model_path, "config.json")):
            logger.info("No supported config format found in %s, enable lora", model_path)
            # Read the adapter config to resolve the base model path.
            # A context manager guarantees the file handle is closed (the
            # previous json.load(open(...)) leaked it).
            with open(
                os.path.join(model_path, "adapter_config.json"),
                "r",
                encoding="utf-8",
            ) as f:
                adapter_config = json.load(f)
            base_model_path = adapter_config.get("base_model_name_or_path", None)
            if base_model_path is None:
                raise ValueError(f"No base model path found in {model_path}")

            self.lora_adapter = LoRARequest("lora_adapter", 1, model_path)
            self.model_path = base_model_path
            enable_lora = True
        else:
            enable_lora = False
        engine_args = vllm.AsyncEngineArgs(
            model=self.model_path,
            trust_remote_code=True,
            enable_prefix_caching=True,
            dtype=str(torch_dtype),
            enable_lora=enable_lora,
            enforce_eager=enforce_eager,
        )
        self.llm = vllm.AsyncLLMEngine.from_engine_args(engine_args)
        self.tokenizer = UniTokenizer(self.model_path)
        self.rate_limit = rate_limit

    def token_num(self, text: Union[str, List[Dict[str, str]]]) -> int:
        """Return the token count of *text* (plain string or chat messages)."""
        if self.tokenizer is None:
            raise Exception("The tokenizer is None")
        # Keyword call matches LocalInference.token_num for consistency.
        return self.tokenizer.token_num(text=text)

    async def chat(
        self,
        messages: List[Dict[str, str]],
        max_length: int = 2048,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
    ) -> Optional[str]:
        """Generate one chat completion for *messages* asynchronously.

        Args:
            messages: Chat history as a list of role/content dicts, rendered
                through the tokenizer's chat template.
            max_length: Maximum number of tokens to generate.
            temperature: Sampling temperature; vLLM's default when None.
            top_p: Nucleus-sampling threshold; vLLM's default when None.

        Returns:
            The final accumulated text of the request.
        """
        if self.tokenizer is None:
            raise Exception("The tokenizer is None")
        text = self.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        # NOTE(review): vLLM's AsyncLLMEngine.generate documents a single
        # prompt, not a list — confirm the installed vLLM version accepts
        # a list here before changing it.
        prompts = [text]
        # Only forward sampling knobs the caller actually set, so vLLM's own
        # defaults apply otherwise.
        gen_params = {}
        if temperature is not None:
            gen_params["temperature"] = temperature
        if top_p is not None:
            gen_params["top_p"] = top_p
        sampling_params = vllm.SamplingParams(max_tokens=max_length, **gen_params)
        # Each in-flight request needs a unique id for the async engine.
        request_id = uuid.uuid4().hex
        if self.lora_adapter is not None:
            results_generator = self.llm.generate(prompts, sampling_params, request_id=request_id, lora_request=self.lora_adapter)  # type: ignore
        else:
            results_generator = self.llm.generate(prompts, sampling_params, request_id=request_id)  # type: ignore

        # Each RequestOutput carries the full text so far; keep only the last.
        accumulate_text = ""
        async for request_output in results_generator:
            accumulate_text = request_output.outputs[0].text

        return accumulate_text

    async def chat_stream(
        self,
        messages: List[Dict[str, str]],
        max_length: int = 2048,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
    ) -> AsyncGenerator[Optional[str], None]:
        """Stream a chat completion for *messages*, yielding text deltas.

        Args:
            messages: Chat history as a list of role/content dicts, rendered
                through the tokenizer's chat template.
            max_length: Maximum number of tokens to generate.
            temperature: Sampling temperature; vLLM's default when None.
            top_p: Nucleus-sampling threshold; vLLM's default when None.

        Yields:
            The newly generated suffix since the previous yield (vLLM emits
            the cumulative text each step; this slices off the new part).
        """
        if self.tokenizer is None:
            raise Exception("The tokenizer is None")
        text = self.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        # NOTE(review): see chat() — AsyncLLMEngine.generate documents a
        # single prompt; confirm list support in the installed vLLM version.
        prompts = [text]
        # Only forward sampling knobs the caller actually set, so vLLM's own
        # defaults apply otherwise.
        gen_params = {}
        if temperature is not None:
            gen_params["temperature"] = temperature
        if top_p is not None:
            gen_params["top_p"] = top_p
        sampling_params = vllm.SamplingParams(max_tokens=max_length, **gen_params)

        # Each in-flight request needs a unique id for the async engine.
        request_id = uuid.uuid4().hex
        if self.lora_adapter is not None:
            results_generator = self.llm.generate(
                prompts, sampling_params, request_id=request_id, lora_request=self.lora_adapter  # type: ignore
            )
        else:
            results_generator = self.llm.generate(prompts, sampling_params, request_id=request_id)  # type: ignore

        previous_text = ""
        async for request_output in results_generator:
            text = request_output.outputs[0].text
            yield text[len(previous_text) :]
            previous_text = text