import copy
import time
from typing import Dict, List, Union

from transformers import AutoTokenizer
from vllm import LLM, SamplingParams

from utils.utils import timer


class LLMClient(object):
    """Thin wrapper around a vLLM engine with chat-template batch inference."""

    @timer
    def __init__(
        self,
        model_path: str = "Qwen/Qwen2.5-7B-Instruct",
        tensor_parallel_size: int = 1,
        gpu_memory_utilization: float = 0.9,
        max_model_len: int = 32768,
        dtype: str = "bfloat16",
    ):
        """
        Initialize the model and tokenizer.

        :param model_path: HuggingFace model id or local path
        :param tensor_parallel_size: number of GPUs for tensor parallelism (e.g. 2/4/8)
        :param gpu_memory_utilization: fraction of GPU memory vLLM may reserve
        :param max_model_len: maximum model context length
        :param dtype: compute precision, one of "bfloat16", "float16"
        """
        print(f"Loading model from: {model_path}")

        # Load the tokenizer (used for chat templating and stop-token discovery).
        self.tokenizer = AutoTokenizer.from_pretrained(
            model_path,
            trust_remote_code=True,
            use_fast=False,  # vLLM recommends use_fast=False
        )

        # Initialize the vLLM engine.
        self.model = LLM(
            model=model_path,
            tokenizer=model_path,
            tensor_parallel_size=tensor_parallel_size,
            trust_remote_code=True,
            gpu_memory_utilization=gpu_memory_utilization,
            max_model_len=max_model_len,
            dtype=dtype,
        )

        # Automatically collect stop token ids.
        self._stop_token_ids = self._get_stop_token_ids()

        # Default generation parameters. Treated as a read-only template:
        # infer() works on a copy, never mutates this shared instance.
        self.sampling_params = SamplingParams(
            temperature=0.7,  # diversity
            top_p=0.9,  # nucleus sampling
            top_k=-1,  # -1 disables top-k
            repetition_penalty=1.1,  # repetition penalty
            max_tokens=2048,  # maximum generation length
            stop_token_ids=self._stop_token_ids,
            include_stop_str_in_output=False,  # do not include the stop string in output
            skip_special_tokens=True,  # skip special tokens when decoding
        )

    @timer
    def _get_stop_token_ids(self) -> List[int]:
        """Return the deduplicated token ids at which generation should stop."""
        stop_token_ids = []

        # Add eos_token_id. Must compare against None explicitly:
        # an eos id of 0 is valid but falsy, and a plain truthiness
        # check would silently drop it.
        if self.tokenizer.eos_token_id is not None:
            stop_token_ids.append(self.tokenizer.eos_token_id)

        # Add the token id(s) of "<|im_end|>" (Qwen-specific chat terminator).
        try:
            im_end_ids = self.tokenizer.encode("<|im_end|>", add_special_tokens=False)
            if im_end_ids:
                stop_token_ids.extend(im_end_ids)
        except Exception:
            # Best-effort: tokenizers without this token simply skip it.
            pass

        # Deduplicate.
        return list(set(stop_token_ids))

    @timer
    def infer(
        self,
        inputs: Union[List[str], List[List[Dict[str, str]]]],
        temperature: float = None,
        max_tokens: int = None,
    ) -> List[str]:
        """
        Batch inference.

        :param inputs: either a list of strings, or a list of conversations
            (each a list of role/content dicts).
            Example 1: ["hello", "tell me a story"]
            Example 2: [[{"role": "user", "content": "hello"}], ...]
        :param temperature: per-call override of the default temperature
        :param max_tokens: per-call override of the default max_tokens
        :return: list of generated texts, one per input
        :raises ValueError: if inputs is neither of the two supported shapes
        """
        # Empty batch: nothing to do (also guards the inputs[0] probe below).
        if not inputs:
            return []

        # Apply per-call overrides on a copy so they do not leak into the
        # shared default sampling params used by subsequent calls.
        sampling_params = copy.deepcopy(self.sampling_params)
        if temperature is not None:
            sampling_params.temperature = temperature
        if max_tokens is not None:
            sampling_params.max_tokens = max_tokens

        # Normalize the input format.
        if isinstance(inputs[0], str):
            # Plain-text inputs -> wrap each as a single user message.
            conversations = [[{"role": "user", "content": text}] for text in inputs]
        elif isinstance(inputs[0], list) and isinstance(inputs[0][0], dict):
            # Already in conversation format.
            conversations = inputs
        else:
            raise ValueError(
                "inputs must be List[str] or List[List[{'role':..., 'content':...}]]"
            )

        # Build prompts with the tokenizer's chat template.
        prompts = self.tokenizer.apply_chat_template(
            conversations,
            tokenize=False,
            add_generation_prompt=True,  # ensure the "assistant\n" opener is appended
        )

        # Run vLLM generation.
        outputs = self.model.generate(
            prompts,
            sampling_params=sampling_params,
            use_tqdm=False,  # consider True for large batches
        )

        # Extract generated text, preserving input order.
        responses = []
        for output in outputs:
            if output.outputs:
                responses.append(output.outputs[0].text.strip())
            else:
                responses.append("")
        return responses

    @timer
    def chat(
        self,
        query: str,
        history: List[Dict[str, str]] = None,
        temperature: float = 0.7,
        max_tokens: int = 1024,
    ) -> str:
        """
        Convenience interface for a single chat turn.

        :param query: user input
        :param history: prior turns as
            [{"role": "user", "content": ...}, {"role": "assistant", "content": ...}]
        :param temperature: sampling temperature
        :param max_tokens: maximum generation length
        :return: the model's reply
        """
        history = history or []
        conversation = history + [{"role": "user", "content": query}]
        response = self.infer(
            [conversation], temperature=temperature, max_tokens=max_tokens
        )
        return response[0]


if __name__ == "__main__":
    # Smoke test: load the model and run a small batch of queries,
    # reporting total wall-clock time in minutes.
    model_name = "Qwen/Qwen2.5-7B-Instruct"
    started_at = time.time()

    client = LLMClient(model_name)
    queries = ["吉利汽车座椅按摩", "吉利汽车语音组手唤醒", "自动驾驶功能介绍"]
    answers = client.infer(queries)
    print(answers)

    elapsed_minutes = (time.time() - started_at) / 60
    print("cost time: " + str(elapsed_minutes))
