import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
from tqdm import tqdm
import time
from typing import Literal
from openai import OpenAI
# Set OpenAI's API key and API base to use vLLM's API server.
openai_api_key = "EMPTY"
openai_api_base = "http://127.0.0.1:8006/v1"

class qwen2ForOpenAI(object):
    """
    Intent classifier ("tochat" / "resource") backed by an OpenAI-compatible
    chat-completions endpoint (e.g. a vLLM server).
    """

    def __init__(self, template_path: str, model_name: str = "xiaoxingv2"):
        """
        Args:
            template_path: Path to the prompt-template text file.
            model_name: Name of the deployed model to query.
        """
        # OpenAI-compatible client; endpoint and key come from the
        # module-level openai_api_key / openai_api_base constants.
        self.client = OpenAI(
            api_key=openai_api_key,
            base_url=openai_api_base
        )
        self.model_name = model_name
        self.template_path = template_path
        self.INSTRUCTION = self._load_template(template_path)

        # Map the original HF generation parameters onto OpenAI API parameters.
        self.generation_config = {
            "temperature": 0.1,        # near-greedy, approximates do_sample=False
            "max_tokens": 5,           # matches max_new_tokens=5
            # No exact OpenAI equivalent of repetition_penalty; 0.18 is an
            # empirical approximation of repetition_penalty=1.1.
            "frequency_penalty": 0.18
        }

    def _load_template(self, template_path: str) -> str:
        """Read the instruction template (kept identical to the original impl)."""
        with open(template_path, 'r', encoding='utf-8') as f:
            instruction = f.read().strip()
            print(instruction)
        return instruction

    def build_chat_messages(self, user_input: str) -> list:
        """Build the OpenAI-compatible message list for one classification."""
        return [
            {
                "role": "system",
                "content": "你是一个用户意图分析专家，主要用于识别用户输入的意图并返回对应的分类标签。"
            },
            {
                "role": "user",
                "content": f"{self.INSTRUCTION}\n{user_input}"
            }
        ]

    def classify(self, text: str) -> Literal["resource", "tochat"]:
        """Classify `text`; on API failure falls back to the default label."""
        messages = self.build_chat_messages(text)
        try:
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=messages,
                **self.generation_config
            )
            # Extract the generated text.
            response_text = response.choices[0].message.content
        except Exception as e:
            # Best-effort: log and let postprocess() map "" to "tochat".
            print(f"API调用失败: {str(e)}")
            response_text = ""
        return self.postprocess(response_text)

    def postprocess(self, text: str) -> Literal["resource", "tochat"]:
        """Normalize model output; anything but "resource" maps to "tochat"."""
        clean = text.strip().lower().replace("`", "").replace(" ", "").replace("\n", "")
        # BUGFIX(cleanup): the original if/elif/else returned "tochat" for both
        # the matched "tochat" case and the fallback — a single check suffices.
        return "resource" if clean == "resource" else "tochat"



class qwen2ForInference(object):
    """
    Classify user input with a local Qwen2 model ("tochat" / "resource").
    """

    def __init__(self, model_path, template_path):
        """
        Args:
            model_path: Path to the HF model directory.
            template_path: Path to the prompt-template text file.
        """
        self.model_path = model_path
        self.template_path = template_path
        # Load the model with an explicit dtype and a pinned device.
        self.model = AutoModelForCausalLM.from_pretrained(
            self.model_path,
            torch_dtype=torch.bfloat16,  # explicit precision
            device_map="cuda:7"
        )
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_path)
        # Force a deterministic, short generation regardless of the model's
        # bundled generation config.
        self.generation_config = GenerationConfig(
            do_sample=False,      # greedy decoding
            max_new_tokens=5,
            pad_token_id=self.tokenizer.pad_token_id or 2,  # fall back to 2 if unset
            eos_token_id=self.tokenizer.eos_token_id or 2,
            repetition_penalty=1.1
        )
        self.INSTRUCTION = self._load_template(self.template_path)

    def _load_template(self, template_path):
        """Read the instruction template (must match training exactly)."""
        with open(template_path, 'r', encoding='utf-8') as f:
            instruction = f.read().strip()  # keep internal spacing/newlines
            print(instruction)
        return instruction

    def build_chat_messages(self, user_input):
        """Build the chat structure exactly as used at training time."""
        return [
            {
                "role": "system",
                "content": "你是一个用户意图分析专家，主要用于识别用户输入的意图并返回对应的分类标签。"
            },
            {
                "role": "user",
                "content": f"{self.INSTRUCTION}\n{user_input}"
            }
        ]

    def classify(self, text):
        """Run one greedy generation and map the output to a label."""
        messages = self.build_chat_messages(text)

        # Render the Qwen chat template to a prompt string.
        encoded = self.tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True,
        )

        # Tokenize the prompt (attention mask explicitly requested).
        inputs = self.tokenizer(
            encoded,
            return_tensors="pt",
            return_attention_mask=True
        ).to(self.model.device)

        # BUGFIX: pass the attention mask too — the original forwarded only
        # inputs.input_ids, which triggers an HF warning and can mis-handle
        # padded inputs even though the mask was computed above.
        output = self.model.generate(
            **inputs,
            generation_config=self.generation_config
        )

        # Decode only the newly generated tokens (strip the prompt prefix).
        response = self.tokenizer.decode(
            output[0][inputs.input_ids.shape[-1]:],
            skip_special_tokens=True
        )
        return self.postprocess(response)

    def postprocess(self, text):
        """Strict post-processing: only "resource" or "tochat" may be returned."""
        clean = text.strip().lower().replace("`", "").replace(" ", "").replace("\n", "")
        # Exact matches first.
        if clean == "resource":
            return "resource"
        elif clean == "tochat":
            return "tochat"
        # Keyword heuristic for free-form output: analytical questions -> "tochat".
        if any(kw in clean for kw in ["意义", "原因", "影响", "分析", "论述", "解释", "如何", "为什么", "怎么样"]):
            return "tochat"
        return "resource"

class qwen3ForInference(object):
    """
    Classify user input with a local Qwen3 model ("tochat" / "resource").
    """

    def __init__(self, model_path, template_path):
        """Load model and tokenizer, then read the prompt template."""
        self.model_path = model_path
        self.template_path = template_path
        # Explicit dtype and pinned device for the local checkpoint.
        self.model = AutoModelForCausalLM.from_pretrained(
            self.model_path,
            torch_dtype=torch.bfloat16,
            device_map="cuda:6"
        )
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_path)
        # Greedy, short generation config. NOTE: classify() currently passes
        # max_new_tokens directly instead of using this object.
        self.generation_config = GenerationConfig(
            do_sample=False,
            max_new_tokens=5,
        )
        self.INSTRUCTION = self._load_template(self.template_path)

    def _load_template(self, template_path):
        """Read the instruction template (must match training exactly)."""
        with open(template_path, 'r', encoding='utf-8') as f:
            instruction = f.read().strip()
            print(instruction)
        return instruction

    def build_chat_messages(self, user_input):
        """Build the chat structure exactly as used at training time."""
        return [
            {
                "role": "system",
                "content": "你是一个用户意图分析专家，主要用于识别用户输入的意图并返回对应的分类标签。"
            },
            {
                "role": "user",
                "content": f"{self.INSTRUCTION}\n{user_input}"
            }
        ]

    def classify(self, text):
        """Generate a short completion and map it to a label."""
        # Render the Qwen chat template with thinking disabled.
        prompt = self.tokenizer.apply_chat_template(
            self.build_chat_messages(text),
            tokenize=False,
            add_generation_prompt=True,
            enable_thinking=False
        )

        model_inputs = self.tokenizer(
            [prompt],
            return_tensors="pt"
        ).to(self.model.device)

        generated = self.model.generate(
            **model_inputs,
            max_new_tokens=5
        )
        # Keep only the newly generated token ids.
        new_token_ids = generated[0][len(model_inputs.input_ids[0]):].tolist()

        # Split off any "thinking" segment: 151668 is Qwen3's </think> token id;
        # search from the end, and treat "not found" as "no thinking segment".
        try:
            split_at = len(new_token_ids) - new_token_ids[::-1].index(151668)
        except ValueError:
            split_at = 0
        thinking_content = self.tokenizer.decode(
            new_token_ids[:split_at], skip_special_tokens=True
        ).strip("\n")
        print(thinking_content)
        answer = self.tokenizer.decode(
            new_token_ids[split_at:], skip_special_tokens=True
        ).strip("\n")

        return self.postprocess(answer)

    def postprocess(self, text):
        """Strict post-processing: only "resource" or "tochat" may be returned."""
        clean = text.strip().lower().replace("`", "").replace(" ", "").replace("\n", "")
        # Exact matches pass straight through.
        if clean in ("resource", "tochat"):
            return clean
        # Keyword heuristic for free-form output: analytical questions -> "tochat".
        analytic_markers = ["意义", "原因", "影响", "分析", "论述", "解释", "如何", "为什么", "怎么样"]
        if any(marker in clean for marker in analytic_markers):
            return "tochat"
        return "resource"

# Usage example
if __name__ == "__main__":
    # BUGFIX: qwen2ForOpenAI.__init__ accepts only (template_path, model_name);
    # the original call also passed api_key/base_url kwargs, which raised
    # TypeError at startup. The endpoint and key come from the module-level
    # openai_api_key / openai_api_base constants.
    classifier = qwen2ForOpenAI(
        template_path="./configs/prompt_template.txt",
        model_name="qwen2_5"  # adjust to the actually deployed model name
    )

    test_text = "帮我解释一下量子力学的基本原理"
    print(classifier.classify(test_text))  # expected output: tochat

# # 测试用例
# if __name__ == "__main__":
#     # 配置参数
#     # model_path = f"/opt/zy-cx/zyd/Qwen3-4B"
#     model_path = "/opt/zy-cx/zyd/xiaoxing_llm/saves/Qwen3-4B-Instruct/lora/train_2025-06-13-17-15-28"
#     template_path = "/opt/zy-cx/zyd/xiaoxing_llm/preprocess/prompt_template.txt"
#     qf = qwen3ForInference(model_path=model_path, template_path=template_path)
#     test_samples = [
#         "风姿物语电子书",
#         "如何理解机器学习中的过拟合现象",
#         "请提供2024年ICLR最佳论文PDF"
#     ]
    
#     for sample in tqdm(test_samples):
#         print(f"输入：{sample}")
#         s_t = time.time()
#         print(f"分类结果：{qf.classify(sample)}\n")
#         e_t = time.time()
#         print(f"time cost: {e_t - s_t}")
