import torch
import torch.nn as nn
import json
import os
from torch.utils.data import DataLoader
from tqdm import tqdm
from sklearn.metrics import precision_recall_fscore_support
from config import Config
from model import BertNLU
from dataset import NLUDataset
from transformers import BertTokenizer
from nlp_model.logger import get_logger

# Module-level logger shared by every function in this script.
logger = get_logger("quantize")


# --------------------------
# 1. Load base resources (model, label maps, tokenizer)
# --------------------------
def load_original_resources():
    """Load the trained model, intent/slot label maps and tokenizer for quantization.

    :return: tuple ``(model, intent_map, slot_map, tokenizer, device)`` — the
        model is on CPU (dynamic quantization is CPU-only) and in eval mode.
    :raises FileNotFoundError: if the label maps or the model weights are missing.
    """
    # Label maps written at training time. Build paths with os.path.join
    # (consistent with the model path below) and fail early with a clear
    # message if either file is missing.
    intent_map_path = os.path.join(Config.model_dir, "intent_map.json")
    slot_map_path = os.path.join(Config.model_dir, "slot_map.json")
    for map_path in (intent_map_path, slot_map_path):
        if not os.path.exists(map_path):
            raise FileNotFoundError(f"映射表文件不存在：{map_path}")
    with open(intent_map_path, 'r', encoding='utf-8') as f:
        intent_map = json.load(f)
    with open(slot_map_path, 'r', encoding='utf-8') as f:
        slot_map = json.load(f)

    # Tokenizer must match the one used during training.
    tokenizer = BertTokenizer.from_pretrained(Config.bert_model)
    # Pad-token fallback for GPT-style tokenizers. Only assign when an eos
    # token actually exists — BertTokenizer has no eos_token, so the previous
    # unconditional assignment could set pad_token to None.
    if tokenizer.pad_token is None and tokenizer.eos_token is not None:
        tokenizer.pad_token = tokenizer.eos_token

    # Dynamic quantization only supports CPU — force device=cpu.
    device = torch.device("cpu")
    model = BertNLU(
        num_intents=len(intent_map['intent2id']),
        num_slots=len(slot_map['slot2id'])
    ).to(device)

    # Load the fully-trained weights (state_dict produced by training).
    model_path = os.path.join(Config.model_dir, "bert_model.bin")
    if not os.path.exists(model_path):
        raise FileNotFoundError(f"原始模型文件不存在：{model_path}")
    model.load_state_dict(torch.load(model_path, map_location=device))
    model.eval()  # switch to eval mode before quantization

    logger.info(
        f"原始模型加载完成 | 设备: CPU | 意图数: {len(intent_map['intent2id'])} | 槽位数: {len(slot_map['slot2id'])}")
    return model, intent_map, slot_map, tokenizer, device


# --------------------------
# 2. Dynamic quantization core
# --------------------------
def dynamic_quantize_model(original_model, slot2id=None):
    """Apply PyTorch dynamic quantization (INT8) to the model's Linear layers.

    Only ``nn.Linear`` layers are quantized: they dominate BERT's compute, so
    the size/speed win is large, while embeddings and the intent/slot heads
    keep full precision, limiting accuracy loss.

    :param original_model: trained model already placed on CPU
        (dynamic quantization is CPU-only)
    :param slot2id: unused; kept (now optional) for backward compatibility —
        the previous docstring claimed it excluded layers from quantization,
        but the value was never consulted
    :return: a new quantized model; the input model is left untouched
    """
    # Restrict quantization to Linear layers.
    quantizable_layers = {nn.Linear}

    # Native dynamic-quantization API; deprecated after PyTorch 2.x in favor
    # of torchao, but kept deliberately per project requirements.
    quantized_model = torch.quantization.quantize_dynamic(
        model=original_model,
        qconfig_spec=quantizable_layers,  # only quantize the listed layer types
        dtype=torch.qint8,  # INT8: best compression and CPU throughput
        inplace=False  # keep the FP32 model intact for the later comparison
    )

    quantized_model.eval()  # inference mode after quantization
    logger.info("模型动态量化完成 | 量化精度: INT8 | 量化层类型: nn.Linear")
    return quantized_model


# --------------------------
# 3. Model file size (for before/after comparison)
# --------------------------
def calculate_model_size(model_path):
    """Return the size of a model file in megabytes, rounded to 2 decimals.

    A missing file yields ``0.0`` instead of raising, so callers can log
    size comparisons without pre-checking paths.
    """
    try:
        size_bytes = os.path.getsize(model_path)
    except OSError:
        # File does not exist (or is unreadable) — report zero size.
        return 0.0
    return round(size_bytes / (1024 * 1024), 2)


# --------------------------
# 4. Quantized-model evaluation (vs. the original model)
# --------------------------
def evaluate_quantized_model(quantized_model, original_model, intent_map, slot_map, tokenizer, device):
    """Evaluate the quantized model's intent/slot metrics against the original.

    Loads the dev set, runs both models over it, logs a side-by-side metric
    table and the relative F1 loss introduced by quantization.

    :param quantized_model: INT8 dynamically-quantized model (CPU)
    :param original_model: trained FP32 model (CPU)
    :param intent_map: dict with 'intent2id' / 'id2intent' mappings
    :param slot_map: dict with 'slot2id' / 'id2slot' mappings
    :param tokenizer: tokenizer consistent with training
    :param device: inference device (CPU for dynamic quantization)
    :return: metrics dict of the quantized model
    """
    # Load the test set (same logic as predict.py); reuse the training-time
    # label maps so label ids line up with the saved model heads.
    test_set = NLUDataset(Config.dev_path, tokenizer, is_test=False)
    test_set.intent2id, test_set.id2intent = intent_map['intent2id'], intent_map['id2intent']
    test_set.slot2id, test_set.id2slot = slot_map['slot2id'], slot_map['id2slot']
    test_loader = DataLoader(
        test_set,
        batch_size=Config.batch_size,
        shuffle=False,
        num_workers=2,
        pin_memory=False  # CPU inference needs no pin_memory
    )
    logger.info(f"测试集加载完成 | 共 {len(test_set)} 条数据")

    def evaluate_model(model):
        """Run one full pass over the test set and return intent/slot metrics."""
        model.eval()
        all_intent_preds, all_intent_true = [], []
        all_slot_preds, all_slot_true = [], []
        # Id of the 'O' (outside) slot tag; -1 means the tag is absent.
        o_label_id = slot_map['slot2id'].get('O', -1)

        with torch.no_grad():
            for batch in tqdm(test_loader, desc=f"评估{model.__class__.__name__}"):
                input_ids = batch['input_ids'].to(device)
                attention_mask = batch['attention_mask'].to(device)
                intent_ids = batch['intent_id'].to(device)
                slot_ids = batch['slot_ids'].to(device)

                # Forward pass: one intent per sequence, one slot per token.
                intent_logits, slot_logits = model(input_ids, attention_mask)
                intent_preds = intent_logits.argmax(1)
                slot_preds = slot_logits.argmax(2)

                # Collect intent labels.
                all_intent_preds.extend(intent_preds.cpu().numpy())
                all_intent_true.extend(intent_ids.cpu().numpy())

                # Collect slot labels, dropping padding positions and, when
                # present, the dominant 'O' tag (keeps metrics focused on
                # real entity slots).
                mask = attention_mask.bool()
                valid_mask = (slot_ids != o_label_id) & mask if o_label_id != -1 else mask
                all_slot_preds.extend(slot_preds[valid_mask].cpu().numpy())
                all_slot_true.extend(slot_ids[valid_mask].cpu().numpy())

        # Intent metrics. Guarded against an empty test set — the unguarded
        # division previously raised ZeroDivisionError.
        intent_acc = (sum(p == t for p, t in zip(all_intent_preds, all_intent_true)) /
                      len(all_intent_true)) if all_intent_true else 0.0
        intent_p, intent_r, intent_f1, _ = precision_recall_fscore_support(
            all_intent_true, all_intent_preds, average='macro', zero_division=0
        )

        # Slot metrics (same empty-list guard as intent accuracy).
        slot_acc = (sum(p == t for p, t in zip(all_slot_preds, all_slot_true)) /
                    len(all_slot_true)) if all_slot_true else 0.0
        slot_p, slot_r, slot_f1, _ = precision_recall_fscore_support(
            all_slot_true, all_slot_preds, average='macro', zero_division=0
        )

        return {
            "intent": {"acc": intent_acc, "p": intent_p, "r": intent_r, "f1": intent_f1},
            "slot": {"acc": slot_acc, "p": slot_p, "r": slot_r, "f1": slot_f1}
        }

    def relative_f1_loss(orig_f1, quant_f1):
        """Relative F1 drop in percent; 0.0 when the baseline F1 is 0
        (a zero baseline is reachable via zero_division=0 and previously
        caused a ZeroDivisionError)."""
        return (orig_f1 - quant_f1) / orig_f1 * 100 if orig_f1 > 0 else 0.0

    # Evaluate both models on the same loader.
    logger.info("\n" + "=" * 50 + " 开始评估模型性能 " + "=" * 50)
    original_metrics = evaluate_model(original_model)
    quantized_metrics = evaluate_model(quantized_model)

    # Log the side-by-side comparison table.
    logger.info("\n" + "=" * 100)
    logger.info("模型性能对比（量化前后）")
    logger.info("-" * 100)
    logger.info(f"{'指标':<10} {'模型类型':<10} {'准确率':<10} {'精确率':<10} {'召回率':<10} {'F1值':<10}")
    logger.info("-" * 100)
    # Intent rows.
    logger.info(
        f"{'意图':<10} {'原始模型':<10} {original_metrics['intent']['acc']:.4f} {original_metrics['intent']['p']:.4f} {original_metrics['intent']['r']:.4f} {original_metrics['intent']['f1']:.4f}")
    logger.info(
        f"{'意图':<10} {'量化模型':<10} {quantized_metrics['intent']['acc']:.4f} {quantized_metrics['intent']['p']:.4f} {quantized_metrics['intent']['r']:.4f} {quantized_metrics['intent']['f1']:.4f}")
    # Slot rows.
    logger.info(
        f"{'槽位':<10} {'原始模型':<10} {original_metrics['slot']['acc']:.4f} {original_metrics['slot']['p']:.4f} {original_metrics['slot']['r']:.4f} {original_metrics['slot']['f1']:.4f}")
    logger.info(
        f"{'槽位':<10} {'量化模型':<10} {quantized_metrics['slot']['acc']:.4f} {quantized_metrics['slot']['p']:.4f} {quantized_metrics['slot']['r']:.4f} {quantized_metrics['slot']['f1']:.4f}")
    logger.info("=" * 100)

    # Relative F1 loss; < 5% is the acceptance threshold used here.
    intent_f1_loss = relative_f1_loss(original_metrics['intent']['f1'], quantized_metrics['intent']['f1'])
    slot_f1_loss = relative_f1_loss(original_metrics['slot']['f1'], quantized_metrics['slot']['f1'])
    logger.info(f"\n量化后精度损失：意图F1损失 {intent_f1_loss:.2f}% | 槽位F1损失 {slot_f1_loss:.2f}%")
    if intent_f1_loss < 5 and slot_f1_loss < 5:
        logger.info("✅ 量化模型精度损失在可接受范围内（<5%）")
    else:
        logger.warning("⚠️ 量化模型精度损失超过5%，建议调整量化配置（如减少量化层）")

    return quantized_metrics


# --------------------------
# 5. Save the quantized model
# --------------------------
def save_quantized_model(quantized_model, save_path):
    """Persist the quantized model and log the size comparison.

    :param quantized_model: the quantized model to save
    :param save_path: destination file path for the state_dict
    """
    # Quantized models are saved as a state_dict, same as the original model,
    # so downstream loading code stays uniform.
    torch.save(quantized_model.state_dict(), save_path)
    # Compare file sizes (MB) before/after quantization.
    original_size = calculate_model_size(os.path.join(Config.model_dir, "bert_model.bin"))
    quantized_size = calculate_model_size(save_path)
    # Guard: calculate_model_size returns 0.0 for a missing/empty file, which
    # previously caused a ZeroDivisionError here.
    compression_ratio = round(original_size / quantized_size, 2) if quantized_size else 0.0

    logger.info(f"\n量化模型保存完成 | 保存路径: {save_path}")
    logger.info(f"模型大小对比：原始模型 {original_size}MB | 量化模型 {quantized_size}MB | 压缩比 {compression_ratio}x")


# --------------------------
# Main: wire the whole quantization pipeline together
# --------------------------
def main():
    """Run the end-to-end pipeline: load -> quantize -> evaluate -> save."""
    # Step 1: trained FP32 model plus label maps and tokenizer (all on CPU).
    base_model, intent_map, slot_map, tokenizer, device = load_original_resources()

    # Step 2: INT8 dynamic quantization of the Linear layers.
    int8_model = dynamic_quantize_model(base_model, slot_map['slot2id'])

    # Step 3: side-by-side metrics of quantized vs. original on the dev set.
    evaluate_quantized_model(int8_model, base_model, intent_map, slot_map, tokenizer, device)

    # Step 4: persist the quantized weights next to the original model file.
    target_path = os.path.join(Config.model_dir, "quantized_bert_model.bin")
    save_quantized_model(int8_model, target_path)

    banner = "=" * 100
    logger.info("\n" + banner)
    logger.info("模型量化流程全部完成！")
    logger.info(banner)


# Script entry point: run the full quantization pipeline.
if __name__ == "__main__":
    main()