import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from tqdm import tqdm
from sklearn.metrics import precision_recall_fscore_support, accuracy_score
from config import Config
from model import BertNLU  # 教师模型
from dataset import get_datasets  # 复用数据集加载
from nlp_model.logger import get_logger

# Module-level logger dedicated to the distillation run.
logger = get_logger("distill")


# --------------------------
# 1. 轻量学生模型（保持不变）
# --------------------------
class SimpleStudentModel(nn.Module):
    """Compact Transformer encoder used as the distillation student.

    Jointly predicts an utterance-level intent (taken from the first
    token's representation, i.e. the [CLS] position) and a per-token
    slot label for every position in the sequence.
    """

    def __init__(self, num_intents, num_slots, vocab_size=21128):
        super().__init__()
        hidden = 128
        self.embedding = nn.Embedding(vocab_size, hidden, padding_idx=0)
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=hidden,
            nhead=4,
            dim_feedforward=256,
            dropout=0.1,
            batch_first=True,
        )
        self.transformer = nn.TransformerEncoder(encoder_layer, num_layers=2)
        self.intent_head = nn.Linear(hidden, num_intents)
        self.slot_head = nn.Linear(hidden, num_slots)
        self.dropout = nn.Dropout(0.1)

    def forward(self, input_ids, attention_mask):
        """Return (intent_logits [B, num_intents], slot_logits [B, T, num_slots]).

        `attention_mask` follows the HuggingFace convention: 1 marks a
        real token, 0 marks padding.
        """
        hidden_states = self.dropout(self.embedding(input_ids))
        # nn.TransformerEncoder expects True at PADDED positions — the
        # inverse of the incoming attention mask.
        pad_mask = ~attention_mask.bool()
        encoded = self.transformer(hidden_states, src_key_padding_mask=pad_mask)
        cls_repr = encoded[:, 0, :]
        return (
            self.intent_head(self.dropout(cls_repr)),
            self.slot_head(self.dropout(encoded)),
        )


# --------------------------
# 2. 完整指标计算函数（新增）
# --------------------------
def calculate_metrics(y_true, y_pred):
    """Compute accuracy plus macro-averaged precision/recall/F1.

    Args:
        y_true: sequence of gold label ids.
        y_pred: sequence of predicted label ids (same length as y_true).

    Returns:
        dict with keys "acc", "precision", "recall", "f1", each value
        rounded to 4 decimal places.
    """
    # Guard: sklearn raises ValueError on empty inputs, which can happen
    # when the dev set yields no valid (non-"O", unpadded) slot tokens.
    # Report all-zero metrics instead of crashing the epoch.
    if len(y_true) == 0:
        return {"acc": 0.0, "precision": 0.0, "recall": 0.0, "f1": 0.0}
    acc = accuracy_score(y_true, y_pred)
    # Macro average weights every class equally; zero_division=0 keeps
    # classes that are never predicted from producing warnings/NaNs.
    precision, recall, f1, _ = precision_recall_fscore_support(
        y_true, y_pred, average="macro", zero_division=0
    )
    return {
        "acc": round(acc, 4),
        "precision": round(precision, 4),
        "recall": round(recall, 4),
        "f1": round(f1, 4)
    }


# --------------------------
# 3. 蒸馏主逻辑（补充指标输出）
# --------------------------
def distill_main() -> None:
    """Distill the frozen BERT teacher into the lightweight student model.

    Trains for 15 epochs on a weighted sum of hard-label cross-entropy and
    temperature-scaled KL soft-label loss, evaluates intent/slot metrics on
    the dev set after each epoch, and checkpoints the student whenever the
    slot macro-F1 improves.
    """
    # Load datasets and wrap them in loaders.
    train_set, dev_set, _, tokenizer = get_datasets()
    train_loader = DataLoader(train_set, Config.batch_size, shuffle=True, num_workers=2)
    dev_loader = DataLoader(dev_set, Config.batch_size, shuffle=False, num_workers=2)
    logger.info(f"数据加载完成 | 训练集: {len(train_set)} | 验证集: {len(dev_set)}")

    # Teacher model: load fine-tuned weights and freeze every parameter.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    teacher_model = BertNLU(len(train_set.intent2id), len(train_set.slot2id)).to(device)
    teacher_model.load_state_dict(torch.load(f"{Config.model_dir}/bert_model.bin", map_location=device))
    teacher_model.eval()
    for param in teacher_model.parameters():
        param.requires_grad = False

    # Student model, trained from scratch.
    student_model = SimpleStudentModel(len(train_set.intent2id), len(train_set.slot2id)).to(device)

    # Distillation hyperparameters.
    temperature = 3.0
    alpha = 0.3  # weight of the hard (ground-truth) loss
    beta = 0.7  # weight of the soft (teacher-distribution) loss

    # Losses and optimizer.
    # NOTE(review): ignore_index excludes every "O"-tagged position from the
    # hard slot loss, so the student gets no direct supervision to emit "O".
    # Presumably padding positions are also tagged "O" — confirm in dataset.py.
    hard_intent_loss = nn.CrossEntropyLoss()
    hard_slot_loss = nn.CrossEntropyLoss(ignore_index=train_set.slot2id["O"])
    # NOTE(review): with reduction="batchmean" the KL term is divided by the
    # batch size only; for the 3-D slot tensor this means the slot soft loss
    # scales with sequence length, and padded positions contribute as well —
    # confirm this weighting is intended.
    soft_loss = nn.KLDivLoss(reduction="batchmean")
    optimizer = optim.Adam(student_model.parameters(), lr=5e-4)

    # Training loop.
    best_slot_f1 = 0.0
    for epoch in range(15):
        # ---- Training phase ----
        student_model.train()
        total_loss = 0.0
        for batch in tqdm(train_loader, desc=f"Epoch {epoch + 1}/15"):
            input_ids = batch["input_ids"].to(device)
            attention_mask = batch["attention_mask"].to(device)
            intent_true = batch["intent_id"].to(device)
            slot_true = batch["slot_ids"].to(device)

            # Teacher forward pass (soft targets; no gradients needed).
            with torch.no_grad():
                t_intent_logits, t_slot_logits = teacher_model(input_ids, attention_mask)
                t_intent_soft = torch.softmax(t_intent_logits / temperature, dim=1)
                t_slot_soft = torch.softmax(t_slot_logits / temperature, dim=2)

            # Student forward pass; KLDivLoss expects log-probabilities
            # as its first argument.
            s_intent_logits, s_slot_logits = student_model(input_ids, attention_mask)
            s_intent_logsoft = torch.log_softmax(s_intent_logits / temperature, dim=1)
            s_slot_logsoft = torch.log_softmax(s_slot_logits / temperature, dim=2)

            # Combined loss: CE against gold labels (hard) plus the
            # temperature^2-scaled KL against teacher distributions (soft).
            loss_hard = hard_intent_loss(s_intent_logits, intent_true) + hard_slot_loss(s_slot_logits.transpose(1, 2),
                                                                                        slot_true)
            loss_soft = (soft_loss(s_intent_logsoft, t_intent_soft) + soft_loss(s_slot_logsoft, t_slot_soft)) * (
                        temperature ** 2)
            total_loss_batch = alpha * loss_hard + beta * loss_soft

            optimizer.zero_grad()
            total_loss_batch.backward()
            optimizer.step()
            total_loss += total_loss_batch.item()

        # ---- Validation phase (full metric suite) ----
        student_model.eval()
        all_intent_pred, all_intent_true = [], []
        all_slot_pred, all_slot_true = [], []
        o_label_id = train_set.slot2id["O"]

        with torch.no_grad():
            for batch in dev_loader:
                input_ids = batch["input_ids"].to(device)
                attention_mask = batch["attention_mask"].to(device)
                intent_true = batch["intent_id"].cpu().numpy()
                slot_true = batch["slot_ids"].cpu().numpy()

                # Student predictions: argmax over the class dimension.
                s_intent_logits, s_slot_logits = student_model(input_ids, attention_mask)
                intent_pred = s_intent_logits.argmax(1).cpu().numpy()
                slot_pred = s_slot_logits.argmax(2).cpu().numpy()

                # Collect labels. Slot metrics only count real (unpadded)
                # tokens whose gold tag is not "O", mirroring the hard-loss
                # ignore_index above.
                all_intent_pred.extend(intent_pred)
                all_intent_true.extend(intent_true)
                mask = attention_mask.cpu().bool().numpy()
                valid_mask = (slot_true != o_label_id) & mask
                all_slot_pred.extend(slot_pred[valid_mask])
                all_slot_true.extend(slot_true[valid_mask])

        # Compute and log the full metric report.
        intent_metrics = calculate_metrics(all_intent_true, all_intent_pred)
        slot_metrics = calculate_metrics(all_slot_true, all_slot_pred)
        avg_loss = round(total_loss / len(train_loader), 4)

        logger.info(f"\nEpoch {epoch + 1} | 平均损失: {avg_loss}")
        logger.info("=" * 60)
        logger.info("意图识别指标:")
        logger.info(f"准确率: {intent_metrics['acc']} | 精确率: {intent_metrics['precision']}")
        logger.info(f"召回率: {intent_metrics['recall']} | F1值: {intent_metrics['f1']}")
        logger.info("-" * 60)
        logger.info("槽位填充指标:")
        logger.info(f"准确率: {slot_metrics['acc']} | 精确率: {slot_metrics['precision']}")
        logger.info(f"召回率: {slot_metrics['recall']} | F1值: {slot_metrics['f1']}")
        logger.info("=" * 60)

        # Checkpoint whenever slot F1 improves.
        # NOTE(review): if slot F1 never exceeds 0.0 across all epochs, no
        # checkpoint is written even though the final log message below
        # claims one was saved — confirm whether that edge case matters.
        if slot_metrics["f1"] > best_slot_f1:
            best_slot_f1 = slot_metrics["f1"]
            torch.save(student_model.state_dict(), f"{Config.model_dir}/student_model.bin")
            logger.info(f"✅ 保存最佳学生模型（槽位F1: {best_slot_f1}）\n")

    logger.info(f"蒸馏完成！最佳模型已保存至 {Config.model_dir}/student_model.bin")


# Script entry point: run the full distillation pipeline.
if __name__ == "__main__":
    distill_main()
