import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import json
from tqdm import tqdm
from sklearn.metrics import precision_recall_fscore_support
from config import Config
from model import BertNLU
from dataset import get_datasets
from nlp_model.logger import get_logger

# Module-level logger for the training run (named "train").
logger = get_logger("train")


def _save_label_maps(train_set):
    """Persist the intent and slot label maps to Config.model_dir so that
    inference code can reload the exact id mappings used during training."""
    with open(f"{Config.model_dir}/intent_map.json", 'w', encoding='utf-8') as f:
        json.dump(
            {"intent2id": train_set.intent2id, "id2intent": train_set.id2intent},
            f, ensure_ascii=False, indent=2
        )
    with open(f"{Config.model_dir}/slot_map.json", 'w', encoding='utf-8') as f:
        json.dump(
            {"slot2id": train_set.slot2id, "id2slot": train_set.id2slot},
            f, ensure_ascii=False, indent=2
        )
    logger.info("意图和槽位映射表已保存至模型目录")


def _train_one_epoch(model, loader, optimizer, intent_criterion, slot_criterion, epoch):
    """Run one training epoch over `loader` and return the summed batch loss.

    The joint loss is intent CE + slot CE; slot CE already ignores the "O"
    label via its ignore_index (set by the caller).
    """
    model.train()
    total_loss = 0.0
    for batch in tqdm(loader, desc=f"训练集Epoch {epoch + 1}/{Config.epochs}"):
        input_ids = batch['input_ids'].to(Config.device)
        attention_mask = batch['attention_mask'].to(Config.device)
        intent_ids = batch['intent_id'].to(Config.device)
        slot_ids = batch['slot_ids'].to(Config.device)

        optimizer.zero_grad()
        intent_logits, slot_logits = model(input_ids, attention_mask)

        intent_loss = intent_criterion(intent_logits, intent_ids)
        # CrossEntropyLoss over sequences expects (N, C, L); slot_logits is
        # presumably (batch, seq_len, num_slots) — hence the transpose.
        slot_loss = slot_criterion(slot_logits.transpose(1, 2), slot_ids)
        loss = intent_loss + slot_loss

        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    return total_loss


def _evaluate(model, loader, o_label_id, epoch):
    """Collect dev-set predictions.

    Returns (intent_preds, intent_true, slot_preds, slot_true) as flat Python
    lists. Slot lists contain only positions that are both non-padding
    (attention_mask == 1) and not labeled "O", so downstream metrics reflect
    effective slots only.
    """
    model.eval()
    intent_preds, intent_true = [], []
    slot_preds, slot_true = [], []
    with torch.no_grad():
        for batch in tqdm(loader, desc=f"验证集Epoch {epoch + 1}/{Config.epochs}"):
            input_ids = batch['input_ids'].to(Config.device)
            attention_mask = batch['attention_mask'].to(Config.device)
            intent_ids = batch['intent_id'].to(Config.device)
            slot_ids = batch['slot_ids'].to(Config.device)

            intent_logits, slot_logits = model(input_ids, attention_mask)

            # Intent: plain multi-class argmax over the logits.
            intent_preds.extend(intent_logits.argmax(1).cpu().numpy())
            intent_true.extend(intent_ids.cpu().numpy())

            # Slots: keep only valid (non-padding) positions whose gold label
            # is not "O", so the "O" majority class cannot inflate metrics.
            non_o_mask = (slot_ids != o_label_id) & attention_mask.bool()
            slot_preds.extend(slot_logits.argmax(2)[non_o_mask].cpu().numpy())
            slot_true.extend(slot_ids[non_o_mask].cpu().numpy())
    return intent_preds, intent_true, slot_preds, slot_true


def _metrics(true, preds):
    """Return (accuracy, macro-precision, macro-recall, macro-F1).

    All four are 0.0 when `true` is empty — this also guards the intent
    metrics against an empty dev set (the original only guarded slots).
    """
    if not true:
        return 0.0, 0.0, 0.0, 0.0
    acc = sum(p == t for p, t in zip(preds, true)) / len(true)
    precision, recall, f1, _ = precision_recall_fscore_support(
        true,
        preds,
        average='macro',  # macro treats every class equally (use weighted if imbalanced)
        zero_division=0   # avoid errors for classes with no predicted samples
    )
    return acc, precision, recall, f1


def train():
    """Fine-tune the joint intent/slot BERT model.

    Loads the datasets, trains for Config.epochs, evaluates on the dev set
    each epoch, and saves the model with the best effective-slot macro-F1 to
    {Config.model_dir}/bert_model.bin. Label maps are saved up front.
    """
    # 1. Data and tokenizer (tokenizer unused here but part of the API).
    train_set, dev_set, _, tokenizer = get_datasets()
    train_loader = DataLoader(
        train_set,
        batch_size=Config.batch_size,
        shuffle=True,
        num_workers=2
    )
    dev_loader = DataLoader(
        dev_set,
        batch_size=Config.batch_size,
        shuffle=False,
        num_workers=2
    )
    logger.info(f"数据加载完成 | 训练集: {len(train_set)} 条 | 验证集: {len(dev_set)} 条")

    # 2. Model.
    model = BertNLU(
        num_intents=len(train_set.intent2id),
        num_slots=len(train_set.slot2id)
    ).to(Config.device)
    logger.info(
        f"模型初始化完成 | 设备: {Config.device} | 意图数: {len(train_set.intent2id)} | 槽位数: {len(train_set.slot2id)}")

    # 3. Losses and optimizer. The slot loss ignores "O" so the dominant "O"
    # class cannot swamp the gradient.
    # NOTE(review): padding positions in slot_ids are assumed to be labeled
    # "O" (and hence ignored) — confirm against the dataset code.
    intent_criterion = nn.CrossEntropyLoss()
    o_label_id = train_set.slot2id['O']
    slot_criterion = nn.CrossEntropyLoss(ignore_index=o_label_id)
    optimizer = optim.Adam(model.parameters(), lr=Config.lr)

    # 4. Save label maps before training so a crash mid-run still leaves them.
    _save_label_maps(train_set)

    # 5. Main loop: train, evaluate, checkpoint on best slot F1.
    best_dev_slot_f1 = 0.0  # effective-slot macro-F1 selects the best model
    for epoch in range(Config.epochs):
        total_loss = _train_one_epoch(
            model, train_loader, optimizer, intent_criterion, slot_criterion, epoch
        )

        intent_preds, intent_true, slot_preds, slot_true = _evaluate(
            model, dev_loader, o_label_id, epoch
        )
        intent_acc, intent_precision, intent_recall, intent_f1 = _metrics(intent_true, intent_preds)
        slot_acc, slot_precision, slot_recall, slot_f1 = _metrics(slot_true, slot_preds)

        logger.info("=" * 80)
        logger.info(f"Epoch {epoch + 1}/{Config.epochs} | 训练损失: {total_loss/len(train_loader):.4f}")
        logger.info("【意图指标】")
        logger.info(f"  准确率: {intent_acc:.4f} | 精确率: {intent_precision:.4f} | 召回率: {intent_recall:.4f} | F1值: {intent_f1:.4f}")
        logger.info("【槽位指标】（已排除O标签，仅统计有效槽位）")
        logger.info(f"  准确率: {slot_acc:.4f} | 精确率: {slot_precision:.4f} | 召回率: {slot_recall:.4f} | F1值: {slot_f1:.4f}")
        logger.info("=" * 80)

        if slot_f1 > best_dev_slot_f1:
            best_dev_slot_f1 = slot_f1
            torch.save(model.state_dict(), f"{Config.model_dir}/bert_model.bin")
            logger.info(f"✅ 已保存最佳模型 | 当前最佳槽位F1: {best_dev_slot_f1:.4f}")

    # Bug fix: report the actual save location (Config.model_dir), not a
    # hardcoded "saved_model" path that may not match the config.
    logger.info(f"训练完成！最佳模型已保存至 {Config.model_dir}/bert_model.bin")


# Script entry point: run the full training loop when executed directly.
if __name__ == "__main__":
    train()