import torch
import torch.nn as nn
from torch.optim import AdamW
from sklearn.metrics import f1_score, accuracy_score
from tqdm import tqdm
from model2dev_utils import model2dev
from config import Config

# Suppress all warning messages (e.g. sklearn undefined-metric warnings
# during early training steps when not every class has been predicted yet).
import warnings

warnings.filterwarnings("ignore")

# Logging and timing utilities; basicConfig is only applied if no handler
# is already attached, so importing this module won't clobber an existing setup.
import logging, time
logger = logging.getLogger(__name__)
if not logger.handlers:
    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Load the configuration object (hyper-parameters, device, file paths, etc.).
conf = Config()
# Data-loading utility: builds the train/dev/test DataLoaders.
from bert_dataloader import build_dataloader
# The BERT-based classifier model.
from bert_model import BertClassifier


def _log_dataset_stats(train_dataloader, dev_dataloader, test_dataloader):
    """Log dataset sizes and batch counts. Best-effort: any failure only warns."""
    try:
        train_size = len(train_dataloader.dataset)
        dev_size = len(dev_dataloader.dataset)
        test_size = len(test_dataloader.dataset)
        logger.info(f"数据集规模: train={train_size:,}, dev={dev_size:,}, test={test_size:,}")
        logger.info(f"批次数: train_batches={len(train_dataloader)}, dev_batches={len(dev_dataloader)}, test_batches={len(test_dataloader)}")
        logger.info(f"batch_size={conf.batch_size}, pad_size={conf.pad_size}")
    except Exception as e:
        logger.warning(f"统计数据集规模失败: {e}")


def _log_model_info(model):
    """Log target device, CUDA device name, and parameter counts. Best-effort."""
    try:
        total_params = sum(p.numel() for p in model.parameters())
        trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
        logger.info(f"设备: {conf.device}")
        if torch.cuda.is_available():
            try:
                dev_name = torch.cuda.get_device_name(0)
                logger.info(f"CUDA: {dev_name}")
            except Exception:
                # Device name is purely informational; skip silently on failure.
                pass
        logger.info(f"模型参数: 总数={total_params:,}, 可训练={trainable_params:,}")
    except Exception as e:
        logger.warning(f"统计模型参数失败: {e}")


def _grad_norm(model):
    """Return the global L2 norm over all parameter gradients (NaN on failure).

    Must be called after ``loss.backward()`` and before ``optimizer.step()``,
    while ``.grad`` tensors are populated.
    """
    try:
        grad_sq_sum = 0.0
        for p in model.parameters():
            if p.grad is not None:
                g = p.grad.detach()
                grad_sq_sum += float(g.norm(2).item() ** 2)
        return grad_sq_sum ** 0.5
    except Exception:
        return float('nan')


def _log_batch_diagnostics(i, grad_norm):
    """Log the gradient norm and, when CUDA is usable, current GPU memory.

    Falls back to the short (norm-only) message when CUDA is absent or the
    memory query fails.
    """
    if torch.cuda.is_available():
        try:
            mem_alloc = torch.cuda.memory_allocated() / (1024 ** 2)
            mem_reserved = torch.cuda.memory_reserved() / (1024 ** 2)
            logger.info(f"[Batch {i+1}] grad_norm={grad_norm:.4f}, CUDA内存: allocated={mem_alloc:.1f}MB, reserved={mem_reserved:.1f}MB")
            return
        except Exception:
            pass
    logger.info(f"[Batch {i+1}] grad_norm={grad_norm:.4f}")


def model2train():
    """Train the BERT classifier and checkpoint the best model on dev macro-F1.

    All configuration (device, learning rate, epoch count, save path) is read
    from the module-level ``conf`` object; there are no parameters and no
    return value. Side effect: writes the best model's ``state_dict`` to
    ``conf.model_save_path`` whenever the dev macro-F1 improves.
    """
    total_start = time.time()
    # 1. Data: build train/dev/test DataLoaders.
    train_dataloader, dev_dataloader, test_dataloader = build_dataloader()
    _log_dataset_stats(train_dataloader, dev_dataloader, test_dataloader)

    # 2. Model: instantiate and move to the configured device.
    model = BertClassifier()
    model.to(conf.device)
    _log_model_info(model)

    # 3. Loss: multi-class cross entropy over the classifier logits.
    loss_fn = nn.CrossEntropyLoss()

    # 4. Optimizer.
    optimizer = AdamW(model.parameters(), lr=conf.learning_rate)
    logger.info(f"优化器: AdamW, 学习率={conf.learning_rate}, 训练轮次={conf.num_epochs}")

    # 5. Training loop. best_f1 tracks the best dev macro-F1 so far, so the
    # checkpoint is only overwritten on an improvement.
    best_f1 = 0.0
    # Hoisted: used in two end-of-epoch checks every iteration.
    num_batches = len(train_dataloader)
    for epoch in range(conf.num_epochs):
        epoch_start = time.time()
        model.train()
        # Running loss / predictions since the last metrics report (reset every
        # 10 batches), plus a sample counter for throughput logging.
        total_loss = 0.0
        train_preds, train_labels = [], []
        seen_samples = 0
        for i, batch in enumerate(tqdm(train_dataloader, desc=f"训练集训练中...(epoch {epoch+1}/{conf.num_epochs})")):
            # Move the batch to the training device.
            input_ids, attention_mask, labels = batch
            input_ids = input_ids.to(conf.device)
            attention_mask = attention_mask.to(conf.device)
            labels = labels.to(conf.device)
            seen_samples += labels.size(0)

            # Forward pass and loss.
            logits = model(input_ids, attention_mask)
            loss = loss_fn(logits, labels)
            total_loss += loss.item()
            # Hard predictions (argmax over classes) for running train metrics.
            y_pred_list = torch.argmax(logits, dim=1)
            train_preds.extend(y_pred_list.cpu().tolist())
            train_labels.extend(labels.cpu().tolist())

            # Backward pass and parameter update.
            optimizer.zero_grad()
            loss.backward()
            # Every 50 batches: log gradient norm / GPU memory (after backward,
            # before step, while gradients are populated).
            if (i + 1) % 50 == 0:
                _log_batch_diagnostics(i, _grad_norm(model))
            optimizer.step()

            # Every 10 batches, and at epoch end, report running train metrics.
            if (i + 1) % 10 == 0 or i == num_batches - 1:
                acc = accuracy_score(train_labels, train_preds)
                f1 = f1_score(train_labels, train_preds, average='macro')
                # Batches accumulated since the last reset. Resets happen only
                # at multiples of 10, so (i % 10) + 1 is exact even for a short
                # final window at the end of the epoch.
                batch_count = i % 10 + 1
                avg_loss = total_loss / batch_count
                elapsed = time.time() - epoch_start
                speed = seen_samples / max(elapsed, 1e-6)
                print(f"\n轮次: {epoch + 1}, 批次: {i + 1}, 损失: {avg_loss:.4f}, acc准确率:{acc:.4f}, f1分数:{f1:.4f}")
                logger.info(f"epoch={epoch+1} step={i+1} avg_loss={avg_loss:.4f} acc={acc:.4f} f1={f1:.4f} seen={seen_samples} speed={speed:.1f} samples/s")
                # Reset the running window.
                total_loss = 0.0
                train_preds, train_labels = [], []

            # Every 100 batches, and at epoch end, evaluate on dev and
            # checkpoint when the macro-F1 improves.
            if (i + 1) % 100 == 0 or i == num_batches - 1:
                report, f1score, accuracy, precision, recall = model2dev(model, dev_dataloader, conf.device)
                print("验证集评估报告：\n", report)
                print(f"验证集的f1: {f1score:.4f}, accuracy:{accuracy:.4f}, precision:{precision:.4f}, recall:{recall:.4f}")
                logger.info(f"[Dev] f1={f1score:.4f} acc={accuracy:.4f} precision={precision:.4f} recall={recall:.4f}")
                # model2dev presumably switches the model to eval mode — restore
                # training mode either way before continuing.
                model.train()
                if f1score > best_f1:
                    best_f1 = f1score
                    torch.save(model.state_dict(), conf.model_save_path)
                    print("保存模型成功, 当前f1分数:", best_f1)
                    logger.info(f"保存最佳模型到: {conf.model_save_path}, best_f1={best_f1:.4f}")
        epoch_time = time.time() - epoch_start
        logger.info(f"epoch {epoch+1}/{conf.num_epochs} 完成, 用时={epoch_time:.2f}s")

    total_time = time.time() - total_start
    logger.info(f"训练完成，总用时={total_time:.2f}s, 最佳dev F1={best_f1:.4f}, 模型保存在: {conf.model_save_path}")


# Script entry point: run the full training procedure when executed directly.
if __name__ == '__main__':
    model2train()
