import os

import torch
import torch.nn as nn
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.utils.data import DataLoader

from 处理数据集 import LanguageDataset
from 聊天模型 import LanguageModel
from 词嵌入 import tokenizer

if __name__ == '__main__':
    # Force the 'spawn' start method so multiprocessing behaves on Windows.
    import multiprocessing

    multiprocessing.set_start_method('spawn', force=True)

    # Device configuration: prefer CUDA when available.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")

    # Hyperparameters.
    Epochs = 5000
    batch_size = 8
    lr = 5e-5
    max_grad_norm = 1.0  # gradient-clipping threshold

    # Prepare the dataset; fail fast if it is empty.
    ds = LanguageDataset()
    print(f"数据集样本数: {len(ds)}")
    if len(ds) == 0:
        raise ValueError("数据集为空，请检查sys1_数据集.py")

    dl = DataLoader(
        ds,
        batch_size=batch_size,
        shuffle=True,
        num_workers=0,  # single-process loading sidesteps Windows worker issues
        pin_memory=device.type == 'cuda'
    )

    # Create the checkpoint directory up front — torch.save does NOT create
    # parent directories, so without this the first save (after a full epoch
    # of training) would raise FileNotFoundError and lose all progress.
    os.makedirs("weights", exist_ok=True)

    # Build the model.
    model = LanguageModel().to(device)
    print(f"模型参数总数: {sum(p.numel() for p in model.parameters() if p.requires_grad):,}")

    # Loss function that ignores padding positions in the labels.
    # The tokenizer may lack a pad_token_id attribute OR expose it as None
    # (common for HF-style tokenizers without a pad token); either way fall
    # back to 0 so CrossEntropyLoss gets a valid integer index.
    pad_token_id = getattr(tokenizer, 'pad_token_id', None)
    if pad_token_id is None:
        pad_token_id = 0
    loss_fn = nn.CrossEntropyLoss(ignore_index=pad_token_id)

    # Optimizer and a cosine LR schedule decaying over the full run.
    optimizer = torch.optim.Adam(model.parameters(), lr=lr, betas=(0.9, 0.98), eps=1e-9)
    scheduler = CosineAnnealingLR(optimizer, T_max=Epochs, eta_min=1e-6)

    # Training loop; tracks the best (lowest) epoch-average loss for checkpointing.
    best_loss = float('inf')
    for epoch in range(Epochs):
        model.train()
        total_loss = 0.0

        for i, (src, tgt, label) in enumerate(dl):
            # Tokenize the batch; skip a bad batch rather than abort the run.
            try:
                src_ids, src_mask = tokenizer(src)
                tgt_ids, tgt_mask = tokenizer(tgt)
                label_ids, _ = tokenizer(label)
            except Exception as e:
                print(f"分词错误: {e}")
                continue

            # Move tensors to the training device.
            src_ids = src_ids.to(device)
            tgt_ids = tgt_ids.to(device)
            label_ids = label_ids.to(device)
            src_mask = src_mask.to(device)
            tgt_mask = tgt_mask.to(device)

            # Forward pass (best-effort: a failing batch is logged and skipped).
            optimizer.zero_grad()
            try:
                outputs = model(
                    src=src_ids,
                    tgt=tgt_ids,
                    src_key_padding_mask=src_mask,
                    tgt_key_padding_mask=tgt_mask
                )
            except Exception as e:
                print(f"模型前向传播错误: {e}")
                continue

            # Cross-entropy over flattened (batch*seq, vocab) logits vs. label ids.
            loss = loss_fn(
                outputs.reshape(-1, outputs.shape[-1]),
                label_ids.reshape(-1)
            )

            # Backward pass with gradient clipping for training stability.
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
            optimizer.step()

            total_loss += loss.item()

            # Periodic progress report: running average loss over this epoch.
            if (i + 1) % 10 == 0:
                avg_batch_loss = total_loss / (i + 1)
                print(f"Epoch [{epoch + 1}/{Epochs}], Batch [{i + 1}/{len(dl)}], Loss: {avg_batch_loss:.4f}")

        # End-of-epoch bookkeeping: report average loss and current LR,
        # then advance the scheduler.
        avg_loss = total_loss / len(dl) if len(dl) > 0 else 0.0
        print(f"\nEpoch [{epoch + 1}/{Epochs}] 平均损失: {avg_loss:.4f}, 学习率: {scheduler.get_last_lr()[0]:.6f}")
        scheduler.step()

        # Keep the best checkpoint seen so far.
        if avg_loss < best_loss:
            best_loss = avg_loss
            torch.save(model.state_dict(), "weights/best_model.pt")
            print(f"保存最佳模型 (损失: {best_loss:.4f})")

    torch.save(model.state_dict(), "weights/final_model.pt")
    print(f"训练结束，最终模型保存至 weights/final_model.pt，最佳损失: {best_loss:.4f}")
