# train.py

import os
# Work around the duplicate-OpenMP-runtime crash (optional; common with conda + MKL)
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

import torch
import torch.nn as nn
import torch.optim as optim
from torchvision.utils import save_image
from tqdm import tqdm
import matplotlib.pyplot as plt

# AMP mixed-precision training utilities
from torch.cuda.amp import autocast, GradScaler
torch.backends.cudnn.benchmark = True

from data_preprocessing import get_dataloaders, tokenizer
from model_with_attention import TextToImageModel, Discriminator
from torchvision.models import vgg19
import torch.nn.functional as F
from torch.nn.utils import spectral_norm

def train():
    """Train the text-to-image GAN (generator ``G`` + discriminator ``D``).

    Per epoch:
      1. Update D on real images (smoothed label 0.9) and detached fakes
         (smoothed label 0.1) conditioned on the text features.
      2. Update G with adversarial + MSE reconstruction + VGG perceptual
         losses; the reconstruction forward pass runs under AMP autocast.

    Full training state (models, optimizers, GradScaler, epoch) is saved to
    ``checkpoints/last.pth`` after every epoch and on Ctrl-C, and restored on
    the next run if the file exists. A 4x4 grid of validation samples is
    written to ``generated_epoch{N}.png`` each epoch; final generator weights
    go to ``outputs/model_weights.pth``.
    """
    # —— 1. Hyper-parameters ——
    EPOCHS     = 500
    BATCH_SIZE = 64
    LR_G       = 3e-4    # generator learning rate
    LR_D       = 5e-5    # D learns slower so it does not overpower G
    LAMBDA_REC = 10.0    # weight of the MSE reconstruction term
    LAMBDA_PER = 1.0     # weight of the VGG perceptual term
    CLIP_MAX   = 1.0     # gradient-norm clip for both networks
    DEVICE     = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("Using device:", DEVICE)

    # —— 2. DataLoaders ——
    train_loader, val_loader = get_dataloaders(batch_size=BATCH_SIZE)

    # —— 3. Models, losses, optimizers, AMP scaler ——
    vocab_size = tokenizer.vocab_size
    print("Tokenizer vocab size:", vocab_size)
    G = TextToImageModel(vocab_size=vocab_size).to(DEVICE)
    D = Discriminator().to(DEVICE)

    # Apply spectral normalization to every Conv2d in D (training stabilizer).
    # NOTE: this renames each affected "*.weight" key to "*.weight_orig" in
    # D.state_dict(); the resume logic below compensates for that.
    for m in D.modules():
        if isinstance(m, nn.Conv2d):
            spectral_norm(m)

    bce_loss   = nn.BCELoss()
    recon_loss = nn.MSELoss()
    opt_G = optim.Adam(G.parameters(), lr=LR_G, betas=(0.5, 0.999))
    opt_D = optim.Adam(D.parameters(), lr=LR_D, betas=(0.5, 0.999))
    scaler = GradScaler()

    # —— 4. Frozen VGG19 feature extractor for the perceptual loss ——
    # Only the first 8 layers of vgg19.features are used.
    # NOTE(review): images appear to be in [-1, 1] (see value_range below) and
    # are fed to VGG without ImageNet mean/std normalization — confirm intended.
    vgg = vgg19(weights="IMAGENET1K_V1").features[:8].to(DEVICE)
    for p in vgg.parameters():
        p.requires_grad = False

    def perceptual_loss(x, y):
        """L1 distance between VGG feature maps of ``x`` and ``y``."""
        fx = vgg(x)
        fy = vgg(y)
        return F.l1_loss(fx, fy)

    # —— 5. Resume training state if a checkpoint exists ——
    ckpt_path   = "checkpoints/last.pth"
    start_epoch = 1
    if os.path.exists(ckpt_path):
        ckpt = torch.load(ckpt_path, map_location=DEVICE)

        # Generator: load only parameters whose name AND shape still match,
        # so a partially changed architecture can still reuse old weights.
        old_g = ckpt["G_state"]
        new_g = G.state_dict()
        fit_g = {k: v for k, v in old_g.items() if k in new_g and v.size() == new_g[k].size()}
        new_g.update(fit_g)
        G.load_state_dict(new_g)

        # Discriminator: remap keys renamed by spectral_norm
        # (old "*.weight" -> new "*.weight_orig"); other tensors (e.g. bias)
        # load directly when the shapes agree.
        old_d = ckpt["D_state"]
        new_d = D.state_dict()
        for k_old, v_old in old_d.items():
            if k_old.endswith(".weight") and (k_old + "_orig") in new_d:
                new_d[k_old + "_orig"] = v_old
            elif k_old in new_d and v_old.size() == new_d[k_old].size():
                new_d[k_old] = v_old
        D.load_state_dict(new_d)

        # Optimizer / scaler state can be incompatible after architecture
        # changes; skip (with a warning) rather than crash.
        try:
            opt_G.load_state_dict(ckpt["optG_state"])
        except Exception:
            print("⚠️  Warning: opt_G state mismatch, skipping optimizer load.")
        try:
            opt_D.load_state_dict(ckpt["optD_state"])
        except Exception:
            print("⚠️  Warning: opt_D state mismatch, skipping optimizer load.")
        try:
            scaler.load_state_dict(ckpt["scaler_state"])
        except Exception:
            pass

        start_epoch = ckpt.get("epoch", 0) + 1
        print(f"Resuming from epoch {start_epoch}")

    losses_G, losses_D = [], []

    # Create the checkpoint dir once, up front, so the interrupt handler can
    # always save (previously created per-epoch inside the loop).
    os.makedirs("checkpoints", exist_ok=True)

    def _save_checkpoint(epoch_num):
        """Persist full training state so a run can resume at epoch_num + 1."""
        torch.save({
            "epoch": epoch_num,
            "G_state": G.state_dict(),
            "D_state": D.state_dict(),
            "optG_state": opt_G.state_dict(),
            "optD_state": opt_D.state_dict(),
            "scaler_state": scaler.state_dict(),
        }, ckpt_path)

    # Bug fix: pre-bind `epoch` so the KeyboardInterrupt handler cannot raise
    # NameError when the interrupt fires before the first loop iteration.
    epoch = start_epoch - 1

    # —— 6. Main training loop ——
    try:
        for epoch in range(start_epoch, EPOCHS + 1):
            G.train(); D.train()
            run_G = run_D = 0.0
            pbar = tqdm(train_loader, desc=f"Epoch {epoch}/{EPOCHS}", leave=False)

            for ids, masks, real_imgs in pbar:
                B = real_imgs.size(0)
                ids       = ids.to(DEVICE, non_blocking=True)
                masks     = masks.to(DEVICE, non_blocking=True)
                real_imgs = real_imgs.to(DEVICE, non_blocking=True)

                # ——— Discriminator update ———
                opt_D.zero_grad()
                # Text features are detached: D's update must not push
                # gradients into G's text encoder.
                with torch.no_grad():
                    _, txt_feat = G.encoder(ids, masks)

                # Two-sided label smoothing: real = 0.9, fake = 0.1.
                real_lbl = torch.full((B,), 0.9, device=DEVICE)
                fake_lbl = torch.full((B,), 0.1, device=DEVICE)

                out_r = D(real_imgs, txt_feat)
                loss_dr = bce_loss(out_r, real_lbl)
                with torch.no_grad():
                    fake_imgs, _ = G(ids, masks)
                out_f = D(fake_imgs, txt_feat)
                loss_df = bce_loss(out_f, fake_lbl)

                loss_D = 0.5 * (loss_dr + loss_df)
                loss_D.backward()
                nn.utils.clip_grad_norm_(D.parameters(), CLIP_MAX)
                opt_D.step()

                # ——— Generator update ———
                opt_G.zero_grad()
                # Only the generator forward + reconstruction loss run under
                # autocast; perceptual and GAN terms use float32 copies.
                with autocast():
                    fake_imgs, _ = G(ids, masks)
                    loss_rec = recon_loss(fake_imgs, real_imgs)
                fake_f32 = fake_imgs.float()
                real_f32 = real_imgs.float()
                loss_per = perceptual_loss(fake_f32, real_f32)
                out_fg   = D(fake_f32, txt_feat)
                # G is rewarded when D labels its output as real.
                loss_gan = bce_loss(out_fg, real_lbl)

                loss_G = loss_gan + LAMBDA_REC * loss_rec + LAMBDA_PER * loss_per
                scaler.scale(loss_G).backward()
                # Unscale before clipping so the clip threshold applies to the
                # true (unscaled) gradient norms.
                scaler.unscale_(opt_G)
                nn.utils.clip_grad_norm_(G.parameters(), CLIP_MAX)
                scaler.step(opt_G)
                scaler.update()

                run_D += loss_D.item()
                run_G += loss_G.item()
                pbar.set_postfix(d=loss_D.item(), g=loss_G.item())

            avg_D = run_D / len(train_loader)
            avg_G = run_G / len(train_loader)
            losses_D.append(avg_D)
            losses_G.append(avg_G)
            print(f"Epoch {epoch} | D_loss: {avg_D:.4f} | G_loss: {avg_G:.4f}")

            # —— Save checkpoint ——
            _save_checkpoint(epoch)

            # —— Generate validation samples ——
            G.eval()
            with torch.no_grad():
                ids_v, masks_v, _ = next(iter(val_loader))
                ids_v   = ids_v.to(DEVICE)
                masks_v = masks_v.to(DEVICE)
                gen, _  = G(ids_v, masks_v)
                save_image(
                    gen[:16],
                    f"generated_epoch{epoch}.png",
                    nrow=4,
                    normalize=True,
                    value_range=(-1, 1)  # generator output assumed in [-1, 1]
                )

    except KeyboardInterrupt:
        print("\nInterrupted! Saving checkpoint …")
        _save_checkpoint(epoch)
        print("Checkpoint saved.")
        return

    # —— 7. Plot loss curves ——
    plt.figure(figsize=(8, 5))
    ep_range = list(range(start_epoch, start_epoch + len(losses_G)))
    plt.plot(ep_range, losses_G, label="G loss")
    plt.plot(ep_range, losses_D, label="D loss")
    plt.xlabel("Epoch"); plt.ylabel("Loss")
    plt.title("Training Losses"); plt.legend(); plt.grid(True)
    plt.tight_layout()
    plt.savefig("loss_curve.png")
    print("Training complete. Loss curve saved to loss_curve.png")

    # Save final generator weights for standalone inference.
    os.makedirs("outputs", exist_ok=True)
    torch.save(G.state_dict(), "outputs/model_weights.pth")
    print("✓ 推理用模型权重已保存：outputs/model_weights.pth")


def main():
    """Script entry point: kick off a full training run."""
    train()


if __name__ == "__main__":
    main()