#!/usr/bin/env python3
# main.py
"""
==========================
ESRGAN超分辨率项目主函数入口
==========================
"""
import logging
import torch
import torch.nn as nn
import torch.optim as optim
from torch.amp import GradScaler # PyTorch 2.0+ 对混合精度训练 API 进行了统一重构，将 torch.cuda.amp.GradScaler 迁移到了通用模块 torch.amp，并要求显式指定设备类型
from torch.amp import autocast  # PyTorch 2.0+ 中将混合精度训练的 autocast 从 torch.cuda.amp 迁移到了更通用的 torch.amp 模块，旧接口已被弃用。
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts
from tqdm import tqdm
import os

from models.generator import ESRGAN
from models.discriminator import Discriminator
from losses.custom_losses import ContentLoss, GANLoss
from datasets.dataset import create_optimized_dataloaders
from utils.helpers import init_tensorboard, save_checkpoint
from utils import setup_logger, default_logger
from config import parse_args

# Dataset auto-download helper, invoked in train() before the loaders are built.
from datasets.downloader import prepare_large_datasets  # 导入下载函数

def train():
    """Run the full ESRGAN adversarial training loop.

    Parses CLI arguments, optionally downloads the datasets, builds the
    generator/discriminator pair, then alternates mixed-precision updates:
    the generator steps every ``grad_accum_steps`` batches (gradient
    accumulation), the discriminator steps every batch. Checkpoints and
    validation run on the intervals given by ``args``; progress goes to the
    "train" logger and TensorBoard.

    Fixes relative to the previous revision:
      * gradient accumulation actually accumulates (grads were zeroed
        every batch before, which silently disabled it);
      * ``retain_graph=True`` removed — the discriminator pass uses
        ``sr_imgs.detach()``, so the generator graph is not reused;
      * one GradScaler per optimizer, since ``GradScaler.update()`` must be
        called once per scaler per iteration and G/D step at different rates;
      * shape check raises instead of ``assert`` (asserts vanish under -O),
        and the per-batch debug ``print`` is gone.
    """
    # Parse command-line arguments.
    args = parse_args()

    # Dedicated training logger; DEBUG level for verbose diagnostics.
    train_logger = setup_logger(
        logger_name="train",
        log_file="train.log",
        log_dir="logs/train",
        level=logging.DEBUG,
    )

    # Optionally fetch the datasets before constructing the dataloaders.
    if args.download_datasets:
        train_logger.info("开始自动下载数据集...")
        prepare_large_datasets(args.dataset_path)
        train_logger.info("数据集下载完成")

    train_loader, val_loader = create_optimized_dataloaders(
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        pin_memory=args.pin_memory,
    )
    train_logger.info(f"数据集加载完成 - 训练集: {len(train_loader.dataset)} 样本, 验证集: {len(val_loader.dataset)} 样本")

    # TensorBoard writer.
    tb_writer = init_tensorboard(os.path.join(args.log_dir, "tensorboard"))

    device = torch.device(args.device)
    train_logger.info(f"使用设备: {device}")

    # Models.
    generator = ESRGAN(scale_factor=args.scale_factor).to(device)
    discriminator = Discriminator().to(device)

    # Loss functions.
    content_criterion = ContentLoss(device)
    gan_criterion = GANLoss(gan_type='vanilla').to(device)

    # Optimizers.
    g_optimizer = optim.Adam(generator.parameters(), lr=args.g_lr, betas=(0.9, 0.999))
    d_optimizer = optim.Adam(discriminator.parameters(), lr=args.d_lr, betas=(0.9, 0.999))

    # Cosine LR schedules with warm restarts (first restart after 100 epochs).
    g_scheduler = CosineAnnealingWarmRestarts(g_optimizer, T_0=100, T_mult=2)
    d_scheduler = CosineAnnealingWarmRestarts(d_optimizer, T_0=100, T_mult=2)

    # Mixed-precision scalers — one per optimizer. GradScaler.update() should
    # run once per scaler per iteration; G steps only every grad_accum_steps
    # batches while D steps every batch, so a shared scaler would see
    # mismatched step/update rates.
    g_scaler = GradScaler('cuda', enabled=args.use_amp, init_scale=2**10)
    d_scaler = GradScaler('cuda', enabled=args.use_amp, init_scale=2**10)

    # Number of batches over which generator gradients accumulate per step.
    grad_accum_steps = 4

    # Resume from a checkpoint if requested.
    start_epoch = 0
    if args.resume:
        if os.path.isfile(args.resume):
            checkpoint = torch.load(args.resume, map_location=device)
            generator.load_state_dict(checkpoint['generator_state_dict'])
            discriminator.load_state_dict(checkpoint['discriminator_state_dict'])
            g_optimizer.load_state_dict(checkpoint['g_optimizer_state_dict'])
            d_optimizer.load_state_dict(checkpoint['d_optimizer_state_dict'])
            start_epoch = checkpoint['epoch'] + 1
            train_logger.info(f"从检查点恢复训练: {args.resume}, 开始于 epoch {start_epoch}")
        else:
            train_logger.warning(f"未找到检查点文件: {args.resume}, 从头开始训练")

    # Training loop.
    train_logger.info("开始训练...")
    for epoch in range(start_epoch, args.epochs):
        generator.train()
        discriminator.train()

        total_g_loss = 0.0
        total_d_loss = 0.0

        pbar = tqdm(train_loader, desc=f"Epoch {epoch+1}/{args.epochs}")

        # Zero G gradients once at epoch start (also drops any leftover
        # partial accumulation from the previous epoch); afterwards they are
        # cleared only right after an optimizer step, so gradients genuinely
        # accumulate across grad_accum_steps batches.
        g_optimizer.zero_grad()

        for batch_idx, (lr_imgs, hr_imgs) in enumerate(pbar):
            lr_imgs = lr_imgs.to(device)
            hr_imgs = hr_imgs.to(device)

            # ---------------------
            #  Generator update
            # ---------------------
            with autocast('cuda', enabled=args.use_amp):
                # Super-resolve the low-resolution batch.
                sr_imgs = generator(lr_imgs)

                # Explicit raise rather than `assert`: asserts are stripped
                # when Python runs with -O.
                if sr_imgs.shape != hr_imgs.shape:
                    raise RuntimeError(
                        f"SR与HR尺寸不匹配！ SR: {sr_imgs.shape}, HR: {hr_imgs.shape}"
                    )

                content_loss = content_criterion(sr_imgs, hr_imgs)
                fake_pred = discriminator(sr_imgs)
                gan_loss = gan_criterion(fake_pred, True)

                # Content loss dominates; the adversarial term is a small nudge.
                g_loss = content_loss * 0.5 + gan_loss * 0.01

            # Average over the accumulation window so the effective update
            # matches one large batch. No retain_graph: the discriminator
            # pass below uses sr_imgs.detach() and never reuses this graph.
            g_scaler.scale(g_loss / grad_accum_steps).backward()

            # Step the generator only at accumulation boundaries.
            if (batch_idx + 1) % grad_accum_steps == 0:
                g_scaler.step(g_optimizer)
                g_scaler.update()
                g_optimizer.zero_grad()

            # ---------------------
            #  Discriminator update (every batch)
            # ---------------------
            d_optimizer.zero_grad()

            with autocast('cuda', enabled=args.use_amp):
                # Real-image branch.
                real_pred = discriminator(hr_imgs)
                real_loss = gan_criterion(real_pred, True)

                # Fake-image branch; detach() keeps D's gradients out of the
                # generator graph.
                fake_pred = discriminator(sr_imgs.detach())
                fake_loss = gan_criterion(fake_pred, False)

                d_loss = (real_loss + fake_loss) * 0.5

            d_scaler.scale(d_loss).backward()
            d_scaler.step(d_optimizer)
            d_scaler.update()

            # Running loss totals for progress reporting.
            total_g_loss += g_loss.item()
            total_d_loss += d_loss.item()

            # Periodic logging.
            if batch_idx % args.log_interval == 0:
                avg_g_loss = total_g_loss / (batch_idx + 1)
                avg_d_loss = total_d_loss / (batch_idx + 1)
                pbar.set_postfix({"G Loss": f"{avg_g_loss:.4f}", "D Loss": f"{avg_d_loss:.4f}"})

                global_step = epoch * len(train_loader) + batch_idx
                tb_writer.add_scalar('Loss/Generator', g_loss.item(), global_step)
                tb_writer.add_scalar('Loss/Discriminator', d_loss.item(), global_step)

        # Per-epoch learning-rate schedule step.
        g_scheduler.step()
        d_scheduler.step()

        # Epoch-average losses.
        avg_g_loss_epoch = total_g_loss / len(train_loader)
        avg_d_loss_epoch = total_d_loss / len(train_loader)
        train_logger.info(f"Epoch {epoch+1} - G Loss: {avg_g_loss_epoch:.4f}, D Loss: {avg_d_loss_epoch:.4f}")

        # Periodic checkpointing.
        if (epoch + 1) % args.save_freq == 0:
            save_checkpoint(
                epoch + 1,
                generator,
                discriminator,
                g_optimizer,
                d_optimizer,
                args.checkpoint_dir,
                train_logger,
            )

        # Periodic validation on content loss only.
        if (epoch + 1) % args.val_interval == 0:
            generator.eval()
            val_loss = 0.0

            with torch.no_grad():
                for lr_imgs, hr_imgs in val_loader:
                    lr_imgs = lr_imgs.to(device)
                    hr_imgs = hr_imgs.to(device)

                    sr_imgs = generator(lr_imgs)
                    loss = content_criterion(sr_imgs, hr_imgs)
                    val_loss += loss.item()

            avg_val_loss = val_loss / len(val_loader)
            train_logger.info(f"验证损失: {avg_val_loss:.4f}")
            tb_writer.add_scalar('Loss/Validation', avg_val_loss, epoch)

            # Log sample images from the last validation batch.
            tb_writer.add_images('LR Images', lr_imgs[:4], epoch)
            tb_writer.add_images('HR Images', hr_imgs[:4], epoch)
            tb_writer.add_images('SR Images', sr_imgs[:4], epoch, dataformats='NCHW')

    # Training finished.
    train_logger.info("训练完成！")
    tb_writer.close()

def main():
    """Entry point: seed all RNGs for reproducibility, then launch training.

    Model construction, device selection, and the training loop itself live
    in ``train()``, which re-parses the same CLI arguments internally.
    """
    args = parse_args()

    # Seed CPU and all CUDA RNGs before any model/dataloader is created so
    # weight init and shuffling are reproducible.
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)

    # Run the training loop.
    train()

# Standard entry guard: run main() only when this file is executed directly,
# not when it is imported as a module.
if __name__ == '__main__':
    main()