"""
GPU Training System for Enhanced FakeTagger - 修复版
确保正确使用GPU的训练系统，集成修复后的消息生成和解码器：
1. 修复消息生成器（连续值编码）
2. 修复消息解码器（鲁棒解码策略）
3. 修复训练损失函数（端到端训练）
4. 保持GPU优化和监控功能
"""

import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
import torchvision.transforms as transforms
import argparse
import os
import numpy as np
import cv2
from datetime import datetime
import json
import time
from tqdm import tqdm
import warnings

# 导入增强版系统
from enhanced_faketagger_system import EnhancedFakeTaggerSystem

def check_gpu_usage():
    """Print a detailed GPU availability report and run a compute smoke test.

    Returns:
        bool: True when CUDA is available and a test matrix multiplication
        succeeds on the GPU; False otherwise.
    """
    print("="*60)
    print("GPU 使用情况检查")
    print("="*60)
    
    # Basic availability checks
    print(f"PyTorch版本: {torch.__version__}")
    print(f"CUDA可用: {torch.cuda.is_available()}")
    
    if not torch.cuda.is_available():
        print("❌ CUDA不可用! 将使用CPU训练")
        return False
    
    print(f"✅ CUDA版本: {torch.version.cuda}")
    print(f"GPU数量: {torch.cuda.device_count()}")
    
    for i in range(torch.cuda.device_count()):
        print(f"GPU {i}: {torch.cuda.get_device_name(i)}")
        mem_total = torch.cuda.get_device_properties(i).total_memory / 1024**3
        print(f"  - 总显存: {mem_total:.1f}GB")
    
    current_device = torch.cuda.current_device()
    print(f"当前GPU: {current_device}")
    
    # Compute smoke test. Previously this code was commented out, so the
    # function always returned True without actually exercising the GPU.
    try:
        device = torch.device('cuda')
        x = torch.randn(1000, 1000, device=device)
        y = torch.randn(1000, 1000, device=device)
        torch.mm(x, y)
        print(f"✅ GPU矩阵乘法成功")

        # Report memory usage after the test allocation
        allocated = torch.cuda.memory_allocated() / 1024**3
        cached = torch.cuda.memory_reserved() / 1024**3
        print(f"显存使用: {allocated:.3f}GB (已分配) / {cached:.3f}GB (已缓存)")

        return True
        
    except Exception as e:
        print(f"❌ GPU计算失败: {e}")
        return False

def check_model_gpu_usage(model, model_name="Model"):
    """Report which device(s) a model's parameters and buffers live on.

    Args:
        model: torch.nn.Module to inspect.
        model_name: Human-readable name used in the printed report.

    Returns:
        bool: True only if all parameters sit on a single CUDA device.
    """
    print(f"\n{model_name} GPU使用检查:")
    
    # Collect the set of devices hosting parameters and the total element count
    param_devices = set()
    param_count = 0
    
    for idx, (name, param) in enumerate(model.named_parameters()):
        param_devices.add(str(param.device))
        param_count += param.numel()
        
        # Fix: the original compared the cumulative *element* count
        # (param_count) against 5, so the per-parameter device line was
        # almost never printed. Use the parameter index instead to show
        # the first few parameters as intended.
        if idx < 5:
            print(f"  参数 {name}: {param.device}")
    
    print(f"  参数总数: {param_count:,}")
    print(f"  参数设备: {list(param_devices)}")
    
    # Buffers (e.g. BatchNorm running stats) may live on different devices
    buffer_devices = set()
    for name, buffer in model.named_buffers():
        buffer_devices.add(str(buffer.device))
    
    if buffer_devices:
        print(f"  缓冲区设备: {list(buffer_devices)}")
    
    # All parameters must share one CUDA device for the check to pass
    if len(param_devices) == 1 and 'cuda' in list(param_devices)[0]:
        print(f"  ✅ {model_name} 正确在GPU上")
        return True
    elif len(param_devices) == 1:
        print(f"  ⚠️ {model_name} 在CPU上")
        return False
    else:
        print(f"  ❌ {model_name} 参数在多个设备上！")
        return False

class GPUFaceDataset(Dataset):
    """Paired face dataset yielding (trump_image, cage_image) tuples.

    Images are loaded with OpenCV, converted BGR->RGB, and passed through
    `transform` if one is given. Pairing is positional: item `idx` pairs the
    idx-th Trump file with the idx-th Cage file (wrapping with modulo).
    """

    # Recognized image extensions (matched case-insensitively)
    _IMAGE_EXTS = ('.jpg', '.png', '.jpeg')

    def __init__(self, trump_path, cage_path, transform=None, max_samples=None, preload_to_gpu=False):
        """
        Args:
            trump_path: Directory containing Trump face images.
            cage_path: Directory containing Cage face images.
            transform: Optional callable applied to each loaded RGB image.
            max_samples: If set, cap each class at this many files (debugging).
            preload_to_gpu: Stored flag; loading itself stays on CPU here.
        """
        self.trump_path = trump_path
        self.cage_path = cage_path
        self.transform = transform
        self.preload_to_gpu = preload_to_gpu
        
        # Collect image files; lower() makes the extension check also accept
        # uppercase variants such as .JPG / .PNG.
        self.trump_files = [f for f in os.listdir(trump_path)
                            if f.lower().endswith(self._IMAGE_EXTS)]
        self.cage_files = [f for f in os.listdir(cage_path)
                           if f.lower().endswith(self._IMAGE_EXTS)]
        
        if max_samples:
            self.trump_files = self.trump_files[:max_samples]
            self.cage_files = self.cage_files[:max_samples]
        
        print(f"数据集初始化:")
        print(f"  - Trump图像: {len(self.trump_files)}")
        print(f"  - Cage图像: {len(self.cage_files)}")
        print(f"  - 预加载到GPU: {preload_to_gpu}")
    
    def __len__(self):
        # Pairs are positional, so the usable length is the smaller class
        return min(len(self.trump_files), len(self.cage_files))
    
    @staticmethod
    def _load_rgb(path):
        """Load an image file as an RGB array, failing loudly on bad files."""
        image = cv2.imread(path)
        if image is None:
            # cv2.imread silently returns None for missing/corrupt files,
            # which would otherwise surface as a cryptic cvtColor error.
            raise IOError(f"Failed to read image: {path}")
        return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    def __getitem__(self, idx):
        # Load the Trump image for this index
        trump_file = self.trump_files[idx % len(self.trump_files)]
        trump_image = self._load_rgb(os.path.join(self.trump_path, trump_file))
        
        # Load the paired Cage image
        cage_file = self.cage_files[idx % len(self.cage_files)]
        cage_image = self._load_rgb(os.path.join(self.cage_path, cage_file))
        
        if self.transform:
            trump_image = self.transform(trump_image)
            cage_image = self.transform(cage_image)
        
        return trump_image, cage_image

def to_cuda(x):
    """Move `x` (a tensor or any object exposing `.to`) onto the default CUDA device."""
    cuda_device = torch.device('cuda')
    return x.to(cuda_device)

def create_gpu_optimized_transforms(image_size=256):
    """Build the training-time preprocessing pipeline.

    Pipeline: numpy array -> PIL image, resize to a square of `image_size`,
    random horizontal flip (p=0.5) for augmentation, tensor conversion
    (CHW, values in [0, 1]), then normalization to [-1, 1].

    Args:
        image_size: Target side length for the square resize.

    Returns:
        A torchvision `Compose` transform for training data.
    """
    steps = [
        transforms.ToPILImage(),
        transforms.Resize((image_size, image_size)),
        transforms.RandomHorizontalFlip(0.5),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    ]
    return transforms.Compose(steps)

def print_detailed_losses(losses, epoch, batch_idx, swap_type):
    """Pretty-print the key training losses and derived quality metrics.

    Args:
        losses: Mapping of loss/metric names to float values; missing keys
            are simply skipped.
        epoch: Zero-based epoch index (printed as epoch+1).
        batch_idx: Batch index within the epoch.
        swap_type: Human-readable swap direction label (e.g. "Trump->Cage").
    """
    sep = '=' * 50
    print(f"\n{sep}")
    print(f"Epoch {epoch+1}, Batch {batch_idx}, {swap_type} 模式")
    print(f"{sep}")

    # Key loss 1: PatchGAN adversarial losses (only if both keys exist)
    has_adversarial = ('discriminator_loss' in losses
                       and 'encoder_adversarial_loss' in losses)
    if has_adversarial:
        print(f"🎯 【PatchGAN对抗Loss】:")
        print(f"   判别器损失 (D_loss): {losses['discriminator_loss']:.6f}")
        print(f"   编码器对抗损失 (G_adv): {losses['encoder_adversarial_loss']:.6f}")
        print(f"   对抗训练状态: ✅ 正常")
    else:
        print(f"⚠️ 【PatchGAN对抗Loss】: 未启用或损失键名不匹配")

    # Key loss 2: encode/decode reconstruction losses (table-driven)
    print(f"🔄 【编码-解码重建Loss】:")
    reconstruction_items = (
        ('encoder_reconstruction_loss', '编码重建损失'),
        ('message_extraction_loss', '消息提取损失'),
        ('binary_loss', '二值化损失'),
        ('consistency_loss', '一致性损失'),
        ('total_decoder_loss', '解码器总损失'),
    )
    for key, label in reconstruction_items:
        if key in losses:
            print(f"   {label}: {losses[key]:.6f}")

    # Performance metrics with per-metric quality verdicts
    print(f"📊 【性能指标】:")
    if 'bit_error_rate' in losses:
        ber = losses['bit_error_rate']
        if ber < 0.1:
            verdict = '✅ 优秀'
        elif ber > 0.4:
            verdict = '❌ 需改进'
        else:
            verdict = '⚠️ 一般'
        print(f"   比特错误率 (BER): {ber:.6f} {verdict}")
    if 'message_accuracy' in losses:
        acc = losses['message_accuracy']
        if acc > 0.8:
            verdict = '✅ 优秀'
        elif acc < 0.1:
            verdict = '❌ 需改进'
        else:
            verdict = '⚠️ 一般'
        print(f"   消息准确率: {acc:.4f} {verdict}")
    if 'soft_ber' in losses:
        print(f"   软误码率: {losses['soft_ber']:.6f}")
    if 'psnr' in losses:
        psnr = losses['psnr']
        if psnr > 30:
            verdict = '✅ 优秀'
        elif psnr < 20:
            verdict = '❌ 需改进'
        else:
            verdict = '⚠️ 一般'
        print(f"   图像质量 (PSNR): {psnr:.2f}dB {verdict}")
    if 'extraction_quality' in losses:
        print(f"   提取质量: {losses['extraction_quality']:.6f}")

    # Total encoder objective
    if 'total_encoder_loss' in losses:
        print(f"💯 【总编码器损失】: {losses['total_encoder_loss']:.6f}")

    # Coarse training-state diagnosis based on BER thresholds
    print(f"🔍 【训练状态分析】:")
    if 'bit_error_rate' in losses:
        ber = losses['bit_error_rate']
        if ber > 0.45:
            status = "🔴 随机猜测状态 - 网络未学习"
        elif ber > 0.3:
            status = "🟡 学习中 - 有改进空间"
        elif ber > 0.1:
            status = "🟢 良好 - 继续训练"
        else:
            status = "🏆 优秀 - 训练成功"
        print(f"   状态: {status}")

    print(f"{sep}")

def _print_epoch_side_summary(title, avg_losses, use_discriminator):
    """Print the per-direction (Trump->Cage or Cage->Trump) epoch averages."""
    print(title)
    print(f"   比特错误率: {avg_losses['bit_error_rate']:.6f}")
    print(f"   消息准确率: {avg_losses['message_accuracy']:.4f}")
    if 'soft_ber' in avg_losses:
        print(f"   软误码率: {avg_losses['soft_ber']:.6f}")
    print(f"   图像质量: {avg_losses['psnr']:.2f}dB")
    if use_discriminator:
        if 'discriminator_loss' in avg_losses:
            print(f"   PatchGAN判别器损失: {avg_losses['discriminator_loss']:.6f}")
        if 'encoder_adversarial_loss' in avg_losses:
            print(f"   编码器对抗损失: {avg_losses['encoder_adversarial_loss']:.6f}")
    if 'total_decoder_loss' in avg_losses:
        print(f"   解码器总损失: {avg_losses['total_decoder_loss']:.6f}")

def train_with_gpu_monitoring(args):
    """Run the GPU-monitored training loop for the enhanced FakeTagger system.

    Sets up the device, dataset and EnhancedFakeTaggerSystem, then trains for
    `args.epochs` epochs while logging losses and GPU memory, saving periodic
    checkpoints and the best model (by average bit error rate).

    Args:
        args: argparse.Namespace with the fields defined in main().
    """
    # FIX: CUDA_VISIBLE_DEVICES must be set *before* the CUDA runtime is
    # initialized (i.e. before any torch.cuda call). Previously it was set
    # after check_gpu_usage() had already touched CUDA, so it had no effect.
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    # GPU availability report + compute smoke test
    if not check_gpu_usage():
        print("GPU不可用，退出训练")
        return

    device = torch.device('cuda')
    print(f"\n✅ 使用设备: {device}")

    # Timestamped results directory for this run
    timestamp = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
    save_name = f"gpu_enhanced_faketagger_{args.message_size}_{args.discriminator_weight}_{timestamp}"
    save_dir = os.path.join(args.save_path, save_name)
    os.makedirs(save_dir, exist_ok=True)

    # Persist the training configuration for reproducibility
    with open(os.path.join(save_dir, 'training_args.json'), 'w') as f:
        json.dump(vars(args), f, indent=4)

    print(f"结果保存到: {save_dir}")

    # Dataset and loader
    train_transform = create_gpu_optimized_transforms(args.image_size)

    train_dataset = GPUFaceDataset(
        trump_path=os.path.join(args.data_path, 'trump'),
        cage_path=os.path.join(args.data_path, 'cage'),
        transform=train_transform,
        max_samples=args.max_samples
    )

    train_loader = DataLoader(
        train_dataset,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.num_workers,
        pin_memory=True,  # pinned host memory speeds up host->GPU copies
        drop_last=True    # keep batch size constant across steps
    )

    print(f"训练数据集大小: {len(train_dataset)}")
    print(f"每个epoch的批次数: {len(train_loader)}")

    # Build the watermarking system (encoder / decoder / discriminator / SimSwap)
    print(f"\n创建增强版FakeTagger系统...")
    system = EnhancedFakeTaggerSystem(
        message_size=args.message_size,
        redundancy_factor=args.redundancy_factor,
        device=device,  # type: ignore
        use_discriminator=args.use_discriminator,
        discriminator_weight=args.discriminator_weight,
        simswap_model_path=args.simswap_path,
        lr=args.lr
    )

    # Sanity-check that each sub-model actually lives on the GPU
    print(f"\n检查模型GPU使用情况...")
    check_model_gpu_usage(system.image_encoder.encoder, "图像编码器")
    if system.use_discriminator:
        check_model_gpu_usage(system.image_encoder.discriminator, "PatchGAN判别器")
    check_model_gpu_usage(system.image_decoder, "图像解码器")

    # SimSwap simulator status
    print(f"SimSwap模拟器设备: {system.simswap_simulator.device}")
    print(f"SimSwap模式: {'模拟' if system.simswap_simulator.use_simulation else '真实'}")

    print(f"\n🚀 开始训练 {args.epochs} 个epochs...")

    # Accumulated training log, dumped to JSON after every epoch
    train_log = {
        'epochs': [],
        'losses': {},
        'gpu_memory': {}
    }

    best_ber = float('inf')

    for epoch in range(args.epochs):
        system.train()
        epoch_start_time = time.time()

        # Per-epoch loss accumulators, keyed lazily from the first batch
        epoch_losses_trump = {}
        epoch_losses_cage = {}

        progress_bar = tqdm(enumerate(train_loader), total=len(train_loader),
                            desc=f'Epoch {epoch+1}/{args.epochs}')

        for batch_idx, (trump_images, cage_images) in progress_bar:
            # non_blocking pairs with pin_memory for async host->GPU copies
            trump_images = trump_images.to(device, non_blocking=True)
            cage_images = cage_images.to(device, non_blocking=True)

            # NOTE(review): asserts are stripped under `python -O`; kept here
            # as cheap debug guards against accidental CPU tensors.
            assert trump_images.device.type == 'cuda', f"Trump images not on GPU: {trump_images.device}"
            assert cage_images.device.type == 'cuda', f"Cage images not on GPU: {cage_images.device}"

            # Train both swap directions on every step
            trump_losses, _ = system.train_step(trump_images, swap_type='A')
            cage_losses, _ = system.train_step(cage_images, swap_type='B')

            # Initialize accumulators from the first batch's keys
            if batch_idx == 0:
                for key in trump_losses.keys():
                    epoch_losses_trump[key] = []
                    epoch_losses_cage[key] = []

            for key in trump_losses.keys():
                epoch_losses_trump[key].append(trump_losses[key])
                epoch_losses_cage[key].append(cage_losses[key])

            progress_bar.set_postfix({
                'Trump_BER': f"{trump_losses['bit_error_rate']:.4f}",
                'Cage_BER': f"{cage_losses['bit_error_rate']:.4f}",
                'GPU_Mem': f'{torch.cuda.memory_allocated()/1024**3:.1f}GB'
            })

            # Periodic detailed loss dump + GPU memory report
            if batch_idx % args.print_freq == 0:
                print_detailed_losses(trump_losses, epoch, batch_idx, "Trump->Cage")
                print_detailed_losses(cage_losses, epoch, batch_idx, "Cage->Trump")

                allocated = torch.cuda.memory_allocated() / 1024**3
                cached = torch.cuda.memory_reserved() / 1024**3
                print(f"💾 GPU内存: {allocated:.2f}GB (已分配) / {cached:.2f}GB (已缓存)")

        # Reduce per-batch lists to epoch means (in place)
        for key in epoch_losses_trump:
            epoch_losses_trump[key] = np.mean(epoch_losses_trump[key])
            epoch_losses_cage[key] = np.mean(epoch_losses_cage[key])

        epoch_time = time.time() - epoch_start_time

        # Epoch summary
        print(f"\n{'='*70}")
        print(f"Epoch {epoch+1}/{args.epochs} 完成 - 用时: {epoch_time:.2f}秒")
        print(f"{'='*70}")

        _print_epoch_side_summary("🔵 Trump->Cage 平均结果:", epoch_losses_trump, args.use_discriminator)
        _print_epoch_side_summary("🔴 Cage->Trump 平均结果:", epoch_losses_cage, args.use_discriminator)

        # Overall training-state diagnosis for the epoch
        avg_ber = (epoch_losses_trump['bit_error_rate'] + epoch_losses_cage['bit_error_rate']) / 2
        avg_acc = (epoch_losses_trump['message_accuracy'] + epoch_losses_cage['message_accuracy']) / 2

        print(f"\n🎯 【总体训练状态】:")
        print(f"   平均比特错误率: {avg_ber:.6f}")
        print(f"   平均消息准确率: {avg_acc:.4f}")

        if avg_ber > 0.45:
            print(f"   🔴 警告: 网络处于随机猜测状态，需要检查:")
            print(f"      - 消息生成和解码器是否正常")
            print(f"      - 训练损失函数是否合理")
            print(f"      - 学习率是否过高")
        elif avg_ber > 0.3:
            print(f"   🟡 网络正在学习，建议继续训练")
        elif avg_ber > 0.1:
            print(f"   🟢 训练效果良好，继续优化")
        else:
            print(f"   🏆 训练效果优秀！")

        print(f"{'='*70}")

        # Append to the JSON training log (both swap directions)
        train_log['epochs'].append(epoch + 1)

        for prefix, side_losses in (('trump', epoch_losses_trump), ('cage', epoch_losses_cage)):
            for key, value in side_losses.items():
                train_log['losses'].setdefault(f'{prefix}_{key}', []).append(value)

        train_log['gpu_memory'][f'epoch_{epoch+1}'] = {
            'allocated_gb': torch.cuda.memory_allocated() / 1024**3,
            'cached_gb': torch.cuda.memory_reserved() / 1024**3
        }

        # Periodic checkpoints
        if (epoch + 1) % args.checkpoint_freq == 0:
            checkpoint_dir = os.path.join(save_dir, f'checkpoint_epoch_{epoch+1}')
            system.save_system(checkpoint_dir)
            print(f"✅ 检查点已保存: epoch {epoch+1}")

        # Track the best model by average BER. Reuses avg_ber computed above
        # instead of recomputing the identical value as the original did.
        if avg_ber < best_ber:
            best_ber = avg_ber
            best_model_dir = os.path.join(save_dir, 'best_model')
            system.save_system(best_model_dir)
            print(f"🏆 新的最佳模型! BER: {best_ber:.6f}")

        # Dump the log after every epoch so a crash loses at most one epoch
        with open(os.path.join(save_dir, 'train_log.json'), 'w') as f:
            json.dump(train_log, f, indent=4)

        # Release cached allocator blocks between epochs
        torch.cuda.empty_cache()

    # Save the final model after all epochs complete
    final_model_dir = os.path.join(save_dir, 'final_model')
    system.save_system(final_model_dir)

    print(f"\n🎉 训练完成!")
    print(f"最佳BER: {best_ber:.6f}")
    print(f"结果保存至: {save_dir}")

def main():
    """Parse command-line arguments, validate data paths, and launch training."""
    parser = argparse.ArgumentParser(description='GPU增强版FakeTagger训练')
    
    # Data options
    parser.add_argument('--data_path', default='./faketagger/resized_data', type=str,
                       help='数据集路径 (包含trump/和cage/文件夹)')
    parser.add_argument('--image_size', default=256, type=int, help='图像大小')
    parser.add_argument('--max_samples', default=None, type=int, 
                       help='每类最大样本数 (用于调试)')
    
    # Model options
    parser.add_argument('--message_size', default=30, type=int, help='原始消息大小')
    parser.add_argument('--redundancy_factor', default=2, type=int, help='冗余因子')
    # FIX: with action='store_true' and default=True the discriminator could
    # never be turned off from the command line. Keep --use_discriminator for
    # backward compatibility and add --no_discriminator to actually disable it.
    parser.add_argument('--use_discriminator', action='store_true', default=True,
                       help='使用PatchGAN判别器')
    parser.add_argument('--no_discriminator', dest='use_discriminator', action='store_false',
                       help='disable the PatchGAN discriminator')
    parser.add_argument('--discriminator_weight', default=0.1, type=float,
                       help='判别器损失权重')
    parser.add_argument('--simswap_path', default=None, type=str,
                       help='自定义SimSwap模型路径')
    
    # Training options
    parser.add_argument('--batch_size', default=16, type=int, help='批大小')
    parser.add_argument('--epochs', default=100, type=int, help='训练轮数')
    parser.add_argument('--lr', default=0.001, type=float, help='学习率')
    parser.add_argument('--num_workers', default=4, type=int, help='数据加载线程数')
    parser.add_argument('--print_freq', default=50, type=int, help='打印频率')
    
    # Output options
    parser.add_argument('--save_path', default='./gpu_enhanced_results', type=str,
                       help='结果保存路径')
    parser.add_argument('--checkpoint_freq', default=10, type=int,
                       help='检查点保存频率')
    
    # GPU selection (value of CUDA_VISIBLE_DEVICES)
    parser.add_argument('--gpu', default='0', type=str, help='使用的GPU ID')
    
    args = parser.parse_args()
    
    # Fail fast if the expected per-class data folders are missing
    trump_path = os.path.join(args.data_path, 'trump')
    cage_path = os.path.join(args.data_path, 'cage')
    
    if not os.path.exists(trump_path):
        raise FileNotFoundError(f"Trump数据路径不存在: {trump_path}")
    if not os.path.exists(cage_path):
        raise FileNotFoundError(f"Cage数据路径不存在: {cage_path}")
    
    print("训练参数:")
    for key, value in vars(args).items():
        print(f"  {key}: {value}")
    
    # Launch the training loop
    train_with_gpu_monitoring(args)

# Script entry point: only run CLI training when executed directly.
if __name__ == '__main__':
    main() 