import os
import argparse
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from tqdm import tqdm
from gan_model import Generator as VanillaGenerator, Discriminator as VanillaDiscriminator, weights_init_normal
from wgan_model import Generator as WGANGenerator, Discriminator as WGANDiscriminator
from cgan_model import Generator as CGANGenerator, Discriminator as CGANDiscriminator
from wgan_gp_model import Generator as WGANGPGenerator, Discriminator as WGANGPDiscriminator
import json
from datetime import datetime

def parse_args():
    """Build the command-line interface and return the parsed options."""
    p = argparse.ArgumentParser()
    # Plain numeric/string options: (flag, type, default, help text).
    # Registration order is kept so --help output is unchanged.
    simple_opts = [
        ("--n_epochs", int, 100, "训练轮数"),
        ("--batch_size", int, 64, "批次大小"),
        ("--lr", float, 0.0001, "学习率"),
        ("--b1", float, 0.0, "Adam优化器参数b1"),
        ("--b2", float, 0.9, "Adam优化器参数b2"),
        ("--latent_dim", int, 100, "潜在空间维度"),
        ("--signal_length", int, 1200, "信号长度"),
        ("--model_save_interval", int, 10, "模型保存间隔"),
    ]
    for flag, kind, default, text in simple_opts:
        p.add_argument(flag, type=kind, default=default, help=text)
    # Options with extra constraints are registered individually.
    p.add_argument("--gan_type", type=str, default="vanilla",
                   choices=["vanilla", "wgan", "cgan", "wgan_gp"], help="GAN类型")
    p.add_argument("--n_critic", type=int, default=5, help="WGAN判别器训练次数")
    p.add_argument("--clip_value", type=float, default=0.01, help="WGAN权重裁剪值")
    p.add_argument("--lambda_gp", type=float, default=10.0, help="WGAN-GP梯度惩罚系数")
    return p.parse_args()

def train_vanilla_gan(generator, discriminator, dataloader, args, device, save_dir):
    """Train a standard (vanilla) GAN with the binary cross-entropy objective.

    Per batch, the generator is updated once to fool the discriminator, then
    the discriminator is updated once on real vs. detached fake signals.
    Checkpoints are written every ``args.model_save_interval`` epochs and the
    per-epoch losses plus wall-clock time go to ``training_history.json``.
    """
    opt_g = torch.optim.Adam(generator.parameters(), lr=args.lr, betas=(args.b1, args.b2))
    opt_d = torch.optim.Adam(discriminator.parameters(), lr=args.lr, betas=(args.b1, args.b2))

    # BCELoss assumes the discriminator already squashes output to (0, 1).
    bce = nn.BCELoss()

    # Per-epoch loss history, serialized to JSON at the end.
    history = {'g_loss': [], 'd_loss': [], 'total_loss': [], 'training_time': 0}
    started = datetime.now()

    for epoch in range(args.n_epochs):
        d_running, g_running, batches = 0.0, 0.0, 0

        for step, (real,) in enumerate(dataloader):
            real = real.to(device)
            n = real.shape[0]

            # Target labels: 1 for real samples, 0 for generated ones.
            ones_lbl = torch.ones(n, 1).to(device)
            zeros_lbl = torch.zeros(n, 1).to(device)

            # --- generator update: make D call fakes "real" ---
            opt_g.zero_grad()
            z = torch.randn(n, args.latent_dim).to(device)
            fake_batch = generator(z)
            g_loss = bce(discriminator(fake_batch), ones_lbl)
            g_loss.backward()
            opt_g.step()

            # --- discriminator update: separate real from fake ---
            opt_d.zero_grad()
            loss_real = bce(discriminator(real), ones_lbl)
            # detach() keeps discriminator gradients out of the generator
            loss_fake = bce(discriminator(fake_batch.detach()), zeros_lbl)
            d_loss = (loss_real + loss_fake) / 2
            d_loss.backward()
            opt_d.step()

            d_running += d_loss.item()
            g_running += g_loss.item()
            batches += 1

            if step % 100 == 0:
                print(
                    f"[Epoch {epoch}/{args.n_epochs}] "
                    f"[Batch {step}/{len(dataloader)}] "
                    f"[D loss: {d_loss.item():.4f}] [G loss: {g_loss.item():.4f}]"
                )

        # Epoch-average losses
        mean_d = d_running / batches
        mean_g = g_running / batches
        history['g_loss'].append(float(mean_g))
        history['d_loss'].append(float(mean_d))
        history['total_loss'].append(float(mean_d + mean_g))

        # Periodic checkpoints (includes epoch 0)
        if epoch % args.model_save_interval == 0:
            torch.save(generator.state_dict(), os.path.join(save_dir, f"generator_{epoch}.pth"))
            torch.save(discriminator.state_dict(), os.path.join(save_dir, f"discriminator_{epoch}.pth"))

    # Total wall-clock training time in seconds
    history['training_time'] = (datetime.now() - started).total_seconds()

    # Persist the loss history
    history_path = os.path.join(save_dir, "training_history.json")
    with open(history_path, 'w') as f:
        json.dump(history, f, indent=4)

    # Final model weights
    torch.save(generator.state_dict(), os.path.join(save_dir, "generator_final.pth"))
    torch.save(discriminator.state_dict(), os.path.join(save_dir, "discriminator_final.pth"))

    print(f"训练完成！训练历史已保存到: {history_path}")

def train_wgan(generator, discriminator, dataloader, args, device, save_dir):
    """Train a WGAN with weight clipping (Arjovsky et al., 2017).

    Per batch, the critic is updated ``args.n_critic`` times — its weights
    are clamped to ``[-args.clip_value, args.clip_value]`` after each step to
    enforce the Lipschitz constraint — then the generator is updated once,
    reusing the critic loop's last noise batch. The checkpoint with the
    lowest epoch-average total loss is kept as ``*_best.pth`` in addition to
    periodic and final checkpoints; per-epoch losses go to
    ``training_history.json``.

    Args:
        generator: maps (batch, latent_dim) noise to signals.
        discriminator: critic returning an unbounded realness score.
        dataloader: yields single-element tuples of real signal batches.
        args: parsed CLI namespace (lr, b1, b2, n_epochs, n_critic,
            clip_value, latent_dim, model_save_interval).
        device: torch device to train on.
        save_dir: directory for checkpoints and the history JSON.

    Raises:
        ValueError: if ``args.n_critic < 1`` — the generator step reuses
            noise drawn inside the critic loop, so at least one critic
            iteration is required (previously this crashed with NameError).
    """
    if args.n_critic < 1:
        raise ValueError("n_critic must be >= 1 for WGAN training")

    # NOTE(review): the WGAN paper recommends RMSprop; Adam is kept here to
    # preserve existing behavior.
    optimizer_G = torch.optim.Adam(generator.parameters(), lr=args.lr, betas=(args.b1, args.b2))
    optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=args.lr, betas=(args.b1, args.b2))

    # Best-checkpoint tracking. Heuristic only: Wasserstein losses can be
    # negative, so the D+G sum is not a true sample-quality metric.
    best_loss = float('inf')
    best_epoch = 0

    # Per-epoch loss history, serialized to JSON at the end
    history = {
        'g_loss': [],
        'd_loss': [],
        'total_loss': [],
        'training_time': 0
    }

    start_time = datetime.now()

    for epoch in range(args.n_epochs):
        epoch_loss_D = 0.0
        epoch_loss_G = 0.0
        num_batches = 0

        for i, (real_signals,) in enumerate(dataloader):
            batch_size = real_signals.shape[0]
            real_signals = real_signals.to(device)

            # ---------------------
            # Train the critic n_critic times per generator step
            # ---------------------
            for _ in range(args.n_critic):
                optimizer_D.zero_grad()

                # Sample latent noise
                z = torch.randn(batch_size, args.latent_dim).to(device)

                # Generator gradients are not needed for the critic update;
                # skipping graph construction gives the same values as
                # generator(z).detach() with less memory.
                with torch.no_grad():
                    fake_signals = generator(z)

                # Critic loss: widen the score gap between real and fake
                loss_D = -torch.mean(discriminator(real_signals)) + torch.mean(discriminator(fake_signals))

                loss_D.backward()
                optimizer_D.step()

                # Clamp weights to enforce the Lipschitz constraint
                for p in discriminator.parameters():
                    p.data.clamp_(-args.clip_value, args.clip_value)

            # -----------------
            # Train the generator (reuses the critic loop's last z)
            # -----------------
            optimizer_G.zero_grad()

            gen_signals = generator(z)

            # Generator loss: maximize the critic's score on fakes
            loss_G = -torch.mean(discriminator(gen_signals))

            loss_G.backward()
            optimizer_G.step()

            # Accumulate per-batch losses (critic loss from its last step)
            epoch_loss_D += loss_D.item()
            epoch_loss_G += loss_G.item()
            num_batches += 1

            if i % 100 == 0:
                print(
                    f"[Epoch {epoch}/{args.n_epochs}] "
                    f"[Batch {i}/{len(dataloader)}] "
                    f"[D loss: {loss_D.item():.4f}] [G loss: {loss_G.item():.4f}]"
                )

        # Epoch-average losses
        avg_loss_D = epoch_loss_D / num_batches
        avg_loss_G = epoch_loss_G / num_batches
        total_loss = avg_loss_D + avg_loss_G

        history['g_loss'].append(float(avg_loss_G))
        history['d_loss'].append(float(avg_loss_D))
        history['total_loss'].append(float(total_loss))

        # Keep the lowest-total-loss checkpoint as "best"
        if total_loss < best_loss:
            best_loss = total_loss
            best_epoch = epoch
            torch.save(generator.state_dict(), os.path.join(save_dir, "generator_best.pth"))
            torch.save(discriminator.state_dict(), os.path.join(save_dir, "discriminator_best.pth"))
            print(f"保存最佳模型，轮次: {epoch}, 总损失: {total_loss:.4f}")

        # Periodic checkpoints (includes epoch 0)
        if epoch % args.model_save_interval == 0:
            torch.save(generator.state_dict(), os.path.join(save_dir, f"generator_{epoch}.pth"))
            torch.save(discriminator.state_dict(), os.path.join(save_dir, f"discriminator_{epoch}.pth"))

    # Total wall-clock training time in seconds
    end_time = datetime.now()
    history['training_time'] = (end_time - start_time).total_seconds()

    # Persist the loss history
    history_path = os.path.join(save_dir, "training_history.json")
    with open(history_path, 'w') as f:
        json.dump(history, f, indent=4)

    # Final model weights
    torch.save(generator.state_dict(), os.path.join(save_dir, "generator_final.pth"))
    torch.save(discriminator.state_dict(), os.path.join(save_dir, "discriminator_final.pth"))

    print(f"训练完成！最佳模型在第 {best_epoch} 轮，总损失: {best_loss:.4f}")
    print(f"训练历史已保存到: {history_path}")

def train_cgan(generator, discriminator, dataloader, args, device, save_dir):
    """Train a conditional GAN (CGAN) on all classes jointly.

    Each batch from ``dataloader`` is a ``(signals, labels)`` pair; both the
    generator and the discriminator are conditioned on the integer class
    labels. Checkpoints are written every ``args.model_save_interval``
    epochs; per-epoch losses and the wall-clock training time are dumped to
    ``training_history.json`` in ``save_dir``.
    """
    # Optimizers (identical Adam hyper-parameters for both networks)
    optimizer_G = torch.optim.Adam(generator.parameters(), lr=args.lr, betas=(args.b1, args.b2))
    optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=args.lr, betas=(args.b1, args.b2))
    
    # Adversarial objective on raw logits.
    # NOTE(review): this path uses BCEWithLogitsLoss while the vanilla path
    # uses BCELoss — assumes the CGAN discriminator returns unnormalized
    # logits (no final sigmoid); confirm against cgan_model.
    adversarial_loss = nn.BCEWithLogitsLoss()
    
    # Per-epoch loss history, serialized to JSON at the end
    history = {
        'g_loss': [],
        'd_loss': [],
        'total_loss': [],
        'training_time': 0
    }
    
    # Wall-clock start for the total training time
    start_time = datetime.now()
    
    # Training loop
    for epoch in range(args.n_epochs):
        epoch_loss_D = 0.0
        epoch_loss_G = 0.0
        num_batches = 0
        
        for i, (real_signals, labels) in enumerate(dataloader):
            batch_size = real_signals.shape[0]
            real_signals = real_signals.to(device)
            labels = labels.to(device)
            
            # Target labels: 1 = real, 0 = fake
            valid = torch.ones(batch_size, 1).to(device)
            fake = torch.zeros(batch_size, 1).to(device)
            
            # -----------------
            # Train the generator
            # -----------------
            optimizer_G.zero_grad()
            
            # Sample latent noise
            z = torch.randn(batch_size, args.latent_dim).to(device)
            
            # Generate signals conditioned on the real batch's labels
            gen_signals = generator(z, labels)
            
            # Generator loss: make the discriminator call fakes "real"
            g_loss = adversarial_loss(discriminator(gen_signals, labels), valid)
            
            g_loss.backward()
            optimizer_G.step()
            
            # ---------------------
            # Train the discriminator
            # ---------------------
            optimizer_D.zero_grad()
            
            # Loss on real (signal, label) pairs
            real_loss = adversarial_loss(discriminator(real_signals, labels), valid)
            # Loss on fakes; detach() keeps D's gradients out of the generator
            fake_loss = adversarial_loss(discriminator(gen_signals.detach(), labels), fake)
            d_loss = (real_loss + fake_loss) / 2
            
            d_loss.backward()
            optimizer_D.step()
            
            # Accumulate per-batch losses
            epoch_loss_D += d_loss.item()
            epoch_loss_G += g_loss.item()
            num_batches += 1
            
            if i % 100 == 0:
                print(
                    f"[Epoch {epoch}/{args.n_epochs}] "
                    f"[Batch {i}/{len(dataloader)}] "
                    f"[D loss: {d_loss.item():.4f}] [G loss: {g_loss.item():.4f}]"
                )
        
        # Epoch-average losses
        avg_loss_D = epoch_loss_D / num_batches
        avg_loss_G = epoch_loss_G / num_batches
        total_loss = avg_loss_D + avg_loss_G
        
        # Record losses
        history['g_loss'].append(float(avg_loss_G))
        history['d_loss'].append(float(avg_loss_D))
        history['total_loss'].append(float(total_loss))
        
        # Periodic checkpoints (includes epoch 0)
        if epoch % args.model_save_interval == 0:
            torch.save(generator.state_dict(), os.path.join(save_dir, f"generator_{epoch}.pth"))
            torch.save(discriminator.state_dict(), os.path.join(save_dir, f"discriminator_{epoch}.pth"))
    
    # Total wall-clock training time in seconds
    end_time = datetime.now()
    history['training_time'] = (end_time - start_time).total_seconds()
    
    # Persist the loss history
    history_path = os.path.join(save_dir, "training_history.json")
    with open(history_path, 'w') as f:
        json.dump(history, f, indent=4)
    
    # Final model weights
    torch.save(generator.state_dict(), os.path.join(save_dir, "generator_final.pth"))
    torch.save(discriminator.state_dict(), os.path.join(save_dir, "discriminator_final.pth"))
    
    print(f"训练完成！训练历史已保存到: {history_path}")

def train_wgan_gp(generator, discriminator, dataloader, args, device, save_dir):
    """Train a WGAN-GP (Gulrajani et al., 2017).

    Replaces WGAN's weight clipping with a gradient penalty that pushes the
    critic's gradient norm toward 1 on random real/fake interpolates. The
    critic is updated ``args.n_critic`` times per batch, then the generator
    once (reusing the critic loop's last noise batch). Checkpoints are
    written every ``args.model_save_interval`` epochs; losses go to
    ``training_history.json``.

    Args:
        generator: maps (batch, latent_dim) noise to signals.
        discriminator: critic returning an unbounded realness score.
        dataloader: yields single-element tuples of real signal batches.
        args: parsed CLI namespace (lr, b1, b2, n_epochs, n_critic,
            lambda_gp, latent_dim, model_save_interval).
        device: torch device to train on.
        save_dir: directory for checkpoints and the history JSON.

    Raises:
        ValueError: if ``args.n_critic < 1`` — both the generator step and
            the progress logging depend on values produced inside the critic
            loop, so at least one critic iteration is required (previously
            this crashed with NameError).
    """
    if args.n_critic < 1:
        raise ValueError("n_critic must be >= 1 for WGAN-GP training")

    # Optimizers; the CLI defaults b1=0, b2=0.9 match the WGAN-GP paper
    optimizer_G = torch.optim.Adam(generator.parameters(), lr=args.lr, betas=(args.b1, args.b2))
    optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=args.lr, betas=(args.b1, args.b2))

    # Per-epoch loss history, serialized to JSON at the end
    history = {
        'g_loss': [],
        'd_loss': [],
        'total_loss': [],
        'training_time': 0
    }

    start_time = datetime.now()

    def compute_gradient_penalty(D, real_samples, fake_samples, device):
        """Gradient penalty E[(||grad D(x_hat)||_2 - 1)^2] on interpolates.

        NOTE(review): alpha has shape (batch, 1), which assumes a flat
        (batch, signal_length) sample layout; a channel dimension would not
        broadcast — confirm against the data pipeline.
        """
        # Per-sample random mixing weight between real and fake
        alpha = torch.rand((real_samples.size(0), 1)).to(device)

        # Interpolated samples, marked as requiring grad so the critic's
        # gradient w.r.t. its input can be taken
        interpolates = (alpha * real_samples + ((1 - alpha) * fake_samples)).requires_grad_(True)
        d_interpolates = D(interpolates)

        # d(output)/d(interpolates); grad_outputs of ones sums per-sample scores
        fake = torch.ones(real_samples.size(0), 1).to(device)
        gradients = torch.autograd.grad(
            outputs=d_interpolates,
            inputs=interpolates,
            grad_outputs=fake,
            create_graph=True,
            retain_graph=True,
            only_inputs=True,
        )[0]

        # Penalize deviation of the per-sample gradient norm from 1
        gradients = gradients.view(gradients.size(0), -1)
        gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
        return gradient_penalty

    for epoch in range(args.n_epochs):
        epoch_loss_D = 0.0
        epoch_loss_G = 0.0
        num_batches = 0

        for i, (real_signals,) in enumerate(dataloader):
            batch_size = real_signals.shape[0]
            real_signals = real_signals.to(device)

            # ---------------------
            # Train the critic n_critic times per generator step
            # ---------------------
            for _ in range(args.n_critic):
                optimizer_D.zero_grad()

                # Sample latent noise
                z = torch.randn(batch_size, args.latent_dim).to(device)

                # Generator gradients are not needed for the critic update;
                # no_grad gives the same values as generator(z).detach()
                # without building the generator's graph.
                with torch.no_grad():
                    fake_signals = generator(z)

                gradient_penalty = compute_gradient_penalty(discriminator, real_signals, fake_signals, device)

                # Critic loss: score gap plus weighted gradient penalty
                real_validity = discriminator(real_signals)
                fake_validity = discriminator(fake_signals)
                d_loss = -torch.mean(real_validity) + torch.mean(fake_validity) + args.lambda_gp * gradient_penalty

                d_loss.backward()
                optimizer_D.step()

            # -----------------
            # Train the generator (reuses the critic loop's last z)
            # -----------------
            optimizer_G.zero_grad()

            gen_signals = generator(z)

            # Generator loss: maximize the critic's score on fakes
            fake_validity = discriminator(gen_signals)
            g_loss = -torch.mean(fake_validity)

            g_loss.backward()
            optimizer_G.step()

            # Accumulate per-batch losses (critic loss from its last step)
            epoch_loss_D += d_loss.item()
            epoch_loss_G += g_loss.item()
            num_batches += 1

            if i % 100 == 0:
                print(
                    f"[Epoch {epoch}/{args.n_epochs}] "
                    f"[Batch {i}/{len(dataloader)}] "
                    f"[D loss: {d_loss.item():.4f}] [G loss: {g_loss.item():.4f}] "
                    f"[GP: {gradient_penalty.item():.4f}]"
                )

        # Epoch-average losses
        avg_loss_D = epoch_loss_D / num_batches
        avg_loss_G = epoch_loss_G / num_batches
        total_loss = avg_loss_D + avg_loss_G

        history['g_loss'].append(float(avg_loss_G))
        history['d_loss'].append(float(avg_loss_D))
        history['total_loss'].append(float(total_loss))

        # Periodic checkpoints (includes epoch 0)
        if epoch % args.model_save_interval == 0:
            torch.save(generator.state_dict(), os.path.join(save_dir, f"generator_{epoch}.pth"))
            torch.save(discriminator.state_dict(), os.path.join(save_dir, f"discriminator_{epoch}.pth"))

    # Total wall-clock training time in seconds
    end_time = datetime.now()
    history['training_time'] = (end_time - start_time).total_seconds()

    # Persist the loss history
    history_path = os.path.join(save_dir, "training_history.json")
    with open(history_path, 'w') as f:
        json.dump(history, f, indent=4)

    # Final model weights
    torch.save(generator.state_dict(), os.path.join(save_dir, "generator_final.pth"))
    torch.save(discriminator.state_dict(), os.path.join(save_dir, "discriminator_final.pth"))

    print(f"训练完成！训练历史已保存到: {history_path}")

def train_gan_for_class(class_name, data, args, device):
    """Build the models for ``args.gan_type`` and train them on one class.

    For ``"cgan"``, ``data`` is a ``(signals, labels)`` pair covering all
    classes and one shared conditional model is trained under
    ``models/cgan``; for every other GAN type, ``data`` is the signal array
    of a single class and models are saved under
    ``models/<gan_type>/<class_name>``.

    Args:
        class_name: class label used in the checkpoint path (only "all" for cgan).
        data: numpy signal array, or a ``(signals, labels)`` pair for cgan.
        args: parsed CLI namespace (gan_type, batch_size, latent_dim,
            signal_length, plus the per-type training options).
        device: torch device to train on.

    Raises:
        ValueError: for an unrecognized ``args.gan_type`` (previously an
            unknown type fell through the if/elif chain and crashed with
            UnboundLocalError on ``generator``).
    """
    # Checkpoint directory: cgan trains one conditional model for all classes
    if args.gan_type == "cgan":
        save_dir = os.path.join("models", args.gan_type)
    else:
        save_dir = os.path.join("models", args.gan_type, class_name)
    os.makedirs(save_dir, exist_ok=True)

    # Wrap the numpy data in a shuffling DataLoader
    if args.gan_type == "cgan":
        # For CGAN the caller passes signals together with integer labels
        signals, labels = data
        dataset = TensorDataset(torch.FloatTensor(signals), torch.LongTensor(labels))
    else:
        dataset = TensorDataset(torch.FloatTensor(data))
    dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True)

    # Select the model classes and the matching training routine
    if args.gan_type == "vanilla":
        generator = VanillaGenerator(args.latent_dim, args.signal_length).to(device)
        discriminator = VanillaDiscriminator(args.signal_length).to(device)
        train_fn = train_vanilla_gan
    elif args.gan_type == "wgan":
        generator = WGANGenerator(args.latent_dim, args.signal_length).to(device)
        discriminator = WGANDiscriminator(args.signal_length).to(device)
        train_fn = train_wgan
    elif args.gan_type == "cgan":
        # NOTE(review): n_classes is hard-coded to 10 to match the class
        # mapping built in main() — keep the two in sync.
        generator = CGANGenerator(args.latent_dim, args.signal_length, n_classes=10).to(device)
        discriminator = CGANDiscriminator(args.signal_length, n_classes=10).to(device)
        train_fn = train_cgan
    elif args.gan_type == "wgan_gp":
        generator = WGANGPGenerator(args.latent_dim, args.signal_length).to(device)
        discriminator = WGANGPDiscriminator(args.signal_length).to(device)
        train_fn = train_wgan_gp
    else:
        raise ValueError(f"Unknown gan_type: {args.gan_type}")

    # Initialize weights (normal init shared across all model types)
    generator.apply(weights_init_normal)
    discriminator.apply(weights_init_normal)

    # Run the selected training routine
    train_fn(generator, discriminator, dataloader, args, device, save_dir)

def main():
    """Entry point: load preprocessed CWRU data and train the chosen GAN."""
    args = parse_args()

    # Prefer GPU when one is available
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Run preprocessing on demand if its output directory is missing
    train_data_path = os.path.join("cwru_prepro", "train")
    if not os.path.exists(train_data_path):
        print("预处理数据不存在，开始预处理...")
        import preprocess
        preprocess.main()

    # Load one numpy array per fault class from the preprocessed directory
    data = {}
    for fname in os.listdir(train_data_path):
        if not fname.endswith('.npy'):
            continue
        name = fname.replace('.npy', '')
        data[name] = np.load(os.path.join(train_data_path, fname))
        print(f"加载类别 {name} 的数据，形状: {data[name].shape}")

    if args.gan_type == "cgan":
        # CGAN: pool every class into one dataset with integer labels
        class_mapping = {
            'normal': 0,
            'ball_07': 1, 'ball_14': 2, 'ball_21': 3,
            'inner_07': 4, 'inner_14': 5, 'inner_21': 6,
            'outer_07': 7, 'outer_14': 8, 'outer_21': 9
        }

        chunks = []
        label_list = []
        for name, arr in data.items():
            chunks.append(arr)
            label_list.extend([class_mapping[name]] * len(arr))

        merged = np.concatenate(chunks, axis=0)

        print("开始训练CGAN...")
        train_gan_for_class("all", (merged, np.array(label_list)), args, device)
        print("CGAN训练完成！")
    else:
        # Other GAN types: train one independent model per class
        for name in data:
            print(f"开始训练 {name} 类别的GAN...")
            train_gan_for_class(name, data[name], args, device)
            print(f"{name} 类别的GAN训练完成！")

# Entry-point guard: run training only when executed as a script, not on import.
if __name__ == "__main__":
    main()