import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import numpy as np
import time
import os
from datetime import datetime

# Fix random seeds so runs are reproducible
torch.manual_seed(42)
np.random.seed(42)

class Generator(nn.Module):
    """DCGAN generator: maps a latent noise vector to a 64x64 RGB image.

    Input:  (N, nz, 1, 1) latent tensor.
    Output: (N, nc, 64, 64) image tensor in [-1, 1] (tanh output).
    """
    def __init__(self, nz=100, ngf=64, nc=3):
        super(Generator, self).__init__()
        # Channel progression for the four upsampling stages:
        # nz -> ngf*8 (4x4) -> ngf*4 (8x8) -> ngf*2 (16x16) -> ngf (32x32)
        widths = [nz, ngf * 8, ngf * 4, ngf * 2, ngf]
        layers = []
        for step in range(4):
            # First stage projects the 1x1 latent to 4x4 (stride 1, no pad);
            # later stages double the spatial size (stride 2, pad 1).
            stride, pad = (1, 0) if step == 0 else (2, 1)
            layers += [
                nn.ConvTranspose2d(widths[step], widths[step + 1], 4, stride, pad, bias=False),
                nn.BatchNorm2d(widths[step + 1]),
                nn.ReLU(True),
            ]
        # Final stage: (ngf) x 32 x 32 -> (nc) x 64 x 64, squashed to [-1, 1]
        layers += [
            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
            nn.Tanh(),
        ]
        self.main = nn.Sequential(*layers)

    def forward(self, input):
        return self.main(input)

class Discriminator(nn.Module):
    """DCGAN discriminator: scores a 64x64 RGB image as real (1) or fake (0).

    Input:  (N, nc, 64, 64) image tensor.
    Output: (N,) tensor of probabilities in [0, 1] (sigmoid output).
    """
    def __init__(self, nc=3, ndf=64):
        super(Discriminator, self).__init__()
        # Input stage: (nc) x 64 x 64 -> (ndf) x 32 x 32; per DCGAN, no
        # batch norm on the first layer.
        blocks = [
            nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
        ]
        # Three downsampling stages, each halving spatial size and doubling
        # channels: ndf -> ndf*2 -> ndf*4 -> ndf*8 (down to 4x4).
        width = ndf
        for _ in range(3):
            blocks += [
                nn.Conv2d(width, width * 2, 4, 2, 1, bias=False),
                nn.BatchNorm2d(width * 2),
                nn.LeakyReLU(0.2, inplace=True),
            ]
            width *= 2
        # Final stage: collapse (ndf*8) x 4 x 4 to one score per sample.
        blocks += [
            nn.Conv2d(width, 1, 4, 1, 0, bias=False),
            nn.Sigmoid(),
        ]
        self.main = nn.Sequential(*blocks)

    def forward(self, input):
        return self.main(input).view(-1, 1).squeeze(1)

def weights_init(m):
    """Custom weight initialization, applied via ``net.apply(weights_init)``.

    Per the DCGAN paper: conv weights ~ N(0, 0.02); batch-norm weights
    ~ N(1, 0.02) with biases zeroed. Other module types are left untouched.
    """
    name = type(m).__name__
    if 'Conv' in name:
        # Covers Conv2d and ConvTranspose2d alike
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif 'BatchNorm' in name:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)

def create_dataloader(batch_size=64, image_size=64, num_workers=2):
    """Build a DataLoader over CIFAR-10, downloaded via torchvision.

    Images are resized and center-cropped to ``image_size`` and normalized
    channel-wise to [-1, 1] to match the generator's tanh output range.
    Swap in another torchvision dataset here if desired.
    """
    preprocess = transforms.Compose([
        transforms.Resize(image_size),
        transforms.CenterCrop(image_size),
        transforms.ToTensor(),
        # (x - 0.5) / 0.5 maps [0, 1] -> [-1, 1]
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])

    # Downloads to ./data on first use
    train_set = torchvision.datasets.CIFAR10(
        root='./data',
        train=True,
        download=True,
        transform=preprocess,
    )

    return DataLoader(
        train_set,
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers,
    )

def save_generated_images(generator, device, epoch, nz=100, num_images=64):
    """
    保存生成的图像
    """
    generator.eval()
    with torch.no_grad():
        # 生成随机噪声
        noise = torch.randn(num_images, nz, 1, 1, device=device)
        # 生成图像
        fake_images = generator(noise)
        
        # 反标准化
        fake_images = (fake_images + 1) / 2.0
        fake_images = torch.clamp(fake_images, 0, 1)
        
        # 创建网格图像
        grid = torchvision.utils.make_grid(fake_images, nrow=8, padding=2)
        
        # 保存图像
        os.makedirs('generated_images', exist_ok=True)
        torchvision.utils.save_image(grid, f'generated_images/epoch_{epoch}.png')
        
        print(f"生成的图像已保存到 generated_images/epoch_{epoch}.png")

def train_gan(num_epochs=100, batch_size=64, lr=0.0002, nz=100):
    """Train a DCGAN on CIFAR-10 with the standard BCE adversarial loss.

    Saves sample grids every 10 epochs, final weights to
    ``generator_model.pth`` / ``discriminator_model.pth``, a loss curve to
    ``training_loss.png``, and per-iteration losses to
    ``resource/training_loss.txt``.

    Args:
        num_epochs: number of passes over the dataset.
        batch_size: mini-batch size for the DataLoader.
        lr: Adam learning rate for both networks.
        nz: latent vector dimensionality.

    Returns:
        (netG, netD): the trained generator and discriminator.
    """
    # Use GPU when available
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")

    dataloader = create_dataloader(batch_size)
    print(f"数据集大小: {len(dataloader.dataset)}")

    # Build the networks and apply the DCGAN weight init
    netG = Generator(nz).to(device)
    netD = Discriminator().to(device)
    netG.apply(weights_init)
    netD.apply(weights_init)

    criterion = nn.BCELoss()

    # Adam with beta1=0.5, as recommended by the DCGAN paper
    optimizerD = optim.Adam(netD.parameters(), lr=lr, betas=(0.5, 0.999))
    optimizerG = optim.Adam(netG.parameters(), lr=lr, betas=(0.5, 0.999))

    # Target labels for the BCE loss
    real_label = 1.
    fake_label = 0.

    # Per-iteration loss history
    G_losses = []
    D_losses = []

    print("开始训练...")

    for epoch in range(num_epochs):
        for i, (data, _) in enumerate(dataloader):
            ############################
            # (1) Update D: maximize log(D(x)) + log(1 - D(G(z)))
            ###########################
            netD.zero_grad()
            real_data = data.to(device)
            # The last batch of an epoch may be smaller than batch_size
            cur_batch = real_data.size(0)
            label = torch.full((cur_batch,), real_label, dtype=torch.float, device=device)

            # Real pass
            output = netD(real_data)
            errD_real = criterion(output, label)
            errD_real.backward()
            D_x = output.mean().item()

            # Fake pass; detach so no gradients flow into G here
            noise = torch.randn(cur_batch, nz, 1, 1, device=device)
            fake = netG(noise)
            label.fill_(fake_label)
            output = netD(fake.detach())
            errD_fake = criterion(output, label)
            errD_fake.backward()
            D_G_z1 = output.mean().item()
            errD = errD_real + errD_fake
            optimizerD.step()

            ############################
            # (2) Update G: maximize log(D(G(z)))
            ###########################
            netG.zero_grad()
            label.fill_(real_label)  # G wants D to classify its output as real
            output = netD(fake)
            errG = criterion(output, label)
            errG.backward()
            D_G_z2 = output.mean().item()
            optimizerG.step()

            G_losses.append(errG.item())
            D_losses.append(errD.item())

            if i % 50 == 0:
                print(f'[{epoch}/{num_epochs}][{i}/{len(dataloader)}] '
                      f'Loss_D: {errD.item():.4f} Loss_G: {errG.item():.4f} '
                      f'D(x): {D_x:.4f} D(G(z)): {D_G_z1:.4f} / {D_G_z2:.4f}')

        # Save a sample grid every 10 epochs and on the final epoch
        if epoch % 10 == 0 or epoch == num_epochs - 1:
            save_generated_images(netG, device, epoch, nz)

    # Persist the trained weights
    torch.save(netG.state_dict(), 'generator_model.pth')
    torch.save(netD.state_dict(), 'discriminator_model.pth')
    print("模型已保存")

    # Plot the loss curves
    plt.figure(figsize=(10, 5))
    plt.title("Generator and Discriminator Loss During Training")
    plt.plot(G_losses, label="G")
    plt.plot(D_losses, label="D")
    plt.xlabel("iterations")
    plt.ylabel("Loss")
    plt.legend()
    plt.savefig('training_loss.png')
    plt.show()

    # BUGFIX: the 'resource' directory was never created, so this write
    # raised FileNotFoundError at the very end of a training run.
    os.makedirs('resource', exist_ok=True)
    loss_path = os.path.join('resource', 'training_loss.txt')

    with open(loss_path, 'w') as f:
        f.write("Generator Loss,Discriminator Loss\n")
        for g_loss, d_loss in zip(G_losses, D_losses):
            f.write(f"{g_loss},{d_loss}\n")

    return netG, netD

def train_gan_improved(num_epochs=100, batch_size=64, lr=0.0002, nz=100):
    """Train with WGAN-GP-style losses to mitigate D/G training imbalance.

    Differences from ``train_gan``: Wasserstein-style losses with a gradient
    penalty, a much lower discriminator learning rate, and ``n_critic``
    discriminator steps per generator step. Per-iteration losses are appended
    in real time to ``resource/training_loss_improved.txt``; weights are
    saved under ``resource/``.

    NOTE(review): the Discriminator ends in a Sigmoid, which a textbook
    WGAN-GP critic would omit — kept as-is to preserve behaviour.

    Args:
        num_epochs: number of passes over the dataset.
        batch_size: mini-batch size for the DataLoader.
        lr: kept for signature compatibility; the optimizers below use
            fixed, hand-tuned learning rates instead.
        nz: latent vector dimensionality.

    Returns:
        (netG, netD): the trained generator and discriminator, matching
        ``train_gan`` and the ``netG, netD = ...`` unpacking in ``__main__``
        (previously this returned the loss lists, which the caller then
        mistook for the models).
    """
    # Use GPU when available
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")

    dataloader = create_dataloader(batch_size)
    print(f"数据集大小: {len(dataloader.dataset)}")

    # Build the networks and apply the DCGAN weight init
    netG = Generator(nz).to(device)
    netD = Discriminator().to(device)
    netG.apply(weights_init)
    netD.apply(weights_init)

    # Ensure the output directory exists before any file is written
    os.makedirs('resource', exist_ok=True)

    def gradient_penalty(netD, real_data, fake_data, device):
        """WGAN-GP penalty: push the critic's gradient norm towards 1 on
        random interpolates between real and fake samples."""
        batch_size = real_data.size(0)
        alpha = torch.rand(batch_size, 1, 1, 1).to(device)
        interpolated = alpha * real_data + (1 - alpha) * fake_data
        interpolated.requires_grad_(True)

        d_interpolated = netD(interpolated)
        gradients = torch.autograd.grad(
            outputs=d_interpolated,
            inputs=interpolated,
            grad_outputs=torch.ones_like(d_interpolated),
            create_graph=True,
            retain_graph=True
        )[0]

        gradients = gradients.view(batch_size, -1)
        gradient_norm = gradients.norm(2, dim=1)
        return ((gradient_norm - 1) ** 2).mean()

    # Very low D learning rate so the critic doesn't overpower the generator
    optimizerD = optim.Adam(netD.parameters(), lr=0.00005, betas=(0.0, 0.9))
    optimizerG = optim.Adam(netG.parameters(), lr=0.0002, betas=(0.0, 0.9))

    lambda_gp = 10  # gradient penalty coefficient
    n_critic = 5    # discriminator updates per generator update

    # Per-iteration loss history
    G_losses = []
    D_losses = []

    # Initialize the realtime loss log with a CSV header
    loss_file_path = 'resource/training_loss_improved.txt'
    with open(loss_file_path, 'w') as f:
        f.write("Iteration,Epoch,Batch,Generator_Loss,Discriminator_Loss,D_Real,D_Fake,Elapsed_Time\n")
        f.flush()

    print("开始改进训练...")
    start_time = time.time()
    iteration_count = 0

    for epoch in range(num_epochs):
        epoch_start_time = time.time()

        for i, (data, _) in enumerate(dataloader):
            iter_start_time = time.time()
            real_data = data.to(device)
            # The last batch of an epoch may be smaller than batch_size
            cur_batch = real_data.size(0)

            # --- Critic updates: n_critic steps per generator step ---
            for _ in range(n_critic):
                netD.zero_grad()

                # Score real data
                d_real = netD(real_data).mean()

                # Score fake data; detach so no gradients flow into G here
                noise = torch.randn(cur_batch, nz, 1, 1, device=device)
                fake_data = netG(noise).detach()
                d_fake = netD(fake_data).mean()

                gp = gradient_penalty(netD, real_data, fake_data, device)

                # Wasserstein loss with gradient penalty
                d_loss = d_fake - d_real + lambda_gp * gp
                d_loss.backward()
                optimizerD.step()

            # --- Generator update ---
            netG.zero_grad()
            noise = torch.randn(cur_batch, nz, 1, 1, device=device)
            fake_data = netG(noise)
            g_loss = -netD(fake_data).mean()  # G maximizes the critic's score
            g_loss.backward()
            optimizerG.step()

            G_losses.append(g_loss.item())
            D_losses.append(d_loss.item())
            iteration_count += 1

            # Append this iteration's stats to the realtime log
            current_time = time.time()
            elapsed_time = current_time - start_time

            with open(loss_file_path, 'a') as f:
                f.write(f"{iteration_count},{epoch},{i},{g_loss.item():.6f},{d_loss.item():.6f},{d_real.item():.6f},{d_fake.item():.6f},{elapsed_time:.2f}\n")
                f.flush()  # make the log tail-able during training

            if i % 50 == 0:
                iter_time = current_time - iter_start_time
                avg_time_per_iter = elapsed_time / iteration_count
                remaining_iters = (num_epochs - epoch - 1) * len(dataloader) + (len(dataloader) - i - 1)
                estimated_remaining_time = avg_time_per_iter * remaining_iters

                print(f'[第{epoch}轮/共{num_epochs}轮][第{i}批/共{len(dataloader)}批] '
                      f'本次耗时: {iter_time:.2f}s | 已用: {elapsed_time/60:.1f}min | '
                      f'预计剩余: {estimated_remaining_time/60:.1f}min | '
                      f'判别器损失: {d_loss.item():.4f} 生成器损失: {g_loss.item():.4f} '
                      f'D(real): {d_real.item():.4f} D(fake): {d_fake.item():.4f}')

        # Save a sample grid every 10 epochs and on the final epoch
        if epoch % 10 == 0 or epoch == num_epochs - 1:
            save_generated_images(netG, device, epoch, nz)

        epoch_time = time.time() - epoch_start_time
        print(f'[Epoch {epoch} 完成] 耗时: {epoch_time:.2f}s')

    # Persist the trained weights
    torch.save(netG.state_dict(), 'resource/generator_model_improved.pth')
    torch.save(netD.state_dict(), 'resource/discriminator_model_improved.pth')

    print(f"改进训练完成，模型已保存，实时损失已记录到 {loss_file_path}")
    return netG, netD

def generate_single_image(model_path='generator_model.pth', nz=100):
    """Load a trained generator and save one generated image to disk.

    Args:
        model_path: path to a saved Generator state_dict.
        nz: latent vector dimensionality (must match the trained model).

    Returns:
        The filename of the saved image (timestamped PNG in the cwd).
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Load the generator weights and switch to inference mode
    netG = Generator(nz).to(device)
    netG.load_state_dict(torch.load(model_path, map_location=device))
    netG.eval()

    with torch.no_grad():
        noise = torch.randn(1, nz, 1, 1, device=device)
        fake_image = netG(noise)

        # Undo the [-1, 1] normalization back to [0, 1] for saving
        fake_image = (fake_image + 1) / 2.0
        fake_image = torch.clamp(fake_image, 0, 1)

        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = f'generated_single_{timestamp}.png'
        torchvision.utils.save_image(fake_image, filename)
        # BUGFIX: the filename was never interpolated into this message
        # (it printed a literal placeholder instead).
        print(f"生成的图片已保存为: {filename}")

        return filename

if __name__ == "__main__":
    # Simple interactive entry point: train a model or sample from one.
    print("GAN图片生成器")
    print("1. 训练新模型")
    print("2. 使用已有模型生成图片")

    choice = input("请选择操作 (1 或 2): ")

    if choice == "1":
        print("开始训练GAN模型...")
        print("注意：训练可能需要较长时间，建议使用GPU")

        # Fall back to the suggested defaults on empty input
        num_epochs = int(input("请输入训练轮数 (建议100): ") or "100")
        batch_size = int(input("请输入批次大小 (建议64): ") or "64")

        netG, netD = train_gan_improved(num_epochs=num_epochs, batch_size=batch_size)

        # BUGFIX: the improved trainer saves its weights under resource/,
        # not the default 'generator_model.pth' path — point the sampler at
        # the file that actually exists after training.
        print("训练完成，生成示例图片...")
        generate_single_image('resource/generator_model_improved.pth')

    elif choice == "2":
        if os.path.exists('generator_model.pth'):
            print("使用已有模型生成图片...")
            generate_single_image()
        else:
            print("未找到训练好的模型文件 'generator_model.pth'")
            print("请先训练模型或确保模型文件存在")
    else:
        print("无效选择")