import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from PIL import Image
import numpy as np
import os
from datetime import datetime
import time
import matplotlib.pyplot as plt

# Fix the random seeds so training runs are reproducible
torch.manual_seed(42)
np.random.seed(42)

class Generator(nn.Module):
    """
    Generator network: maps a latent noise vector to an RGB image.

    Input:  (N, nz, 1, 1) noise tensor.
    Output: (N, nc, 128, 128) image tensor with values in [-1, 1] (Tanh).
    """

    def __init__(self, nz=100, ngf=64, nc=3):
        super(Generator, self).__init__()
        # Build the upsampling stack programmatically; the layer order (and
        # therefore the Sequential indices / state_dict keys) matches the
        # hand-written DCGAN stack exactly.
        channel_steps = [ngf * 16, ngf * 8, ngf * 4, ngf * 2, ngf]
        layers = [
            # Project the latent vector to a (ngf*16) x 4 x 4 feature map.
            nn.ConvTranspose2d(nz, channel_steps[0], 4, 1, 0, bias=False),
            nn.BatchNorm2d(channel_steps[0]),
            nn.ReLU(True),
        ]
        # Each stage halves the channel count and doubles the spatial size:
        # 4 -> 8 -> 16 -> 32 -> 64.
        for c_in, c_out in zip(channel_steps, channel_steps[1:]):
            layers += [
                nn.ConvTranspose2d(c_in, c_out, 4, 2, 1, bias=False),
                nn.BatchNorm2d(c_out),
                nn.ReLU(True),
            ]
        # Final upsample to (nc) x 128 x 128; Tanh bounds the output to [-1, 1].
        layers += [
            nn.ConvTranspose2d(channel_steps[-1], nc, 4, 2, 1, bias=False),
            nn.Tanh(),
        ]
        self.main = nn.Sequential(*layers)

    def forward(self, input):
        return self.main(input)

class Discriminator(nn.Module):
    """
    Discriminator network: scores an image as real vs. generated.

    Input:  (N, nc, 128, 128) image tensor.
    Output: (N,) tensor of probabilities in [0, 1] (Sigmoid).
    """

    def __init__(self, nc=3, ndf=64):
        super(Discriminator, self).__init__()
        widths = [ndf, ndf * 2, ndf * 4, ndf * 8, ndf * 16]
        # First stage has no BatchNorm, matching common DCGAN practice;
        # layer order (Sequential indices) matches the hand-written stack.
        layers = [
            nn.Conv2d(nc, widths[0], 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
        ]
        # Each stage halves the spatial size and doubles the channel count:
        # 128 -> 64 -> 32 -> 16 -> 8 -> 4.
        for c_in, c_out in zip(widths, widths[1:]):
            layers += [
                nn.Conv2d(c_in, c_out, 4, 2, 1, bias=False),
                nn.BatchNorm2d(c_out),
                nn.LeakyReLU(0.2, inplace=True),
            ]
        # Collapse the (ndf*16) x 4 x 4 map to one logit, then Sigmoid.
        layers += [
            nn.Conv2d(widths[-1], 1, 4, 1, 0, bias=False),
            nn.Sigmoid(),
        ]
        self.main = nn.Sequential(*layers)

    def forward(self, input):
        # Flatten (N, 1, 1, 1) into a 1-D probability vector of length N.
        return self.main(input).view(-1, 1).squeeze(1)

def weights_init(m):
    """
    Custom weight initializer, applied via ``net.apply(weights_init)``.

    Conv / ConvTranspose layers get N(0, 0.02) weights; BatchNorm layers get
    N(1, 0.02) scale and zero bias.
    """
    layer_type = m.__class__.__name__
    if 'Conv' in layer_type:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif 'BatchNorm' in layer_type:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)

def create_dataloader(batch_size=64, image_size=128, num_workers=2):
    """
    Build a DataLoader over CIFAR-10 (downloaded via torchvision).

    Images are resized and center-cropped to ``image_size`` and normalized
    to [-1, 1] to match the generator's Tanh output range. Swap in another
    torchvision dataset here if desired.
    """
    # Resize -> center crop -> tensor -> shift each channel from [0, 1] to [-1, 1]
    preprocess = transforms.Compose([
        transforms.Resize(image_size),
        transforms.CenterCrop(image_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])

    # Download CIFAR-10 on first use, then reuse the local copy under ./data
    train_set = torchvision.datasets.CIFAR10(
        root='./data',
        train=True,
        download=True,
        transform=preprocess,
    )

    return DataLoader(
        train_set,
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers,
    )

def save_generated_images(generator, device, epoch, nz=100, num_images=64):
    """
    保存生成的图像
    """
    generator.eval()
    with torch.no_grad():
        # 生成随机噪声
        noise = torch.randn(num_images, nz, 1, 1, device=device)
        # 生成图像
        fake_images = generator(noise)
        
        # 反标准化
        fake_images = (fake_images + 1) / 2.0
        fake_images = torch.clamp(fake_images, 0, 1)
        
        # 创建网格图像
        grid = torchvision.utils.make_grid(fake_images, nrow=8, padding=2)
        
        # 保存图像
        save_dir = os.path.join('resource', 'images')
        os.makedirs(save_dir, exist_ok=True)
        save_path = os.path.join(save_dir, f'epoch_{epoch}.png')
        torchvision.utils.save_image(grid, save_path)
        
        print(f"生成的图像已保存到 {save_path}")

def train_gan(num_epochs=100, batch_size=64, lr_g=0.0002, lr_d=0.0001, nz=100):
    """
    Main GAN training loop (DCGAN-style), with label smoothing, label noise,
    and a throttled discriminator update schedule (D updates every 3rd
    iteration) to keep the discriminator from overpowering the generator.

    Args:
        num_epochs: number of training epochs
        batch_size: mini-batch size for the data loader
        lr_g: generator learning rate
        lr_d: discriminator learning rate (deliberately lower than lr_g)
        nz: dimension of the latent noise vector

    Returns:
        (netG, netD): the trained generator and discriminator.

    Side effects: downloads CIFAR-10, and writes sample image grids, both
    model state_dicts, a loss plot, and a loss CSV under ./resource.
    """
    # Make sure the resource output directory exists
    os.makedirs('resource', exist_ok=True)
    
    # Use the GPU when available
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")
    
    # Build the data loader
    dataloader = create_dataloader(batch_size)
    print(f"数据集大小: {len(dataloader.dataset)}")
    
    # Build the two networks
    netG = Generator(nz).to(device)
    netD = Discriminator().to(device)
    
    # Apply the DCGAN weight initialization scheme
    netG.apply(weights_init)
    netD.apply(weights_init)
    
    # Loss function (binary cross-entropy)
    criterion = nn.BCELoss()
    
    # Optimizers; the discriminator's learning rate (lr_d) is kept lower
    optimizerD = optim.Adam(netD.parameters(), lr=lr_d, betas=(0.5, 0.999))
    optimizerG = optim.Adam(netG.parameters(), lr=lr_g, betas=(0.5, 0.999))
    
    # Label smoothing and label noise (regularizes the discriminator)
    real_label_smooth = 0.9  # smoothed "real" target
    fake_label_smooth = 0.1  # smoothed "fake" target
    label_noise = 0.05       # std of the Gaussian noise added to targets
    
    # Per-iteration loss history
    G_losses = []
    D_losses = []
    
    print("开始训练...")

    # Timing bookkeeping for progress estimates
    start_time = time.time()
    last_time = start_time
    
    for epoch in range(num_epochs):
        epoch_start_time = time.time()

        for i, (data, _) in enumerate(dataloader):
            iter_start_time = time.time()

            # NOTE(review): this shadows the batch_size parameter; the last
            # batch of an epoch may be smaller than the configured size.
            batch_size = data.size(0)
            real_data = data.to(device)
            
            # Update the discriminator only every 3rd iteration;
            # the generator is updated on every iteration.
            update_discriminator = (i % 3 == 0)
            
            if update_discriminator:
                ############################
                # (1) Update the discriminator
                ###########################
                netD.zero_grad()
                
                # Real batch: smoothed targets plus Gaussian noise
                real_labels = torch.full((batch_size,), real_label_smooth, dtype=torch.float, device=device)
                # Add random noise to the targets, then clamp to valid range
                real_labels += torch.randn_like(real_labels) * label_noise
                real_labels = torch.clamp(real_labels, 0.0, 1.0)
                
                output_real = netD(real_data)
                errD_real = criterion(output_real, real_labels)
                errD_real.backward()
                D_x = output_real.mean().item()
                
                # Fake batch: smoothed targets plus Gaussian noise
                noise = torch.randn(batch_size, nz, 1, 1, device=device)
                fake_data = netG(noise)
                fake_labels = torch.full((batch_size,), fake_label_smooth, dtype=torch.float, device=device)
                # Add random noise to the targets, then clamp to valid range
                fake_labels += torch.randn_like(fake_labels) * label_noise
                fake_labels = torch.clamp(fake_labels, 0.0, 1.0)
                
                # detach() keeps gradients from flowing into the generator here
                output_fake = netD(fake_data.detach())
                errD_fake = criterion(output_fake, fake_labels)
                errD_fake.backward()
                D_G_z1 = output_fake.mean().item()
                
                errD = errD_real + errD_fake
                optimizerD.step()
            else:
                # When skipping the D update, still compute the same
                # statistics (gradient-free) so logging stays consistent
                with torch.no_grad():
                    output_real = netD(real_data)
                    D_x = output_real.mean().item()
                    noise = torch.randn(batch_size, nz, 1, 1, device=device)
                    fake_data = netG(noise)
                    output_fake = netD(fake_data.detach())
                    D_G_z1 = output_fake.mean().item()
                    errD = torch.tensor(0.0)  # placeholder
            
            ############################
            # (2) Update the generator - every iteration
            ###########################
            netG.zero_grad()
            
            # If D was skipped, fake_data above was built under no_grad and
            # carries no graph - regenerate it so errG can backpropagate
            if not update_discriminator:
                noise = torch.randn(batch_size, nz, 1, 1, device=device)
                fake_data = netG(noise)
            
            # The generator wants D to classify its output as real
            gen_labels = torch.full((batch_size,), real_label_smooth, dtype=torch.float, device=device)
            output_gen = netD(fake_data)
            errG = criterion(output_gen, gen_labels)
            errG.backward()
            D_G_z2 = output_gen.mean().item()
            optimizerG.step()
            
            # Record losses for the plot/CSV written at the end
            G_losses.append(errG.item())
            if update_discriminator:
                D_losses.append(errD.item())
            else:
                D_losses.append(D_losses[-1] if D_losses else 0.0)  # carry the previous value forward
            
            # Progress logging. NOTE(review): the interval is the (shadowed)
            # mini-batch size, so the logging frequency follows batch size.
            if i % batch_size == 0:
                current_time = time.time()
                iter_time = current_time - iter_start_time
                elapsed_time = current_time - start_time
                avg_time_per_iter = elapsed_time / (epoch * len(dataloader) + i + 1)
                remaining_iters = (num_epochs - epoch - 1) * len(dataloader) + (len(dataloader) - i - 1)
                estimated_remaining_time = avg_time_per_iter * remaining_iters

                update_status = "[D更新]" if update_discriminator else "[D跳过]"
                print(f'[第{epoch}轮/共{num_epochs}轮][第{i}批/共{len(dataloader)}批] {update_status} '
                      f'本次耗时: {iter_time:.2f}s | 已用: {elapsed_time/60:.1f}min | '
                      f'预计剩余: {estimated_remaining_time/60:.1f}min | '
                      f'判别器损失: {errD.item() if update_discriminator else "跳过"} 生成器损失: {errG.item():.4f} '
                      f'判别器对真实图片的评分: {D_x:.4f} 判别器对生成图片的评分: {D_G_z1:.4f} / {D_G_z2:.4f}')
        
        # Save a sample image grid every 10 epochs and after the final epoch
        if epoch % 10 == 0 or epoch == num_epochs - 1:
            save_generated_images(netG, device, epoch, nz)
    
    # Persist both trained models under resource/
    generator_path = os.path.join('resource', 'generator_model.pth')
    discriminator_path = os.path.join('resource', 'discriminator_model.pth')
    torch.save(netG.state_dict(), generator_path)
    torch.save(netD.state_dict(), discriminator_path)
    print(f"模型已保存到 {generator_path} 和 {discriminator_path}")
    
    # Plot the per-iteration loss curves
    plt.figure(figsize=(10, 5))
    plt.title("Generator and Discriminator Loss During Training")
    plt.plot(G_losses, label="G")
    plt.plot(D_losses, label="D")
    plt.xlabel("iterations")
    plt.ylabel("Loss")
    plt.legend()
    plt.savefig('resource/training_loss.png')
    plt.show()
    
    # Also dump the loss history as CSV-style text
    loss_path = os.path.join('resource', 'training_loss.txt')

    with open(loss_path, 'w') as f:
        f.write("Generator Loss,Discriminator Loss\n")
        for g_loss, d_loss in zip(G_losses, D_losses):
            f.write(f"{g_loss},{d_loss}\n")
    
    print(f"训练损失已保存到 {loss_path}")
    
    return netG, netD

def generate_single_image(model_path='resource/generator_model.pth', nz=100):
    """
    Generate one image with a trained generator and save it as
    resource/images/generated_single_<timestamp>.png.

    Args:
        model_path: path to a saved generator state_dict
        nz: latent vector dimension (must match the trained model)

    Returns:
        The path of the saved image file.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Rebuild the generator architecture and load the trained weights
    netG = Generator(nz).to(device)
    netG.load_state_dict(torch.load(model_path, map_location=device))
    netG.eval()

    # Sample one latent vector and decode it
    with torch.no_grad():
        noise = torch.randn(1, nz, 1, 1, device=device)
        fake_image = netG(noise)

        # Map Tanh output from [-1, 1] back to [0, 1]
        fake_image = (fake_image + 1) / 2.0
        fake_image = torch.clamp(fake_image, 0, 1)

        # Timestamped filename avoids overwriting earlier samples
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        save_dir = os.path.join('resource', 'images')
        os.makedirs(save_dir, exist_ok=True)
        filename = os.path.join(save_dir, f'generated_single_{timestamp}.png')
        torchvision.utils.save_image(fake_image, filename)
        # Bug fix: the original printed the literal "(unknown)" instead of
        # the actual saved path.
        print(f"生成的图片已保存为: {filename}")

        return filename

def _main():
    """Interactive entry point: train a new GAN or sample from a saved one."""
    print("GAN图片生成器")
    print("1. 训练新模型")
    print("2. 使用已有模型生成图片")

    choice = input("请选择操作 (1 或 2): ")

    if choice == "1":
        print("开始训练GAN模型...")
        print("注意：训练可能需要较长时间，建议使用GPU")

        # Empty input falls back to the suggested defaults
        num_epochs = int(input("请输入训练轮数 (建议100): ") or "100")
        batch_size = int(input("请输入批次大小 (建议64): ") or "64")

        netG, netD = train_gan(num_epochs=num_epochs, batch_size=batch_size)

        # Produce one sample image with the freshly trained generator
        print("训练完成，生成示例图片...")
        generate_single_image()
    elif choice == "2":
        model_path = os.path.join('resource', 'generator_model.pth')
        if os.path.exists(model_path):
            print("使用已有模型生成图片...")
            generate_single_image(model_path)
        else:
            print(f"未找到训练好的模型文件 '{model_path}'")
            print("请先训练模型或确保模型文件存在")
    else:
        print("无效选择")


if __name__ == "__main__":
    _main()