import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import torch.nn.functional as F
from PIL import Image
import numpy as np
import os
from datetime import datetime
import time
import matplotlib.pyplot as plt
import math

# Fix random seeds so runs are reproducible.
torch.manual_seed(42)
np.random.seed(42)

class MappingNetwork(nn.Module):
    """
    Mapping network: transforms a latent code z into the intermediate
    latent space w.

    One of StyleGAN's core ideas — an MLP applied before style injection,
    used to disentangle the latent space. Built as `num_layers` pairs of
    (Linear, LeakyReLU(0.2)); the first Linear maps z_dim -> w_dim and all
    later ones map w_dim -> w_dim.
    """
    def __init__(self, z_dim=512, w_dim=512, num_layers=8):
        super(MappingNetwork, self).__init__()
        modules = []
        for idx in range(num_layers):
            in_features = z_dim if idx == 0 else w_dim
            modules += [nn.Linear(in_features, w_dim), nn.LeakyReLU(0.2)]
        self.mapping = nn.Sequential(*modules)

    def forward(self, z):
        # Push the latent code through the whole MLP stack.
        return self.mapping(z)

class AdaIN(nn.Module):
    """
    Adaptive instance normalization.

    Normalizes each feature map (per sample, per channel) to zero mean and
    unit variance, then re-modulates it with a per-channel scale and bias
    predicted from the style vector w.
    """
    def __init__(self, num_features, w_dim):
        super(AdaIN, self).__init__()
        self.norm = nn.InstanceNorm2d(num_features, affine=False)
        self.style_scale = nn.Linear(w_dim, num_features)
        self.style_bias = nn.Linear(w_dim, num_features)

    def forward(self, x, w):
        # Per-channel affine parameters from w, broadcast over H and W.
        scale = self.style_scale(w)[:, :, None, None]
        bias = self.style_bias(w)[:, :, None, None]
        return scale * self.norm(x) + bias

class NoiseInjection(nn.Module):
    """
    Per-pixel noise injection with a learned per-channel strength.

    The strength parameter is initialized to zero, so a freshly constructed
    layer is an exact no-op until training moves the weight away from zero.
    When no noise tensor is supplied, fresh Gaussian noise is drawn per call.
    """
    def __init__(self, channels):
        super(NoiseInjection, self).__init__()
        self.weight = nn.Parameter(torch.zeros(1, channels, 1, 1))

    def forward(self, x, noise=None):
        if noise is None:
            b, _, h, w = x.shape
            noise = torch.randn(b, 1, h, w, device=x.device)
        return x + self.weight * noise

class StyleBlock(nn.Module):
    """
    Basic StyleGAN synthesis block.

    Pipeline (twice per block): conv -> noise injection -> LeakyReLU ->
    AdaIN style modulation, optionally preceded by a 2x bilinear upsample.
    """
    def __init__(self, in_channels, out_channels, w_dim, upsample=True):
        super(StyleBlock, self).__init__()
        self.upsample = upsample

        if upsample:
            self.upsample_layer = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)

        self.conv1 = nn.Conv2d(in_channels, out_channels, 3, padding=1)
        self.conv2 = nn.Conv2d(out_channels, out_channels, 3, padding=1)

        self.noise1 = NoiseInjection(out_channels)
        self.noise2 = NoiseInjection(out_channels)

        self.adain1 = AdaIN(out_channels, w_dim)
        self.adain2 = AdaIN(out_channels, w_dim)

        self.activation = nn.LeakyReLU(0.2)

    def forward(self, x, w, noise1=None, noise2=None):
        h = self.upsample_layer(x) if self.upsample else x

        # Run both conv stages through the same conv/noise/act/AdaIN pattern.
        stages = (
            (self.conv1, self.noise1, self.adain1, noise1),
            (self.conv2, self.noise2, self.adain2, noise2),
        )
        for conv, inject, adain, explicit_noise in stages:
            h = conv(h)
            h = inject(h, explicit_noise)
            h = self.activation(h)
            h = adain(h, w)

        return h

class StyleGenerator(nn.Module):
    """
    StyleGAN generator.

    Maps z -> w via a MappingNetwork, then synthesizes an image from a
    learned constant 4x4 input through a stack of StyleBlocks, each
    modulated by the same w. Output is tanh-squashed RGB in [-1, 1].
    """
    def __init__(self, z_dim=512, w_dim=512, img_channels=3, img_size=256):
        super(StyleGenerator, self).__init__()
        self.z_dim = z_dim
        self.w_dim = w_dim
        self.img_size = img_size
        
        # Mapping network (z -> w).
        self.mapping = MappingNetwork(z_dim, w_dim)
        
        # Learned constant input at 4x4 resolution, 512 channels.
        self.constant = nn.Parameter(torch.randn(1, 512, 4, 4))
        
        # Number of synthesis blocks: the first stays at 4x4, each later one
        # doubles resolution, so log2(img_size) - 1 blocks reach img_size.
        self.num_layers = int(math.log2(img_size)) - 1
        
        # Build the synthesis blocks and the per-resolution to-RGB layers.
        self.style_blocks = nn.ModuleList()
        self.to_rgb_layers = nn.ModuleList()
        
        in_channels = 512
        for i in range(self.num_layers):
            # Channel schedule: 512 for the first three blocks, then halved
            # each block (512, 512, 512, 256, 128, 64, 32, ...).
            out_channels = min(512, 512 // (2 ** max(0, i - 2)))
            
            self.style_blocks.append(
                StyleBlock(in_channels, out_channels, w_dim, upsample=(i > 0))
            )
            
            # NOTE(review): forward() only ever uses the LAST to_rgb layer;
            # the earlier ones are unused parameters (no skip connections).
            self.to_rgb_layers.append(
                nn.Conv2d(out_channels, img_channels, 1)
            )
            
            in_channels = out_channels
            
    def forward(self, z, truncation_psi=1.0, noise_inputs=None):
        batch_size = z.shape[0]
        
        # Map the latent code into w space.
        w = self.mapping(z)
        
        # Truncation trick (trades diversity for sample quality).
        # NOTE(review): w_avg is the mean of the CURRENT batch, not a running
        # average maintained over training as in the StyleGAN paper — confirm
        # this is intended.
        if truncation_psi < 1.0:
            w_avg = w.mean(dim=0, keepdim=True)
            w = w_avg + truncation_psi * (w - w_avg)
            
        # Start from the learned constant, tiled across the batch.
        x = self.constant.repeat(batch_size, 1, 1, 1)
        
        # Run through the synthesis blocks.
        for i, (style_block, to_rgb) in enumerate(zip(self.style_blocks, self.to_rgb_layers)):
            # Optional externally supplied noise: two tensors per block.
            noise1 = noise_inputs[i*2] if noise_inputs else None
            noise2 = noise_inputs[i*2+1] if noise_inputs else None
            
            x = style_block(x, w, noise1, noise2)
            
            if i == len(self.style_blocks) - 1:
                # Final block: convert features to an RGB image in [-1, 1].
                rgb = to_rgb(x)
                rgb = torch.tanh(rgb)
                return rgb
                
        # Unreachable whenever at least one style block exists (the loop
        # above returns on its last iteration).
        return x

class StyleDiscriminator(nn.Module):
    """
    StyleGAN discriminator (WGAN critic).

    Progressively downsamples the input image through conv blocks
    (conv -> LeakyReLU -> conv -> LeakyReLU -> AvgPool), then scores it
    with a final conv + linear head over an adaptively pooled 4x4 map.

    NOTE(review): a from-RGB layer is created for every resolution level,
    but forward() only ever uses the first one; the rest are unused
    parameters (kept so existing checkpoints still load).
    """
    def __init__(self, img_channels=3, img_size=256):
        super(StyleDiscriminator, self).__init__()
        self.img_size = img_size
        
        # Number of downsampling blocks (mirrors the generator's depth).
        self.num_layers = int(math.log2(img_size)) - 1
        
        # From-RGB layers (only index 0 is used; see class note).
        self.from_rgb_layers = nn.ModuleList()
        
        # Downsampling discriminator blocks.
        self.blocks = nn.ModuleList()
        
        # Channel schedule per resolution level.
        channels = [512, 512, 512, 512, 256, 128, 64, 32, 16]
        
        # Track the last block's output channels explicitly instead of
        # relying on the loop variable leaking out of the for-loop.
        out_ch = channels[0]
        for i in range(self.num_layers):
            in_ch = min(512, channels[i] if i < len(channels) else 16)
            out_ch = min(512, channels[i+1] if i+1 < len(channels) else 32)
            
            self.from_rgb_layers.append(
                nn.Conv2d(img_channels, in_ch, 1)
            )
            
            self.blocks.append(nn.Sequential(
                nn.Conv2d(in_ch, in_ch, 3, padding=1),
                nn.LeakyReLU(0.2),
                nn.Conv2d(in_ch, out_ch, 3, padding=1),
                nn.LeakyReLU(0.2),
                nn.AvgPool2d(2)
            ))
        
        # Channel count fed into the classification head.
        final_channels = out_ch
        
        # Adaptive pooling guarantees a 4x4 feature map regardless of depth.
        self.adaptive_pool = nn.AdaptiveAvgPool2d((4, 4))
        
        # Final classification head.
        self.final_conv = nn.Conv2d(final_channels, final_channels, 3, padding=1)
        self.final_linear = nn.Linear(final_channels * 4 * 4, 1)
        
    def forward(self, x):
        """Score a batch of images; returns a (batch,) tensor of logits."""
        # Convert RGB input to feature space (first from-RGB layer only).
        x = self.from_rgb_layers[0](x)
        
        # Sequentially downsample through the discriminator blocks.
        for block in self.blocks:
            x = block(x)
        
        x = self.final_conv(x)
        x = F.leaky_relu(x, 0.2)
        
        # Ensure a 4x4 feature map before flattening.
        x = self.adaptive_pool(x)
        
        # Flatten and score.
        x = x.view(x.size(0), -1)
        x = self.final_linear(x)
        
        # BUGFIX: squeeze only the channel dim. The original `x.squeeze()`
        # collapsed ALL size-1 dims, so a batch of size 1 produced a 0-dim
        # scalar instead of shape (1,).
        return x.squeeze(1)

def weights_init(m):
    """
    Initialize a module's weights in place (intended for `net.apply`).

    Conv* and Linear layers get He (Kaiming) initialization tuned for the
    LeakyReLU(0.2) used throughout the networks, with zero bias; BatchNorm
    layers get N(1, 0.02) weights and zero bias. Other modules are left
    untouched.
    """
    name = type(m).__name__
    if 'Conv' in name or 'Linear' in name:
        nn.init.kaiming_normal_(m.weight.data, a=0.2, mode='fan_in')
        if m.bias is not None:
            nn.init.constant_(m.bias.data, 0)
    elif 'BatchNorm' in name:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)

def create_celeba_dataloader(data_path='./data/celeba', batch_size=16, image_size=256, num_workers=2):
    """
    Create a DataLoader over the CelebA face dataset.

    Images are resized, center-cropped to `image_size` and normalized to
    [-1, 1] to match the generator's tanh output range. If CelebA cannot be
    obtained (its download is frequently unavailable), CIFAR-10 is used as
    a fallback.

    Args:
        data_path: root directory for the CelebA download.
        batch_size: samples per batch.
        image_size: square side length of the output images.
        num_workers: number of DataLoader worker processes.

    Returns:
        A shuffling DataLoader with pinned memory.
    """
    transform = transforms.Compose([
        transforms.Resize((image_size, image_size)),
        transforms.CenterCrop(image_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))  # normalize to [-1, 1]
    ])
    
    try:
        # Try to load the CelebA dataset.
        dataset = torchvision.datasets.CelebA(
            root=data_path,
            split='train',
            download=True,
            transform=transform
        )
    except Exception:
        # BUGFIX: the original bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; catching Exception keeps Ctrl-C working.
        print("无法下载CelebA数据集，使用CIFAR-10作为替代...")
        # Fall back to CIFAR-10 when CelebA is unavailable.
        dataset = torchvision.datasets.CIFAR10(
            root='./data',
            train=True,
            download=True,
            transform=transform
        )
    
    dataloader = DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers,
        pin_memory=True
    )
    
    return dataloader

def save_generated_images(generator, device, epoch, z_dim=512, num_images=16):
    """
    Render a grid of generator samples and save it to disk.

    Temporarily switches the generator to eval mode, samples `num_images`
    latent codes, maps the tanh output from [-1, 1] back to [0, 1], and
    writes a 4-column grid to resource/stylegan_images/epoch_{epoch}.png.
    The generator is returned to train mode afterwards.
    """
    generator.eval()
    with torch.no_grad():
        # Draw random latent codes.
        latents = torch.randn(num_images, z_dim, device=device)
        
        # Synthesize images.
        samples = generator(latents)
        
        # De-normalize from [-1, 1] to [0, 1] and clamp for safety.
        samples = ((samples + 1) / 2.0).clamp(0, 1)
        
        # Assemble the image grid.
        grid = torchvision.utils.make_grid(samples, nrow=4, padding=2)
        
        # Write it out.
        os.makedirs('resource/stylegan_images', exist_ok=True)
        save_path = f'resource/stylegan_images/epoch_{epoch}.png'
        torchvision.utils.save_image(grid, save_path)
        
        print(f"生成的图像已保存到: {save_path}")
    
    generator.train()

def gradient_penalty(discriminator, real_samples, fake_samples, device):
    """
    Compute the WGAN-GP gradient penalty (Gulrajani et al.).

    Draws random interpolations between real and fake samples and penalizes
    the squared deviation of each sample's gradient L2 norm from 1, which
    stabilizes WGAN training.

    Args:
        discriminator: critic network mapping image batches to scores.
        real_samples: real image batch of shape (B, C, H, W).
        fake_samples: generated batch, same shape as real_samples.
        device: torch device used for the interpolation coefficients.

    Returns:
        Scalar tensor holding the mean gradient penalty.
    """
    batch_size = real_samples.size(0)
    # One interpolation coefficient per sample, broadcast over C/H/W.
    alpha = torch.rand(batch_size, 1, 1, 1, device=device)
    
    # Interpolated samples require grad so we can differentiate w.r.t. them.
    interpolates = alpha * real_samples + (1 - alpha) * fake_samples
    interpolates.requires_grad_(True)
    
    # Critic scores at the interpolated points.
    d_interpolates = discriminator(interpolates)
    
    # Gradient of the scores w.r.t. the interpolated inputs.
    gradients = torch.autograd.grad(
        outputs=d_interpolates,
        inputs=interpolates,
        grad_outputs=torch.ones_like(d_interpolates),
        create_graph=True,
        retain_graph=True,
        only_inputs=True
    )[0]
    
    # BUGFIX: flatten each sample's gradient before taking the L2 norm.
    # The original `gradients.norm(2, dim=1)` normed only over the channel
    # dim, yielding a (B, H, W) tensor and penalizing per-pixel norms rather
    # than the per-sample gradient norm that WGAN-GP requires.
    gradients = gradients.view(batch_size, -1)
    penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
    return penalty

def train_stylegan(num_epochs=100, batch_size=16, lr_g=0.0001, lr_d=0.0004, 
                   z_dim=512, image_size=256, lambda_gp=10.0, device=None):
    """
    Train StyleGAN with the WGAN-GP objective.

    Builds the data pipeline, networks, optimizers and LR schedulers, then
    alternates one discriminator step and one generator step per batch.
    Sample grids are saved every 10 epochs, checkpoints every 50 epochs,
    and the loss curves are plotted and written to disk at the end.

    Args:
        num_epochs: number of full passes over the dataset.
        batch_size: images per batch.
        lr_g: generator Adam learning rate.
        lr_d: discriminator Adam learning rate.
        z_dim: latent code dimensionality.
        image_size: output resolution (power of two).
        lambda_gp: weight of the gradient-penalty term.
        device: torch device; picks CUDA automatically when None.

    Returns:
        Tuple (netG, netD) of the trained generator and discriminator.
    """
    if device is None:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    
    print(f"使用设备: {device}")
    print(f"图像尺寸: {image_size}x{image_size}")
    print(f"批次大小: {batch_size}")
    
    # Build the data loader.
    dataloader = create_celeba_dataloader(
        batch_size=batch_size, 
        image_size=image_size
    )
    
    print(f"数据集大小: {len(dataloader.dataset)}")
    
    # Build the networks.
    netG = StyleGenerator(z_dim=z_dim, img_size=image_size).to(device)
    netD = StyleDiscriminator(img_size=image_size).to(device)
    
    # Initialize weights.
    netG.apply(weights_init)
    netD.apply(weights_init)
    
    print(f"生成器参数数量: {sum(p.numel() for p in netG.parameters()):,}")
    print(f"判别器参数数量: {sum(p.numel() for p in netD.parameters()):,}")
    
    # Optimizers; beta1=0 as is conventional for WGAN-GP / StyleGAN setups.
    optimizerG = optim.Adam(netG.parameters(), lr=lr_g, betas=(0.0, 0.99))
    optimizerD = optim.Adam(netD.parameters(), lr=lr_d, betas=(0.0, 0.99))
    
    # Exponential per-epoch learning-rate decay.
    schedulerG = optim.lr_scheduler.ExponentialLR(optimizerG, gamma=0.99)
    schedulerD = optim.lr_scheduler.ExponentialLR(optimizerD, gamma=0.99)
    
    # Per-epoch loss history.
    G_losses = []
    D_losses = []
    
    print("开始训练StyleGAN...")
    start_time = time.time()
    
    for epoch in range(num_epochs):
        epoch_start_time = time.time()
        epoch_g_loss = 0.0
        epoch_d_loss = 0.0
        
        for i, (real_data, _) in enumerate(dataloader):
            batch_size_current = real_data.size(0)
            real_data = real_data.to(device)
            
            # ---- Train the discriminator (critic) ----
            for _ in range(1):  # discriminator steps per generator step
                netD.zero_grad()
                
                # Score real data.
                real_output = netD(real_data)
                
                # Generate fakes; detached so no gradient flows into G here.
                noise = torch.randn(batch_size_current, z_dim, device=device)
                fake_data = netG(noise)
                fake_output = netD(fake_data.detach())
                
                # WGAN critic loss: maximize real score, minimize fake score.
                d_loss_real = -torch.mean(real_output)
                d_loss_fake = torch.mean(fake_output)
                
                # Gradient penalty on real/fake interpolations.
                gp = gradient_penalty(netD, real_data, fake_data, device)
                
                # Total discriminator loss.
                d_loss = d_loss_real + d_loss_fake + lambda_gp * gp
                d_loss.backward()
                optimizerD.step()
                
                epoch_d_loss += d_loss.item()
            
            # ---- Train the generator ----
            netG.zero_grad()
            
            # Fresh fakes (gradients flow through G this time).
            noise = torch.randn(batch_size_current, z_dim, device=device)
            fake_data = netG(noise)
            fake_output = netD(fake_data)
            
            # Generator loss (WGAN): maximize the critic's fake score.
            g_loss = -torch.mean(fake_output)
            g_loss.backward()
            optimizerG.step()
            
            epoch_g_loss += g_loss.item()
            
            # Progress logging every 50 batches.
            if i % 50 == 0:
                current_time = time.time()
                elapsed = current_time - start_time
                print(f'[{epoch}/{num_epochs}][{i}/{len(dataloader)}] '
                      f'D损失: {d_loss.item():.4f} G损失: {g_loss.item():.4f} '
                      f'已用时间: {elapsed/60:.1f}min')
        
        # Record average losses for this epoch.
        avg_g_loss = epoch_g_loss / len(dataloader)
        avg_d_loss = epoch_d_loss / len(dataloader)
        G_losses.append(avg_g_loss)
        D_losses.append(avg_d_loss)
        
        # Step the learning-rate schedulers once per epoch.
        schedulerG.step()
        schedulerD.step()
        
        # Save a grid of generated samples periodically.
        if epoch % 10 == 0 or epoch == num_epochs - 1:
            save_generated_images(netG, device, epoch, z_dim)
        
        # Save a full training checkpoint periodically.
        if epoch % 50 == 0 or epoch == num_epochs - 1:
            os.makedirs('resource/stylegan_checkpoints', exist_ok=True)
            torch.save({
                'epoch': epoch,
                'generator_state_dict': netG.state_dict(),
                'discriminator_state_dict': netD.state_dict(),
                'optimizer_g_state_dict': optimizerG.state_dict(),
                'optimizer_d_state_dict': optimizerD.state_dict(),
                'g_losses': G_losses,
                'd_losses': D_losses,
            }, f'resource/stylegan_checkpoints/checkpoint_epoch_{epoch}.pth')
        
        epoch_time = time.time() - epoch_start_time
        print(f'Epoch {epoch} 完成，用时: {epoch_time:.2f}s, '
              f'G损失: {avg_g_loss:.4f}, D损失: {avg_d_loss:.4f}')
    
    # Save the final model weights.
    os.makedirs('resource', exist_ok=True)
    torch.save(netG.state_dict(), 'resource/stylegan_generator.pth')
    torch.save(netD.state_dict(), 'resource/stylegan_discriminator.pth')
    
    # Plot the loss curves.
    plt.figure(figsize=(12, 5))
    
    plt.subplot(1, 2, 1)
    plt.plot(G_losses, label='Generator Loss')
    plt.plot(D_losses, label='Discriminator Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title('StyleGAN Training Loss')
    plt.legend()
    
    plt.subplot(1, 2, 2)
    plt.plot(G_losses, label='Generator Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title('Generator Loss Detail')
    plt.legend()
    
    plt.tight_layout()
    plt.savefig('resource/stylegan_training_loss.png')
    plt.show()
    
    # Save the loss history as CSV-style text.
    with open('resource/stylegan_training_loss.txt', 'w') as f:
        f.write("Epoch,Generator_Loss,Discriminator_Loss\n")
        for epoch, (g_loss, d_loss) in enumerate(zip(G_losses, D_losses)):
            f.write(f"{epoch},{g_loss},{d_loss}\n")
    
    print("训练完成！")
    print(f"模型已保存到: resource/stylegan_generator.pth")
    print(f"损失曲线已保存到: resource/stylegan_training_loss.png")
    
    return netG, netD

def generate_images_with_stylegan(model_path='resource/stylegan_generator.pth', 
                                  num_images=16, z_dim=512, image_size=256, 
                                  truncation_psi=0.7, device=None):
    """
    Sample images from a trained StyleGAN generator checkpoint.

    Loads the generator weights from `model_path`, draws `num_images`
    latent codes, and saves a timestamped 4-column image grid under
    resource/. `truncation_psi` trades diversity for quality: smaller
    values pull latents toward the mean, giving cleaner but less varied
    images.

    Returns:
        The generated image batch, values in [0, 1].
    """
    if device is None:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    
    # Rebuild the generator and restore its trained weights.
    netG = StyleGenerator(z_dim=z_dim, img_size=image_size).to(device)
    netG.load_state_dict(torch.load(model_path, map_location=device))
    netG.eval()
    
    with torch.no_grad():
        # Draw random latent codes.
        latents = torch.randn(num_images, z_dim, device=device)
        
        # Synthesize, applying the truncation trick.
        images = netG(latents, truncation_psi=truncation_psi)
        
        # De-normalize from [-1, 1] to [0, 1].
        images = ((images + 1) / 2.0).clamp(0, 1)
        
        # Assemble and save a timestamped grid.
        grid = torchvision.utils.make_grid(images, nrow=4, padding=2)
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        save_path = f'resource/stylegan_generated_{timestamp}.png'
        torchvision.utils.save_image(grid, save_path)
        
        print(f"生成的图像已保存到: {save_path}")
        
        return images

if __name__ == "__main__":
    # Check CUDA availability.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")
    
    if torch.cuda.is_available():
        # Explicitly initialize the CUDA context.
        torch.cuda.init()
        # Also allocate a tiny tensor to force context creation.
        _ = torch.zeros(1).cuda()
        
        print(f"GPU: {torch.cuda.get_device_name(0)}")
        print(f"显存: {torch.cuda.get_device_properties(0).total_memory / 1024**3:.1f} GB")
    
    # Training hyperparameters.
    config = {
        'num_epochs': 50,
        'batch_size': 12,  # StyleGAN is memory-hungry; tune to your GPU
        'lr_g': 0.0001,
        'lr_d': 0.0004,
        'z_dim': 512,
        'image_size': 256,  # can be set to 128, 256, 512, etc.
        'lambda_gp': 10.0,
        'device': device
    }
    
    print("开始训练StyleGAN...")
    print(f"配置: {config}")
    
    # Start training.
    generator, discriminator = train_stylegan(**config)
    
    # Generate a few example images once training completes.
    print("\n生成示例图像...")
    generate_images_with_stylegan(
        num_images=16,
        truncation_psi=0.7,
        device=device
    )