import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

# Generator: encoder-decoder network that fuses an input image with a latent noise vector
class Generator(nn.Module):
    """Image-to-image generator.

    Encodes a 3-channel input image into a 512-channel feature map,
    tiles a latent noise vector across the spatial grid, concatenates it
    with the features, and decodes back to a 3-channel image in [-1, 1]
    (Tanh output).
    """

    def __init__(self, latent_dim=100):
        super(Generator, self).__init__()
        self.latent_dim = latent_dim

        # Encoder: four stride-2 convolutions, each halving the spatial size.
        # The first stage has no BatchNorm, matching the common DCGAN layout.
        down = [(3, 64), (64, 128), (128, 256), (256, 512)]
        enc_layers = []
        for stage, (c_in, c_out) in enumerate(down):
            enc_layers.append(nn.Conv2d(c_in, c_out, kernel_size=4, stride=2, padding=1))
            if stage > 0:
                enc_layers.append(nn.BatchNorm2d(c_out))
            enc_layers.append(nn.LeakyReLU(0.2))
        self.encoder = nn.Sequential(*enc_layers)

        # Decoder: four stride-2 transposed convolutions, each doubling the
        # spatial size. The first stage also consumes the latent_dim noise
        # channels concatenated onto the encoder output.
        up = [(512 + latent_dim, 256), (256, 128), (128, 64)]
        dec_layers = []
        for c_in, c_out in up:
            dec_layers.append(nn.ConvTranspose2d(c_in, c_out, kernel_size=4, stride=2, padding=1))
            dec_layers.append(nn.BatchNorm2d(c_out))
            dec_layers.append(nn.ReLU())
        dec_layers.append(nn.ConvTranspose2d(64, 3, kernel_size=4, stride=2, padding=1))
        dec_layers.append(nn.Tanh())
        self.decoder = nn.Sequential(*dec_layers)

    def forward(self, x, noise):
        """Generate an image from input image `x` and noise `(B, latent_dim)`."""
        features = self.encoder(x)
        batch = noise.size(0)
        # Broadcast the flat noise vector over the encoder's spatial grid so it
        # can be concatenated channel-wise with the feature map.
        noise_map = noise.view(batch, self.latent_dim, 1, 1)
        noise_map = noise_map.repeat(1, 1, features.size(2), features.size(3))
        fused = torch.cat([features, noise_map], 1)
        return self.decoder(fused)

# Discriminator: DCGAN-style convolutional classifier producing a real/fake probability
class Discriminator(nn.Module):
    """DCGAN-style discriminator.

    Four stride-2 convolutions downsample the input (64x64 -> 4x4), then a
    final 4x4 convolution with Sigmoid produces a real/fake probability.
    `forward` returns a `(batch, -1)` tensor — `(batch, 1)` for 64x64 input —
    so it can be compared directly against `(batch, 1)` label tensors.
    """

    def __init__(self):
        super(Discriminator, self).__init__()
        self.model = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(0.2),
            nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(128),
            nn.LeakyReLU(0.2),
            nn.Conv2d(128, 256, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(256),
            nn.LeakyReLU(0.2),
            nn.Conv2d(256, 512, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(512),
            nn.LeakyReLU(0.2),
            nn.Conv2d(512, 1, kernel_size=4, stride=1, padding=0),
            nn.Sigmoid()
        )

    def forward(self, x):
        """Return per-image probabilities with shape `(batch, -1)`.

        BUG FIX: the raw Sequential output is 4-D, e.g. `(batch, 1, 1, 1)`
        for 64x64 input, while the training labels are `(batch, 1)`;
        `nn.BCELoss` raises ValueError on mismatched shapes. Flattening the
        trailing dimensions makes the output directly usable with BCELoss.
        """
        return self.model(x).view(x.size(0), -1)
    
# --- Training configuration -------------------------------------------------
num_epochs = 100
print_interval = 10
# BUG FIX: latent_dim was 3 while Generator() defaulted to latent_dim=100, so
# the noise tensor could not be viewed as (B, 100, 1, 1) at generation time.
# Define it once and pass it explicitly so the two always agree.
latent_dim = 100
lambda_fm = 1  # weight of the feature-matching term in the generator loss
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# TODO: replace with a real torch.utils.data.DataLoader yielding image batches
# of shape (B, 3, 64, 64); iterating None will fail immediately.
dataloader = None

# Instantiate the models and move them to the training device.
# BUG FIX: the models were previously left on CPU while input tensors were
# moved to `device`, causing a device mismatch whenever CUDA is available.
generator = Generator(latent_dim=latent_dim).to(device)
discriminator = Discriminator().to(device)

# Losses and optimizers (Adam betas left at the library defaults).
adversarial_loss = nn.BCELoss()
feature_matching_loss = nn.MSELoss()
optimizer_G = optim.Adam(generator.parameters(), lr=0.0002)
optimizer_D = optim.Adam(discriminator.parameters(), lr=0.0002)

# --- Training loop ----------------------------------------------------------
for epoch in range(num_epochs):
    for real_images in dataloader:
        real_images = real_images.to(device)
        batch_size = real_images.size(0)

        # ---------------- Train the discriminator ----------------
        optimizer_D.zero_grad()

        # Real images should be classified as 1.
        # .view(batch_size, -1) flattens any trailing singleton dims so the
        # output shape matches the (B, 1) label tensor required by BCELoss.
        real_labels = torch.ones(batch_size, 1, device=device)
        real_outputs = discriminator(real_images).view(batch_size, -1)
        real_loss = adversarial_loss(real_outputs, real_labels)

        # Generate fake images.
        # BUG FIX: Generator.forward(x, noise) takes the source image AND a
        # noise vector; the original call passed only `noise`. Noise is sized
        # from generator.latent_dim so it always matches the model.
        noise = torch.randn(batch_size, generator.latent_dim, device=device)
        fake_images = generator(real_images, noise)
        fake_labels = torch.zeros(batch_size, 1, device=device)
        # detach() blocks gradients from flowing into the generator here.
        fake_outputs = discriminator(fake_images.detach()).view(batch_size, -1)
        fake_loss = adversarial_loss(fake_outputs, fake_labels)

        # Total discriminator loss: real + fake terms.
        d_loss = real_loss + fake_loss
        d_loss.backward()
        optimizer_D.step()

        # ---------------- Train the generator ----------------
        optimizer_G.zero_grad()

        # Fresh fakes; the generator wants the discriminator to output 1.
        noise = torch.randn(batch_size, generator.latent_dim, device=device)
        fake_images = generator(real_images, noise)
        target_real = torch.ones(batch_size, 1, device=device)
        fake_outputs = discriminator(fake_images).view(batch_size, -1)
        g_loss = adversarial_loss(fake_outputs, target_real)

        # NOTE(review): this "feature matching" compares the discriminator's
        # FINAL post-sigmoid outputs, not intermediate-layer activations as in
        # standard feature matching — confirm this is intentional.
        real_features = discriminator(real_images).detach().view(batch_size, -1)
        fake_features = discriminator(fake_images).view(batch_size, -1)
        fm_loss = feature_matching_loss(fake_features, real_features)

        # Total generator loss: adversarial + weighted feature-matching term.
        total_g_loss = g_loss + lambda_fm * fm_loss
        total_g_loss.backward()
        optimizer_G.step()

    # Report the last batch's losses every `print_interval` epochs.
    if (epoch + 1) % print_interval == 0:
        print(f"Epoch [{epoch+1}/{num_epochs}], D_loss: {d_loss.item()}, G_loss: {total_g_loss.item()}")