import torch
import torch.nn as nn

# Select the compute device: use the GPU when CUDA is available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")


# 定义生成器
class Generator(nn.Module):
    """DCGAN-style generator: maps a latent vector to a 1x28x28 image in [-1, 1].

    Args:
        z_dim: dimensionality of the input noise vector (default 100).
    """

    def __init__(self, z_dim=100):
        super(Generator, self).__init__()
        # Transposed-conv pipeline, spatial sizes: (z_dim, 1, 1) -> (128, 7, 7)
        # -> (64, 14, 14) -> (1, 28, 28).
        layers = [
            nn.ConvTranspose2d(z_dim, 128, kernel_size=7, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.ConvTranspose2d(64, 1, kernel_size=4, stride=2, padding=1, bias=False),
            nn.Tanh(),  # squash pixel values into [-1, 1]
        ]
        self.gen = nn.Sequential(*layers)

    def forward(self, x):
        # Reshape (batch, z_dim) noise into (batch, z_dim, 1, 1) feature maps
        # so the transposed convolutions can upsample it.
        x = x.view(-1, x.size(1), 1, 1)
        return self.gen(x)


# 定义判别器
class Discriminator(nn.Module):
    """DCGAN-style discriminator: scores a 1x28x28 image with a real/fake probability.

    Output is a (batch, 1) tensor of values in [0, 1] (Sigmoid head).
    """

    def __init__(self):
        super(Discriminator, self).__init__()
        # Conv pipeline, spatial sizes: (1, 28, 28) -> (64, 14, 14) -> (128, 7, 7),
        # then flatten into a single logistic unit.
        layers = [
            nn.Conv2d(1, 64, kernel_size=4, stride=2, padding=1, bias=False),
            nn.LeakyReLU(0.2),
            nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(128),
            nn.LeakyReLU(0.2),
            nn.Flatten(),
            nn.Linear(128 * 7 * 7, 1),
            nn.Sigmoid(),  # probability in [0, 1]
        ]
        self.dis = nn.Sequential(*layers)

    def forward(self, x):
        return self.dis(x)
