import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms


# Generator model
class Generator(nn.Module):
    """Residual perturbation generator.

    Passes a single-channel map ``matrix`` through a small CNN and adds the
    resulting 3-channel perturbation to ``image``.

    Input:  image  (N, 3, H, W), matrix (N, 1, H, W)
    Output: attacked image (N, 3, H, W)
    """

    def __init__(self):
        super().__init__()

        # 1 -> 64 -> 64 -> 3 channels; kernel_size=3 with padding=1 keeps
        # the spatial size unchanged, so the output can be added to `image`.
        self.conv_layer = nn.Sequential(
            nn.Conv2d(1, 64, kernel_size=3, padding=1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(64, 64, kernel_size=3, padding=1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(64, 3, kernel_size=3, padding=1),
            nn.LeakyReLU(inplace=True),
        )

    def forward(self, image, matrix):
        """Return ``image`` plus the CNN-generated perturbation of ``matrix``."""
        attack_image = image + self.conv_layer(matrix)
        return attack_image


# Discriminator model
# class Discriminator(nn.Module):
#     def __init__(self, img_shape):
#         super(Discriminator, self).__init__()
#         self.features = nn.Sequential(
#             nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1),  # use 64 channels
#             nn.ReLU(),
#             nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1),  # use 64 channels
#             nn.ReLU(),
#             nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1),  # use 64 channels
#             nn.ReLU()
#         )
#
#         # compute the input size of the linear layer
#         self.flat_features = self._flatten_features(img_shape)
#
#         self.classifier = nn.Sequential(
#             nn.Linear(self.flat_features, 128),
#             nn.ReLU(),
#             nn.Linear(128, 1),
#             nn.ReLU()
#         )
#
#     def _flatten_features(self, input_size):
#         dummy_input = torch.zeros(1, 3, *input_size)
#         dummy_output = self.features(dummy_input)
#         return dummy_output.view(dummy_output.size(0), -1).size(1)
#
#     def forward(self, image):
#         x = self.features(image)
#         x = x.view(-1, self.flat_features)
#         x = self.classifier(x)
#         return x
class Discriminator(nn.Module):
    """
    Critic that scores an image as cover vs. steganographic.

    Input:  (N, 3, H, W)
    Output: (N,) — the mean of the final 1-channel feature map per image.
    """

    def __init__(self, hidden_size):
        super().__init__()
        self.version = '1'
        # Width of every intermediate convolutional layer.
        self.hidden_size = hidden_size
        self._models = self._build_models()

    def _conv2d(self, in_channels, out_channels):
        # 3x3 convolution, no padding (each layer shrinks H and W by 2).
        return nn.Conv2d(in_channels, out_channels, kernel_size=3)

    def _build_models(self):
        width = self.hidden_size
        channels = [3, width, width, width]
        layers = []
        # Three Conv -> LeakyReLU -> BatchNorm stages...
        for cin, cout in zip(channels[:-1], channels[1:]):
            layers.append(self._conv2d(cin, cout))
            layers.append(nn.LeakyReLU(inplace=True))
            layers.append(nn.BatchNorm2d(cout))
        # ...followed by a final 1-channel projection.
        layers.append(self._conv2d(width, 1))
        return nn.Sequential(*layers)

    def forward(self, x):
        features = self._models(x)
        # Collapse the (1, H', W') feature map to one score per image.
        return features.flatten(start_dim=1).mean(dim=1)
