import torch
import torch.nn as nn
from torch.nn.modules.activation import Sigmoid

# Convolution layer: 5x5 conv (stride-2 downsampling by default) + LeakyReLU.
class Conv(nn.Module):
    """Strided 5x5 convolution block with a LeakyReLU(0.1) activation."""

    def __init__(self, in_channels, out_channels, stride=2):
        super().__init__()
        # padding=2 keeps the spatial size at input_size / stride for a 5x5 kernel.
        conv = nn.Conv2d(in_channels, out_channels, kernel_size=5,
                         stride=stride, padding=2)
        self.conv = nn.Sequential(conv, nn.LeakyReLU(0.1))

    def forward(self, x):
        """Apply convolution then activation to *x*."""
        return self.conv(x)

# Upsampling layer: conv expands channels, PixelShuffle folds them into a
# larger spatial grid (sub-pixel convolution).
class Upscale(nn.Module):
    """Sub-pixel upscaling block: Conv2d -> LeakyReLU(0.1) -> PixelShuffle.

    Args:
        in_channels: number of input feature channels.
        filters: number of output channels after the pixel shuffle.
        upscale_factor: spatial upscale ratio (default 2).
    """

    def __init__(self, in_channels, filters, upscale_factor=2):
        super().__init__()
        self.layers = nn.Sequential(
            # The conv must emit filters * upscale_factor**2 channels so that
            # PixelShuffle can rearrange them into an upscale_factor-times
            # larger grid. (Generalized from the hard-coded filters*4, which
            # is identical for the default upscale_factor=2.)
            nn.Conv2d(in_channels, filters * upscale_factor ** 2,
                      kernel_size=3, padding=1),
            # BUG FIX: original was nn.LeakyReLU(0, 1), i.e. negative_slope=0
            # and inplace=True; the intended slope is 0.1, matching the other
            # blocks in this file.
            nn.LeakyReLU(0.1),
            nn.PixelShuffle(upscale_factor),
        )

    def forward(self, x):
        """Upscale *x* spatially by ``upscale_factor``."""
        return self.layers(x)
    

class Encoder(nn.Module):
    """Encoder: five stride-2 convolutions, two fully connected layers, and
    a final sub-pixel upscale producing a (N, 512, 8, 8) feature map.

    NOTE(review): Linear(16384, ...) implies a 128x128x3 input
    (128 / 2**5 = 4 and 1024 * 4 * 4 = 16384) — confirm against callers.
    """

    def __init__(self):
        super().__init__()
        widths = (3, 64, 128, 256, 512, 1024)
        stages = [Conv(w_in, w_out) for w_in, w_out in zip(widths, widths[1:])]
        stages += [
            nn.Flatten(),
            nn.Linear(16384, 1024),
            nn.Linear(1024, 4 * 4 * 1024),
        ]
        self.layers = nn.Sequential(*stages)
        self.upscale = Upscale(1024, 512)

    def forward(self, x):
        """Encode *x* and return the upscaled latent feature map."""
        feats = self.layers(x)
        # Fold the flat FC output back into a 4x4 spatial grid of 1024 channels.
        grid = feats.reshape(-1, 1024, 4, 4)
        return self.upscale(grid)

    
class Decoder(nn.Module):
    """Decoder: four sub-pixel upscales, then a 5x5 convolution down to
    3 channels and a sigmoid mapping outputs into [0, 1]."""

    def __init__(self):
        super().__init__()
        widths = (512, 256, 128, 64, 32)
        stages = [Upscale(w_in, w_out) for w_in, w_out in zip(widths, widths[1:])]
        stages.append(nn.Conv2d(32, 3, kernel_size=5, padding=2))
        stages.append(nn.Sigmoid())
        self.layers = nn.Sequential(*stages)

    def forward(self, x):
        """Decode feature map *x* into a 3-channel image in [0, 1]."""
        return self.layers(x)
        

# Convolution layer: Conv2d + InstanceNorm + LeakyReLU.
class ConvBlock(nn.Module):
    """3x3 convolution (stride-2 downsampling by default) followed by
    instance normalization and a LeakyReLU(0.1) activation."""

    def __init__(self, in_channels, out_channels, stride=2):
        super().__init__()
        conv = nn.Conv2d(in_channels, out_channels, kernel_size=3,
                         stride=stride, padding=1, bias=True)
        norm = nn.InstanceNorm2d(out_channels)
        self.conv = nn.Sequential(conv, norm, nn.LeakyReLU(0.1))

    def forward(self, x):
        """Apply conv -> instance norm -> activation to *x*."""
        return self.conv(x)

# Transposed-convolution layer: ConvTranspose2d + InstanceNorm + activation.
class DeconvBlock(nn.Module):
    """Stride-2 (by default) 3x3 transposed convolution with instance
    normalization and a selectable activation.

    Args:
        in_channels: input channel count.
        out_channels: output channel count.
        activation: "leaklyrelu" (legacy misspelling kept for backward
            compatibility) or "leakyrelu" selects LeakyReLU(0.1); any other
            value (e.g. "tanh") selects Tanh.
        stride: upsampling factor (default 2).
    """

    def __init__(self, in_channels, out_channels, activation, stride=2):
        super().__init__()
        # Accept both the misspelled token used by existing callers and the
        # correct spelling.
        if activation in ("leaklyrelu", "leakyrelu"):
            act = nn.LeakyReLU(0.1)
        else:
            act = nn.Tanh()
        self.conv = nn.Sequential(
            # output_padding=stride-1 makes the output exactly stride times
            # the input size; generalized from the hard-coded 1, which is
            # identical for the default stride=2 and fixes the ValueError the
            # original raised for stride=1.
            nn.ConvTranspose2d(in_channels, out_channels, kernel_size=3,
                               stride=stride, padding=1,
                               output_padding=stride - 1, bias=True),
            nn.InstanceNorm2d(out_channels),
            act,
        )

    def forward(self, x):
        """Apply transposed conv -> instance norm -> activation to *x*."""
        return self.conv(x)


# Watermark generation module: hourglass stack of conv / deconv blocks.
class WaterMark(nn.Module):
    """Watermark generator: three stride-2 ConvBlocks, one stride-1
    ConvBlock, then three DeconvBlocks ending in a 3-channel tanh output."""

    def __init__(self, in_channels):
        super().__init__()
        down = [
            ConvBlock(in_channels, 6),
            ConvBlock(6, 16),
            ConvBlock(16, 32),
            ConvBlock(32, 32, stride=1),
        ]
        up = [
            DeconvBlock(32, 16, "leaklyrelu"),
            DeconvBlock(16, 6, "leaklyrelu"),
            DeconvBlock(6, 3, "tanh"),
        ]
        self.layers = nn.Sequential(*down, *up)

    def forward(self, x):
        """Generate a watermark image from input *x*."""
        return self.layers(x)
