from torch import nn, Tensor
from torch.nn import functional as F

class ConvBlock(nn.Module):
    """Reflection-padded convolution with optional up-sampling, norm, and ReLU.

    Order of operations in ``forward``: optional nearest-neighbour up-sampling,
    reflection padding + convolution, optional instance normalisation, optional
    ReLU. Reflection padding of ``kernel_size // 2`` preserves the spatial size
    for odd kernel sizes when ``stride == 1``.

    Args:
        in_channels: number of input channels.
        out_channels: number of output channels.
        kernel_size: convolution kernel size (odd values preserve spatial dims).
        stride: convolution stride.
        norm: if True, apply affine ``InstanceNorm2d`` after the convolution.
        up_sample: optional scale factor for nearest-neighbour interpolation
            applied before the convolution; ``None`` disables up-sampling.
        relu: if True, apply ReLU after the (optional) normalisation.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, norm=True, up_sample=None, relu=True):
        super().__init__()
        self.relu = relu
        self.up_sample = up_sample
        # noinspection PyTypeChecker
        self.block = nn.Sequential(
            nn.ReflectionPad2d(kernel_size // 2),
            nn.Conv2d(in_channels, out_channels, kernel_size, stride))
        # Optional affine instance norm; None when disabled.
        self.norm = nn.InstanceNorm2d(out_channels, affine=True) if norm else None

    def forward(self, x: Tensor) -> Tensor:
        # Explicit `is not None` checks: testing a module's truthiness is a
        # known footgun (e.g. an empty nn.Sequential is falsy), so make the
        # "optional component" intent unambiguous.
        if self.up_sample is not None:
            x = F.interpolate(x, mode="nearest", scale_factor=self.up_sample)

        x = self.block(x)
        if self.norm is not None:
            x = self.norm(x)
        if self.relu:
            x = F.relu(x)

        return x

class ResidualBlock(nn.Module):
    """Residual block: two reflection-padded convolutions plus a skip path.

    The first ConvBlock applies a ReLU, the second does not, so the output is
    ``F(x) + x`` with no activation on the sum. The identity skip assumes the
    branch preserves the input shape (true for the defaults: stride 1, odd
    kernel size).

    Args:
        channels: channel count, identical for input and output.
        kernel_size: kernel size of both convolutions.
        stride: stride of both convolutions.
    """

    def __init__(self, channels, kernel_size=3, stride=1):
        super().__init__()
        branch = [
            ConvBlock(channels, channels, kernel_size=kernel_size, stride=stride, relu=True),
            ConvBlock(channels, channels, kernel_size=kernel_size, stride=stride, relu=False),
        ]
        # Keep the attribute name "block" and the Sequential container so
        # state_dict keys stay compatible with existing checkpoints.
        self.block = nn.Sequential(*branch)

    def forward(self, x: Tensor) -> Tensor:
        residual = x
        return self.block(x) + residual

class TransformerNet(nn.Module):
    """Image-transformation network for fast neural style transfer.

    Pipeline: a downsampling encoder (two stride-2 convolutions), five
    residual blocks (https://arxiv.org/abs/1512.03385, architecture notes at
    http://torch.ch/blog/2016/02/04/resnets.html), then a decoder that
    up-samples with nearest-neighbour interpolation followed by a convolution
    instead of ConvTranspose2d — this reduces checkerboard artifacts
    (http://distill.pub/2016/deconv-checkerboard/).
    """

    def __init__(self):
        super().__init__()
        layers = []
        # Encoder: 3 -> 32 -> 64 -> 128 channels, spatial size divided by 4.
        for c_in, c_out, k, s in ((3, 32, 9, 1), (32, 64, 3, 2), (64, 128, 3, 2)):
            layers.append(ConvBlock(c_in, c_out, kernel_size=k, stride=s))
        # Bottleneck: five residual blocks at 128 channels.
        layers.extend(ResidualBlock(128) for _ in range(5))
        # Decoder: upsample-then-convolve blocks restore the input resolution.
        layers.append(ConvBlock(128, 64, kernel_size=3, stride=1, up_sample=2))
        layers.append(ConvBlock(64, 32, kernel_size=3, stride=1, up_sample=2))
        # Output layer: raw RGB, no normalisation or activation.
        layers.append(ConvBlock(32, 3, kernel_size=9, stride=1, norm=False, relu=False))
        # nn.Sequential(*layers) yields the same child indices as the original
        # literal construction, so checkpoint keys are unchanged.
        self.model = nn.Sequential(*layers)

    def forward(self, x: Tensor) -> Tensor:
        return self.model(x)
