import torch.nn as nn
import torch


class ConvBlock(nn.Module):
    """Two same-size convolutions, each followed by BatchNorm and LeakyReLU(0.2).

    The first conv maps in_ch -> out_ch; the second keeps out_ch. With the
    default kernel/padding/stride the spatial size is preserved.

    NOTE(review): ``self.dropout`` is registered but never applied in
    ``forward`` — confirm whether dropout was meant to be part of this block.
    The layers are also re-registered inside ``self.conv_block``; the module
    objects are shared, so parameters are not duplicated, and both sets of
    state_dict keys (``conv1.*`` and ``conv_block.0.*``) refer to the same
    tensors.
    """

    def __init__(self, in_ch, out_ch, kernel_size=(3, 3), padding=1, stride=1):
        super(ConvBlock, self).__init__()
        # Named attributes are kept so existing state_dict keys stay valid.
        self.conv1 = nn.Conv2d(in_channels=in_ch, out_channels=out_ch,
                               kernel_size=kernel_size, stride=stride, padding=padding)
        self.bn1 = nn.BatchNorm2d(out_ch)
        self.leakyrelu = nn.LeakyReLU(0.2)
        self.conv2 = nn.Conv2d(in_channels=out_ch, out_channels=out_ch,
                               kernel_size=kernel_size, stride=1, padding=padding)
        self.bn2 = nn.BatchNorm2d(out_ch)
        self.dropout = nn.Dropout(0.3)  # unused in forward (see class note)
        # Same module objects, registered a second time in execution order.
        self.conv_block = nn.Sequential(
            self.conv1,
            self.bn1,
            self.leakyrelu,
            self.conv2,
            self.bn2,
            self.leakyrelu,
        )

    def forward(self, x):
        """Apply conv -> bn -> act -> conv -> bn -> act and return the result."""
        out = self.leakyrelu(self.bn1(self.conv1(x)))
        out = self.leakyrelu(self.bn2(self.conv2(out)))
        return out


class DownSampling(nn.Module):
    """Spatial down-sampling via max pooling.

    Args:
        kernel_size: Pooling window size. Defaults to 2, the usual
            U-Net halving window (default added; positional callers
            passing both arguments are unaffected).
        stride: Pooling stride. Defaults to 2.
    """

    def __init__(self, kernel_size=2, stride=2):
        super(DownSampling, self).__init__()
        self.maxpool = nn.MaxPool2d(kernel_size=kernel_size, stride=stride)

    def forward(self, x):
        """Return x max-pooled over (kernel_size x kernel_size) windows."""
        return self.maxpool(x)


class UpSampling(nn.Module):
    """2x bilinear up-sampling, a channel-reducing conv, then skip concatenation.

    ``forward(x1, x2)`` upsamples x1 by a factor of 2, convolves it from
    in_ch to out_ch channels, and concatenates the skip tensor x2 along the
    channel dimension, so the result has ``out_ch + x2.shape[1]`` channels.
    x2's spatial size must already match the upsampled x1 — no cropping or
    padding is performed here.
    """

    def __init__(self, in_ch, out_ch, kernel_size=(3, 3), padding=1, stride=1):
        super(UpSampling, self).__init__()
        # align_corners=False is the implicit default for bilinear mode;
        # stating it explicitly pins the behavior and suppresses the
        # "Default upsampling behavior" UserWarning emitted by older torch
        # versions when align_corners is left unset.
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear',
                                    align_corners=False)
        self.conv = nn.Conv2d(in_ch, out_ch, kernel_size, padding=padding, stride=stride)

    def forward(self, x1, x2):
        """Upsample x1, reduce its channels to out_ch, and concat with x2."""
        output = self.upsample(x1)
        output = self.conv(output)
        output = torch.cat([output, x2], dim=1)
        return output
