from torch import nn


class ConvUnit(nn.Module):
    """Conv2d -> BatchNorm2d -> ReLU building block.

    The convolution weights use He (Kaiming) normal initialization, which
    matches the ReLU applied in forward(); the conv bias is zeroed.
    """

    def __init__(self, conv_in, conv_out, conv_size=3, conv_step=1, conv_pad=0):
        super().__init__()
        conv = nn.Conv2d(conv_in, conv_out, conv_size, conv_step, conv_pad)
        # fan_out + relu: preserve activation variance through the ReLU.
        nn.init.kaiming_normal_(conv.weight, mode='fan_out', nonlinearity='relu')
        if conv.bias is not None:
            nn.init.constant_(conv.bias, 0)
        self.conv = conv
        self.bn = nn.BatchNorm2d(conv_out)

    def forward(self, x):
        out = self.conv(x)
        out = self.bn(out)
        return nn.functional.relu(out)


class ConvRelu(nn.Module):
    """ConvUnit (conv -> bn -> relu) with 'same'-style default padding of 1.

    Fix: ConvUnit.forward already ends in a ReLU, so its output is
    non-negative and applying ``self.relu`` on top of it was a guaranteed
    no-op (ReLU is idempotent). forward() now skips the redundant second
    activation; the ``relu`` attribute is kept so any external code that
    references it keeps working, and the state_dict is unchanged (nn.ReLU
    holds no parameters).
    """

    def __init__(self, conv_in, conv_out, conv_size=3, conv_step=1, conv_pad=1):
        super().__init__()
        self.conv = ConvUnit(conv_in, conv_out, conv_size, conv_step, conv_pad)
        # Retained for backward compatibility; no longer applied in forward().
        self.relu = nn.ReLU()

    def forward(self, x):
        # ConvUnit output is already ReLU-activated; a second ReLU would not
        # change any value.
        return self.conv(x)


class ConvPool(nn.Module):
    """ConvRelu block followed by max pooling.

    With the defaults (2x2 pool, stride 2, no padding) the spatial
    dimensions are halved after the convolution stage.
    """

    def __init__(self, conv_in, conv_out, conv_size=3, conv_step=1, conv_pad=1,
                 pool_size=2, pool_step=2, pool_pad=0):
        super().__init__()
        self.conv = ConvRelu(conv_in, conv_out, conv_size, conv_step, conv_pad)
        self.pool = nn.MaxPool2d(pool_size, pool_step, pool_pad)

    def forward(self, x):
        features = self.conv(x)
        return self.pool(features)


class Dense(nn.Module):
    """Fully-connected layer followed by dropout (p=0 disables dropout)."""

    def __init__(self, fc_in, fc_out, dropout=0):
        super().__init__()
        linear = nn.Linear(fc_in, fc_out)
        # NOTE(review): Kaiming init with nonlinearity='relu' assumes a ReLU
        # follows this layer downstream — confirm against callers.
        nn.init.kaiming_normal_(linear.weight, mode='fan_out', nonlinearity='relu')
        if linear.bias is not None:
            nn.init.constant_(linear.bias, 0)
        self.linear = linear
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        projected = self.linear(x)
        return self.dropout(projected)
        