import math
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
import config.risfnet_config as cfg


class Mish(nn.Module):
    """Mish activation: f(x) = x * tanh(softplus(x)).

    Smooth, non-monotonic alternative to ReLU (Misra, 2019).
    """

    def forward(self, x):
        # softplus(x) = ln(1 + e^x); multiplying by x keeps the
        # self-gating form of the activation.
        gate = torch.tanh(F.softplus(x))
        return x * gate


# Registry of supported normalization layers (looked up by string key).
norm_name = {"bn": nn.BatchNorm2d}
# Registry of supported activations.
# NOTE(review): "relu" and "leaky" map to CLASSES that callers must
# instantiate, while "linear" and "mish" map to ready-made INSTANCES
# (Convolutional below relies on this split) — consider unifying.
activate_name = {
    "relu": nn.ReLU,
    "leaky": nn.LeakyReLU,
    "linear": nn.Identity(),
    "mish": Mish(),
}


class Convolutional(nn.Module):
    """Conv2d -> optional BatchNorm2d -> optional activation.

    Args:
        filters_in: number of input channels.
        filters_out: number of output channels.
        kernel_size: square kernel size; padding is kernel_size // 2,
            giving "same" spatial size for odd kernels at stride 1.
        stride: convolution stride (default 1).
        norm: normalization key in ``norm_name`` or a falsy value to skip
            normalization (default "bn").
        activate: activation key in ``activate_name`` or a falsy value to
            skip activation (default "relu").
    """

    def __init__(
        self,
        filters_in,
        filters_out,
        kernel_size,
        stride=1,
        norm="bn",
        activate="relu",
    ):
        super(Convolutional, self).__init__()

        self.norm = norm
        self.activate = activate

        self.__conv = nn.Conv2d(
            in_channels=filters_in,
            out_channels=filters_out,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            # Bias is redundant when a normalization layer follows the conv.
            bias=not norm,
        )
        if norm:
            assert norm in norm_name.keys()
            if norm == "bn":
                self.__norm = norm_name[norm](num_features=filters_out)

        if activate:
            assert activate in activate_name.keys()
            if activate == "leaky":
                self.__activate = activate_name[activate](
                    negative_slope=0.1, inplace=True
                )
            elif activate == "relu":
                self.__activate = activate_name[activate](inplace=True)
            elif activate == "mish":
                # Registry stores a ready-made Mish instance, not a class.
                self.__activate = activate_name[activate]
            elif activate == "linear":
                # BUG FIX: "linear" previously passed the assert but never
                # set self.__activate, so forward() raised AttributeError.
                self.__activate = activate_name[activate]

    def forward(self, x):
        x = self.__conv(x)
        if self.norm:
            x = self.__norm(x)
        if self.activate:
            x = self.__activate(x)

        return x


class VGG13(nn.Module):
    """VGG13-style convolutional backbone producing a feature pyramid.

    Each stage is ``arch[i]`` Convolutional (conv+bn+relu) layers followed
    by a 2x max-pool, so stage i downsamples the input by 2**(i+1).

    Args:
        arch: number of conv layers per stage. Default [2, 2, 2, 2, 2].
        feature_channels: output channels per stage; must be the same
            length as ``arch``. Default [64, 128, 256, 512, 512].
        num_features: how many of the deepest stage outputs to return.
        in_channels: channels of the network input (default 3, matching
            the previous hard-coded value).
    """

    def __init__(
        self,
        arch=None,
        feature_channels=None,
        num_features=3,
        in_channels=3,
    ):
        super(VGG13, self).__init__()

        # Avoid mutable default arguments; fall back to the VGG13 layout.
        arch = [2, 2, 2, 2, 2] if arch is None else arch
        feature_channels = (
            [64, 128, 256, 512, 512] if feature_channels is None else feature_channels
        )

        self.in_channels = in_channels

        # Generalized from five hand-written entries: one stage per
        # (channels, depth) pair, so arch of any length works.
        self.convs = nn.ModuleList(
            [
                self._make_layer(channels, num)
                for channels, num in zip(feature_channels, arch)
            ]
        )

        self.feature_channels = feature_channels
        self.num_features = num_features

        self._initialize_weights()

    def forward(self, x):
        """Return the last ``num_features`` stage outputs, shallow-to-deep."""
        features = []

        for conv in self.convs:
            x = conv(x)
            x = F.max_pool2d(x, 2)
            features.append(x)

        return features[-self.num_features:]

    def _initialize_weights(self):
        """He-style normal init for convs; BN weights to 1, biases to 0."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # fan_out = k_h * k_w * out_channels (He et al., 2015).
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, channels, num):
        """Stack ``num`` conv+bn+relu layers, tracking the running channel count."""
        layers = []
        for _ in range(num):
            layers.append(Convolutional(self.in_channels, channels, 3))
            self.in_channels = channels
        return nn.Sequential(*layers)


def _BuildVGG13():
    """Build a default VGG13 backbone.

    Returns:
        A (model, channels) pair, where ``channels`` lists the output
        channel counts of the last three feature stages.
    """
    backbone = VGG13()
    return backbone, backbone.feature_channels[-3:]


if __name__ == "__main__":
    # Smoke test: run a single forward pass through the backbone.
    model = VGG13()
    # BUG FIX: the model's first conv expects 3 input channels
    # (self.in_channels = 3); a 9-channel tensor crashed the forward pass.
    x = torch.randn(1, 3, 224, 224)
    y = model(x)
