from typing import Callable, List, Optional, Tuple
import torchvision.models as models
import torch
import torch.nn as nn
from torch.nn import Module
from torch import Tensor
import torch.nn.functional as F
from lib.components import CBAM

# Whether the conv3x3 / conv1x1 helpers below create a bias term.
# NOTE(review): every conv in this file is followed by a norm layer, where a
# bias is redundant (torchvision's ResNet uses bias=False) — confirm before
# flipping, since it changes checkpoint/state_dict compatibility.
conv_bias = True


def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d:
    """Return a 3x3 convolution whose padding equals its dilation, so the
    spatial size is preserved when ``stride == 1``."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        groups=groups,
        bias=conv_bias,
    )


def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
    """Return a 1x1 (pointwise) convolution with no padding."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        bias=conv_bias,
    )


class Bottleneck(nn.Module):
    """Pre-activation bottleneck block with a residual connection and a CBAM
    attention module on the output of the final 1x1 convolution.

    Unlike the classic post-activation ResNet bottleneck, each norm layer here
    is applied to the tensor *entering* the next convolution (BN -> ReLU ->
    conv ordering), so ``bn1`` normalizes the block input (``inplanes``
    channels), not ``conv1``'s output.

    Args:
        inplanes: number of input channels.
        planes: base width; the block outputs ``planes * expansion`` channels.
        stride: stride of the middle 3x3 convolution.
        downsample: optional module mapping the input to the output shape for
            the residual addition (identity is used when ``None``).
        groups / base_width / dilation: as in torchvision's ResNet.
        norm_layer: normalization factory, defaults to ``nn.BatchNorm2d``.
    """

    # Channel expansion factor of the final 1x1 convolution.
    expansion: int = 4

    def __init__(self, inplanes: int, planes: int, stride: int = 1, downsample: Optional[nn.Module] = None, groups: int = 1, base_width: int = 64, dilation: int = 1, norm_layer: Optional[Callable[..., nn.Module]] = None) -> None:
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1.
        # Pre-activation layout: each bn below normalizes the tensor that
        # feeds the convolution registered just before it in forward().
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(inplanes)   # acts on the block input
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)      # acts on conv1's output
        self.c3 = CBAM(planes * self.expansion)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(width)      # acts on conv2's output
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, input: Tensor) -> Tensor:
        # Residual path: identity, or downsample(input) when a projection is
        # needed to match the output channel count / spatial size.
        identity = input if self.downsample is None else self.downsample(input)
        out = self.conv1(self.relu(self.bn1(input)))
        out = self.conv2(self.relu(self.bn2(out)))
        out = self.conv3(self.relu(self.bn3(out)))
        return self.c3(out) + identity

class ResUnet(Module):
    """U-Net-style network built from pre-activation ``Bottleneck`` blocks:
    one shared encoder and one independent decoder per output branch.

    Args:
        input_channel: channels of the input image.
        branchs: output-channel count for each branch head. NOTE(review):
            ``forward`` hard-codes exactly two branches (indices 0 and 1).
        layers: number of encoder stages; stage ``i`` has
            ``64 * base_dim * 2**i`` channels.
        base_dim: channel-width multiplier.
        blochs: (sic) number of Bottleneck blocks per encoder stage.
    """

    def __init__(self, input_channel: int, branchs: List[int], layers: int, base_dim: int = 1, blochs: int = 2) -> None:
        super().__init__()
        self.base_dim = base_dim
        self.layers = layers
        self.branchs = branchs
        self.blochs = blochs
        self.encoder = nn.ModuleList()
        self.downsample = nn.MaxPool2d(2, 2)
        for i in range(layers):
            stage_dim = 64 * self.base_dim * 2**i
            # First block projects input_channel -> stage_dim via a 1x1
            # downsample module; the remaining blochs-1 blocks keep the width.
            e = [Bottleneck(input_channel, 16 * self.base_dim * 2**i, 1, conv1x1(input_channel, stage_dim, 1))] + \
                [Bottleneck(stage_dim, 16 * self.base_dim * 2**i, 1) for _ in range(blochs - 1)]
            self.encoder.append(nn.Sequential(*e))
            input_channel = stage_dim
        self.decoders = nn.ModuleList()
        for _ in branchs:
            decodes = nn.ModuleList()
            for j in range(layers - 1):
                d = nn.ModuleDict()
                # 'up': transposed conv halves channels and doubles spatial
                # size; 'de': fuses the upsampled tensor with the skip
                # connection (concatenated, hence the doubled input width).
                d['up'] = nn.ConvTranspose2d(64 * self.base_dim * 2**(j + 1), 64 * self.base_dim * 2**j, 2, 2)
                d['de'] = nn.Sequential(Bottleneck(64 * self.base_dim * 2**(j + 1), 16 * self.base_dim * 2**j,
                                        1, conv1x1(64 * self.base_dim * 2**(j + 1), 64 * self.base_dim * 2**j, 1)),
                                        Bottleneck(64 * self.base_dim * 2**j, 16 * self.base_dim * 2**j, 1))
                decodes.append(d)
            self.decoders.append(decodes)
        self.out_layer = nn.ModuleList([nn.Conv2d(64 * self.base_dim, i, 1, 1) for i in branchs])
        # Auxiliary 2-channel heads at each intermediate decoder resolution
        # (1/2**(j+1) of the input size). FIX: these were hard-coded as
        # Conv2d(1024/512/256, 2), which only matches base_dim=2, layers=4;
        # derive the input width from the stage dimensions instead. Attribute
        # names and shapes are unchanged for the default config, so existing
        # state_dicts still load.
        for j in range(layers - 1):
            setattr(self, f'out1_{2 ** (j + 1)}', nn.Conv2d(64 * self.base_dim * 2**(j + 1), 2, 1, 1))

    def forward(self, input: Tensor):
        """Return ``(branch0_out, branch1_out, aux_outputs)`` where
        ``aux_outputs`` lists the 2-channel auxiliary maps from the finest
        (1/2) to the coarsest (1/2**(layers-1)) intermediate resolution."""
        x = input
        xs = []
        for i in range(self.layers):
            x = self.encoder[i](x)
            xs.append(x)
            if i != self.layers - 1:
                x = self.downsample(x)  # no pooling after the last stage
        # Both decoder branches start from the deepest encoder features.
        x1, x2 = xs[-1], xs[-1]
        output = []
        for j in range(self.layers - 2, -1, -1):
            # Auxiliary prediction from branch 0 before upsampling this level.
            output.append(getattr(self, f'out1_{2 ** (j + 1)}')(x1))
            x1 = self.decoders[0][j]["up"](x1)
            x1 = torch.cat([x1, xs[j]], dim=1)  # skip connection
            x1 = self.decoders[0][j]["de"](x1)

            x2 = self.decoders[1][j]["up"](x2)
            x2 = torch.cat([x2, xs[j]], dim=1)  # skip connection
            x2 = self.decoders[1][j]["de"](x2)

        return self.out_layer[0](x1), self.out_layer[1](x2), output[::-1]


def Net(n_rays: int = 4):
    """Build the default two-branch ResUnet used by this project.

    Branch 0 emits 2 channels; branch 1 emits ``1 + n_rays`` channels.
    """
    branch_channels = [2, 1 + n_rays]
    return ResUnet(
        input_channel=3,
        branchs=branch_channels,
        layers=4,
        base_dim=2,
        blochs=3,
    )
