from typing import Sized

import torch
import torch.nn as nn
import torch.nn.functional as F
# from utils import display_np_arrays_as_images
import einops
from einops.layers.torch import Reduce, Rearrange
from collections import OrderedDict

# Default compute device: prefer CUDA when a GPU is available.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

class SelectiveKernelLayer(nn.Module):
    """Selective Kernel unit: split -> fuse -> select.

    The input is pushed through several parallel conv branches ("heads")
    with different receptive fields, the branch outputs are fused by
    element-wise summation, and a small bottleneck MLP predicts a softmax
    attention weight per branch and channel that re-weights the branches
    before the final sum (Li et al., "Selective Kernel Networks").
    """

    # lower bound on the attention bottleneck width so very small
    # out_channels / reduce_ratio combinations still get capacity
    minimized_mid_channels = 32

    def __init__(self, in_channels, out_channels, reduce_ratio=2,
                 kernel_sizes: tuple = (3, 5), strides=1, dilations=1, groups=1):
        """
        :param in_channels: channels of the input feature map
        :param out_channels: channels produced by every branch (and the output)
        :param reduce_ratio: bottleneck reduction for the attention MLP
        :param kernel_sizes: one conv kernel size per branch
        :param strides: int (shared) or one stride per branch
        :param dilations: int (shared) or one dilation per branch
        :param groups: conv groups, shared by all branches
        """
        super(SelectiveKernelLayer, self).__init__()

        assert out_channels % reduce_ratio == 0, "out_channels not divisible of reduce_ratio"
        self.mid_channels = max(out_channels // reduce_ratio, self.minimized_mid_channels)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_heads = len(kernel_sizes)

        # a scalar stride/dilation is broadcast to every branch
        if isinstance(strides, int):
            strides = [strides] * self.num_heads
        if isinstance(dilations, int):
            dilations = [dilations] * self.num_heads
        assert self.num_heads == len(strides), "wrong strides"
        assert self.num_heads == len(dilations), "wrong dilations"
        self.strides = strides
        self.dilations = dilations

        branches = []
        for k, s, d in zip(kernel_sizes, strides, dilations):
            # effective extent of a dilated kernel
            eff = 1 + (k - 1) * d
            # pick padding so every branch emits the same spatial size:
            # eff - s <= 2p < eff  =>  p = ceil((eff - s) / 2)
            pad = -(-(eff - s) // 2)
            branches.append(
                nn.Sequential(
                    nn.Conv2d(in_channels, out_channels, k,
                              stride=s, padding=pad, dilation=d, groups=groups),
                    nn.BatchNorm2d(out_channels),
                    nn.ReLU(),
                )
            )
        self.heads = nn.ModuleList(branches)

        # squeeze the pooled channel descriptor into the bottleneck z
        self.fc_reduce = nn.Linear(out_channels, self.mid_channels)
        self.bn = nn.BatchNorm1d(self.mid_channels)

        # one expansion head per branch to produce its attention logits
        self.fc_recovering_heads = nn.ModuleList(
            [nn.Linear(self.mid_channels, self.out_channels) for _ in kernel_sizes]
        )

    def forward(self, x):
        """Return the attention-weighted fusion of all branch outputs."""
        assert x.shape[1] == self.in_channels, 'wrong input channels'
        # split: stack branch outputs along a new leading "heads" axis
        # (num_heads is small — at most a handful of kernel sizes)
        branch_maps = torch.stack([head(x) for head in self.heads], dim=0)
        # fuse: plain element-wise sum across branches
        fused = branch_maps.sum(dim=0)
        # channel-wise statistics via global average pooling -> (b, c)
        squeezed = fused.mean(dim=(2, 3))
        # the paper's z: bottlenecked, batch-normalized, rectified
        z = F.relu(self.bn(self.fc_reduce(squeezed)))
        # per-branch logits, softmax-normalized across the heads axis
        logits = torch.stack([fc(z) for fc in self.fc_recovering_heads], dim=0)
        # broadcast the (heads, b, c) weights over the spatial dims
        weights = torch.softmax(logits, dim=0)[..., None, None]
        # select: attention-weighted sum of the branch maps
        return (weights * branch_maps).sum(dim=0)


class SelectiveKernelBlock(SelectiveKernelLayer):
    """Residual bottleneck block built around a SelectiveKernelLayer.

    Structure: 1x1 conv (in -> mid) -> SK layer (mid -> mid) -> 1x1 conv
    (mid -> out), each followed by BN + ReLU, plus an identity shortcut.
    A 1x1 conv shortcut is created automatically when the spatial size or
    channel count changes and no shortcut was supplied by the caller.
    """

    def __init__(self, in_channels, mid_channels, out_channels, reduce_ratio=2, downsample=None,
                 kernel_sizes: tuple = (3, 5), strides=1, dilations=1, groups=1):
        """
        :param in_channels: channels entering the block
        :param mid_channels: bottleneck width fed to the SK layer
        :param out_channels: channels leaving the block
        :param reduce_ratio: attention bottleneck ratio of the SK layer
        :param downsample: optional module mapping the identity branch to
            the output shape; built automatically when needed and None
        :param kernel_sizes / strides / dilations / groups: forwarded to
            SelectiveKernelLayer
        """
        super(SelectiveKernelBlock, self).__init__(mid_channels, mid_channels, reduce_ratio,
                                                   kernel_sizes, strides, dilations, groups)
        self.downsample = downsample
        # BUGFIX: the original condition was
        #   downsample is None and strides != 1 or out_channels != in_channels
        # which, by precedence, overwrote a caller-supplied downsample
        # whenever out_channels != in_channels. Only build a default
        # shortcut when none was provided AND the shape actually changes.
        if downsample is None and (strides != 1 or out_channels != in_channels):
            # NOTE(review): if per-branch strides are passed as a tuple it is
            # forwarded to Conv2d unchanged — confirm callers only use an int
            # stride when relying on this default shortcut.
            self.downsample = nn.Sequential(
                nn.Conv2d(in_channels,
                          out_channels,
                          kernel_size=1,
                          stride=strides,
                          bias=False),
                nn.BatchNorm2d(out_channels))
        # 1x1 reduce into / expand out of the SK layer's channel width
        self.point_wise_encoder = nn.Conv2d(in_channels, mid_channels, 1)
        self.bn1 = nn.BatchNorm2d(mid_channels)
        self.point_wise_decoder = nn.Conv2d(mid_channels, out_channels, 1)
        self.bn2 = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        """Return SK-bottleneck features plus the (possibly projected) identity."""
        identity = x
        if self.downsample is not None:
            identity = self.downsample(identity)
        x = torch.relu(self.bn1(self.point_wise_encoder(x)))
        # the inherited SelectiveKernelLayer.forward handles the mid channels
        x = super(SelectiveKernelBlock, self).forward(x)
        x = torch.relu(self.bn2(self.point_wise_decoder(x)))
        return x + identity


if __name__ == '__main__':
    # Smoke test: push a fake batch through both modules, backprop once,
    # and report the output shapes.
    batch_shape = (32, 16, 224, 224)
    fake_batch = torch.randn(batch_shape)
    in_ch = batch_shape[1]

    modules = (
        SelectiveKernelLayer(in_ch, 2 * in_ch, strides=2),
        SelectiveKernelBlock(in_ch, in_ch, 2 * in_ch, strides=2),
    )

    for layer in modules:
        layer = layer.to(device)
        attention = layer(fake_batch.to(device))
        attention.sum().backward()
        print(attention.abs().min())
        print('Layer:', type(layer), '----output shape', attention.shape, )
