from typing import Type

import torch
import torch.nn as nn
import torch.nn.functional as F
# from utils import display_np_arrays_as_images
import einops
from einops.layers.torch import Reduce, Rearrange
from collections import OrderedDict


class ChannelAttentionLayer(nn.Module):
    """Channel attention module from CBAM (Woo et al., ECCV 2018).

    Produces a per-channel attention map of shape (B, C, 1, 1) with values
    in (0, 1): the input is globally average- and max-pooled over the
    spatial dimensions, both descriptors pass through a *shared* two-layer
    MLP, and the two outputs are summed before a single sigmoid.
    https://openaccess.thecvf.com/content_ECCV_2018/papers/Sanghyun_Woo_Convolutional_Block_Attention_ECCV_2018_paper.pdf

    NOTE: this module has no residual connection; the caller is expected
    to multiply the returned map with its own feature tensor.
    """

    def __init__(self,
                 in_channels,
                 reduction_ratio=2,
                 ):
        """
        Args:
            in_channels: number of input channels C.
            reduction_ratio: bottleneck factor of the shared MLP; must
                evenly divide ``in_channels``.
        """
        super(ChannelAttentionLayer, self).__init__()

        assert in_channels % reduction_ratio == 0, \
            f"in_channels ({in_channels}) must be divisible by reduction_ratio ({reduction_ratio})"
        self.in_channels = in_channels
        self.mid_channels = in_channels // reduction_ratio

        # Shared MLP: squeeze to mid_channels, then expand back to C.
        self.reducing = nn.Linear(in_channels, self.mid_channels)
        self.recovering = nn.Linear(self.mid_channels, in_channels)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return the channel-attention map for ``x`` (B, C, H, W) -> (B, C, 1, 1)."""
        assert x.dim() == 4
        assert x.shape[1] == self.in_channels, 'wrong in_channels'
        # Global average / max pooling over the spatial dims -> (B, C).
        avg_pool = x.mean(dim=(-2, -1))
        max_pool = x.amax(dim=(-2, -1))
        mid_avg = F.relu(self.reducing(avg_pool))
        mid_max = F.relu(self.reducing(max_pool))
        # Per the paper the two MLP outputs are summed *before* the sigmoid;
        # no extra ReLU after the second linear layer (a ReLU there would
        # clamp logits to >= 0 and force every attention value >= 0.5).
        channel_attention = torch.sigmoid(self.recovering(mid_avg) + self.recovering(mid_max))
        # Reshape to (B, C, 1, 1) so it broadcasts against the input.
        return channel_attention[:, :, None, None]


class SpaceAttentionLayer(nn.Module):
    """Spatial attention module from CBAM (Woo et al., ECCV 2018).

    Produces a (B, 1, H, W) attention map with values in (0, 1): the
    channel-wise mean and max of the input are stacked into a 2-plane
    tensor, convolved, and passed through a single sigmoid.
    https://openaccess.thecvf.com/content_ECCV_2018/papers/Sanghyun_Woo_Convolutional_Block_Attention_ECCV_2018_paper.pdf

    NOTE: this module has no residual connection; the caller is expected
    to multiply the returned map with its own feature tensor.
    """

    # Channel-wise statistics, each mapping (B, C, H, W) -> (B, 1, H, W);
    # they are concatenated along dim 1 before the convolution.
    statistics = {
        'mean': lambda t: t.mean(dim=1, keepdim=True),
        'max': lambda t: t.amax(dim=1, keepdim=True),
    }
    statistics_planes = len(statistics)
    mid_planes = 1

    def __init__(
            self,
            in_channels,
            kernel_size=7
    ):
        """
        Args:
            in_channels: expected channel count of the input (checked in forward).
            kernel_size: spatial size of the attention convolution; 'same'
                padding keeps H and W unchanged.
        """
        super(SpaceAttentionLayer, self).__init__()
        self.in_channels = in_channels
        self.conv = nn.Conv2d(self.statistics_planes, self.mid_planes, kernel_size=kernel_size, padding='same')
        # Optional 1x1 projection down to a single attention plane; unused
        # while mid_planes == 1.
        self.conv2attention = None if self.mid_planes == 1 else nn.Conv2d(self.mid_planes, 1, kernel_size=1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return the spatial-attention map for ``x`` (B, C, H, W) -> (B, 1, H, W)."""
        assert x.dim() == 4
        assert x.shape[1] == self.in_channels, 'wrong in_channels'
        stats = [stat(x) for stat in self.statistics.values()]
        # TODO: maybe we can add a batch normalization here
        out = self.conv(torch.cat(stats, dim=1))
        if self.mid_planes != 1:
            # Fix: the original applied sigmoid both before and after this
            # projection; the sigmoid must be applied exactly once, last.
            out = self.conv2attention(out)
        return torch.sigmoid(out)


# class BasicBlock(nn.Module):
#     """resnet basicblock"""
#     expansion = 1
#     def __init__(self, in_channel, out_channel, stride=1, downsample=None):
#         if downsample is None:
#             assert in_channel * self.expansion == out_channel, " in_channel*self.expansion != out_channel "
#         super(BasicBlock, self).__init__()
#         self.conv1 = nn.Conv2d(in_channels=in_channel,
#                                out_channels=out_channel,
#                                kernel_size=3,
#                                stride=stride,
#                                padding=1,
#                                bias=False)
#         self.bn1 = nn.BatchNorm2d(out_channel)
#         self.relu = nn.ReLU()
#         self.conv2 = nn.Conv2d(in_channels=out_channel,
#                                out_channels=out_channel,
#                                kernel_size=3,
#                                stride=1,
#                                padding=1,
#                                bias=False)
#         self.bn2 = nn.BatchNorm2d(out_channel)
#         self.downsample = downsample
#
#     def forward(self, x):
#         identity = x
#         if self.downsample is not None:
#             identity = self.downsample(x)
#         out = self.conv1(x)
#         out = self.bn1(out)
#         out = self.relu(out)
#
#         out = self.conv2(out)
#         out = self.bn2(out)
#
#         out += identity
#         out = self.relu(out)
#
#         return out


class BasicBlock_AttentionLayers(nn.Module):
    """ResNet BasicBlock augmented with CBAM channel + spatial attention.

    Layout: conv-bn-relu-conv-bn -> channel attention -> spatial attention
    -> residual addition -> relu. Attention refines the convolutional
    features *before* the shortcut is added, matching the CBAM paper's
    ResBlock integration and the reference BasicBlock (see the
    commented-out version in this file): there is no activation between
    bn2 and the attention, and the final ReLU comes after the addition.
    """
    expansion = 1

    def __init__(self, in_channel, out_channel, stride=1, downsample=None):
        """
        Args:
            in_channel: input channel count.
            out_channel: output channel count.
            stride: stride of the first convolution.
            downsample: optional module applied to the identity path when
                the shapes of input and output differ.
        """
        super(BasicBlock_AttentionLayers, self).__init__()
        if downsample is None:
            assert in_channel * self.expansion == out_channel, " in_channel*self.expansion != out_channel "
        self.conv1 = nn.Conv2d(in_channels=in_channel,
                               out_channels=out_channel,
                               kernel_size=3,
                               stride=stride,
                               padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(out_channel)
        self.relu = nn.ReLU()
        self.conv2 = nn.Conv2d(in_channels=out_channel,
                               out_channels=out_channel,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(out_channel)
        self.downsample = downsample

        self.get_channel_attention = ChannelAttentionLayer(out_channel)
        self.get_space_attention = SpaceAttentionLayer(out_channel)

    def forward(self, x):
        identity = x if self.downsample is None else self.downsample(x)

        out = self.relu(self.bn1(self.conv1(x)))
        # No activation after bn2: ResNet applies the final ReLU only
        # after the residual addition.
        out = self.bn2(self.conv2(out))

        # CBAM: multiply in channel attention, then spatial attention,
        # before merging with the shortcut.
        out = out * self.get_channel_attention(out)
        out = out * self.get_space_attention(out)

        out = out + identity
        return self.relu(out)


if __name__ == '__main__':
    # Smoke test: run each attention layer on a fake batch and show that
    # its output broadcasts cleanly against the input tensor.
    simulate_shape = (32, 8, 224, 224)
    fake_batch = torch.randn(simulate_shape)
    layers = [
        ChannelAttentionLayer(simulate_shape[1]),
        SpaceAttentionLayer(simulate_shape[1]),
        BasicBlock_AttentionLayers(8, 8),
    ]

    for layer in layers:
        attention = layer(fake_batch)
        # (Removed a discarded `attention.sum()` call — its result was
        # never used, so it was a no-op.)
        print(attention[0])
        print('Layer:', type(layer), '----output shape', attention.shape,
              '----multiply to input,and get shape:', torch.mul(fake_batch, attention).shape)
