import torch
from torch import nn
"""
CBAM：一种结合了空间saptial 和通道channel的注意力机制模块；
（1）通道注意力：将输入的feature map分别经过基于weight和height的MaxPool和AvgPool；
                然后分别经过MLP；
                将MLP输出的特征进行基于逐元素的加和操作，在经过Sigmoid激活操作，生成最终的通道注意力特征图；
                将channel feature map 和imput feature map逐元素相乘操作，生成Spatial Attention模块需要的输入特征；
                
（2）空间注意力：将Channel attention模块输出的特征图作为本模块的输入特征图。
                先做一个基于channel的global max pooling 和global average pooling，然后将这2个结果基于channel 做concat操作。
                然后经过一个卷积操作，降维为1个channel。
                再经过sigmoid生成spatial attention feature。
                最后将该feature和该模块的输入feature做乘法，得到最终生成的特征。

"""
try:
    from torch.hub import load_state_dict_from_url
except ImportError:
    from torch.utils.model_zoo import load_url as load_state_dict_from_url

# (1) Channel attention mechanism
class ChannelAttention(nn.Module):
    """Channel attention of CBAM.

    Squeezes the spatial dimensions with both average and max pooling,
    pushes each pooled vector through a shared two-layer MLP (1x1 convs),
    sums the two results and applies a sigmoid, yielding a per-channel
    attention map of shape (N, C, 1, 1).
    """

    def __init__(self, in_planes, ratio=16):
        """
        Args:
            in_planes: number of input channels C.
            ratio: channel reduction ratio of the hidden MLP layer.
        """
        super(ChannelAttention, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # BUG FIX: the original used AdaptiveAvgPool2d here too, which made
        # the "max" branch identical to the "avg" branch.
        self.max_pool = nn.AdaptiveMaxPool2d(1)

        # BUG FIX: the reduction was hard-coded to 16, ignoring `ratio`.
        self.fc1 = nn.Conv2d(in_planes, in_planes // ratio, 1, bias=False)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Conv2d(in_planes // ratio, in_planes, 1, bias=False)

        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Return the sigmoid channel-attention map for x of shape (N, C, H, W)."""
        avg_out = self.fc2(self.relu1(self.fc1(self.avg_pool(x))))
        max_out = self.fc2(self.relu1(self.fc1(self.max_pool(x))))
        out = avg_out + max_out
        return self.sigmoid(out)

# (2) Spatial attention mechanism
class SpatialAttention(nn.Module):
    """Spatial attention of CBAM.

    Reduces the channel dimension with a per-pixel mean and max, stacks the
    two single-channel maps, convolves them down to one channel and applies
    a sigmoid, producing an (N, 1, H, W) attention map.
    """

    def __init__(self, kernel_size=7):
        super(SpatialAttention, self).__init__()

        # Only 3x3 and 7x7 kernels are supported; "same" padding keeps H, W.
        assert kernel_size in (3, 7)

        self.conv1 = nn.Conv2d(2, 1, kernel_size,
                               padding=(kernel_size - 1) // 2, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Channel-wise statistics; each map has shape (N, 1, H, W).
        mean_map = x.mean(dim=1, keepdim=True)
        max_map = x.max(dim=1, keepdim=True).values
        stacked = torch.cat((mean_map, max_map), dim=1)
        return self.sigmoid(self.conv1(stacked))


class Bottleneck(nn.Module):
    """Standard ResNet bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand.

    Output channel count is `planes * expansion`. The optional `downsample`
    module projects the identity branch when shape or stride changes.
    NOTE: relies on module-level `conv1x1` / `conv3x3` helpers defined
    elsewhere in this file.
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        # The 3x3 convolution carries the stride; the 1x1s reshape channels.
        self.conv1 = conv1x1(inplanes, planes)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = conv3x3(planes, planes, stride)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = conv1x1(planes, planes * self.expansion)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Identity branch, projected only when a downsample module is given.
        shortcut = x if self.downsample is None else self.downsample(x)

        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))

        y += shortcut
        return self.relu(y)



# Example: adding the attention modules to a ResNet network
class ResNet(nn.Module):
    def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
                 groups=1, width_per_group=64, replace_stride_with_dilation=None,
                 norm_layer=None):
        """Build a ResNet whose stem and final conv stage include CBAM attention.

        Args:
            block: residual block class (e.g. Bottleneck); must expose `expansion`.
            layers: sequence of 4 ints, number of blocks per stage.
            num_classes: size of the final classifier output.
            zero_init_residual: zero-init the last BN of each residual branch.
            groups: grouped-convolution group count (1 = no grouping).
            width_per_group: base width per group.
            replace_stride_with_dilation: 3 bools; replace each stage's 2x2
                stride with a dilated convolution instead.
            norm_layer: normalization layer class, defaults to nn.BatchNorm2d.
        """
        super(ResNet, self).__init__()
        # BUG FIX: originally the entire body below was nested inside this
        # `if`, so passing an explicit norm_layer skipped all initialization.
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self.norm_layer = norm_layer

        self.inplanes = 64  # input channel count of the first residual stage
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # One flag per strided stage: use dilation instead of 2x2 stride.
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))

        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)

        # Attention after the first layer of the network.
        # BUG FIX: original wrote `self.ca == ...` (a no-op comparison).
        self.ca = ChannelAttention(self.inplanes)
        self.sa = SpatialAttention()

        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilation=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilation=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilation=replace_stride_with_dilation[2])

        # Attention after the last convolutional stage.
        self.ca1 = ChannelAttention(self.inplanes)
        # NOTE(review): `sa1` holds the classifier head, not a SpatialAttention;
        # likely `self.sa1 = SpatialAttention()` plus a separate `self.fc`
        # was intended — confirm against forward() before renaming.
        self.sa1 = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            # BUG FIX: original called isinstance(m.nn.Conv2d) -> AttributeError.
            if isinstance(m, nn.Conv2d):
                # Kaiming normal init; for a conv of shape [Cout, Cin, H, W],
                # fan_in = H * W * Cin and fan_out = H * W * Cout.
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                # BUG FIX: use in-place `constant_` (plain `constant` is deprecated).
                nn.init.constant_(m.bias, 0)

        # Zero-init the last BN of each residual branch so every block starts
        # as an identity mapping, which helps early training.
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)