from collections import OrderedDict

import torch
import torch.nn as nn
import torch.nn.functional as F

#---------------------------------------------------#
#   SPP block: pool the input with kernels of
#   different sizes, then stack (concatenate)
#   the pooled feature maps
#---------------------------------------------------#
class SpatialPyramidPooling(nn.Module):
    """SPP block: pool the input with several kernel sizes (stride 1,
    'same' padding) and concatenate the pooled maps with the input
    along the channel dimension.

    Output channels = in_channels * (len(pool_sizes) + 1).
    """

    # NOTE: defaults are tuples, not lists — mutable default arguments are
    # shared across calls and are a classic Python pitfall.
    def __init__(self, pool_sizes=(5, 9, 13),
                 PoolType=(nn.MaxPool2d, nn.AvgPool2d, nn.AvgPool2d,
                           nn.AdaptiveAvgPool2d, nn.AdaptiveMaxPool2d),
                 pool_5=0, pool_9=0, pool_13=0):
        # pool_sizes: kernel size of each of the three pooling branches.
        # pool_5 / pool_9 / pool_13: indices into PoolType selecting the
        # pooling class used for the corresponding branch (default: MaxPool2d).
        # NOTE(review): indices 3 and 4 select adaptive pools, whose
        # constructors do not accept the (kernel, stride, padding) signature
        # used below and would raise — confirm before selecting them.
        super(SpatialPyramidPooling, self).__init__()

        pool_list = [
            PoolType[pool_5](pool_sizes[0], 1, pool_sizes[0] // 2),
            PoolType[pool_9](pool_sizes[1], 1, pool_sizes[1] // 2),
            PoolType[pool_13](pool_sizes[2], 1, pool_sizes[2] // 2),
        ]
        self.pools = nn.ModuleList(pool_list)

        print('this is a Spatial!')

    def forward(self, x):
        # Apply the pools largest-kernel-first (reversed order), then append
        # the untouched input and concatenate along channels.
        features = [pool(x) for pool in self.pools[::-1]]
        features = torch.cat(features + [x], dim=1)

        return features


#   ASPP block (with BatchNorm) ----# default: in 512, out 2048
class ASPPBN(nn.Module):
    """ASPP (Atrous Spatial Pyramid Pooling) with BatchNorm.

    Five parallel branches — a 1x1 conv, three 3x3 atrous convs
    (dilation 6 / 12 / 18), and a global-average-pooled image-level
    branch — are concatenated and fused by a final 1x1 conv.

    Spatial size is preserved; default channels: in 512, out 2048.
    """
    def __init__(self, in_channels=512, out_channels=2048):
        super(ASPPBN, self).__init__()

        self.ty_conv_1x1_1 = nn.Conv2d(in_channels, out_channels, kernel_size=1)
        self.ty_bn_conv_1x1_1 = nn.BatchNorm2d(out_channels)

        # 3x3 atrous convs: padding == dilation keeps the spatial size.
        self.ty_conv_3x3_1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=6, dilation=6)
        self.ty_bn_conv_3x3_1 = nn.BatchNorm2d(out_channels)

        self.ty_conv_3x3_2 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=12, dilation=12)
        self.ty_bn_conv_3x3_2 = nn.BatchNorm2d(out_channels)

        self.ty_conv_3x3_3 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=18, dilation=18)
        self.ty_bn_conv_3x3_3 = nn.BatchNorm2d(out_channels)

        # Image-level branch: global average pool + 1x1 conv.
        self.ty_avg_pool = nn.AdaptiveAvgPool2d(1)

        self.ty_conv_1x1_2 = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1)
        self.ty_bn_conv_1x1_2 = nn.BatchNorm2d(out_channels)

        # Fuse the 5 concatenated branches back to out_channels.
        self.ty_conv_1x1_3 = nn.Conv2d(out_channels * 5, out_channels, kernel_size=1)
        self.ty_bn_conv_1x1_3 = nn.BatchNorm2d(out_channels)

        print('this is a ASPPBN!')

    def forward(self, feature_map):
        feature_map_h = feature_map.size()[2]
        feature_map_w = feature_map.size()[3]

        out_1x1 = F.relu(self.ty_bn_conv_1x1_1(self.ty_conv_1x1_1(feature_map)))      # (B, out_channels, H, W)
        out_3x3_1 = F.relu(self.ty_bn_conv_3x3_1(self.ty_conv_3x3_1(feature_map)))    # (B, out_channels, H, W)
        out_3x3_2 = F.relu(self.ty_bn_conv_3x3_2(self.ty_conv_3x3_2(feature_map)))    # (B, out_channels, H, W)
        out_3x3_3 = F.relu(self.ty_bn_conv_3x3_3(self.ty_conv_3x3_3(feature_map)))    # (B, out_channels, H, W)

        out_img = self.ty_avg_pool(feature_map)                                        # (B, in_channels, 1, 1)
        out_img = F.relu(self.ty_bn_conv_1x1_2(self.ty_conv_1x1_2(out_img)))           # (B, out_channels, 1, 1)
        # align_corners=False is the effective default; passing it explicitly
        # silences the deprecation-style UserWarning without changing results.
        out_img = F.interpolate(out_img, size=(feature_map_h, feature_map_w),
                                mode="bilinear", align_corners=False)                  # (B, out_channels, H, W)

        out = torch.cat([out_1x1, out_3x3_1, out_3x3_2, out_3x3_3, out_img], 1)        # (B, 5*out_channels, H, W)
        out = F.relu(self.ty_bn_conv_1x1_3(self.ty_conv_1x1_3(out)))                   # (B, out_channels, H, W)

        return out


#   PPM (Pyramid Pooling Module) ----# default: in 512, out 2048
class PPM(nn.Module):
    """Pyramid Pooling Module (PSPNet-style).

    Adaptive-average-pools the input to 1x1, 2x2, 3x3 and 6x6, projects
    each to inchannel/4 channels with a 1x1 conv, bilinearly upsamples
    back to the input size, concatenates everything with the input
    (giving 2 * inchannel channels) and fuses with a final 1x1 conv.
    """
    def __init__(self, inchannel=512, outchannel=2048, **kwargs):
        super(PPM, self).__init__()
        # Each branch carries a quarter of the input channels.
        interchannel = int(inchannel / 4)
        self.ty_conv1 = nn.Conv2d(inchannel, interchannel, 1, **kwargs)
        self.ty_conv2 = nn.Conv2d(inchannel, interchannel, 1, **kwargs)
        self.ty_conv3 = nn.Conv2d(inchannel, interchannel, 1, **kwargs)
        self.ty_conv4 = nn.Conv2d(inchannel, interchannel, 1, **kwargs)
        # x (inchannel) + 4 branches (4 * inchannel/4) = 2 * inchannel.
        self.ty_out = nn.Conv2d(inchannel * 2, outchannel, 1)

        print('this is a PPM!')

    def ty_pool(self, x, size):
        # Functional form: identical to nn.AdaptiveAvgPool2d(size)(x) but
        # avoids allocating a new module object on every forward pass.
        return F.adaptive_avg_pool2d(x, size)

    def ty_upsample(self, x, size):
        return F.interpolate(x, size, mode='bilinear', align_corners=True)

    def forward(self, x):
        size = x.size()[2:]
        interout1 = self.ty_pool(x, 1)
        interout2 = self.ty_pool(x, 2)
        interout3 = self.ty_pool(x, 3)
        interout6 = self.ty_pool(x, 6)

        out1 = self.ty_conv1(interout1)
        out2 = self.ty_conv2(interout2)
        out3 = self.ty_conv3(interout3)
        out4 = self.ty_conv4(interout6)

        out1_1 = self.ty_upsample(out1, size)
        out2_1 = self.ty_upsample(out2, size)
        out3_1 = self.ty_upsample(out3, size)
        out6_1 = self.ty_upsample(out4, size)

        out5 = torch.cat([x, out1_1, out2_1, out3_1, out6_1], dim=1)
        out = self.ty_out(out5)
        return out

class ASPPNOBN(nn.Module):
    """ASPP without BatchNorm.

    Same five-branch layout as ASPPBN (1x1 conv, atrous 3x3 convs with
    dilation 6/12/18, and an image-level global-pool branch) but with
    plain convolutions and no normalization or activation.
    """
    def __init__(self, in_channel=512, depth=2048):
        super(ASPPNOBN, self).__init__()
        # Image-level branch: global average pool to 1x1.
        self.mean = nn.AdaptiveAvgPool2d((1, 1))
        self.conv = nn.Conv2d(in_channel, depth, 1, 1)
        self.atrous_block1 = nn.Conv2d(in_channel, depth, 1, 1)
        # padding == dilation keeps the spatial size for the 3x3 convs.
        self.atrous_block6 = nn.Conv2d(in_channel, depth, 3, 1, padding=6, dilation=6)
        self.atrous_block12 = nn.Conv2d(in_channel, depth, 3, 1, padding=12, dilation=12)
        self.atrous_block18 = nn.Conv2d(in_channel, depth, 3, 1, padding=18, dilation=18)
        # Fuse the 5 concatenated branches back to `depth` channels.
        self.conv_1x1_output = nn.Conv2d(depth * 5, depth, 1, 1)

    def forward(self, x):
        size = x.shape[2:]
        image_features = self.mean(x)
        image_features = self.conv(image_features)
        # F.upsample is deprecated; F.interpolate with align_corners=False
        # is its exact replacement for bilinear mode.
        image_features = F.interpolate(image_features, size=size, mode='bilinear',
                                       align_corners=False)
        atrous_block1 = self.atrous_block1(x)
        atrous_block6 = self.atrous_block6(x)
        atrous_block12 = self.atrous_block12(x)
        atrous_block18 = self.atrous_block18(x)
        net = self.conv_1x1_output(torch.cat([image_features, atrous_block1, atrous_block6,
                                              atrous_block12, atrous_block18], dim=1))
        return net