import torch
import torch.nn as nn
import torch.nn.functional as F


class PPM(nn.ModuleList):
    """Pyramid Pooling Module (PSPNet, CVPR 2017).

    https://arxiv.org/abs/1612.01105

    Each branch adaptively max-pools the input down to a fixed grid,
    projects it with a 1x1 convolution, and upsamples it back to the
    input resolution. ``forward`` returns one tensor per pool size.
    """

    def __init__(self, pool_sizes, in_channels, out_channels):
        super(PPM, self).__init__()
        self.pool_sizes = pool_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        # One pooling + projection branch per requested grid size.
        for size in pool_sizes:
            branch = nn.Sequential(
                nn.AdaptiveMaxPool2d(size),
                nn.Conv2d(in_channels, out_channels, kernel_size=1),
            )
            self.append(branch)

    def forward(self, x):
        # Restore each branch output to the spatial size of ``x`` so the
        # caller can concatenate them along the channel dimension.
        target = (x.size(2), x.size(3))
        return [
            F.interpolate(branch(x), size=target, mode='bilinear', align_corners=True)
            for branch in self
        ]


class PPMHEAD(nn.Module):
    """Applies :class:`PPM` to a feature map and fuses the branches.

    Args:
        in_channels: channel count of the incoming feature map.
        out_channels: channel count of each pyramid branch; the fused
            output has ``4 * out_channels`` channels.
        pool_sizes: adaptive pooling grid sizes, one per pyramid branch.
    """

    # Default is a tuple (not a list): a mutable default argument would be
    # shared across all instances. It is never mutated here, so the change
    # is fully backward compatible.
    def __init__(self, in_channels, out_channels, pool_sizes=(1, 2, 3, 6)):
        super(PPMHEAD, self).__init__()
        self.pool_sizes = pool_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.psp_modules = PPM(self.pool_sizes, self.in_channels, self.out_channels)
        # Fuse the untouched input plus every pyramid branch down to
        # 4 * out_channels channels with a 1x1 conv.
        self.final = nn.Sequential(
            nn.Conv2d(self.in_channels + len(self.pool_sizes) * self.out_channels,
                      4 * self.out_channels, kernel_size=1),
            nn.BatchNorm2d(4 * self.out_channels),
            nn.ReLU(),
        )

    def forward(self, x):
        # Pyramid branch outputs, each already upsampled to x's size by PPM.
        out = self.psp_modules(x)
        # Keep the original input as an extra branch before fusing.
        out.append(x)
        out = torch.cat(out, 1)
        out = self.final(out)
        return out


class FPNHEAD(nn.Module):
    """FPN-style decode head with a PPM on the deepest feature map.

    Top-down pathway: the 1/32 map goes through :class:`PPMHEAD`, is
    upsampled, and is repeatedly concatenated with a 1x1-projected
    lateral map and reduced, down to the 1/4 level. All intermediate
    maps are then fused at 1/4 scale and classified.

    Args:
        out_channels: channel count of the fused map before classification.
        num_classes: number of output classes.
        channels: per-level channel counts of the four backbone feature
            maps, ordered shallow (1/4) to deep (1/32).
    """

    # Default changed from a list to a tuple to avoid the shared mutable
    # default argument; indexing and sum() behave identically.
    def __init__(self, out_channels=512, num_classes=19, channels=(64, 128, 320, 512)):
        super(FPNHEAD, self).__init__()
        self.num_classes = num_classes
        # PPM head on the 1/32 map: channels[-1] -> 4 * (channels[-1] // 4).
        self.PPMHead = PPMHEAD(in_channels=channels[-1], out_channels=channels[-1] // 4)

        # Per skip level: a 1x1 conv on the lateral feature (Conv_fuseN) and
        # a 1x1 conv that shrinks the concatenated channels (Conv_fuseN_).
        self.Conv_fuse1 = nn.Sequential(
            nn.Conv2d(channels[-2], channels[-2], 1),
            nn.BatchNorm2d(channels[-2]),
            nn.ReLU()
        )
        self.Conv_fuse1_ = nn.Sequential(
            nn.Conv2d(channels[-2] + channels[-1], channels[-2], 1),
            nn.BatchNorm2d(channels[-2]),
            nn.ReLU()
        )
        self.Conv_fuse2 = nn.Sequential(
            nn.Conv2d(channels[-3], channels[-3], 1),
            nn.BatchNorm2d(channels[-3]),
            nn.ReLU()
        )
        self.Conv_fuse2_ = nn.Sequential(
            nn.Conv2d(channels[-3] + channels[-2], channels[-3], 1),
            nn.BatchNorm2d(channels[-3]),
            nn.ReLU()
        )

        self.Conv_fuse3 = nn.Sequential(
            nn.Conv2d(channels[-4], channels[-4], 1),
            nn.BatchNorm2d(channels[-4]),
            nn.ReLU()
        )
        self.Conv_fuse3_ = nn.Sequential(
            nn.Conv2d(channels[-4] + channels[-3], channels[-4], 1),
            nn.BatchNorm2d(channels[-4]),
            nn.ReLU()
        )

        self.fuse_all = nn.Sequential(
            nn.Conv2d(sum(channels), out_channels, 1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU()
        )

        self.cls_seg = nn.Sequential(
            nn.Conv2d(out_channels, self.num_classes, kernel_size=3, padding=1),
        )

    def forward(self, input_fpn):
        """Fuse four backbone maps into per-pixel class logits.

        Args:
            input_fpn: sequence of four feature maps ordered shallow to
                deep, e.g. (1, 64, 56, 56) ... (1, 512, 7, 7).

        Returns:
            Logits of shape (N, num_classes, 4*H, 4*W) where (H, W) is
            the spatial size of the shallowest input map.
        """
        # 1/32 level through the pyramid pooling head.
        x1 = self.PPMHead(input_fpn[-1])

        # Upsample to the exact size of the 1/16 lateral map. Matching the
        # skip size (instead of the previous hard-coded doubling) keeps the
        # concatenation valid for odd spatial sizes; behavior is identical
        # when sizes are exact 2x multiples.
        x = F.interpolate(x1, size=input_fpn[-2].size()[-2:],
                          mode='bilinear', align_corners=True)
        # Concatenate with the projected 1/16 lateral map, then reduce channels.
        x = torch.cat([x, self.Conv_fuse1(input_fpn[-2])], dim=1)
        x2 = self.Conv_fuse1_(x)

        # 1/16 -> 1/8, fuse with the 1/8 lateral map.
        x = F.interpolate(x2, size=input_fpn[-3].size()[-2:],
                          mode='bilinear', align_corners=True)
        x = torch.cat([x, self.Conv_fuse2(input_fpn[-3])], dim=1)
        x3 = self.Conv_fuse2_(x)

        # 1/8 -> 1/4, fuse with the 1/4 lateral map.
        x = F.interpolate(x3, size=input_fpn[-4].size()[-2:],
                          mode='bilinear', align_corners=True)
        x = torch.cat([x, self.Conv_fuse3(input_fpn[-4])], dim=1)
        x4 = self.Conv_fuse3_(x)

        # Bring every intermediate map to the 1/4 resolution and fuse all.
        size = x4.size()[-2:]
        x1 = F.interpolate(x1, size, mode='bilinear', align_corners=True)
        x2 = F.interpolate(x2, size, mode='bilinear', align_corners=True)
        x3 = F.interpolate(x3, size, mode='bilinear', align_corners=True)
        x = self.fuse_all(torch.cat([x1, x2, x3, x4], 1))

        # Restore full input resolution (the shallowest map is assumed to
        # be at 1/4 scale — TODO confirm against the backbone used).
        x = F.interpolate(x, size=(x.size(2) * 4, x.size(3) * 4),
                          mode='bilinear', align_corners=True)
        x = self.cls_seg(x)
        return x


if __name__ == '__main__':
    # Fake backbone pyramid: 1/4, 1/8, 1/16, 1/32 scale feature maps.
    specs = [(64, 56), (128, 28), (320, 14), (512, 7)]
    features = [torch.randn(1, ch, side, side) for ch, side in specs]

    head = FPNHEAD(out_channels=512, num_classes=19, channels=[64, 128, 320, 512])
    logits = head(features)
    print(logits.shape)
