# -*- coding: utf-8 -*-
# ===========================================
# @Time    : 2021/9/8 上午11:00
# @Author  : shutao
# @FileName: cpnet.py
# @remark  : 
# 
# @Software: PyCharm
# Github  : https://github.com/NameLacker
# ===========================================

import numpy as np
import paddle

from paddle import nn
from paddle.nn import functional as F
from paddle.vision import resnet18, resnet34, resnet50, resnet101, resnet152

from ..modules import AggregationModule, Conv2D
from .. import BaseNet


class CPNet(BaseNet):
    """Context Prior Network (CPNet) for semantic segmentation.

    A ResNet backbone produces a feature map, from which an affinity
    ("context prior") map over the flattened prior grid is predicted.
    The prior map splits aggregated features into intra-class context
    (affinity-weighted) and inter-class context (complement-weighted);
    both are fused with the backbone features for per-pixel classification.

    NOTE(review): structure follows "Context Prior for Scene Segmentation"
    (CVPR 2020) — confirm against the original paper/config if retraining.
    """

    def __init__(self,
                 n_classes,
                 out_size,
                 prior_channels=512,
                 prior_size=64,
                 am_kernel_size=11,
                 groups=1,
                 channels=512,
                 pretrained=False,
                 backbone="resnet18"
                 ):
        """
        Args:
            n_classes (int): number of segmentation classes.
            out_size (int | tuple): spatial size the output logits are
                upsampled to.
            prior_channels (int): channel count of the aggregated features
                the prior map is computed from.
            prior_size (int): side length of the square prior grid; input
                features are resized to (prior_size, prior_size).
            am_kernel_size (int): kernel size of the aggregation module.
            groups (int): group count for the prior 1x1 convolution.
            channels (int): channel count of the bottleneck output.
            pretrained (bool): load pretrained backbone weights.
            backbone (str): one of resnet18/34/50/101/152.
        """
        super(CPNet, self).__init__()
        self.prior_channels = prior_channels
        self.prior_size = (prior_size, prior_size)
        self.am_kernel_size = am_kernel_size
        self.channels = channels
        # Number of cells in the prior grid. Hoisted here so forward() does
        # not rebuild a scalar tensor on every call (original code created
        # paddle.to_tensor(np.prod(...)) twice per forward pass).
        self._prior_numel = float(np.prod(self.prior_size))

        # Backbone factory and the channel count of its final feature map.
        backbone_version = {"resnet18": [resnet18, 512],
                            "resnet34": [resnet34, 512],
                            "resnet50": [resnet50, 2048],
                            "resnet101": [resnet101, 2048],
                            "resnet152": [resnet152, 2048]}
        assert backbone in backbone_version, "输入的backbone版本不在设置backbone版本中"
        resnet_base = backbone_version[backbone][0]
        self.in_channels = backbone_version[backbone][1]
        # num_classes=-1 / with_pool=False strip the classification head so
        # the backbone returns the raw feature map.
        self.resnet = resnet_base(pretrained=pretrained, num_classes=-1, with_pool=False)

        # Resize backbone features to the fixed prior grid.
        self.transform_inputs = nn.Upsample(self.prior_size, mode='bilinear')
        self.aggregation = AggregationModule(self.in_channels, prior_channels,
                                             am_kernel_size)

        # 1x1 conv predicting, per grid cell, its affinity to every other
        # cell — hence np.prod(prior_size) output channels.
        self.prior_conv = Conv2D(
            self.prior_channels,
            np.prod(self.prior_size),
            kernel_size=1,
            padding=0,
            stride=1,
            groups=groups,
            act=False
        )

        # Projections applied to the intra-/inter-class context branches.
        self.intra_conv = Conv2D(
            self.prior_channels,
            self.prior_channels,
            kernel_size=1,
            padding=0,
            stride=1
        )
        self.inter_conv = Conv2D(
            self.prior_channels,
            self.prior_channels,
            kernel_size=1,
            padding=0,
            stride=1
        )

        # Fuses backbone features + both context branches.
        self.bottleneck = Conv2D(
            self.in_channels + self.prior_channels * 2,
            self.channels,
            kernel_size=3,
            padding=1,
            stride=1
        )

        # Main and auxiliary classification heads, upsampled to out_size.
        self.seg_cls = nn.Sequential(
            Conv2D(self.channels, n_classes, 1),
            nn.Upsample(size=out_size)
        )
        self.stage4_cls = nn.Sequential(
            Conv2D(self.in_channels, n_classes, 1),
            nn.Upsample(size=out_size)
        )

    def _collect_context(self, affinity, flat_value, conv):
        """Aggregate features by an affinity map and project the result.

        Shared by the intra- and inter-class branches (the original code
        duplicated this sequence verbatim for each branch).

        Args:
            affinity (Tensor): (B, N, N) cell-to-cell weights, N = H * W.
            flat_value (Tensor): (B, N, prior_channels) flattened features.
            conv (Layer): 1x1 projection applied to the gathered context.

        Returns:
            Tensor: (B, prior_channels, H, W) context features.
        """
        context = paddle.bmm(affinity, flat_value) / self._prior_numel
        context = context.transpose((0, 2, 1))
        context = context.reshape(
            (-1, self.prior_channels, self.prior_size[0], self.prior_size[1]))
        return conv(context)

    def forward(self, inputs):
        """Run the network.

        Args:
            inputs (Tensor): NCHW image batch.

        Returns:
            tuple: (segmentation logits at ``out_size``,
                    auxiliary logits from backbone features,
                    context prior map of shape (B, N, N), N = prior_size**2 —
                    returned so a prior/affinity loss can supervise it).
        """
        feats = self.resnet(inputs)
        x = self.transform_inputs(feats)
        B, C, H, W = x.shape
        assert self.prior_size[0] == H and self.prior_size[1] == W

        value = self.aggregation(x)

        # Affinity between every pair of grid cells, squashed to (0, 1).
        context_prior_map = self.prior_conv(value)
        context_prior_map = context_prior_map.reshape(
            (B, np.prod(self.prior_size), -1))
        context_prior_map = context_prior_map.transpose((0, 2, 1))
        context_prior_map = F.sigmoid(context_prior_map)
        # Complement of the prior: affinity to cells of *other* classes.
        inter_context_prior_map = 1 - context_prior_map

        # Flatten features to (B, N, prior_channels) for batched matmul.
        flat_value = value.reshape((B, self.prior_channels, -1))
        flat_value = flat_value.transpose((0, 2, 1))

        intra_context = self._collect_context(
            context_prior_map, flat_value, self.intra_conv)
        inter_context = self._collect_context(
            inter_context_prior_map, flat_value, self.inter_conv)

        # Fuse original features with both context branches and classify.
        cp_outs = paddle.concat([x, intra_context, inter_context], axis=1)
        output = self.seg_cls(self.bottleneck(cp_outs))
        stage4 = self.stage4_cls(feats)
        return output, stage4, context_prior_map
