'''
Function:
    Implementation of Pyramid Pooling Module
Author:
    Zhenchao Jin
'''
# import torch
# import torch.nn as nn
# import torch.nn.functional as F

import luojianet.numpy as np

import luojianet
import luojianet.nn as nn
import luojianet.ops as ops
from luojianet import ops as P
from luojianet import nn, ops, Parameter, Tensor

from ...backbones import BuildActivation, BuildNormalization, constructnormcfg

class _AdaptiveAvgPool2d(nn.Module):
    """Global average pooling over the spatial axes of an NCHW tensor.

    Averages over axes (2, 3) with kept dims, mapping (N, C, H, W) to
    (N, C, 1, 1) — i.e. the output_size=1 case of adaptive average pooling.
    """

    def __init__(self):
        super(_AdaptiveAvgPool2d, self).__init__()
        # keep_dims=True so the pooled H/W axes remain as singleton dimensions.
        self.reduce_mean = P.ReduceMean(keep_dims=True)

    def forward(self, x):
        return self.reduce_mean(x, (2, 3))
# class _AdaptiveAvgPool2d(nn.Module):
#     def __init__(self, output_size):
#         """Initialize AdaptiveAvgPool2d."""
#         super(_AdaptiveAvgPool2d, self).__init__()
#         self.output_size = output_size

#     def adaptive_avgpool2d(self, inputs):
#         """ NCHW """
#         H = self.output_size[0]
#         W = self.output_size[1]

#         H_start = ops.Cast()(np.arange(start=0, stop=H, dtype=luojianet.float32) * (inputs.shape[-2] / H), luojianet.int64)
#         H_end = ops.Cast()(np.ceil(((np.arange(start=0, stop=H, dtype=luojianet.float32)+1) * (inputs.shape[-2] / H))), luojianet.int64)

#         W_start = ops.Cast()(np.arange(start=0, stop=W, dtype=luojianet.float32) * (inputs.shape[-1] / W), luojianet.int64)
#         W_end = ops.Cast()(np.ceil(((np.arange(start=0, stop=W, dtype=luojianet.float32)+1) * (inputs.shape[-1] / W))), luojianet.int64)

#         pooled2 = []
#         for idx_H in range(H):
#             pooled1 = []
#             for idx_W in range(W):
#                 h_s = int(H_start[idx_H].asnumpy())
#                 h_e = int(H_end[idx_H].asnumpy())
#                 w_s = int(W_start[idx_W].asnumpy())
#                 w_e = int(W_end[idx_W].asnumpy())
#                 res = inputs[:, :, h_s:h_e, w_s:w_e]
#                 # res = inputs[:, :, H_start[idx_H]:H_end[idx_H], W_start[idx_W]:W_end[idx_W]]  # slicing a MindSpore tensor this way raises a type error — unclear why
#                 pooled1.append(ops.ReduceMean(keep_dims=True)(res, (-2,-1)))
#             pooled1 = ops.Concat(-1)(pooled1)
#             pooled2.append(pooled1)
#         pooled2 = ops.Concat(-2)(pooled2)

#         return pooled2

#     def forward(self, x):
#         x = self.adaptive_avgpool2d(x)
#         return x

'''PyramidPoolingModule'''
class PyramidPoolingModule(nn.Module):
    """Pyramid Pooling Module (PSPNet-style context aggregation).

    Each branch pools the input, projects it to ``out_channels`` with a
    1x1 conv + norm + activation, and bilinearly upsamples the result back
    to the input's spatial size. The input and all upsampled branch outputs
    are concatenated along the channel axis and fused by a 3x3 bottleneck.

    NOTE(review): ``_AdaptiveAvgPool2d()`` takes no output size and performs
    a global average pool, so every branch pools to 1x1 regardless of its
    ``pool_scale`` — ``pool_scales`` currently only determines the number of
    (identical-scale) branches. Confirm this simplification of true
    multi-scale adaptive pooling is intended.

    Args:
        in_channels (int): channels of the input feature map.
        out_channels (int): channels produced by each branch and the bottleneck.
        pool_scales (iterable[int]): one pooling branch is built per entry.
        align_corners (bool): forwarded to bilinear upsampling.
        norm_cfg (dict | None): normalization config for BuildNormalization.
        act_cfg (dict | None): activation config for BuildActivation.
    """
    def __init__(self, in_channels, out_channels, pool_scales, align_corners=False, norm_cfg=None, act_cfg=None):
        super(PyramidPoolingModule, self).__init__()
        self.align_corners = align_corners
        self.branches = nn.CellList()
        for pool_scale in pool_scales:
            # NOTE(review): pool_scale is not passed to _AdaptiveAvgPool2d (see class docstring).
            self.branches.append(nn.SequentialCell(
                _AdaptiveAvgPool2d(),
                nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0, has_bias=False),
                BuildNormalization(constructnormcfg(placeholder=out_channels, norm_cfg=norm_cfg)),
                BuildActivation(act_cfg)
            ))
        # Fuses the original feature map with all upsampled branch outputs.
        self.bottleneck = nn.SequentialCell(
            nn.Conv2d(in_channels + out_channels * len(pool_scales), out_channels, kernel_size=3, stride=1, padding=1, pad_mode='pad', has_bias=False),
            BuildNormalization(constructnormcfg(placeholder=out_channels, norm_cfg=norm_cfg)),
            BuildActivation(act_cfg)
        )
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.con_op = ops.Concat(axis=1)
    '''forward'''
    def forward(self, x):
        """Return fused pyramid features of shape (N, out_channels, H, W) for NCHW input x."""
        h, w = x.shape[2], x.shape[3]
        pyramid_lvls = [x]
        for branch in self.branches:
            out = branch(x)
            # Upsample the pooled/projected feature back to the input's spatial size.
            prediction = ops.interpolate(out, (h, w), mode='bilinear', align_corners=self.align_corners)
            pyramid_lvls.append(prediction)
        # Concatenate input + branches along the channel axis, then fuse.
        output = self.con_op(pyramid_lvls)
        output = self.bottleneck(output)
        return output