'''
Author: q00475944 qianzehong@huawei.com
Date: 2022-07-22 09:40:49
LastEditors: q00475944 qianzehong@huawei.com
LastEditTime: 2022-07-23 10:56:59
FilePath: /PreciseRoIPooling-master/src/prroi_pooling_ms.py
'''
import mindspore as ms
import math
import numpy as np

import torch
import torch.nn as nn
import torch.nn.functional as F


class PrRoIPool2D:
    """Pure-Python (MindSpore) reference implementation of Precise RoI Pooling.

    Each output bin is computed as the exact integral of the bilinearly
    interpolated input feature map over that bin's continuous window,
    divided by the window area -- instead of sampling a fixed number of
    points as in RoI Align.

    # NOTE(review): handles a single RoI per call; see prroi_pooling_2d.
    """

    def __init__(self, channels, height, width, pooled_height, pooled_width, spatial_scale):
        # C, H, W of the input feature map.
        self.channels = channels
        self.in_height = height
        self.in_width = width
        # Output resolution of each pooled RoI.
        self.pooled_height = pooled_height
        self.pooled_width = pooled_width
        # Maps RoI coordinates (input-image space) into feature-map space,
        # e.g. 1 / stride of the backbone.
        self.spatial_scale = spatial_scale

    def prroi_pooling_2d(self, bottom_data, bottom_rois):
        """Pool a single RoI from a batch of feature maps.

        Args:
            bottom_data: feature maps of shape (N, C, H, W).
            bottom_rois: 1-D tensor [batch_index, x0, y0, x1, y1] describing
                one RoI in input-image coordinates.

        Returns:
            ms.Tensor of shape (C, pooled_height, pooled_width); all zeros
            when the scaled RoI degenerates to zero area.
        """
        roi_batch_ind = bottom_rois[0]
        # Scale the RoI corners into feature-map coordinates.
        roi_start_w = bottom_rois[1] * self.spatial_scale
        roi_start_h = bottom_rois[2] * self.spatial_scale
        roi_end_w = bottom_rois[3] * self.spatial_scale
        roi_end_h = bottom_rois[4] * self.spatial_scale

        # Inverted boxes are clamped to zero size instead of erroring.
        roi_width = max(roi_end_w - roi_start_w, 0.0)
        roi_height = max(roi_end_h - roi_start_h, 0.0)

        # Continuous (fractional) size of one output bin.
        bin_size_h = roi_height / self.pooled_height
        bin_size_w = roi_width / self.pooled_width
        this_data = bottom_data[int(roi_batch_ind), :, :, :]
        win_size = max(0.0, bin_size_w * bin_size_h)

        output_tensor = ms.Tensor(np.zeros([self.channels,self.pooled_height,self.pooled_width]),dtype=ms.float32)

        # Zero-area RoI: nothing to integrate, return zeros.
        if (win_size == 0):
            return output_tensor

        # E.g. input (4, 16, 24, 32) -> output (n, 16, 7, 7), where n is the
        # number of input RoIs (assumed to be 1 here).
        for i in range(self.pooled_height):
            for j in range(self.pooled_width):
                # Continuous window of the RoI integrated for this output bin.
                win_start_w = roi_start_w + bin_size_w * j
                win_start_h = roi_start_h + bin_size_h * i
                win_end_w = win_start_w + bin_size_w
                win_end_h = win_start_h + bin_size_h
                sum_out = ms.Tensor([0]*self.channels, ms.float32)
                # Integer grid-cell range covering the fractional window.
                start_w = math.floor(win_start_w)
                end_w = math.ceil(win_end_w)
                start_h = math.floor(win_start_h)
                end_h = math.ceil(win_end_h)

                # Accumulate the exact integral cell by cell, clipping each
                # unit cell to the window's fractional bounds.
                for h_iter in range(start_h, end_h):
                    for w_iter in range(start_w, end_w):
                        sum_out += self.mat_calculation(this_data, h_iter, w_iter, h_iter + 1, w_iter + 1,
                            # win_start_* is the tighter lower bound within this cell
                            max(win_start_h, float(h_iter)), max(win_start_w, float(w_iter)),
                            # win_end_* is the tighter (pre-rounding) upper bound
                            min(win_end_h, float(h_iter) + 1.0), min(win_end_w, float(w_iter + 1.0)))
                # Average = integral / window area.
                output_tensor[:,i,j] = sum_out / win_size

        return output_tensor

    def mat_calculation(self, this_data, s_h, s_w, e_h, e_w, y0, x0, y1, x1):
        """Closed-form integral of the bilinear interpolant over one cell.

        Integrates the bilinearly interpolated feature over the rectangle
        [x0, x1] x [y0, y1], which lies inside the unit grid cell with
        corners (s_h, s_w) and (e_h, e_w) (e_h = s_h + 1, e_w = s_w + 1
        at the call site). The result is a weighted sum of the four corner
        feature values; each `tmp` below is the analytic weight of one
        corner.

        Returns:
            Per-channel contribution of this cell (length-C tensor; corner
            reads outside the map contribute scalar 0.0 via get_data).
        """
        # Corner (s_h, s_w): top-left.
        alpha = x0 - float(s_w)
        beta = y0 - float(s_h)
        lim_alpha = x1 - float(s_w)
        lim_beta = y1 - float(s_h)
        tmp = (lim_alpha - 0.5*lim_alpha*lim_alpha - alpha + 0.5*alpha*alpha)*(lim_beta - 0.5*lim_beta*lim_beta - beta + 0.5*beta*beta)
        sum_out = self.get_data(this_data, s_h, s_w) * tmp

        # Corner (s_h, e_w): top-right. beta/lim_beta are deliberately
        # carried over from the corner above (same row of the cell).
        alpha = float(e_w) - x1
        lim_alpha = float(e_w) - x0
        tmp = (lim_alpha - 0.5*lim_alpha*lim_alpha - alpha + 0.5*alpha*alpha)*(lim_beta - 0.5*lim_beta*lim_beta - beta + 0.5*beta*beta)
        sum_out += self.get_data(this_data, s_h, e_w) * tmp

        # Corner (e_h, s_w): bottom-left.
        alpha = x0 - float(s_w)
        beta = float(e_h) - y1
        lim_alpha = x1 - float(s_w)
        lim_beta = float(e_h) - y0
        tmp = (lim_alpha - 0.5*lim_alpha * lim_alpha - alpha + 0.5*alpha*alpha)*(lim_beta - 0.5*lim_beta*lim_beta - beta + 0.5*beta*beta)
        sum_out += self.get_data(this_data, e_h, s_w) * tmp

        # Corner (e_h, e_w): bottom-right. beta/lim_beta carried over from
        # the bottom-left corner.
        alpha = float(e_w) - x1
        lim_alpha = float(e_w) - x0
        tmp = (lim_alpha - 0.5*lim_alpha*lim_alpha - alpha + 0.5*alpha*alpha)*(lim_beta - 0.5*lim_beta * lim_beta - beta + 0.5*beta*beta)
        sum_out += self.get_data(this_data, e_h, e_w) * tmp

        return sum_out

    def get_data(self, data, h, w):
        """Return data[:, h, w], or scalar 0.0 when (h, w) falls outside the
        feature map (implicit zero padding at the borders)."""
        overflow = ((h < 0) or (w < 0) or (h >= self.in_height) or (w >= self.in_width))
        retVal = 0.0 if overflow else data[:, h, w]
        return retVal



def test_forward():
    """Smoke-test PrRoIPool2D against a PyTorch average-pooling reference.

    With spatial_scale=0.5 and the RoI [0, 0, 0, 14, 14], the scaled RoI is
    the 7x7 top-left region and each of the 7x7 output bins integrates
    exactly one unit cell, which equals 2x2 average pooling with stride 1
    over that region.

    Returns:
        bool: True when the MindSpore output matches the torch reference
        within rtol=atol=0.1.
    """
    np_arr_in = np.random.randn(4, 16, 24, 32)
    features = torch.FloatTensor(np_arr_in)
    # Reference: 2x2 average pooling, stride 1 (channel dim untouched).
    out_gold = F.avg_pool2d(features, kernel_size=2, stride=1)
    print("out_gold.shape", out_gold.shape)

    # Single RoI: [batch_index, x0, y0, x1, y1] in input-image coordinates.
    roi = [0, 0, 0, 14, 14]
    prroi_pool_2d_ms = PrRoIPool2D(16, 24, 32, 7, 7, 0.5)
    ms_out = prroi_pool_2d_ms.prroi_pooling_2d(
        ms.Tensor(np_arr_in, ms.float32), ms.Tensor(roi, ms.float32))

    print(ms_out.asnumpy())
    print("==============================")
    print(out_gold[0, :, :7, :7].numpy())
    ok = np.allclose(ms_out.asnumpy(), out_gold[0, :, :7, :7].numpy(), 0.1, 0.1)
    print(ok)
    return ok

# Run the forward smoke test when executed as a script.
if __name__ == "__main__":
    test_forward()