import torch
import torch.nn as nn
from torch.nn.modules.utils import _pair
import torch.nn.init as init
from torch.nn import functional as F

def weight_1(size):
    """Return a trainable parameter of the given size, filled with ones."""
    ones = torch.ones(size)
    return nn.Parameter(ones)

def bias_init(size):
    """Create a trainable bias parameter of shape (size, 1), Xavier-normal
    initialized.

    Fixes over the original:
      * ``torch.Tensor(size, 1)`` allocates uninitialized memory and is a
        deprecated constructor — use ``torch.empty`` and initialize in place.
      * ``data.required_grad = True`` was a misspelling of ``requires_grad``
        that silently created an unused attribute; ``nn.Parameter`` already
        defaults to ``requires_grad=True``, so no flag needs setting.
    """
    data = nn.Parameter(torch.empty(size, 1))
    init.xavier_normal_(data)  # in-place; returns the same tensor
    return data

class PConv2d(nn.modules.conv._ConvNd):
    """Partial convolution: convolves the masked image and renormalizes each
    output location by the number of valid (mask > 0) input pixels in its
    receptive field; locations with no valid pixels are zeroed.

    Construction mirrors ``nn.Conv2d``; ``forward`` additionally takes a
    single-channel binary mask with the same spatial size as the input.

    Fixes over the original:
      * ``super().__init__`` now passes the ``padding_mode`` argument that
        modern ``_ConvNd`` requires.
      * The all-ones mask kernel is actually frozen — the original set the
        misspelled ``required_grad`` on the module, a silent no-op.
      * ``self.bias`` is 1-D ``(out_channels,)``; the original ``(C, 1)``
        shape is rejected by ``F.conv2d``, and bias was also added twice.
      * Renormalization is vectorized; the original loop indexed
        ``conv_data[:][:][i][j]`` (``[:]`` is identity, so it hit the
        batch/channel axes, not spatial positions) and compared a whole
        tensor with ``<= 0``, both runtime errors.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=False):
        kernel_size = _pair(kernel_size)
        stride = _pair(stride)
        padding = _pair(padding)
        dilation = _pair(dilation)
        super(PConv2d, self).__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            False, _pair(0), groups, bias, 'zeros')
        # Fixed all-ones convolution that counts valid pixels per window.
        self.mask_conv2d = nn.Conv2d(in_channels=1, out_channels=1,
                                     kernel_size=kernel_size, stride=stride,
                                     padding=padding, bias=False)
        with torch.no_grad():
            self.mask_conv2d.weight.fill_(1.0)
        self.mask_conv2d.weight.requires_grad = False  # frozen counting kernel
        # Learnable per-channel bias, added AFTER mask renormalization
        # (per the partial-conv formulation). Must be 1-D for F.conv2d.
        self.bias = nn.Parameter(torch.zeros(out_channels))

    def forward(self, input, mask):
        """Apply the partial convolution.

        Args:
            input: image tensor, shape (N, in_channels, H, W).
            mask:  binary validity mask, shape (N, 1, H, W); broadcast over
                   the input's channel dimension.

        Returns:
            Tensor of shape (N, out_channels, H', W').
        """
        # Valid-pixel count per output location, shape (N, 1, H', W').
        mask_out = self.mask_conv2d(mask)
        # Convolve the masked image WITHOUT bias; bias is added after scaling
        # so it is not distorted by the renormalization.
        conv_data = F.conv2d(input * mask, self.weight, None, self.stride,
                             self.padding, self.dilation, self.groups)
        valid = mask_out > 0
        # 1 / count where any valid pixel exists, 0 elsewhere; clamp guards
        # the division at fully-masked locations (their result is discarded).
        scale = torch.where(valid, 1.0 / mask_out.clamp(min=1e-8),
                            torch.zeros_like(mask_out))
        conv_data = conv_data * scale
        return conv_data + self.bias.view(1, -1, 1, 1)


