import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

class PAM(nn.Module):
    """Pooling-based Attention Module.

    Produces a softmax weighting over the ``n`` blocks of a 5-D input
    (assumed ``(batch, n, c, h, w)`` — TODO confirm against caller):
    the channel dimension is pooled into a max/mean pair, flattened,
    and fed through a small two-layer MLP classifier.
    """

    def __init__(self, in_shape):
        """
        Args:
            in_shape: ``(n, c, h, w)`` tuple describing one unbatched input;
                ``c`` is only used implicitly (it is the pooled-away dim).
        """
        super(PAM, self).__init__()
        n, c, h, w = in_shape
        self.N_blocks = n
        # After ChannelPooling + Flatten each sample has n*2*h*w features.
        flat_dim = n * 2 * h * w
        hidden = int(np.sqrt(flat_dim))
        self.Classifier = nn.Sequential(
            nn.Linear(flat_dim, hidden),
            nn.ReLU(True),
            nn.Linear(hidden, n)
        )
        # Zero weights + constant bias 1/n make the module's initial output
        # the uniform distribution over the n blocks.
        nn.init.constant_(self.Classifier[2].weight, 0.)
        nn.init.constant_(self.Classifier[2].bias, 1./n)
        self.Flatten = nn.Flatten()

    def ChannelPooling(self, x):
        """Pool the channel dimension (dim 2) to its max and its mean.

        For ``x`` of shape ``(b, n, c, h, w)`` the result is
        ``(b, 2n, 1, h, w)``: all max-pooled blocks concatenated with all
        mean-pooled blocks along dim 1.  NOTE(review): the original comment
        claimed ``(b, n, 2, h, w)``, which would require ``dim=2`` in the
        cat; the flattened feature count is identical either way, so the
        classifier input size is unaffected.
        """
        return torch.cat((torch.max(x, 2)[0].unsqueeze(2),
                          torch.mean(x, 2).unsqueeze(2)), dim=1)

    def forward(self, x):
        """Return per-block softmax weights of shape ``(b, n)``."""
        xp = self.ChannelPooling(x)  # (b, 2n, 1, h, w)
        xf = self.Flatten(xp)        # (b, n*2*h*w)
        xr = self.Classifier(xf)     # (b, n)
        return F.softmax(xr, dim=1)

class stackPAM(nn.Module):
    """Stacked variant of PAM — currently an unimplemented stub.

    Accepts ``in_shape`` for interface parity but defines no layers yet.
    """

    def __init__(self, in_shape):
        # in_shape is intentionally unused for now.
        super().__init__()


class convolutionPAM(nn.Module):
    """Convolutional variant of PAM — currently an unimplemented stub.

    Accepts ``in_shape`` for interface parity but defines no layers yet.
    """

    def __init__(self, in_shape):
        # in_shape is intentionally unused for now.
        super().__init__()
