import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd.function import Function
import copy


def get_mask(s, n, m, ratio):
    """Build an n:m sparsity mask from score tensor ``s``.

    Assuming ``s`` has shape [out_channel, in_channel, ...] (2-D for linear
    weights, 4-D for conv weights).  The in_channel axis is split into
    groups of ``m`` consecutive scores; within each group the ``n`` largest
    softmax values are kept (mask = 1).  ``ratio`` is the zero ratio: the
    fraction of the remaining (non-top-n) positions that actually become
    zero — ``ratio == 0`` keeps everything, ``ratio == 1`` enforces a
    strict n:m pattern, values in between re-enable the highest-scoring
    masked positions.

    Returns a mask with the same shape and dtype as ``s``.
    """
    shape = s.shape
    assert len(shape) in [2, 4]

    if ratio == 0:
        # Zero ratio of 0 means nothing is pruned.
        return torch.ones_like(s)

    oc = shape[0]  # out_channels
    ic = shape[1]  # in_channels: the axis that is partitioned into m-groups

    # Move the in_channel axis last so each m-group is contiguous in memory.
    t = s.transpose(1, -1)
    t_shape = list(t.shape)
    if ic % m > 0:
        # Pad the grouped axis with zeros up to a multiple of m.
        t = F.pad(t, [0, m - ic % m])
    padded_ic = t.shape[-1]

    t = t.reshape([-1, m])
    t = torch.softmax(t, dim=1)
    v, idx = torch.topk(t, n, -1, largest=True, sorted=True)
    with torch.no_grad():
        mask = torch.zeros_like(t)
        mask.scatter_(-1, idx, 1.0)

    if ratio < 1:
        # Re-enable every masked position except the `ratio` fraction with
        # the smallest softmax scores.
        z_values = t[mask == 0]
        k = int(len(z_values) * ratio)
        if k == 0:
            # Fewer than one position would be zeroed: behave like ratio == 0
            # (the original code crashed here with an IndexError on v[-1]).
            return torch.ones_like(s)
        v, idx = torch.topk(z_values, k, largest=False, sorted=True)
        threshold = v[-1].item()
        # Cast back to s's dtype so the return type does not depend on ratio.
        mask = torch.logical_or(mask > 0, t > threshold).to(s.dtype)

    # Invert flatten -> pad -> transpose in the correct order: restore the
    # transposed (padded) layout, strip the channel padding, then move the
    # in_channel axis back to dim 1.  (The previous inverse sliced the
    # m-sized group axis and re-flattened in the wrong spatial order, which
    # misaligned the mask and failed whenever ic % m != 0.)
    t_shape[-1] = padded_ic
    mask = mask.reshape(t_shape)[..., :ic].transpose(1, -1).reshape(shape)
    return mask

class M1Conv2d(nn.Conv2d):
    """Conv2d whose weight is modulated by a learned score and an n:m
    sparsity mask derived from that score (see ``get_mask``).

    The comment in the original source called this "magnitude-based
    pruning"; note the effective weight is ``weight * score * mask``, so
    the learned ``score`` both selects and scales — TODO confirm intent.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Sparsity hyper-parameters kept as buffers so they are saved in
        # the state_dict and moved by .to()/.cuda().
        self.register_buffer('n', torch.zeros(1, dtype=torch.int))
        self.register_buffer('m', torch.zeros(1, dtype=torch.int))
        self.register_buffer('ratio', torch.zeros(1))
        # Most recent mask; uninitialized until the first forward() call.
        self.register_buffer('mask', torch.empty_like(self.weight))
        # Learned pruning scores; left uninitialized — the caller is
        # expected to initialize them before use.
        self.score = nn.Parameter(torch.empty_like(self.weight))

    def forward(self, x):
        mask = get_mask(self.score, self.n.item(), self.m.item(), self.ratio.item())
        self.mask.copy_(mask)
        w = self.weight * self.score * mask
        # Use _conv_forward instead of a raw F.conv2d call: it honours
        # self.padding_mode (F.conv2d alone only supports zero padding).
        return self._conv_forward(x, w, self.bias)

    @classmethod
    def from_dense(cls, mod: nn.Conv2d):
        """Build an M1Conv2d with the same configuration and weights as ``mod``."""
        new_mod = cls(
            in_channels = mod.in_channels,
            out_channels = mod.out_channels,
            kernel_size = mod.kernel_size,
            bias = mod.bias is not None,
            stride = mod.stride,
            padding = mod.padding,
            dilation = mod.dilation,
            groups = mod.groups,
            # Previously dropped: a non-'zeros' padding mode was silently
            # replaced by zero padding in the converted module.
            padding_mode = mod.padding_mode
        )
        with torch.no_grad():
            new_mod.weight.copy_(mod.weight)
            if mod.bias is not None:
                new_mod.bias.copy_(mod.bias)
        return new_mod.to(mod.weight.device)

class M1Linear(nn.Linear):
    """Linear layer whose weight is modulated by a learned score and an
    n:m sparsity mask derived from that score (see ``get_mask``)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Pruning configuration lives in buffers so it is serialized with
        # the state_dict and follows .to()/.cuda() moves.
        for group_param in ('n', 'm'):
            self.register_buffer(group_param, torch.zeros(1, dtype=torch.int))
        self.register_buffer('ratio', torch.zeros(1))
        # Most recent mask, refreshed on every forward pass.
        self.register_buffer('mask', torch.empty_like(self.weight))
        # Learnable pruning score, one entry per weight element.
        self.score = nn.Parameter(torch.empty_like(self.weight))

    def forward(self, x):
        current_mask = get_mask(
            self.score, self.n.item(), self.m.item(), self.ratio.item()
        )
        self.mask.copy_(current_mask)
        effective_weight = self.weight * self.score * current_mask
        return F.linear(x, effective_weight, self.bias)

    @classmethod
    def from_dense(cls, mod: nn.Linear):
        """Build an M1Linear copying configuration and weights from ``mod``."""
        sparse_mod = cls(
            in_features = mod.in_features,
            out_features = mod.out_features,
            bias = mod.bias is not None,
        )
        with torch.no_grad():
            sparse_mod.weight.copy_(mod.weight)
            if mod.bias is not None:
                sparse_mod.bias.copy_(mod.bias)
        return sparse_mod.to(mod.weight.device)

# Dense layer class -> prunable replacement class.
# NOTE(review): presumably consumed by a model-conversion pass that swaps
# modules via the from_dense constructors — confirm with the caller.
mapping = {
    nn.Conv2d: M1Conv2d,
    nn.Linear: M1Linear
}