import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd.function import Function

from .mconv import MConv2d, MLinear

class ActPrune(nn.Module):
    """Activation pruning: zero out activations whose magnitude is below a threshold.

    The threshold is kept as a 0-dim buffer so it follows the module through
    ``.to()`` / ``state_dict`` and can be updated in place by a pruning schedule.

    Args:
        thres: initial magnitude threshold; ``0`` disables pruning (identity).
    """
    def __init__(self, thres):
        super().__init__()
        # BUG FIX: the original registered torch.zeros([]) and discarded the
        # `thres` argument, which (with the inverted comparison below) made
        # forward() return all zeros for every input.
        self.register_buffer('thres', torch.tensor(float(thres)))

    def forward(self, x):
        # Keep activations whose magnitude meets the threshold, prune the rest.
        # BUG FIX: original used `<`, keeping only *small* activations — the
        # opposite of magnitude pruning. With thres == 0 this is now identity.
        m = (x.abs() >= self.thres).to(x.dtype)
        return m * x

class MAConv2d(nn.Conv2d):
    """Conv2d with magnitude-based weight pruning via a binary ``mask`` buffer.

    ``mask`` has the same shape as ``weight``; kept entries are 1, pruned
    entries are 0.  The mask is re-applied on every forward pass so pruned
    weights also receive zero gradient.  Outputs are additionally passed
    through ``ActPrune`` (threshold 0 by default, i.e. a no-op).
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # All-ones mask == no pruning until a pruning step writes zeros into it.
        self.register_buffer('mask', torch.ones_like(self.weight))
        self.act_prune = ActPrune(0)

    def forward(self, x):
        w = self.weight * self.mask
        # BUG FIX: the original called F.conv2d(..., self.padding, ...)
        # directly, which ignores a non-'zeros' padding_mode ('reflect',
        # 'circular', ...). _conv_forward is nn.Conv2d's own dispatch and
        # handles padding_mode correctly; identical to F.conv2d for 'zeros'.
        out = self._conv_forward(x, w, self.bias)
        return self.act_prune(out)

    @classmethod
    def from_dense(cls, mod: nn.Conv2d):
        """Create an MAConv2d mirroring ``mod``, copying weights/bias.

        If ``mod`` already carries a ``mask`` buffer (e.g. another masked
        module type), it is copied as well; otherwise the mask stays all-ones.
        """
        new_mod = cls(
            in_channels = mod.in_channels,
            out_channels = mod.out_channels,
            kernel_size = mod.kernel_size,
            bias = mod.bias is not None,
            stride = mod.stride,
            padding = mod.padding,
            dilation = mod.dilation,
            groups = mod.groups,
            # BUG FIX: padding_mode was not propagated, silently reverting
            # e.g. 'reflect' padding back to 'zeros'.
            padding_mode = mod.padding_mode,
        )
        with torch.no_grad():
            new_mod.weight.copy_(mod.weight)
            if mod.bias is not None:
                new_mod.bias.copy_(mod.bias)
            if hasattr(mod, 'mask'):
                new_mod.mask.copy_(mod.mask)
        return new_mod.to(mod.weight.device)

class MALinear(nn.Linear):
    """Linear layer with magnitude-based weight pruning via a ``mask`` buffer.

    ``mask`` has the same shape as ``weight``; kept entries are 1, pruned
    entries are 0, and the mask is re-applied every forward pass so pruned
    weights also receive zero gradient.  Outputs pass through ``ActPrune``
    (threshold 0 by default, i.e. a no-op).
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # BUG FIX: was torch.empty_like(self.weight) — an *uninitialized*
        # buffer containing garbage, so a fresh MALinear multiplied its
        # weights by arbitrary values. Use an all-ones mask (no pruning),
        # matching MAConv2d.
        self.register_buffer('mask', torch.ones_like(self.weight))
        self.act_prune = ActPrune(0)

    def forward(self, x):
        w = self.weight * self.mask
        out = F.linear(x, w, self.bias)
        return self.act_prune(out)

    @classmethod
    def from_dense(cls, mod: nn.Linear):
        """Create an MALinear mirroring ``mod``, copying weights/bias.

        If ``mod`` already carries a ``mask`` buffer, it is copied too;
        otherwise the mask stays all-ones.
        """
        new_mod = cls(
            in_features = mod.in_features,
            out_features = mod.out_features,
            bias = mod.bias is not None,
        )
        with torch.no_grad():
            new_mod.weight.copy_(mod.weight)
            if mod.bias is not None:
                new_mod.bias.copy_(mod.bias)
            if hasattr(mod, 'mask'):
                new_mod.mask.copy_(mod.mask)
        return new_mod.to(mod.weight.device)

# Substitution table from dense (and previously-masked) layer classes to
# their magnitude-pruned counterparts — presumably consumed by a module
# conversion pass that swaps layers via `from_dense` (TODO confirm caller).
mapping = {
    nn.Conv2d: MAConv2d,
    nn.Linear: MALinear,
    MConv2d: MAConv2d,
    MLinear: MALinear,
}
