import numba as nb
import numpy as np
import torch

from utils.compressor import Compressor
from utils.sparser import Sparser


class SSFDME(Compressor):
    """Semi-Sparsified Features with Dynamic Mask Encoding.

    Keeps the top-(1 - ratio) fraction of entries exactly (selected by
    ``Sparser``) and quantizes every remaining entry to ``bit`` bits,
    expressed relative to the most recently seen exactly-kept value
    ("dynamic mask encoding").
    """

    def __init__(self, ratio: float = 0.98, bit: int = 2, *args, **kwargs) -> None:
        """
        Args:
            ratio: fraction of entries to quantize; the top (1 - ratio)
                fraction is transmitted exactly.
            bit: quantization bit-width for the non-top entries.
        """
        self.ratio = ratio
        self.bit = bit
        self.sparser = Sparser(ratio)
        # Max representable quantization level; doubles as the sentinel value
        # that marks "exactly kept" positions inside the encoded mask.
        self.length = (1 << bit) - 1
        self.arg = args
        self.kwargs = kwargs

    def __str__(self):
        return f"Semi-sparsified (Top-{int((1 - self.ratio) * 100)}%) " \
               f"Features with Dynamic Mask Encoding ({self.bit}-bit)"

    def encode(self, x: torch.Tensor) -> "tuple[torch.Tensor, np.ndarray]":
        """Compress ``x`` into (exact top-k values, quantized mask).

        Returns:
            vector: the exactly-kept values (as produced by ``Sparser.encode``;
                presumably in position order — verify against Sparser).
            mask_np: flat array where kept positions hold ``self.length`` and
                every other position holds a quantization level in
                ``[.., self.length - 1]``. Shares memory with an internal tensor.

        NOTE(review): assumes ``mask1`` is a 0/1 indicator tensor and that
        ``vector`` is non-empty (``vector[0].item()`` raises otherwise) —
        confirm against the Sparser implementation. Also assumes ``x`` lives
        on CPU, since ``.numpy()`` is called directly.
        """
        with torch.no_grad():
            vector, mask1 = self.sparser.encode(x)
            # Kept positions become the sentinel ``self.length``; others stay 0.
            mask = torch.mul(mask1, self.length)

            # Flat numpy views sharing storage with the tensors above.
            mask_np = mask.view(-1).numpy()
            data_np = x.data.view(-1).numpy()

            # Fills the non-kept slots of mask_np in place with quantized levels.
            self.__encode(mask_np, data_np, self.length, vector[0].item())
        return vector, mask_np

    def decode(self, x: torch.Tensor, vector: torch.Tensor, mask: np.ndarray) -> None:
        """Reconstruct ``x`` in place from an (exact values, mask) pair.

        ``mask`` is overwritten in place with the reconstructed values, then
        viewed as a tensor (``torch.from_numpy`` shares memory) and assigned
        to ``x.data``.

        NOTE(review): the in-place scaling in ``__decode`` only preserves
        fractional values if ``mask`` has a float dtype — confirm the dtype
        produced by ``encode``.
        """
        with torch.no_grad():
            self.__decode(vector.numpy(), mask, self.length)
            x.data = torch.from_numpy(mask).view(x.size())

    @staticmethod
    @nb.njit
    def __encode(mask: np.ndarray, data: np.ndarray, length: int, first: float) -> None:
        """Quantize non-kept entries of ``mask`` in place.

        ``last`` tracks the most recent exactly-kept data value; each dropped
        entry is encoded as round(length * value / last), clamped to at most
        ``length - 1`` so it never collides with the ``length`` sentinel.
        Positions before the first kept value scale against ``first``
        (``vector[0]``), mirroring ``__decode``'s use of ``vector[0]`` there.

        NOTE(review): the quantized level is not clamped below 0, so a value
        whose sign differs from ``last`` yields a negative level — confirm
        whether inputs guarantee matching signs.
        """
        last = first
        for i, (x, y) in enumerate(zip(mask, data)):
            if x:
                last = y
            else:
                mask[i] = min(round(length * y / last), length - 1)

    @staticmethod
    @nb.njit
    def __decode(vector: np.ndarray, mask: np.ndarray, length: int) -> None:
        """Expand ``mask`` in place back to approximate values.

        Sentinel entries (``== length``) are replaced by the next exact value
        from ``vector``; quantized entries are rescaled by the most recently
        consumed exact value (``vector[0]`` before any sentinel is seen,
        matching the ``first`` seed used by ``__encode``).
        """
        cur = 0
        for i, val in enumerate(mask):
            if val == length:
                mask[i] = vector[cur]
                cur += 1
            else:
                mask[i] *= (vector[max(0, cur - 1)] / length)
