import torch
import torch.nn as nn
import torchvision.transforms.functional as F
from typing import *
from torchaudio.transforms import MelSpectrogram, AmplitudeToDB

from data_loader.transforms.rrc import RandomMaskCrop
from data_loader.transforms.mixup import LogMixupExp
from data_loader.transforms.mask import get_mask
from data_loader.transforms.SED import SEDNorm
from data_loader.utils.io import TorchScaler
from copy import deepcopy

class MinMax:
    """Affine rescale of a tensor from [min, max] into [-1, 1].

    If ``min`` is None, the bounds are computed per call from the input
    itself; otherwise the fixed bounds given at construction are used.
    """

    def __init__(self, min, max):
        # Fixed normalization bounds; ``None`` min selects per-input bounds.
        self.min = min
        self.max = max

    def __call__(self, input):
        if self.min is None:
            # Dynamic range taken from this particular input.
            lo = torch.min(input)
            hi = torch.max(input)
        else:
            lo = self.min
            hi = self.max
        # [lo, hi] -> [0, 1] -> [-1, 1]
        return (input - lo) / (hi - lo) * 2. - 1.

class ATSTNorm(nn.Module):
    """Turn a power mel spectrogram into a normalized log-mel (dB) tensor.

    Power values are converted to dB, clamped to [-50, 80], and min-max
    scaled with fixed bounds into roughly [-1, 1].
    """

    def __init__(self):
        super(ATSTNorm, self).__init__()
        # Power -> dB conversion with 80 dB dynamic-range cap (torchaudio).
        self.amp_to_db = AmplitudeToDB(stype="power", top_db=80)
        # Fixed scaling bounds — presumably dataset statistics; NOTE(review):
        # the lower bound (-79.6482) lies below the clamp floor (-50) used in
        # amp2db — confirm this mismatch is intentional.
        self.scaler = MinMax(min=-79.6482, max=50.6842)

    def amp2db(self, spec):
        """Return `spec` in dB, clamped to the range [-50, 80]."""
        db = self.amp_to_db(spec)
        return db.clamp(min=-50, max=80)

    def forward(self, Mel):
        """Normalize a power mel spectrogram (dB conversion + min-max scale)."""
        return self.scaler(self.amp2db(Mel))

class ATSTInputTransform(nn.Module):
    """Extract a power mel spectrogram from a raw 16 kHz waveform."""

    def __init__(self):
        super(ATSTInputTransform, self).__init__()
        # Audio feature extraction: 16 kHz, 64 mel bins, 10 ms hop, 64 ms window.
        self.Mel = MelSpectrogram(16000, f_min=60, f_max=7800, hop_length=160, win_length=1024, n_fft=1024, n_mels=64)
        # Fix: amp2db() references self.amp_to_db, which was never assigned
        # (calling it raised AttributeError). Initialize it with the same
        # configuration as the sibling ATSTNorm class.
        self.amp_to_db = AmplitudeToDB(stype="power", top_db=80)

    def amp2db(self, spec):
        """Return `spec` converted to dB and clamped to [-50, 80]."""
        return self.amp_to_db(spec).clamp(min=-50, max=80)

    def forward(self, wav):
        """Return the (power) mel spectrogram of `wav`; no dB/normalization applied."""
        Mel = self.Mel(wav)
        return Mel # normed_logMel
        

class RandomCrop(nn.Module):
    """Crop every tensor in a list to fixed height/width ratios at a
    random, shared offset.

    One random offset pair is drawn per call and applied to all tensors,
    so multiple feature streams stay aligned. The input list is mutated
    in place and also returned.
    """

    def __init__(self, crop_rate):
        super().__init__()
        # (height_ratio, width_ratio) of the region kept after cropping.
        self.crop_rate = crop_rate

    @staticmethod
    def get_params(crop_rate: List[float]) -> Tuple[float, float]:
        """Draw random start offsets (as ratios in [0, 1-rate]) for both axes."""
        off_i = torch.rand(1) * (1 - crop_rate[0])
        off_j = torch.rand(1) * (1 - crop_rate[1])
        return off_i, off_j

    def forward(self, x: List[torch.Tensor]):
        off_i, off_j = self.get_params(self.crop_rate)
        for idx in range(len(x)):
            t = x[idx]
            top = int(t.shape[1] * off_i)
            left = int(t.shape[2] * off_j)
            height = int(t.shape[1] * self.crop_rate[0])
            width = int(t.shape[2] * self.crop_rate[1])
            x[idx] = F.crop(t, top, left, height, width)
        return x

class ATSTFrameTransform(nn.Module):
    """Build two augmented views of mel-feature streams plus frame masks.

    ``forward`` takes a list of one or two 2-D feature tensors (stream 0
    is SED-normalized, stream 1 ATST-normalized), optionally random-crops
    them, derives two views via independent log-domain mixups followed by
    a random mask crop, re-scales the first stream of each view, and
    returns ``(view_1, view_2, x, masks)`` where ``x`` is the (verified
    unchanged) input and ``masks`` holds one boolean frame mask per stream.
    """
    def __init__(
        self, 
        crop_size: List[float], 
        rmc_crop_size: List[float],
        rmc_freq_scale: List[float],
        rmc_time_scale: float, 
        mask_ratio: float, 
        mask_len: int
        ):
        """Store augmentation hyper-parameters and build sub-transforms.

        Args:
            crop_size: (ratio_dim1, ratio_dim2) kept by RandomCrop; cropping
                is only applied when neither ratio equals 1.0 (see forward).
            rmc_crop_size: virtual_crop_scale forwarded to RandomMaskCrop.
            rmc_freq_scale: freq_scale forwarded to RandomMaskCrop.
            rmc_time_scale: time_scale forwarded to RandomMaskCrop.
            mask_ratio: fraction of frames to mask via get_mask.
            mask_len: minimum masked-span length passed to get_mask.
        """
        super().__init__()
        self.random_crop = RandomCrop(crop_size)  # crop length in time
        rrc = RandomMaskCrop(virtual_crop_scale=rmc_crop_size, freq_scale=rmc_freq_scale, time_scale=rmc_time_scale)
        # Two separate mixup modules so each view draws its own mixing.
        self.mixup1 = LogMixupExp()
        self.mixup2 = LogMixupExp()
        self.rrc = rrc
        # Per-instance min-max scaling over dims [1, 2]; applied only to the
        # first stream of each view after augmentation.
        self.post_norm = TorchScaler("instance", "minmax", [1, 2])
        self.mask_ratio = mask_ratio
        self.mask_len = mask_len
        self.atst_norm = ATSTNorm()
        self.sed_norm = SEDNorm()

    def forward(self, x: List[torch.Tensor]):
        """Return (view_1, view_2, original x, masks) for the input streams.

        Each element of `x` must be a 2-D tensor; the input tensors are not
        modified (asserted at the end against a deep copy).
        """
        for i in x:
            assert len(i.shape) == 2, "Input tensor must be 2D"
        # Pristine copy used to verify that the pipeline never mutates `x`.
        x_i = deepcopy(x)
        if len(x) == 1:
            normed_x = [self.sed_norm(x[0].unsqueeze(0))]
        elif len(x) == 2:
            normed_x = [self.sed_norm(x[0].unsqueeze(0)), self.atst_norm(x[1].unsqueeze(0))]
        # NOTE(review): for len(x) not in {1, 2}, `normed_x` is never bound and
        # the code below raises NameError — confirm callers pass 1 or 2 streams.
        # NOTE(review): cropping is skipped if EITHER ratio equals 1.0 (`and`);
        # confirm `or` was not intended.
        if self.random_crop.crop_rate[0] != 1.0 and self.random_crop.crop_rate[1] != 1.0:
            normed_x = self.random_crop(normed_x)
            
        # apply mixup: two independent stochastic mixes of the same input
        view_1 = self.mixup1(normed_x)
        view_2 = self.mixup2(normed_x)
        view_1 = self.rrc(view_1)
        view_2 = self.rrc(view_2)
        # Re-scale only the first stream of each view.
        view_1[0] = self.post_norm(view_1[0])
        view_2[0] = self.post_norm(view_2[0])
        # Drop the batch dim that was added for the normalizers.
        view_1 = [v.squeeze(0) for v in view_1]
        view_2 = [v.squeeze(0) for v in view_2]
        # generate mask for this x
        # NOTE(review): `// 4` presumably matches a 4x temporal downsampling in
        # the downstream encoder — confirm.
        mask = get_mask(
            1,
            view_1[0].shape[1] // 4,
            self.mask_ratio,
            no_overlap=True,
            min_length=self.mask_len,
            type="static"
        ).squeeze(0)
        masks = [mask]
        for i in range(1, len(view_1)):
            # interpolate mask to match the feature size of stream i
            target_len = view_1[i].shape[1] // 4 
            mask_i = torch.nn.functional.interpolate(mask.unsqueeze(0).unsqueeze(0).float(), size=target_len, mode='nearest').squeeze(0).squeeze(0).bool()
            masks.append(mask_i)
        # NOTE(review): inputs were asserted 2-D above, so squeeze(0) is a
        # no-op unless dim 0 is a singleton — confirm intent.
        x = [i.squeeze(0) for i in x]
        assert sum([torch.equal(x[i], x_i[i]) for i in range(len(x))]) == len(x), "Input tensor has been changed!"
        return view_1, view_2, x, masks