import torch
import math
import warnings
import torch.nn as nn
import torch.nn.functional as F
from training.data_augms.mask import get_mask
from nets.modules.RNN import BidirectionalGRU

def _no_grad_trunc_normal_(tensor, mean, std, a, b):
    # Cut & paste from PyTorch official master until it's in a few official releases - RW
    # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
    def norm_cdf(x):
        # Computes standard normal cumulative distribution function
        return (1. + math.erf(x / math.sqrt(2.))) / 2.

    if (mean < a - 2 * std) or (mean > b + 2 * std):
        warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
                      "The distribution of values may be incorrect.",
                      stacklevel=2)

    with torch.no_grad():
        # Values are generated by using a truncated uniform distribution and
        # then using the inverse CDF for the normal distribution.
        # Get upper and lower cdf values
        l = norm_cdf((a - mean) / std)
        u = norm_cdf((b - mean) / std)

        # Uniformly fill tensor with values from [l, u], then translate to
        # [2l-1, 2u-1].
        tensor.uniform_(2 * l - 1, 2 * u - 1)

        # Use inverse cdf transform for normal distribution to get truncated
        # standard normal
        tensor.erfinv_()

        # Transform to proper mean, std
        tensor.mul_(std * math.sqrt(2.))
        tensor.add_(mean)

        # Clamp to ensure it's in the proper range
        tensor.clamp_(min=a, max=b)
        return tensor
def compute_var(y):
    """Return the per-feature (last-dim) standard deviation of ``y``.

    ``y`` is flattened to ``(N, features)`` and the unbiased variance is
    computed from running sums via the identity
    ``var = (sum(y^2) - sum(y)^2 / N) / (N - 1)``.

    Fix: the original created the element count with
    ``torch.tensor(...).cuda()``, which crashes on CPU-only machines and
    forces a host->device transfer. The count is a plain Python int here,
    so the function now works on any device ``y`` lives on.

    Note: requires N >= 2 flattened rows (N == 1 divides by zero), same
    as the original.
    """
    y = y.view(-1, y.size(-1))
    n = y.size(0)  # plain int; broadcasting handles the arithmetic below
    total = y.sum(dim=0)
    total_sq = (y ** 2).sum(dim=0)
    var = total_sq / (n - 1) - (total ** 2) / (n * (n - 1))
    # Epsilon guards sqrt against tiny negative values from round-off.
    return torch.sqrt(var + 1e-6)

def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    """In-place truncated-normal init; public wrapper with PyTorch defaults.

    Delegates to :func:`_no_grad_trunc_normal_` and returns the tensor.
    """
    return _no_grad_trunc_normal_(tensor, mean=mean, std=std, a=a, b=b)

class SelfModel(nn.Module):
    """Masked self-supervised wrapper around a CNN+RNN encoder.

    In ``student`` mode, a random time mask is generated, applied inside
    the encoder CNN (at three temporal resolutions) and again on the RNN
    input, where masked frames are replaced by a learned embedding; the
    output is passed through projection and prediction heads. In
    ``teacher`` mode, no masking is applied and only the projection head
    is used.

    Fixes vs. original: the local ``F`` unpacked from ``mask.shape`` in
    ``forward`` shadowed the module-level ``torch.nn.functional as F``
    alias (renamed to ``_``, it was unused), and two redundant
    ``mask = None`` / ``mask_cnn = None`` assignments that were
    immediately overwritten were removed. Behavior is unchanged.
    """

    def __init__(self, encoder, in_dim, out_dim, self_target, mask_rate, mask_len):
        super().__init__()
        self.encoder = encoder
        # Projection + prediction heads (student predicts teacher targets).
        self.proj = build_mlp(2, in_dim, 1024, out_dim)
        self.pred = build_mlp(2, out_dim, 1024, out_dim)
        self.self_target = self_target
        self.mask_rate = mask_rate
        self.mask_len = mask_len
        # Learned embedding substituted at masked time steps before the RNN.
        self.pos_embedding = nn.Parameter(torch.zeros(1, self.encoder.cnn.nb_filters[-1]))
        trunc_normal_(self.pos_embedding, std=.02)

    def forward(self, x, mode="student", mask_rate=None):
        """Encode ``x``; mask in student mode.

        Args:
            x: input batch; assumes (batch, time, freq) with two stacked
               views along the batch dimension in student mode — TODO confirm
               against callers.
            mode: "student" (masked, proj+pred heads, returns mask) or
               "teacher" (unmasked, proj head only).
            mask_rate: optional override; NOTE this persistently updates
               ``self.mask_rate`` as a side effect.

        Returns:
            student: ``(predictions, mask)`` where mask is (B, T).
            teacher: projected features only.
        """
        assert mode in ["student", "teacher"], "mode must be student or teacher"

        if mask_rate is not None:
            self.mask_rate = mask_rate

        if mode == "student":
            # One mask per clip (batch is two views stacked), at 1/4 of the
            # input time resolution (the CNN's final temporal stride).
            mask = get_mask(
                x.shape[0] // 2,
                x.shape[2] // 4,
                self.mask_rate,
                no_overlap=True,
                min_length=self.mask_len,
                type="static").to(x).unsqueeze(-1).unsqueeze(-1)
            mask = mask.repeat(2, 1, 1, 1).permute(0, 2, 1, 3)  # Shape: (B, C, T, F); Repeat two for two views
            B, C, T, _ = mask.shape
            # Upsample the mask to the intermediate CNN resolutions:
            # full rate (4T), half rate (2T), then quarter rate for the
            # remaining five stages.
            mask_cnn = [mask] * 5
            mask_cnn_1 = mask.repeat(1, 1, 1, 2).reshape(B, C, 2 * T, 1)
            mask_cnn_0 = mask_cnn_1.repeat(1, 1, 1, 2).reshape(B, C, 4 * T, 1)
            mask_cnn = [mask_cnn_0, mask_cnn_1] + mask_cnn
        else:
            mask_cnn = None
            mask = None
        x = x.transpose(1, 2).unsqueeze(1)
        x = self.encoder.cnn(x, mask=mask_cnn, track_running_stats=False)
        bs, dim, frames, _ = x.shape
        x = x.permute(0, 2, 1, 3).reshape(bs, frames, -1)

        # Mask the RNN input: masked frames are replaced by the learned
        # pos_embedding so the RNN must reconstruct them from context.
        if mask is not None:
            mask = mask.squeeze(1)
            x = (1 - mask) * x + mask * self.pos_embedding
        x = self.encoder.rnn(x)

        if mode == "student":
            x_interested = self.proj(x)
            x_interested = self.pred(x_interested)
            return x_interested, mask.squeeze(-1)
        elif mode == "teacher":
            x_interested = self.proj(x)
            return x_interested


def build_mlp(num_layers, input_dim, mlp_dim, output_dim):
    """Build an MLP of ``num_layers`` bias-free Linear layers.

    Hidden layers are followed by BatchNorm1d and ReLU; the final layer is
    bare. Transpose3D wraps the BatchNorm so that, for 3-D (N, T, C)
    inputs, normalization runs over the channel dimension BatchNorm1d
    expects at dim 1.
    """
    last = num_layers - 1
    layers = []
    for idx in range(num_layers):
        in_features = input_dim if idx == 0 else mlp_dim
        out_features = output_dim if idx == last else mlp_dim
        layers.append(nn.Linear(in_features, out_features, bias=False))
        if idx != last:
            layers.extend([
                Transpose3D(),
                nn.BatchNorm1d(out_features),
                Transpose3D(),
                nn.ReLU(inplace=True),
            ])
    return nn.Sequential(*layers)

class Transpose3D(nn.Module):
    """Swap the last two dimensions of a 3-D tensor; pass-through otherwise.

    Used around BatchNorm1d in MLP stacks so (N, T, C) activations are
    normalized over the feature dimension.
    """

    def forward(self, x):
        # Non-3-D inputs flow through untouched.
        if x.dim() != 3:
            return x
        # permute(0, 2, 1) is the same view as transpose(1, 2).
        return x.permute(0, 2, 1)