import warnings

import torch.nn as nn
import torch.nn.functional as F
import torch
from .modules.RNN import BidirectionalGRU
from .modules.CNN_mask import MaskCNN
from .SelfModel import build_mlp

def compute_var(y):
    """Return the per-feature standard deviation of ``y``.

    Flattens every leading dimension so the input becomes
    ``(num_samples, num_features)``, computes the unbiased
    (Bessel-corrected) variance of each feature column using the
    sum / sum-of-squares identity, and returns ``sqrt(var + 1e-6)``.
    The epsilon guards against taking the square root of a tiny
    negative value produced by floating-point cancellation.

    Args:
        y: tensor whose last dimension is the feature dimension;
           must contain at least 2 samples after flattening
           (the unbiased estimator divides by ``n - 1``).

    Returns:
        1-D tensor of length ``y.size(-1)`` on the same device as ``y``.
    """
    y = y.view(-1, y.size(-1))
    # Plain int sample count: keeps all arithmetic on y's device and
    # removes the previous hard-coded .cuda() call, which crashed on
    # CPU-only machines. Results are numerically identical on GPU.
    n = y.size(0)
    col_sum = y.sum(dim=0)
    col_sum_sq = (y ** 2).sum(dim=0)
    # Unbiased variance: (sum(x^2) - (sum x)^2 / n) / (n - 1), expanded.
    var = col_sum_sq / (n - 1) - (col_sum ** 2) / (n * (n - 1))
    return torch.sqrt(var + 1e-6)

class MaskCRNN(nn.Module):
    """CRNN for sound-event detection: a MaskCNN feature extractor followed
    by a bidirectional GRU, with optional attention pooling that turns
    frame-level (strong) predictions into clip-level (weak) ones.

    Args:
        n_in_channel: number of input channels fed to the CNN.
        nclass: number of output classes.
        attention: if True, pool frame predictions with a learned softmax
            attention branch; otherwise average over frames.
        activation: activation used inside MaskCNN (e.g. "glu").
        dropout: dropout rate for the CNN layers and the pre-classifier
            dropout on the RNN output.
        rnn_type: recurrent backbone; only "BGRU" is supported.
        n_RNN_cell: hidden size of each GRU direction.
        n_layers_RNN: number of stacked GRU layers.
        dropout_recurrent: dropout between recurrent layers.
        cnn_integration: if True, the CNN receives a single input channel
            regardless of ``n_in_channel``.
        freeze_bn: stored flag; batch-norm freezing is presumably handled
            by external training code — confirm against callers.
        **kwargs: forwarded to MaskCNN (conv architecture options).

    Raises:
        NotImplementedError: if ``rnn_type`` is not "BGRU".
    """

    def __init__(
        self,
        n_in_channel=1,
        nclass=10,
        attention=True,
        activation="glu",
        dropout=0.5,
        rnn_type="BGRU",
        n_RNN_cell=128,
        n_layers_RNN=2,
        dropout_recurrent=0,
        cnn_integration=False,
        freeze_bn=False,
        **kwargs,
    ):
        super().__init__()
        self.n_in_channel = n_in_channel
        self.attention = attention
        self.freeze_bn = freeze_bn

        # With cnn_integration, channels are folded into the batch dimension
        # upstream, so the CNN itself sees a single channel.
        n_in_cnn = 1 if cnn_integration else n_in_channel

        self.cnn = MaskCNN(
            n_in_channel=n_in_cnn, activation=activation, conv_dropout=dropout, **kwargs
        )

        if rnn_type == "BGRU":
            nb_in = self.cnn.nb_filters[-1]
            self.rnn = BidirectionalGRU(
                n_in=nb_in,
                n_hidden=n_RNN_cell,
                dropout=dropout_recurrent,
                num_layers=n_layers_RNN,
            )
        else:
            # Bug fix: the exception was previously constructed but never
            # raised, silently leaving the model without an RNN.
            raise NotImplementedError("Only BGRU supported for CRNN for now")

        self.dropout = nn.Dropout(dropout)
        self.dense = nn.Linear(n_RNN_cell * 2, nclass)
        self.sigmoid = nn.Sigmoid()

        if self.attention:
            # Separate linear head whose softmax over classes provides
            # per-frame attention weights for clip-level pooling.
            self.dense_softmax = nn.Linear(n_RNN_cell * 2, nclass)
            self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        """Run the full CRNN.

        Args:
            x: input of shape (batch_size, n_frames, n_freq) — it is
               transposed and given a channel axis before the CNN.

        Returns:
            Tuple ``(strong, weak)`` where ``strong`` has shape
            (batch_size, nclass, n_frames) with per-frame sigmoid
            probabilities, and ``weak`` has shape (batch_size, nclass)
            with clip-level probabilities.
        """
        # (bs, frames, freq) -> (bs, 1, freq, frames)? The transpose swaps
        # frames/freq before adding the channel axis; the CNN output is
        # interpreted as (bs, chan, frames, freq) below.
        x = x.transpose(1, 2).unsqueeze(1)
        x = self.cnn(x)
        bs, chan, frames, freq = x.size()
        # Merge channel and frequency axes into a per-frame feature vector.
        x = x.permute(0, 2, 1, 3).reshape(bs, frames, -1)
        # Recurrent features over the frame axis.
        x = self.rnn(x)
        x = self.dropout(x)
        strong_x = self.dense(x)  # [bs, frames, nclass]
        strong = self.sigmoid(strong_x)
        if self.attention:
            sof = self.dense_softmax(x)  # [bs, frames, nclass]
            sof = self.softmax(sof)
            # Clamp keeps the normalizing denominator strictly positive.
            sof = torch.clamp(sof, min=1e-7, max=1)
            weak = (strong * sof).sum(1) / sof.sum(1)  # [bs, nclass]
        else:
            weak = strong.mean(1)

        return strong.transpose(1, 2), weak