import warnings

import torch.nn as nn
import torch
from .modules.RNN import BidirectionalGRU
from .modules.CNN import CNN

class IODAttn(nn.Module):
    """Learned linear attention over concatenated in-domain (id) and
    out-of-domain (od) features.

    The weight matrix is softmax-normalized over its input axis at every
    forward pass, so each output dimension is a convex combination of the
    (id_dim + od_dim) input dimensions.
    """

    def __init__(self, id_dim: int = 128, od_dim: int = 128) -> None:
        super().__init__()
        # Original code used torch.empty(), which leaves the parameter
        # filled with uninitialized memory — initialize it properly.
        self.attn_weight = nn.Parameter(torch.zeros((id_dim + od_dim, od_dim)))
        nn.init.xavier_uniform_(self.attn_weight)

    def forward(self, x):
        """Project features through the softmax-normalized attention matrix.

        Args:
            x: input features, shape [bs, frames, id_dim + od_dim].

        Returns:
            Tensor of shape [bs, frames, od_dim].
        """
        # Normalize over the input-feature axis: each output column sums to 1.
        attn_weight = torch.softmax(self.attn_weight, dim=0)
        # NOTE(review): removed debug np.save() dumps of the weights and the
        # projected features — they wrote files to disk on every forward call.
        return torch.matmul(x, attn_weight)

class DupCNN(nn.Module):
    """Duplicate CNN front-end: two branches (in-domain / out-of-domain)
    whose feature maps are concatenated along the channel axis.

    In "id" mode both branches receive gradients; in "od" mode the
    in-domain branch runs under torch.no_grad() so only cnn_od trains.
    """

    def __init__(self, cnn_id: nn.Module, cnn_od: nn.Module) -> None:
        super().__init__()
        self.cnn_id = cnn_id
        self.cnn_od = cnn_od

    def forward(self, x, mode: str = "id"):
        """Run both branches on x and concatenate their outputs.

        Args:
            x: input batch (4-D feature-map tensor; channel axis is dim 1).
            mode: "id" trains both branches, "od" freezes the id branch.

        Returns:
            torch.cat([feat_id, feat_od], dim=1).

        Raises:
            NotImplementedError: for any mode other than "id" or "od".
        """
        if mode == "id":
            feat_id = self.cnn_id(x)
            feat_od = self.cnn_od(x)
        elif mode == "od":
            # Freeze the in-domain branch: no gradients flow into cnn_id.
            with torch.no_grad():
                feat_id = self.cnn_id(x)
            feat_od = self.cnn_od(x)
        else:
            # Original bug: `return NotImplementedError` handed the exception
            # *class* back to the caller instead of raising it.
            raise NotImplementedError(f"Unknown mode: {mode!r}")
        return torch.cat([feat_id, feat_od], dim=1)
    
class CRNN(nn.Module):
    """CNN + bidirectional-GRU model producing frame-level ("strong") and
    clip-level ("weak") class probabilities.

    A dual-domain CNN front-end (DupCNN, id/od branches) feeds a linear
    projection, a BGRU, and sigmoid classification heads; optional softmax
    attention pooling produces the weak (clip-level) predictions.
    """

    def __init__(
        self,
        n_in_channel=1,
        nclass=10,
        attention=True,
        activation="glu",
        dropout=0.5,
        rnn_type="BGRU",
        n_RNN_cell=128,
        n_layers_RNN=2,
        dropout_recurrent=0,
        cnn_integration=False,
        freeze_bn=False,
        **kwargs,
    ):
        """Build the CNN / RNN / classifier stack.

        Args:
            n_in_channel: input channels fed to the CNN branches.
            nclass: number of output classes.
            attention: if True, use softmax attention pooling for weak preds.
            activation: activation type forwarded to the CNN branches.
            dropout: conv and pre-classifier dropout probability.
            rnn_type: only "BGRU" is supported.
            n_RNN_cell: hidden size of the GRU (bidirectional → 2x on output).
            n_layers_RNN: number of GRU layers.
            dropout_recurrent: dropout inside the recurrent stack.
            cnn_integration: if True, the CNN sees a single input channel.
            freeze_bn: stored on the instance; not consumed in this file —
                presumably used by a train() override elsewhere (TODO confirm).
            **kwargs: extra settings forwarded to both CNN branches.

        Raises:
            NotImplementedError: if rnn_type is not "BGRU".
        """
        super(CRNN, self).__init__()
        self.n_in_channel = n_in_channel
        self.attention = attention
        self.freeze_bn = freeze_bn

        n_in_cnn = n_in_channel
        if cnn_integration:
            n_in_cnn = 1

        # Duplicate CNN front-end: one branch per domain (id / od).
        self.cnn = DupCNN(
            CNN(n_in_channel=n_in_cnn, activation=activation, conv_dropout=dropout, **kwargs),
            CNN(n_in_channel=n_in_cnn, activation=activation, conv_dropout=dropout, **kwargs),
        )
        # Projects the concatenated (id + od) CNN features (256) down to the
        # RNN input size (128).
        self.linear_proj = nn.Linear(256, 128)

        if rnn_type == "BGRU":
            self.rnn = BidirectionalGRU(
                n_in=128,
                n_hidden=n_RNN_cell,
                dropout=dropout_recurrent,
                num_layers=n_layers_RNN,
            )
        else:
            # Original bug: the exception was instantiated but never raised,
            # leaving self.rnn undefined for unsupported rnn_type values.
            raise NotImplementedError("Only BGRU supported for CRNN for now")

        self.dropout = nn.Dropout(dropout)
        self.dense = nn.Linear(n_RNN_cell * 2, nclass)
        self.sigmoid = nn.Sigmoid()

        if self.attention:
            self.dense_softmax = nn.Linear(n_RNN_cell * 2, nclass)
            self.softmax = nn.Softmax(dim=-1)

    def forward(self, x, id_index=None, cut_cnn=False, mode="id"):
        """Compute strong (frame-level) and weak (clip-level) predictions.

        Args:
            x: input spectrogram batch; transposed on dims 1/2 and given a
               channel axis, so it is presumably (bs, n_freq, n_frames) —
               the old "(batch_size, n_channels, n_frames, n_freq)" comment
               did not match the code (TODO confirm against callers).
            id_index: unused; kept for caller compatibility.
            cut_cnn: unused; kept for caller compatibility.
            mode: "id" or "od", forwarded to DupCNN (selects which CNN
                branch is frozen).

        Returns:
            Tuple (strong, weak):
              strong: [bs, nclass, frames] sigmoid frame-level scores.
              weak:   [bs, nclass] attention-pooled (or mean) clip scores.
        """
        # (bs, freq, frames) -> (bs, 1, frames, freq) for the CNN branches.
        x = x.transpose(1, 2).unsqueeze(1)
        x = self.cnn(x, mode=mode)
        bs, chan, frames, freq = x.size()
        # Flatten channel and frequency axes into one feature vector per frame.
        x = x.permute(0, 2, 1, 3).reshape(bs, frames, -1)
        x = self.linear_proj(x)
        x = self.rnn(x)
        x = self.dropout(x)
        strong_x = self.dense(x)  # [bs, frames, nclass]
        strong = self.sigmoid(strong_x)
        if self.attention:
            # Softmax attention pooling: per-frame weights, clamped to avoid
            # division by zero in the normalization.
            sof = self.dense_softmax(x)  # [bs, frames, nclass]
            sof = self.softmax(sof)
            sof = torch.clamp(sof, min=1e-7, max=1)
            weak = (strong * sof).sum(1) / sof.sum(1)  # [bs, nclass]
        else:
            weak = strong.mean(1)

        return strong.transpose(1, 2), weak


