import torch.nn as nn
import torch
import math
from copy import deepcopy
from model.modules.initialization import trunc_normal_
from model.modules.classifier import BaseCls
from model.modules.cnn import CNN
from model.modules.rnn import RNN

class CRNN(nn.Module):
    """CNN front-end + RNN back-end sound-event-detection model, with a
    projector/predictor head pair used only by ``contrastive_forward``.

    ``forward`` returns per-frame ("strong") and clip-level ("weak")
    predictions from ``classifier``; ``contrastive_forward`` masks CNN
    activations with learned embeddings and returns projection/prediction
    pairs for a contrastive objective.
    """

    def __init__(
        self,
        cnn: CNN,
        rnn: RNN,
        classifier: BaseCls,
        projector: BaseCls,
    ):
        super(CRNN, self).__init__()
        self.cnn = cnn
        self.rnn = rnn
        self.classifier = classifier
        self.proj = projector
        # Predictor is an independent deep copy of the projector: same
        # architecture, separate weights (they are never tied afterwards).
        self.pred = deepcopy(projector)
        self.layer_mask_embedding = []
        # One learnable mask embedding per CNN stage boundary (inputs and
        # outputs of each conv stage), broadcast over batch/channel/time.
        # The frequency width halves per stage, starting from 128 —
        # assumes 128 input frequency bins and a freq-pooling of 2 per
        # conv stage; TODO(review): confirm against the CNN config.
        for i in range(len(self.cnn.channels)+1): # e.g. 7 layers, 8 embeddings (each input/output)
            cur_dim = 128 // int(math.pow(2, min(i, len(self.cnn.channels))))
            layer_emd = nn.Parameter(torch.zeros(1, 1, 1, cur_dim), requires_grad=True)
            trunc_normal_(layer_emd, std=0.02)
            self.layer_mask_embedding.append(layer_emd)
        # Re-wrap as a ParameterList so the embeddings are registered
        # with the module (a plain Python list would not be).
        self.layer_mask_embedding = nn.ParameterList(self.layer_mask_embedding)

    def forward(self, x):
        # x is an indexable container; only its first element (the
        # feature batch) is used here.
        x = x[0]
        # input size : (batch_size, n_freq, n_frames)
        x = x.transpose(1, 2).unsqueeze(1)
        x = self.cnn(x)
        bs, chan, frames, freq = x.size()
        # Flatten channels and (pooled) frequency into one feature axis
        # per frame: (bs, frames, chan * freq).
        x = x.permute(0, 2, 1, 3).reshape(bs, frames, -1)
        x = self.rnn(x)
        strong, weak = self.classifier(x)
        return strong, weak

    def organize_masks(self, mask):
        """Expand a frame-level mask to one mask per CNN stage.

        ``mask`` is given at the temporal resolution of the CNN *output*;
        the first two stages run at 4x and 2x that resolution (the CNN's
        first two layers pool time by 2 — TODO(review): confirm), so
        upsampled ("dilated") variants are built for them. Returns a list
        with one mask per entry of ``organize_masks``'s consumer,
        ``contrastive_forward``.
        """
        mask = mask.unsqueeze(1).unsqueeze(3).float()  # (B, 1, T, 1)
        B, _, T, _ = mask.shape
        # 2x temporal upsampling by repeating each frame twice.
        dialated_mask2 = mask.repeat(1, 1, 1, 2).reshape(B, 1, 2 * T, 1)
        # Pad one extra (zero) frame at the start of the time axis —
        # presumably to match an off-by-one from the CNN's pooling;
        # TODO(review): verify the alignment.
        dialated_mask2 = nn.functional.pad(dialated_mask2, [0, 0, 1, 0])
        # 4x (well, 2*(2T+1)) upsampling for the full-resolution stage.
        dialated_mask1 = dialated_mask2.repeat(1, 1, 1, 2).reshape(B, 1, -1, 1)
        # add two zeros in Time domain       
        return [dialated_mask1, dialated_mask2] + [mask] * (len(self.cnn.channels) - 2)

    def contrastive_forward(self, x, mask, apply_mask):
        """Run the model with per-stage feature masking and return
        (projections, predictions) for the masked frames only.

        ``mask`` is (B, T) at CNN-output time resolution; it is repeated
        along the batch axis — presumably because the batch holds two
        augmented views per clip (TODO(review): confirm with callers).
        """
        # input size : (batch_size, n_freq, n_frames)
        x = x.transpose(1, 2).unsqueeze(1)     # (B, 1, T, F)
        mask = mask.repeat(2, 1)               # (B, T)
        masks = self.organize_masks(mask)  # List of mask

        # Iterate the CNN's flat module sequence; before each Conv2d,
        # optionally blend masked positions with the stage's learned
        # embedding. The (i + 1) // 5 index maps a module position to its
        # stage — assumes 5 modules per conv stage in self.cnn.cnn;
        # TODO(review): confirm against the CNN implementation.
        for i, layer in enumerate(self.cnn.cnn):
            if isinstance(layer, nn.Conv2d):
                if apply_mask:
                    B, C, H, W = x.shape
                    layer_emd = self.layer_mask_embedding[(i + 1) // 5]
                    layer_mask = masks[(i + 1)//5]
                    emd = layer_emd.repeat(B, C, H, 1)
                    # Keep unmasked positions, replace masked ones with
                    # the learned embedding.
                    x = x * (1 - layer_mask) + emd * layer_mask
                x = layer(x)
            else:
                x = layer(x)
        if apply_mask:
            # apply mask to the last layer
            B, C, H, W = x.shape
            layer_emd = self.layer_mask_embedding[-1]
            layer_mask = masks[-1]
            emd = layer_emd.repeat(B, C, H, 1)
            x = x * (1 - layer_mask) + emd * layer_mask
        bs, chan, frames, freq = x.size()
        x = x.permute(0, 2, 1, 3).reshape(bs, frames, -1)
        x = self.rnn(x)
        # extract frames by mask
        mask = mask.reshape(bs, -1).bool() # (B, T)
        x = x[mask]  # gathers only masked frames: (n_masked, feat)
        projections = self.proj(x)
        predictions = self.pred(projections)
        return projections, predictions

    def tracking_bn_stats(self, tracking: bool = True):
        """Enable/disable running-statistics tracking on every BatchNorm
        layer inside the CNN (e.g. to freeze stats during EMA/teacher
        passes). Does not touch BatchNorm layers outside self.cnn.cnn.
        """
        bns = [
            m for m in self.cnn.cnn if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d)
        ]
        for m in bns:
            m.track_running_stats = tracking

if __name__ == "__main__":
    from model.modules.cnn import *
    from model.modules.rnn import *
    from model.modules.classifier import *

    # Smoke test: build a CRNN and run one forward pass.
    crnn = CRNN(
        cnn=CNN(
            n_in_channel=1,
            channels=[16, 32, 64, 128, 128, 128, 128],
            kernel_size=[3, 3, 3, 3, 3, 3, 3],
            stride=[1, 1, 1, 1, 1, 1, 1],
            padding=[1, 1, 1, 1, 1, 1, 1],
            pooling=[(2, 2), (2, 2), (1, 2), (1, 2), (1, 2), (1, 2), (1, 2)],
            dropout=0.5,
            normalization="batch"
        ),
        rnn=BidirectionalGRU(
            n_in=128,
            n_hidden=128,
            dropout=0,
            num_layers=2
        ),
        classifier=NonLinearAttentionCls(
            n_in=256,
            n_classes=10,
            dropout=0.5
        ),
        # FIX: `projector` is a required constructor argument (it backs
        # self.proj / self.pred for contrastive_forward); omitting it
        # raised TypeError before the forward pass even ran. Any BaseCls
        # works here since this smoke test only exercises forward().
        projector=NonLinearAttentionCls(
            n_in=256,
            n_classes=10,
            dropout=0.5
        )
    )
    # FIX: forward() unpacks its argument with `x = x[0]` and expects that
    # element to be (batch_size, n_freq, n_frames). The old call passed a
    # bare 4-D tensor, so x[0] sliced off the batch dimension and the
    # 626-bin "frequency" axis could not pool down to the RNN's n_in=128.
    # n_freq must be 128 so the 7 freq-poolings of 2 collapse it to 1.
    x = [torch.randn(2, 128, 626)]
    strong, weak = crnn(x)
    # Time is pooled by 4 (two (2, 2) poolings): 626 -> 156 frames.
    print(strong.shape, weak.shape)  # Expected: (2, 156, 10) and (2, 10)