import torch
import torch.nn as nn
import torch.nn.functional as F
from model.modules.atst.atst_model import ATST
from model.modules.classifier import BaseCls
from model.modules.cnn import CNN
from model.modules.rnn import RNN
import math
from copy import deepcopy
from model.modules.initialization import trunc_normal_
import copy


class ATSTSED(nn.Module):
    """Sound-event-detection model fusing a CRNN branch with a pretrained
    ATST-Frame encoder.

    The trainable CNN branch and the (frozen by default) ATST-Frame branch
    are computed in parallel; the ATST embeddings are time-pooled to the
    CNN frame rate, concatenated channel-wise, and projected back to the
    CNN width by ``merge_layer`` before the RNN and classifier heads.

    Args:
        cnn: CNN feature extractor; ``cnn.channels[-1]`` is its output width.
        rnn: recurrent encoder applied to the merged features.
        classifier: head producing (strong, weak) predictions.
        projector: projection head for the contrastive branch; a deep copy
            of it is used as the predictor head.
        atst_dropout: dropout passed through to the ATST-Frame encoder.
        atst_train: when True, ATST weights are made trainable again
            (they are detached/frozen in ``init_atst``).
    """

    def __init__(
        self,
        cnn: CNN,
        rnn: RNN,
        classifier: BaseCls,
        projector: BaseCls,
        atst_dropout: float = 0.0,
        atst_train: bool = False,
    ):
        super().__init__()
        self.cnn = cnn
        self.rnn = rnn
        self.classifier = classifier
        self.atst_dropout = atst_dropout
        self.init_atst()
        # Project [CNN features ++ ATST embeddings] back down to the CNN width.
        self.merge_layer = nn.Linear(
            cnn.channels[-1] + self.atst_frame.atst.embed_dim, cnn.channels[-1]
        )
        if atst_train:
            # init_atst() detached every ATST weight; re-enable gradients here.
            for param in self.atst_frame.parameters():
                param.requires_grad = True
        self.atst_train = atst_train
        self.proj = projector
        self.pred = deepcopy(projector)
        # One learnable mask embedding per CNN stage boundary
        # (e.g. 7 stages -> 8 embeddings, one per stage input/output).
        # The frequency dim halves per stage: 128, 64, 32, ... floored at the
        # deepest stage.
        embeddings = []
        for i in range(len(self.cnn.channels) + 1):
            cur_dim = 128 // int(math.pow(2, min(i, len(self.cnn.channels))))
            layer_emd = nn.Parameter(torch.zeros(1, 1, 1, cur_dim), requires_grad=True)
            trunc_normal_(layer_emd, std=0.02)
            embeddings.append(layer_emd)
        self.layer_mask_embedding = nn.ParameterList(embeddings)
        # BUGFIX: cls_head_forward() uses self.mt_cls_head, but the assignment
        # below was previously commented out, so calling that method raised
        # AttributeError. Restore the mean-teacher classification head.
        self.mt_cls_head = deepcopy(self.classifier)

    def init_atst(self, path=None):
        """Load the pretrained ATST-Frame encoder and freeze its weights.

        Args:
            path: optional checkpoint path; defaults to the bundled
                AudioSet-2M checkpoint.
        """
        atst_path = "./pretrained_ckpts/atstframe_as2M.ckpt" if path is None else path
        print("Loading ATST from:", atst_path)
        self.atst_frame = ATST(atst_path, atst_dropout=self.atst_dropout)
        self.atst_frame.eval()
        # detach_() turns every weight into a non-grad leaf, freezing the
        # encoder; __init__ undoes this when atst_train=True.
        for param in self.atst_frame.parameters():
            param.detach_()

    def _encode(self, x, pretrain_x):
        """Shared trunk: CNN + ATST-Frame -> merged features -> RNN output.

        Args:
            x: CNN-branch input, assumed (batch, n_freq, n_frames) — the
                transpose below maps it to (B, 1, T, F). TODO confirm against
                callers.
            pretrain_x: raw input for the ATST-Frame encoder.

        Returns:
            RNN output of shape (batch, frames, rnn_dim).
        """
        x = x.transpose(1, 2).unsqueeze(1)  # (B, 1, T, F)
        x = self.cnn(x)
        bs, chan, frames, freq = x.size()
        # Flatten channel and frequency into one feature axis per frame.
        x = x.permute(0, 2, 1, 3).reshape(bs, frames, -1)
        pretrain_x = self.atst_frame(pretrain_x)
        # Pool ATST embeddings to the CNN frame rate before fusion.
        pretrain_x = F.adaptive_avg_pool1d(pretrain_x, x.shape[-2]).transpose(1, 2)
        x = self.merge_layer(torch.cat((x, pretrain_x), -1))
        return self.rnn(x)

    def forward(self, x):
        """Run the main SED path.

        Args:
            x: pair (cnn_input, atst_input).

        Returns:
            (strong, weak) predictions from ``self.classifier``.
        """
        x, pretrain_x = x[0], x[1]
        x = self._encode(x, pretrain_x)
        strong, weak = self.classifier(x)
        return strong, weak

    def cls_head_forward(self, x):
        """Run the trunk with frozen BN statistics, then the MT head.

        BatchNorm running-stat updates are disabled for the trunk pass and
        re-enabled afterwards, so this pass does not perturb the stats used
        by the main forward path.

        Args:
            x: pair (cnn_input, atst_input).

        Returns:
            (strong, weak) predictions from the mean-teacher head.
        """
        self.tracking_bn_stats(False)
        x, pretrain_x = x[0], x[1]
        x = self._encode(x, pretrain_x)
        self.tracking_bn_stats(True)
        strong, weak = self.mt_cls_head(x)
        return strong, weak

    def organize_masks(self, mask):
        """Expand a frame-level mask to one mask per CNN stage.

        The first two CNN stages run at 4x / 2x the final time resolution
        (presumably due to time pooling — verify against the CNN config), so
        the mask is repeated accordingly; the single-frame pad compensates
        for the pooling boundary ("add two zeros in the time domain" after
        the second repeat).

        Args:
            mask: (B, T) frame mask at the final CNN time resolution.

        Returns:
            List of ``len(self.cnn.channels)`` masks shaped (B, 1, T_i, 1).
        """
        mask = mask.unsqueeze(1).unsqueeze(3).float()  # (B, 1, T, 1)
        B, _, T, _ = mask.shape
        # 2x time upsampling for the second stage, plus one boundary frame.
        dilated_mask2 = mask.repeat(1, 1, 1, 2).reshape(B, 1, 2 * T, 1)
        dilated_mask2 = nn.functional.pad(dilated_mask2, [0, 0, 1, 0])
        # 4x (+2 frames) time upsampling for the first stage.
        dilated_mask1 = dilated_mask2.repeat(1, 1, 1, 2).reshape(B, 1, -1, 1)
        return [dilated_mask1, dilated_mask2] + [mask] * (len(self.cnn.channels) - 2)

    def contrastive_forward(self, x, mask, apply_mask):
        """Contrastive branch: masked trunk pass + projection/prediction heads.

        Masked time frames are replaced by learnable per-stage embeddings
        before each Conv2d and once more after the last CNN stage.

        Args:
            x: pair (cnn_input, atst_input); cnn_input is
                (batch, n_freq, n_frames).
            mask: pair (cnn_mask, atst_mask); both are repeated along the
                batch axis (two augmented views per sample — TODO confirm).
            apply_mask: when False, the trunk runs unmasked but frames are
                still selected by ``mask`` at the end.

        Returns:
            (projections, predictions) over the masked frames only.
        """
        x, pretrain_x = x[0], x[1]
        mask, pretrain_mask = mask[0], mask[1]
        x = x.transpose(1, 2).unsqueeze(1)  # (B, 1, T, F)
        mask = mask.repeat(2, 1)  # (2B, T)
        pretrain_mask = pretrain_mask.repeat(2, 1)  # (2B, T)
        masks = self.organize_masks(mask)  # one mask per CNN stage

        for i, layer in enumerate(self.cnn.cnn):
            if isinstance(layer, nn.Conv2d) and apply_mask:
                # (i + 1) // 5 maps the module index to its CNN stage;
                # assumes 5 modules per stage in self.cnn.cnn — TODO confirm.
                B, C, H, W = x.shape
                layer_emd = self.layer_mask_embedding[(i + 1) // 5]
                layer_mask = masks[(i + 1) // 5]
                emd = layer_emd.repeat(B, C, H, 1)
                # Blend: keep unmasked frames, substitute the embedding
                # wherever the mask is set.
                x = x * (1 - layer_mask) + emd * layer_mask
            x = layer(x)
        if apply_mask:
            # Apply the last mask/embedding to the final CNN output as well.
            B, C, H, W = x.shape
            layer_emd = self.layer_mask_embedding[-1]
            layer_mask = masks[-1]
            emd = layer_emd.repeat(B, C, H, 1)
            x = x * (1 - layer_mask) + emd * layer_mask
        bs, chan, frames, freq = x.size()
        x = x.permute(0, 2, 1, 3).reshape(bs, frames, -1)

        pretrain_x = self.atst_frame(pretrain_x, pretrain_mask, apply_mask)
        pretrain_x = F.adaptive_avg_pool1d(pretrain_x, x.shape[-2]).transpose(1, 2)
        x = self.merge_layer(torch.cat((x, pretrain_x), -1))

        x = self.rnn(x)
        # Keep only the masked frames for the contrastive heads.
        mask = mask.reshape(bs, -1).bool()  # (B, T)
        x = x[mask]
        projections = self.proj(x)
        predictions = self.pred(projections)
        return projections, predictions

    def tracking_bn_stats(self, tracking: bool = True):
        """Enable/disable running-stat updates on the CNN's BatchNorm layers."""
        for m in self.cnn.cnn:
            if isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)):
                m.track_running_stats = tracking