"""This trainer includes the fundamental validation/test steps of SED
"""
import torch
import scheduler
import pytorch_lightning as pl
import torch.nn as nn
import loss
import random
import torch.nn.functional as F
from model.utils import load_prev_model_ckpt
from trainer.augms.mixup import mixup
from trainer.SEDBaseTrainer import SEDBaseTrainer
from trainer.utils.base_cli import BaseCLI
from pytorch_lightning.cli import LightningArgumentParser
from pytorch_lightning.callbacks import ModelCheckpoint
from trainer.utils.save_config_callback import MySaveConfigCallback
from scheduler.CosDown import WeightScheduler
from data_loader.transforms.SED import SEDNorm
from data_loader.transforms.ATSTFrame import ATSTNorm

class IntplContrastiveTrainer(SEDBaseTrainer):
    """Mean-teacher SED trainer with an additional interpolation-based
    contrastive (BYOL-style) objective on out-of-domain audio.

    Each training batch is a pair ``(id_batch, od_batch)``:

    * ``id_batch`` (in-domain) drives the supervised BCE loss on strong/weak
      labels plus a mean-teacher consistency loss.
    * ``od_batch`` (out-of-domain) provides two augmented views that feed the
      contrastive heads of the student/teacher models.

    Args:
        contrastive_loss: module computing the BYOL loss between student
            predictions and teacher projections.
        contrastive_weight: scheduler yielding the contrastive weight for a
            given optimizer step.
        cl_weight: base scale of the contrastive loss.
        cl_decay: if True, additionally scale ``cl_weight`` by the squared
            scheduled interpolation weight each step.
        psd_lo_thd: lower confidence threshold (pseudo-labels / hard targets).
        psd_hi_thd: upper confidence threshold (pseudo-labels / hard targets).
        psd_label_filter: if True, keep only out-of-domain clips for which the
            teacher is confident about at least one class.
        ckpt_path: optional checkpoint to initialize student/teacher from.
        hard_MT: use hard (thresholded) mean-teacher targets instead of the
            soft consistency loss.
    """

    def __init__(
        self,
        contrastive_loss: nn.Module,
        contrastive_weight: WeightScheduler,
        cl_weight: float = 0.5,
        cl_decay: bool = True,
        psd_lo_thd: float = 0.05,
        psd_hi_thd: float = 0.95,
        psd_label_filter: bool = False,
        ckpt_path: str = None,
        hard_MT: bool = True,
        *args,
        **kwargs
        ):
        super().__init__(*args, **kwargs)
        self.contrastive_loss = contrastive_loss
        self.contrastive_weight = contrastive_weight
        self.cl_weight = cl_weight
        self.cl_decay = cl_decay
        self.psd_lo_thd = psd_lo_thd
        self.psd_hi_thd = psd_hi_thd
        self.psd_label_filter = psd_label_filter
        self.sed_norm = SEDNorm()
        self.atst_norm = ATSTNorm()
        self.use_hard_MT = hard_MT
        if ckpt_path is not None:
            params = torch.load(ckpt_path, map_location="cpu")["state_dict"]
            sed_student_params = {k.replace("sed_student.", ""): v for k, v in params.items() if "sed_student." in k}
            sed_teacher_params = {k.replace("sed_teacher.", ""): v for k, v in params.items() if "sed_teacher." in k}
            # Try a direct (non-strict) load first. If keys do not line up,
            # the checkpoint comes from a different trainer and is loaded
            # part-by-part via load_prev_model_ckpt instead.
            missing_keys, unexpected_keys = self.sed_student.load_state_dict(sed_student_params, strict=False)
            if len(missing_keys) > 0 or len(unexpected_keys) > 0:
                print(f"The checkpoint provided is not from the current trainer! \n Given: {ckpt_path}")
                self.sed_student = load_prev_model_ckpt(self.sed_student, ckpt_path)
                self.sed_teacher = load_prev_model_ckpt(self.sed_teacher, ckpt_path)
            else:
                self.sed_teacher.load_state_dict(sed_teacher_params, strict=False)

    def separate_opt_params(self, model):
        """Group ``model`` parameters for layer-wise optimization.

        Returns:
            (cnn_params, rnn_params, tfm_params) where ``tfm_params`` lists
            the ATST parameter groups from the deepest to the shallowest:
            final frame norm, transformer blocks 11..0, then everything else
            (patch/positional embeddings etc.).
        """
        cnn_params = []
        rnn_params = []
        # Slot 0: ATST params not matched below; slots 1-12: transformer
        # blocks 0-11; slot 13: final frame norm.
        tfm_params = [[] for _ in range(14)]
        for name, param in model.named_parameters():
            if "atst_frame" not in name:
                # Non-ATST side: CNN front-end vs everything else (RNN/head).
                (cnn_params if "cnn" in name else rnn_params).append(param)
                continue
            # Trailing dot avoids "blocks.1." matching "blocks.10."/"blocks.11.".
            for block_idx in range(12):
                if f"blocks.{block_idx}." in name:
                    tfm_params[block_idx + 1].append(param)
                    break
            else:
                if ".norm_frame." in name:
                    tfm_params[13].append(param)
                else:
                    tfm_params[0].append(param)
        return cnn_params, rnn_params, list(reversed(tfm_params))

    def configure_optimizers(self):
        """Build the optimizer and per-step LR scheduler from the ``opts`` /
        ``schs`` dictionaries configured by the base trainer."""
        param_groups = [
            {"params": [p for p in self.sed_student.parameters() if p.requires_grad], "lr": self.opts["optimizer_params"]["lr"]},
            ]
        opts = getattr(torch.optim, self.opts["optimizer"])(param_groups)
        schs = getattr(scheduler, self.schs["scheduler"])(opts, **self.schs["scheduler_params"])

        return {"optimizer": opts, "lr_scheduler": {"scheduler": schs, "interval": "step"}}

    def update_ema(self, alpha, global_step, model, ema_model):
        """EMA update of ``ema_model`` (teacher) towards ``model`` (student).

        Early in training the effective alpha is 1 - 1/(step+1), i.e. a true
        running average, until it reaches the configured ``alpha``.
        """
        alpha = min(1 - 1 / (global_step + 1), alpha)
        for (_, ema_params), params in zip(ema_model.named_parameters(), model.parameters()):
            ema_params.data.mul_(alpha).add_(params.data, alpha=1 - alpha)

    def training_step(self, batch, batch_idx):
        """One optimization step over a paired (in-domain, out-of-domain) batch."""
        id_batch, od_batch = batch
        feats, labels, params = id_batch
        od_feats, od_view1, od_view2, contrastive_mask, od_params = od_batch
        # Optionally drop out-of-domain clips the teacher is uncertain about
        # before using them as unlabelled data in the mean-teacher step.
        if self.psd_label_filter:
            psd_labels = self.pseudo_label_rule(od_feats)
            for i in range(len(od_feats)):
                od_feats[i] = od_feats[i][psd_labels]
        label_types = [x["label_type"] for x in params]
        strong_mask = torch.tensor(["strong" in x for x in label_types], device=labels.device).bool()
        weak_mask = torch.tensor(["weak" in x for x in label_types], device=labels.device).bool()
        # Every in-domain clip must be either strongly or weakly labelled.
        assert sum(strong_mask) + sum(weak_mask) == labels.shape[0], \
            f"strong: {sum(strong_mask)}, weak: {sum(weak_mask)}, total: {labels.shape[0]}"
        loss_sup, loss_id = self.in_domain_step(
            feats=feats,
            labels=labels,
            strong_mask=strong_mask,
            weak_mask=weak_mask,
            unlabelled_feats=od_feats
        )
        loss_od = self.out_domain_step(
            view1=od_view1,
            view2=od_view2,
            mask=contrastive_mask
        )

        # Ramp the consistency loss in with the LR warmup; the contrastive
        # term additionally follows the scheduled (squared) weight.
        # NOTE(review): _get_scaling_factor/_step_count are private scheduler
        # attributes — confirm the configured scheduler class provides them.
        warmup = self.lr_schedulers()._get_scaling_factor()
        weight = warmup

        intp_weight = self.contrastive_weight.get_weight(self.lr_schedulers()._step_count) ** 2
        loss_id = loss_id * weight
        cl_weight = self.cl_weight * intp_weight if self.cl_decay else self.cl_weight
        loss_od = loss_od * cl_weight

        self.log("train/intp_weight", intp_weight, prog_bar=False)
        tot_loss = loss_sup + loss_id + loss_od
        self.log("train-intp/MeanTeacher", loss_id, prog_bar=True)
        self.log("train-intp/Contrastive", loss_od, prog_bar=True)
        self.log("train-intp/Supervised", loss_sup, prog_bar=True)
        self.log("train-intp/Total", tot_loss, prog_bar=True)
        return tot_loss

    def detect(self, model, feats):
        """Apply the per-branch normalizations and run ``model`` on
        ``[cnn_feats, atst_feats]``."""
        cnn_feats, atst_feats = self.sed_norm(feats[0]), self.atst_norm(feats[1])
        feats = [cnn_feats, atst_feats]
        return model(feats)

    def in_domain_step(self, feats, labels, strong_mask, weak_mask, unlabelled_feats):
        """Supervised BCE + mean-teacher consistency on in-domain data.

        Args:
            feats: ``[cnn_feats, atst_feats]`` for the labelled batch.
            labels: frame-level targets for the labelled batch.
            strong_mask / weak_mask: boolean masks over the labelled batch.
            unlabelled_feats: same two-branch structure for unlabelled clips;
                concatenated after the labelled ones, so only the first
                ``in_domain_len`` predictions carry labels.

        Returns:
            (total_supervised_loss, total_consistency_loss)
        """
        in_domain_len = feats[0].shape[0]
        # Clip-level (weak) targets: class is present if active in any frame.
        labels_weak = (torch.sum(labels[weak_mask], -1) > 0).float()

        cnn_feats_weak = feats[0][weak_mask]
        atst_feats_weak = feats[1][weak_mask]
        cnn_feats_strong = feats[0][strong_mask]
        atst_feats_strong = feats[1][strong_mask]
        cnn_feats_unlabelled = unlabelled_feats[0]
        atst_feats_unlabelled = unlabelled_feats[1]

        # Mixup is applied to the CNN branch only (the ATST branch keeps the
        # raw features), with probability 0.5.
        if 0.5 > random.random():
            cnn_feats_weak, labels_weak = mixup(cnn_feats_weak, labels_weak, mixup_label_type="soft")
            cnn_feats_strong, labels[strong_mask] = mixup(cnn_feats_strong, labels[strong_mask], mixup_label_type="soft")
            cnn_feats_unlabelled = mixup(cnn_feats_unlabelled, None, mixup_label_type="soft")

        # NOTE(review): indexing the re-concatenated predictions with
        # strong_mask/weak_mask below assumes the sampler orders each batch
        # as strong-then-weak — confirm against the data loader.
        cnn_feats = torch.cat([cnn_feats_strong, cnn_feats_weak, cnn_feats_unlabelled], dim=0)
        atst_feats = torch.cat([atst_feats_strong, atst_feats_weak, atst_feats_unlabelled], dim=0)

        cnn_feats = self.sed_norm(cnn_feats)
        atst_feats = self.atst_norm(atst_feats)

        # Student forward over labelled + unlabelled clips.
        strong_preds_student, weak_preds_student = self.sed_student([cnn_feats, atst_feats])
        # Supervised losses on the labelled part only.
        loss_strong = self.supervised_loss(strong_preds_student[:in_domain_len][strong_mask], labels[:in_domain_len][strong_mask])
        loss_weak = self.supervised_loss(weak_preds_student[:in_domain_len][weak_mask], labels_weak)
        tot_loss_supervised = loss_strong + loss_weak

        # Teacher forward (no grad); its supervised losses are logged for
        # monitoring only and never backpropagated.
        with torch.no_grad():
            strong_preds_teacher, weak_preds_teacher = self.sed_teacher([cnn_feats, atst_feats])
            loss_strong_teacher = self.supervised_loss(strong_preds_teacher[:in_domain_len][strong_mask], labels[:in_domain_len][strong_mask])
            loss_weak_teacher = self.supervised_loss(weak_preds_teacher[:in_domain_len][weak_mask], labels_weak)

        # Mean-teacher consistency over the whole batch (labelled included).
        if self.use_hard_MT:
            strong_self_sup_loss = self.hard_MT(strong_preds_student, strong_preds_teacher.detach())
            weak_self_sup_loss = self.hard_MT(weak_preds_student, weak_preds_teacher.detach())
        else:
            strong_self_sup_loss = self.unsupervised_loss(strong_preds_student, strong_preds_teacher.detach())
            weak_self_sup_loss = self.unsupervised_loss(weak_preds_student, weak_preds_teacher.detach())

        tot_self_loss = (strong_self_sup_loss + weak_self_sup_loss)
        step_num = self.lr_schedulers()._step_count

        self.log("train/student/loss_strong", loss_strong)
        self.log("train/student/loss_weak", loss_weak)
        self.log("train/teacher/loss_strong", loss_strong_teacher)
        self.log("train/teacher/loss_weak", loss_weak_teacher)
        self.log("train/step", step_num, prog_bar=False)
        self.log("train/student/weak_self_sup_loss", weak_self_sup_loss)
        self.log("train/student/strong_self_sup_loss", strong_self_sup_loss)
        self.log("train/student/tot_self_loss", tot_self_loss, prog_bar=True)
        self.log("train/student/tot_supervised", tot_loss_supervised, prog_bar=True)

        return tot_loss_supervised, tot_self_loss

    def out_domain_step(self, view1, view2, mask):
        """BYOL-style contrastive step on two augmented out-of-domain views.

        ``view1``/``view2`` are lists of per-branch feature tensors; matching
        entries are concatenated along the batch dimension so the loss covers
        both views symmetrically. Returns the scalar BYOL loss.
        """
        feats = [torch.cat([view1[i], view2[i]], dim=0) for i in range(len(view1))]
        # Freeze batch-norm statistics: the contrastive pass must not pollute
        # the running stats used by the SED branches.
        self.sed_student.tracking_bn_stats(False)
        self.sed_teacher.tracking_bn_stats(False)
        # The mask is applied on the student side only; the teacher sees the
        # unmasked input.
        stu_proj, stu_pred = self.sed_student.contrastive_forward(feats, mask=mask, apply_mask=True)
        with torch.no_grad():
            tea_proj, tea_pred = self.sed_teacher.contrastive_forward(feats, mask=mask, apply_mask=False)

        # Student predictions chase teacher projections; the loss also returns
        # feature std-devs (presumably to monitor representation collapse).
        stu_feats_std, tea_feats_std, byol_loss = self.contrastive_loss(
            stu_pred,
            tea_proj
        )
        self.sed_student.tracking_bn_stats(True)
        self.sed_teacher.tracking_bn_stats(True)
        self.log("selfsl/BYOL", byol_loss)
        self.log("selfsl/step", self.lr_schedulers()._step_count, prog_bar=False)
        self.log("selfsl/student_std", stu_feats_std, prog_bar=False)
        self.log("selfsl/teacher_std", tea_feats_std, prog_bar=False)

        return byol_loss

    def pseudo_label_rule(self, feats):
        """Return a per-clip boolean mask of teacher-confident clips.

        A clip is kept when at least one class probability lies outside the
        uncertain band (psd_lo_thd, psd_hi_thd), i.e. the teacher makes at
        least one confident high or low prediction for it.
        """
        with torch.no_grad():
            cnn_feats = self.sed_norm(feats[0])
            atst_feats = self.atst_norm(feats[1])
            # eval() so dropout/BN behave deterministically while predicting.
            self.sed_teacher.eval()
            _, weak_preds = self.sed_teacher([cnn_feats, atst_feats])
            self.sed_teacher.train()
        id_psd_labels = (weak_preds > self.psd_hi_thd) | (weak_preds < self.psd_lo_thd)
        id_psd_labels = id_psd_labels.sum(-1) > 0  # at least one confident class
        self.log("train-intp/psd_label_rate", id_psd_labels.float().mean().item(), on_epoch=True)
        return id_psd_labels

    def hard_MT(self, stu_preds, tea_preds):
        """Hard mean-teacher loss: BCE of student predictions against
        thresholded teacher predictions.

        Teacher outputs above ``psd_hi_thd`` become 1, below ``psd_lo_thd``
        become 0; everything in between is marked -1 and excluded. Returns a
        zero tensor when no teacher prediction is confident.
        """
        hard_preds = torch.zeros_like(tea_preds) - 1
        hard_preds[tea_preds > self.psd_hi_thd] = 1.0
        hard_preds[tea_preds < self.psd_lo_thd] = 0.0
        valid_mask = hard_preds != -1
        if valid_mask.sum() == 0:
            # Guard before averaging: mean() over an empty selection is NaN.
            return torch.zeros((), device=stu_preds.device)
        # Zero out ignored positions so BCE inputs/targets stay in [0, 1];
        # they are dropped by the mask before averaging anyway.
        stu_preds = stu_preds * valid_mask.float()
        hard_preds = hard_preds * valid_mask.float()
        # The factor 5 is a fixed loss scale inherited from the recipe.
        mt_loss = F.binary_cross_entropy(stu_preds, hard_preds, reduction="none") * 5
        return mt_loss[valid_mask].mean()

class TrainCLI(BaseCLI):
    """CLI entry point: registers a ModelCheckpoint callback with project
    defaults on top of the base CLI's model-invariant arguments."""

    def add_arguments_to_parser(self, parser: LightningArgumentParser) -> None:
        # Expose ModelCheckpoint options under the "model_checkpoint" namespace
        # and pre-fill them with the project's checkpointing policy.
        parser.add_lightning_class_args(ModelCheckpoint, "model_checkpoint")
        parser.set_defaults(
            {
                "model_checkpoint.filename": "epoch{epoch}_metric{val/metric:.4f}",
                "model_checkpoint.monitor": "val/metric",
                "model_checkpoint.mode": "min",
                "model_checkpoint.every_n_epochs": 1,
                "model_checkpoint.save_top_k": 10,  # keep the 10 best checkpoints
                "model_checkpoint.auto_insert_metric_name": False,
                "model_checkpoint.save_last": True,
            }
        )
        self.add_model_invariant_arguments_to_parser(parser)

if __name__ == "__main__":
    # Silence noisy third-party warnings before launching the CLI.
    import warnings
    warnings.filterwarnings("ignore")

    cli = TrainCLI(
        IntplContrastiveTrainer,
        pl.LightningDataModule,
        subclass_mode_data=True,
        save_config_callback=MySaveConfigCallback,
        save_config_kwargs={"overwrite": True},
    )