"""This trainer includes the fundamental validation/test steps of SED
"""
import torch
import scheduler
import pytorch_lightning as pl
import torch.nn as nn
from trainer.augms.mixup import mixup
from trainer.SEDBaseTrainer import SEDBaseTrainer
from trainer.utils.base_cli import BaseCLI
from pytorch_lightning.cli import LightningArgumentParser
from pytorch_lightning.callbacks import ModelCheckpoint
from trainer.utils.save_config_callback import MySaveConfigCallback
from scheduler.CosDown import WeightScheduler
from lightning.pytorch.utilities import grad_norm


class IntplContrastiveTrainer(SEDBaseTrainer):
    """Mean-teacher SED trainer with an auxiliary BYOL-style contrastive loss.

    In-domain batches are trained with supervised BCE (strong + weak labels)
    plus a mean-teacher consistency loss; out-of-domain batches feed a
    contrastive loss computed between two augmented views. The two objectives
    are blended by an interpolation weight scheduled over optimizer steps.

    Manual optimization is used so the three losses (supervised, mean-teacher,
    contrastive) can periodically be back-propagated separately for per-loss
    gradient-norm diagnostics.
    """

    def __init__(
        self,
        contrastive_loss: nn.Module,
        contrastive_weight: WeightScheduler,
        cl_weight: float = 0.5,
        cl_decay: bool = True,
        psd_lo_thd: float = 0.05,
        psd_hi_thd: float = 0.95,
        psd_label_filter: bool = False,
        *args,
        **kwargs
        ):
        """
        Args:
            contrastive_loss: module returning
                ``(student_feat_std, teacher_feat_std, loss)`` given the
                student prediction and teacher projection.
            contrastive_weight: scheduler yielding the interpolation weight
                for a given optimizer step count.
            cl_weight: base multiplier for the contrastive loss.
            cl_decay: if True, additionally scale ``cl_weight`` by the
                interpolation weight each step.
            psd_lo_thd: lower confidence bound of the "uncertain" region used
                for pseudo-label filtering.
            psd_hi_thd: upper confidence bound of the "uncertain" region.
            psd_label_filter: if True, keep only unlabelled clips whose
                teacher confidence falls in the uncertain region.
        """
        super().__init__(*args, **kwargs)
        self.contrastive_loss = contrastive_loss
        self.contrastive_weight = contrastive_weight
        self.cl_weight = cl_weight
        self.cl_decay = cl_decay
        self.psd_lo_thd = psd_lo_thd
        self.psd_hi_thd = psd_hi_thd
        self.psd_label_filter = psd_label_filter
        self.automatic_optimization = False  # manual optimization

    def check_grad_norm(self, loss_name: str) -> None:
        """Log the total L2 gradient norm of each student sub-module.

        Args:
            loss_name: tag under which the norms are logged
                (``grad_norm/<loss_name>/<module>``).

        The classifier / projector heads may hold no gradients for a given
        loss, in which case ``grad_norm`` returns an empty dict and 0.0 is
        logged instead.
        """
        cnn_grad_norm = grad_norm(self.sed_student.cnn, norm_type=2.0)['grad_2.0_norm_total']
        rnn_grad_norm = grad_norm(self.sed_student.rnn, norm_type=2.0)['grad_2.0_norm_total']
        # Heads may be untouched by some losses; fall back to a zero norm.
        sed_head_grad_norm = grad_norm(self.sed_student.classifier, norm_type=2.0).get(
            'grad_2.0_norm_total', torch.tensor(0.0))
        cl_head_grad_norm = grad_norm(self.sed_student.proj, norm_type=2.0).get(
            'grad_2.0_norm_total', torch.tensor(0.0))
        self.log(f"grad_norm/{loss_name}/cnn", cnn_grad_norm.item(), prog_bar=False)
        self.log(f"grad_norm/{loss_name}/rnn", rnn_grad_norm.item(), prog_bar=False)
        self.log(f"grad_norm/{loss_name}/sed_head", sed_head_grad_norm.item(), prog_bar=False)
        self.log(f"grad_norm/{loss_name}/cl_head", cl_head_grad_norm.item(), prog_bar=False)

    def configure_optimizers(self):
        """Build the optimizer and per-step LR scheduler from the config dicts.

        Optimizer class is looked up by name in ``torch.optim``; the scheduler
        class by name in the project ``scheduler`` package.
        """
        # Single parameter group for the whole student. A separate lower-lr
        # group for the projector was tried and abandoned.
        param_groups = [
            {"params": self.sed_student.parameters(), "lr": self.opts["optimizer_params"]["lr"]},
        ]
        optimizer = getattr(torch.optim, self.opts["optimizer"])(param_groups)
        lr_scheduler = getattr(scheduler, self.schs["scheduler"])(optimizer, **self.schs["scheduler_params"])

        return {"optimizer": optimizer, "lr_scheduler": {"scheduler": lr_scheduler, "interval": "step"}}

    def training_step(self, batch, batch_idx):
        """Manual-optimization step over one (in-domain, out-of-domain) batch pair.

        Computes the supervised, mean-teacher, and contrastive losses, blends
        them with the scheduled interpolation weight, updates the teacher EMA,
        and every 50 global steps back-propagates each loss separately to log
        per-loss gradient norms before the real optimizer step.
        """
        id_batch, od_batch = batch
        feats, labels, params = id_batch
        od_feats, od_view1, od_view2, contrastive_mask, od_params = od_batch
        if self.psd_label_filter:
            # Keep only unlabelled clips whose teacher confidence is uncertain.
            # NOTE(review): contrastive_mask / od_view1 / od_view2 are NOT
            # filtered here — the filter only affects the mean-teacher
            # unlabelled stream. Confirm this asymmetry is intended.
            psd_labels = self.pseudo_label_rule(od_feats)
            for i in range(len(od_feats)):
                od_feats[i] = od_feats[i][psd_labels]
        label_types = [x["label_type"] for x in params]
        strong_mask = torch.tensor(["strong" in x for x in label_types], device=labels.device).bool()
        weak_mask = torch.tensor(["weak" in x for x in label_types], device=labels.device).bool()
        # Every in-domain clip must carry exactly one label type.
        assert sum(strong_mask) + sum(weak_mask) == labels.shape[0], \
            f"strong: {sum(strong_mask)}, weak: {sum(weak_mask)}, total: {labels.shape[0]}"
        loss_sup, loss_id = self.in_domain_step(
            feats=feats,
            labels=labels,
            strong_mask=strong_mask,
            weak_mask=weak_mask,
            unlabelled_feats=od_feats
        )
        loss_od = self.out_domain_step(
            view1=od_view1,
            view2=od_view2,
            mask=contrastive_mask
        )
        # Squared schedule: contrastive weight ramps, mean-teacher weight decays.
        intp_weight = self.contrastive_weight.get_weight(self.lr_schedulers()._step_count) ** 2
        loss_id = loss_id * 2 * (1 - intp_weight)
        cl_weight = self.cl_weight * intp_weight if self.cl_decay else self.cl_weight
        loss_od = loss_od * cl_weight

        self.log("train/intp_weight", intp_weight, prog_bar=False)
        tot_loss = loss_sup + loss_id + loss_od
        self.log("train-intp/MeanTeacher", loss_id, prog_bar=True)
        self.log("train-intp/Contrastive", loss_od, prog_bar=True)
        self.log("train-intp/Supervised", loss_sup, prog_bar=True)
        self.log("train-intp/Total", tot_loss, prog_bar=True)

        opt = self.optimizers()
        sch = self.lr_schedulers()
        self.update_ema(    # update teacher
            self.mean_teacher_ema,
            self.lr_schedulers()._step_count,
            self.sed_student,
            self.sed_teacher,
        )
        # BUGFIX: the gate was `if self.global_step % 50:`, which is truthy on
        # every step EXCEPT multiples of 50 — i.e. the three diagnostic
        # backward passes ran almost every iteration. `% 50 == 0` restores the
        # intended every-50-steps cadence.
        log_grad_norms = self.global_step % 50 == 0
        opt.zero_grad(set_to_none=True)
        if log_grad_norms:
            # Back-propagate each loss in isolation to attribute gradient
            # norms, clearing gradients between passes.
            self.manual_backward(loss_od, retain_graph=True)
            self.check_grad_norm("Contrastive")
            opt.zero_grad(set_to_none=True)
            self.manual_backward(loss_id, retain_graph=True)
            self.check_grad_norm("MeanTeacher")
            opt.zero_grad(set_to_none=True)
            self.manual_backward(loss_sup, retain_graph=True)
            self.check_grad_norm("Supervised")
            opt.zero_grad(set_to_none=True)
        self.manual_backward(tot_loss)
        if log_grad_norms:
            self.check_grad_norm("Total")
        opt.step()
        sch.step()
        return None

    def in_domain_step(self, feats, labels, strong_mask, weak_mask, unlabelled_feats):
        """Supervised BCE + mean-teacher consistency on the in-domain batch.

        Args:
            feats: list of input feature tensors (one per input branch).
            labels: strong-label tensor for the in-domain clips.
            strong_mask: boolean mask of strongly-labelled clips.
            weak_mask: boolean mask of weakly-labelled clips.
            unlabelled_feats: list of unlabelled feature tensors appended to
                the batch for the consistency loss (may be empty).

        Returns:
            (total supervised loss, total self-supervised/consistency loss).
        """
        in_domain_len = feats[0].shape[0]
        # Collapse strong labels over time to clip-level weak targets.
        labels_weak = (torch.sum(labels[weak_mask], -1) > 0).float()
        # Mixup each input branch in place (CRNN inputs only).
        for i in range(len(feats)):
            feats[i][weak_mask], labels_weak = mixup(feats[i][weak_mask], labels_weak)
            feats[i][strong_mask], labels[strong_mask] = mixup(feats[i][strong_mask], labels[strong_mask])
            if unlabelled_feats[i].numel():  # if empty, skip mixup
                unlabelled_feats[i] = mixup(unlabelled_feats[i], None)

        # Merge labelled and unlabelled features (works when unlabelled is empty).
        feats = [torch.cat([x, y]) for x, y in zip(feats, unlabelled_feats)]
        # sed student forward
        strong_preds_student, weak_preds_student = self.sed_student(feats)
        # supervised loss on strong labels
        loss_strong = self.supervised_loss(strong_preds_student[:in_domain_len][strong_mask], labels[:in_domain_len][strong_mask])
        # supervised loss on weakly labelled
        loss_weak = self.supervised_loss(weak_preds_student[:in_domain_len][weak_mask], labels_weak)
        # total supervised loss
        tot_loss_supervised = loss_strong + loss_weak * 0.5

        # Teacher losses are monitored only; no gradients flow to the teacher.
        with torch.no_grad():
            strong_preds_teacher, weak_preds_teacher = self.sed_teacher(feats)
            loss_strong_teacher = self.supervised_loss(strong_preds_teacher[:in_domain_len][strong_mask], labels[:in_domain_len][strong_mask])
            loss_weak_teacher = self.supervised_loss(weak_preds_teacher[:in_domain_len][weak_mask], labels_weak)

        warmup = self.lr_schedulers()._get_scaling_factor()
        weight = 2 * warmup
        # Student-vs-teacher consistency over the whole (labelled + unlabelled) batch.
        strong_self_sup_loss = self.unsupervised_loss(strong_preds_student, strong_preds_teacher.detach())
        weak_self_sup_loss = self.unsupervised_loss(weak_preds_student, weak_preds_teacher.detach())

        tot_self_loss = (strong_self_sup_loss + weak_self_sup_loss)

        step_num = self.lr_schedulers()._step_count

        # Confidence diagnostics on the unlabelled tail of the batch.
        # ("conficence" typo kept in log keys to preserve existing metrics.)
        student_unlabeled_strong_confidence = strong_preds_student[in_domain_len:].detach().mean()
        student_unlabeled_weak_confidence = weak_preds_student[in_domain_len:].detach().mean()
        teacher_unlabeled_strong_confidence = strong_preds_teacher[in_domain_len:].detach().mean()
        teacher_unlabeled_weak_confidence = weak_preds_teacher[in_domain_len:].detach().mean()
        student_unlabeled_weak_max_conficence = weak_preds_student[in_domain_len:].max(-1)[0].detach().mean()
        teacher_unlabeled_weak_max_conficence = weak_preds_teacher[in_domain_len:].max(-1)[0].detach().mean()
        student_unlabeled_weak_under_confidence = (weak_preds_student[in_domain_len:] < self.psd_lo_thd).float().mean()
        teacher_unlabeled_weak_under_confidence = (weak_preds_teacher[in_domain_len:] < self.psd_lo_thd).float().mean()
        student_unlabeled_weak_above_confidence = (weak_preds_student[in_domain_len:] > self.psd_hi_thd).float().mean()
        teacher_unlabeled_weak_above_confidence = (weak_preds_teacher[in_domain_len:] > self.psd_hi_thd).float().mean()

        self.log("train/student/loss_strong", loss_strong)
        self.log("train/student/loss_weak", loss_weak)
        self.log("train/teacher/loss_strong", loss_strong_teacher)
        self.log("train/teacher/loss_weak", loss_weak_teacher)
        self.log("train/step", step_num, prog_bar=False)
        self.log("train/student/weak_self_sup_loss", weak_self_sup_loss)
        self.log("train/student/strong_self_sup_loss", strong_self_sup_loss)
        self.log("train/student/tot_self_loss", tot_self_loss * weight, prog_bar=True)
        self.log("train/weight", weight)
        self.log("train/student/tot_supervised", tot_loss_supervised, prog_bar=True)
        self.log("train/student/unlabeled_strong_confidence", student_unlabeled_strong_confidence, prog_bar=False)
        self.log("train/student/unlabeled_weak_confidence", student_unlabeled_weak_confidence, prog_bar=False)
        self.log("train/teacher/unlabeled_strong_confidence", teacher_unlabeled_strong_confidence, prog_bar=False)
        self.log("train/teacher/unlabeled_weak_confidence", teacher_unlabeled_weak_confidence, prog_bar=False)
        self.log("train/student/unlabeled_weak_max_conficence", student_unlabeled_weak_max_conficence, prog_bar=False)
        self.log("train/teacher/unlabeled_weak_max_conficence", teacher_unlabeled_weak_max_conficence, prog_bar=False)
        self.log("train/student/unlabeled_weak_under_confidence", student_unlabeled_weak_under_confidence, prog_bar=False)
        self.log("train/teacher/unlabeled_weak_under_confidence", teacher_unlabeled_weak_under_confidence, prog_bar=False)
        self.log("train/student/unlabeled_weak_above_confidence", student_unlabeled_weak_above_confidence, prog_bar=False)
        self.log("train/teacher/unlabeled_weak_above_confidence", teacher_unlabeled_weak_above_confidence, prog_bar=False)

        return tot_loss_supervised, tot_self_loss

    def out_domain_step(self, view1, view2, mask):
        """BYOL-style contrastive step on two augmented out-of-domain views.

        Args:
            view1, view2: the two augmented views of the same clips.
            mask: contrastive mask applied to the student forward only.

        Returns:
            The contrastive (BYOL) loss.
        """
        # concatenate view1 and view2 for symmetric loss
        feats = torch.cat([view1, view2], dim=0)
        # Freeze batch-norm running statistics so out-of-domain data does not
        # pollute them; restored after the forward passes.
        self.sed_student.tracking_bn_stats(False)
        self.sed_teacher.tracking_bn_stats(False)
        stu_proj, stu_pred = self.sed_student.contrastive_forward(feats, mask=mask, apply_mask=True)
        with torch.no_grad():
            tea_proj, tea_pred = self.sed_teacher.contrastive_forward(feats, mask=mask, apply_mask=False)

        stu_feats_std, tea_feats_std, byol_loss = self.contrastive_loss(
            stu_pred,
            tea_proj
        )
        self.sed_student.tracking_bn_stats(True)
        self.sed_teacher.tracking_bn_stats(True)
        self.log("selfsl/BYOL", byol_loss)
        self.log("selfsl/step", self.lr_schedulers()._step_count, prog_bar=False)
        self.log("selfsl/student_std", stu_feats_std, prog_bar=False)
        self.log("selfsl/teacher_std", tea_feats_std, prog_bar=False)

        return byol_loss

    def pseudo_label_rule(self, feats):
        """Select unlabelled clips whose teacher weak predictions are uncertain.

        A clip is kept when at least one class confidence falls strictly
        inside ``(psd_lo_thd, psd_hi_thd)`` — i.e. the teacher is neither
        confidently positive nor confidently negative for that class.

        Returns:
            Boolean tensor over clips: True = keep.
        """
        with torch.no_grad():
            self.sed_teacher.eval()
            _, weak_preds = self.sed_teacher(feats)
            self.sed_teacher.train()
        # Rule: confidence in (psd_lo_thd, psd_hi_thd) --> uncertain region.
        # BUGFIX: the original combined the comparisons with `|`, which every
        # value satisfies (anything is either < hi_thd or > lo_thd), making
        # the filter a no-op. `&` implements the stated uncertain-region rule.
        id_psd_labels = (weak_preds < self.psd_hi_thd) & (weak_preds > self.psd_lo_thd)
        id_psd_labels = id_psd_labels.sum(-1) > 0  # at least one class is in threshold region
        self.log("train-intp/psd_label_rate", id_psd_labels.float().mean().item(), on_epoch=True)
        return id_psd_labels

class TrainCLI(BaseCLI):
    """CLI wrapper that wires a ModelCheckpoint callback (with project
    defaults) into the parser before the model-invariant arguments."""

    def add_arguments_to_parser(self, parser: LightningArgumentParser) -> None:
        """Register the ModelCheckpoint callback and set its defaults."""
        parser.add_lightning_class_args(ModelCheckpoint, "model_checkpoint")
        # Track the best val/metric (lower is better), keep the 20 best
        # epoch checkpoints plus the latest one.
        parser.set_defaults(
            {
                "model_checkpoint.filename": "epoch{epoch}_metric{val/metric:.4f}",
                "model_checkpoint.monitor": "val/metric",
                "model_checkpoint.mode": "min",
                "model_checkpoint.every_n_epochs": 1,
                "model_checkpoint.save_top_k": 20,
                "model_checkpoint.auto_insert_metric_name": False,
                "model_checkpoint.save_last": True,
            }
        )

        self.add_model_invariant_arguments_to_parser(parser)

if __name__ == '__main__':
    # Silence library warnings so the training progress bars stay readable.
    import warnings

    warnings.filterwarnings("ignore")

    # Launch the Lightning CLI: trainer class is fixed, the datamodule is
    # chosen from config (subclass_mode_data), and the resolved config is
    # saved (overwriting any previous copy).
    cli = TrainCLI(
        IntplContrastiveTrainer,
        pl.LightningDataModule,
        save_config_callback=MySaveConfigCallback,
        save_config_kwargs={"overwrite": True},
        subclass_mode_data=True,
    )