"""This trainer includes the fundamental validation/test steps of SED
"""
import torch
import random
import pytorch_lightning as pl
import torch.nn as nn
import loss
from trainer.augms.mixup import mixup
from trainer.SEDBaseTrainer import SEDBaseTrainer
from trainer.utils.base_cli import BaseCLI
from pytorch_lightning.cli import LightningArgumentParser
from pytorch_lightning.callbacks import ModelCheckpoint
from trainer.utils.save_config_callback import MySaveConfigCallback
from scheduler.CosDown import WeightScheduler

class DomainAgnosticTrainer(SEDBaseTrainer):
    """SED trainer mixing in-domain mean-teacher training with an
    out-of-domain contrastive objective.

    Each training batch is a pair ``(id_batch, od_batch)``. The in-domain
    half follows the standard SED recipe (supervised BCE on strong/weak
    labels plus a mean-teacher consistency loss); the out-of-domain half
    applies a BYOL-style contrastive loss between two augmented views.
    The two objectives are interpolated by a scheduled weight.
    """

    def __init__(
        self, 
        contrastive_loss: nn.Module,
        contrastive_weight: WeightScheduler,
        psd_label_thd: float = 0.9,
        *args,
        **kwargs
        ):
        """
        Args:
            contrastive_loss: module returning
                ``(student_std, teacher_std, loss)`` for a student
                prediction / teacher projection pair (see ``out_domain_step``).
            contrastive_weight: schedule producing the interpolation weight
                between in-domain and out-of-domain losses.
            psd_label_thd: confidence threshold used by ``pseudo_label``.
            *args, **kwargs: forwarded unchanged to ``SEDBaseTrainer``.
        """
        super().__init__(*args, **kwargs)
        self.contrastive_loss = contrastive_loss
        self.contrastive_weight = contrastive_weight
        self.psd_label_thd = psd_label_thd

    def training_step(self, batch, batch_idx):
        """Compute the interpolated total loss for one paired batch."""
        id_batch, od_batch = batch
        feats, labels, params = id_batch
        # od_feats / od_params are part of the batch contract but unused here.
        od_feats, od_view1, od_view2, contrastive_mask, od_params = od_batch

        # Partition the in-domain batch by label availability.
        label_types = [x["label_type"] for x in params]
        strong_mask = torch.tensor(["strong" in x for x in label_types], device=feats.device).bool()
        weak_mask = torch.tensor(["weak" in x for x in label_types], device=feats.device).bool()
        unlabelled_mask = torch.tensor(["unlabeled" in x for x in label_types], device=feats.device).bool()
        # Every sample must belong to exactly one of the three categories.
        assert sum(strong_mask) + sum(weak_mask) + sum(unlabelled_mask) == feats.shape[0], \
            f"strong: {sum(strong_mask)}, weak: {sum(weak_mask)}, unlabelled: {sum(unlabelled_mask)}, total: {feats.shape[0]}"

        loss_sup, loss_id = self.in_domain_step(
            feats=feats,
            labels=labels,
            strong_mask=strong_mask,
            weak_mask=weak_mask,
            unlabelled_mask=unlabelled_mask
        )
        loss_od = self.out_domain_step(
            view1=od_view1,
            view2=od_view2,
            mask=contrastive_mask
        )

        # Squared scheduled weight trades off the mean-teacher loss (early)
        # against the contrastive loss (late). The 2x / 0.5x factors rescale
        # the two terms; only their ratio matters to the optimum.
        intp_weight = self.contrastive_weight.get_weight(self.lr_schedulers()._step_count) ** 2
        loss_id = loss_id * 2 * (1 - intp_weight)
        loss_od = loss_od * 0.5 * intp_weight

        self.log("train/intp_weight", intp_weight, prog_bar=False)
        tot_loss = loss_sup + loss_id + loss_od
        self.log("train-intp/MeanTeacher", loss_id, prog_bar=True)
        self.log("train-intp/Contrastive", loss_od, prog_bar=True)
        self.log("train-intp/Supervised", loss_sup, prog_bar=True)
        self.log("train-intp/Total", tot_loss, prog_bar=True)
        return tot_loss

    def in_domain_step(self, feats, labels, strong_mask, weak_mask, unlabelled_mask):
        """Supervised BCE + mean-teacher consistency loss on in-domain data.

        Returns:
            (tot_loss_supervised, tot_self_loss): the supervised loss and
            the (unweighted) student/teacher consistency loss.
        """
        # Clip-level targets for the weakly labelled subset: a class is
        # active if it is active in any frame.
        labels_weak = (torch.sum(labels[weak_mask], -1) > 0).float()

        # Mixup each subset independently. NOTE: this mutates `feats`
        # (and the strong rows of `labels`) in place via masked assignment.
        feats[weak_mask], labels_weak = mixup(feats[weak_mask], labels_weak)
        feats[strong_mask], labels[strong_mask] = mixup(feats[strong_mask], labels[strong_mask])
        feats[unlabelled_mask], _ = mixup(feats[unlabelled_mask], labels[unlabelled_mask])

        # Student forward pass: frame-level and clip-level predictions.
        strong_preds_student, weak_preds_student = self.sed_student(feats)
        # Supervised loss on strongly labelled samples (frame level).
        loss_strong = self.supervised_loss(strong_preds_student[strong_mask], labels[strong_mask])
        # Supervised loss on weakly labelled samples (clip level).
        loss_weak = self.supervised_loss(weak_preds_student[weak_mask], labels_weak)
        # Weak loss is down-weighted relative to the strong loss.
        tot_loss_supervised = loss_strong + loss_weak * 0.5

        # Teacher losses are computed for monitoring only (no gradients).
        with torch.no_grad():
            strong_preds_teacher, weak_preds_teacher = self.sed_teacher(feats)
            loss_strong_teacher = self.supervised_loss(strong_preds_teacher[strong_mask], labels[strong_mask])
            loss_weak_teacher = self.supervised_loss(weak_preds_teacher[weak_mask], labels_weak)

        # Warmup factor from the LR scheduler ramps the consistency weight
        # (logged only; the caller applies its own interpolation weight).
        warmup = self.lr_schedulers()._get_scaling_factor()
        weight = 2 * warmup
        strong_self_sup_loss = self.unsupervised_loss(strong_preds_student, strong_preds_teacher.detach())
        weak_self_sup_loss = self.unsupervised_loss(weak_preds_student, weak_preds_teacher.detach())

        tot_self_loss = (strong_self_sup_loss + weak_self_sup_loss)

        step_num = self.lr_schedulers()._step_count

        self.log("train/student/loss_strong", loss_strong)
        self.log("train/student/loss_weak", loss_weak)
        self.log("train/teacher/loss_strong", loss_strong_teacher)
        self.log("train/teacher/loss_weak", loss_weak_teacher)
        self.log("train/step", step_num, prog_bar=False)
        self.log("train/student/weak_self_sup_loss", weak_self_sup_loss)
        self.log("train/student/strong_self_sup_loss", strong_self_sup_loss)
        self.log("train/student/tot_self_loss", tot_self_loss * weight, prog_bar=True)
        self.log("train/weight", weight)
        self.log("train/student/tot_supervised", tot_loss_supervised, prog_bar=True)

        return tot_loss_supervised, tot_self_loss

    def pseudo_label(self, feats):
        """Mask of samples the teacher labels with high confidence.

        A sample is considered in-domain when the teacher predicts at least
        one class above ``self.psd_label_thd``.
        """
        self.sed_teacher.eval()
        with torch.no_grad():
            # BUGFIX: sed_teacher returns a (strong_preds, weak_preds)
            # tuple (see in_domain_step); the previous code compared the
            # raw tuple against the float threshold, which raises TypeError.
            # Threshold the clip-level (weak) predictions instead.
            # NOTE(review): assumes weak preds are (batch, n_classes) —
            # confirm against the model's forward().
            _, weak_preds = self.sed_teacher(feats)
        self.sed_teacher.train()
        pseudo_labels = weak_preds > self.psd_label_thd
        in_domain_mask = torch.sum(pseudo_labels, -1) > 0
        return in_domain_mask

    def out_domain_step(self, view1, view2, mask):
        """BYOL-style contrastive loss between two views of OOD audio."""
        # Concatenate the two views so the loss sees both directions.
        feats = torch.cat([view1, view2], dim=0)
        # Freeze batch-norm running statistics: OOD data must not pollute
        # the statistics used for in-domain SED inference.
        self.sed_student.tracking_bn_stats(False)
        self.sed_teacher.tracking_bn_stats(False)
        # Student gets the mask applied; teacher sees the unmasked input.
        stu_proj, stu_pred = self.sed_student.contrastive_forward(feats, mask=mask, apply_mask=True)
        with torch.no_grad():
            # Only the teacher projection is used as the regression target.
            tea_proj, tea_pred = self.sed_teacher.contrastive_forward(feats, mask=mask, apply_mask=False)

        stu_feats_std, tea_feats_std, byol_loss = self.contrastive_loss(
            stu_pred,
            tea_proj
        )
        self.sed_student.tracking_bn_stats(True)
        self.sed_teacher.tracking_bn_stats(True)
        self.log("selfsl/BYOL", byol_loss)
        self.log("selfsl/step", self.lr_schedulers()._step_count, prog_bar=False)
        self.log("selfsl/student_std", stu_feats_std, prog_bar=False)
        self.log("selfsl/teacher_std", tea_feats_std, prog_bar=False)

        return byol_loss
        

class TrainCLI(BaseCLI):
    """CLI entry point that wires a ModelCheckpoint callback with project defaults."""

    def add_arguments_to_parser(self, parser: LightningArgumentParser) -> None:
        # Register ModelCheckpoint under the "model_checkpoint" namespace and
        # pre-fill its options; any of them can still be overridden from the
        # YAML config or the command line.
        parser.add_lightning_class_args(ModelCheckpoint, "model_checkpoint")
        parser.set_defaults({
            "model_checkpoint.filename": "epoch{epoch}_metric{val/metric:.4f}",
            "model_checkpoint.monitor": "val/metric",
            "model_checkpoint.mode": "min",
            "model_checkpoint.every_n_epochs": 1,
            "model_checkpoint.save_top_k": 10,
            "model_checkpoint.auto_insert_metric_name": False,
            "model_checkpoint.save_last": True,
        })

        self.add_model_invariant_arguments_to_parser(parser)

if __name__ == '__main__':
    # Silence library warnings to keep the training console readable.
    import warnings

    warnings.filterwarnings("ignore")

    # Building the CLI parses args/config and launches the requested
    # subcommand (fit/validate/test).
    cli = TrainCLI(
        DomainAgnosticTrainer,
        pl.LightningDataModule,
        save_config_callback=MySaveConfigCallback,
        save_config_kwargs={'overwrite': True},
        subclass_mode_data=True,
    )