"""This trainer includes the fundamental validation/test steps of SED
"""
import torch
import scheduler
import pytorch_lightning as pl
import torch.nn as nn
import loss
from trainer.augms.mixup import mixup
from trainer.SEDBaseTrainer import SEDBaseTrainer
from trainer.utils.base_cli import BaseCLI
from pytorch_lightning.cli import LightningArgumentParser
from pytorch_lightning.callbacks import ModelCheckpoint
from trainer.utils.save_config_callback import MySaveConfigCallback
from scheduler.CosDown import WeightScheduler

class CLTrainer(SEDBaseTrainer):
    """Contrastive (BYOL-style) self-supervised trainer.

    Optimizes the student network against a momentum teacher on two
    augmented views of out-of-domain audio. Validation is a deliberate
    no-op; ``val/metric`` is logged as ``global_step`` so checkpoint
    selection under ``mode="max"`` simply keeps the most recent epochs.
    """

    def __init__(
        self,
        contrastive_loss: nn.Module,
        contrastive_weight: WeightScheduler,
        *args,
        **kwargs,
    ):
        """
        Args:
            contrastive_loss: module called as ``loss(student_pred, teacher_proj)``
                returning ``(student_std, teacher_std, loss_value)``.
            contrastive_weight: schedule for the contrastive-loss weight.
                NOTE(review): stored but never read — ``training_step`` scales
                the loss by a hard-coded 0.5 next to a commented-out
                ``* intp_weight``; confirm whether this scheduler was meant
                to replace that constant.
            *args, **kwargs: forwarded to ``SEDBaseTrainer``.
        """
        super().__init__(*args, **kwargs)
        self.contrastive_loss = contrastive_loss
        self.contrastive_weight = contrastive_weight

    def configure_optimizers(self):
        """Build the optimizer (two param groups) and a per-step LR scheduler.

        Student parameters are split into projector/predictor weights
        (names containing "proj" or "pred") and everything else, so their
        learning rates can be tuned independently.
        """
        named = list(self.sed_student.named_parameters())
        backbone_params = [p for n, p in named if ("proj" not in n) and ("pred" not in n)]
        proj_params = [p for n, p in named if ("proj" in n) or ("pred" in n)]
        base_lr = self.opts["optimizer_params"]["lr"]
        param_groups = [
            {"params": backbone_params, "lr": base_lr},
            # NOTE(review): a previous comment claimed a *lower* lr for the
            # projector/predictor group, but the same base lr is applied to
            # both groups — adjust here if a reduced lr was intended.
            {"params": proj_params, "lr": base_lr},
        ]
        # NOTE(review): optimizer_params other than "lr" (e.g. weight_decay,
        # betas) are not forwarded to the optimizer constructor — confirm
        # this is intentional.
        opt = getattr(torch.optim, self.opts["optimizer"])(param_groups)
        sch = getattr(scheduler, self.schs["scheduler"])(opt, **self.schs["scheduler_params"])

        return {"optimizer": opt, "lr_scheduler": {"scheduler": sch, "interval": "step"}}

    def training_step(self, batch, batch_idx):
        """One optimization step on the out-of-domain (contrastive) batch.

        The in-domain half of ``batch`` is currently unused by this trainer.
        """
        _id_batch, od_batch = batch
        _od_feats, od_view1, od_view2, contrastive_mask, _od_params = od_batch
        loss_od = self.out_domain_step(
            view1=od_view1,
            view2=od_view2,
            mask=contrastive_mask,
        )
        # Hard-coded weight; see NOTE on contrastive_weight in __init__.
        loss_od = loss_od * 0.5  # * intp_weight
        self.log("train-intp/Contrastive", loss_od, prog_bar=True)
        return loss_od

    def out_domain_step(self, view1, view2, mask):
        """Compute the BYOL loss between student predictions and teacher projections.

        Both views are concatenated so the loss is evaluated symmetrically
        in a single forward pass. Batch-norm statistic tracking is frozen
        for the duration of the contrastive forward and restored afterwards.
        """
        # Concatenate view1 and view2 for the symmetric loss.
        feats = torch.cat([view1, view2], dim=0)
        # Stop tracking batch-norm statistics during the contrastive pass.
        self.sed_student.tracking_bn_stats(False)
        self.sed_teacher.tracking_bn_stats(False)
        # Student gets the mask applied; teacher sees the unmasked input.
        stu_proj, stu_pred = self.sed_student.contrastive_forward(feats, mask=mask, apply_mask=True)
        with torch.no_grad():  # teacher is not trained by backprop
            tea_proj, tea_pred = self.sed_teacher.contrastive_forward(feats, mask=mask, apply_mask=False)

        stu_feats_std, tea_feats_std, byol_loss = self.contrastive_loss(
            stu_pred,
            tea_proj,
        )
        self.sed_student.tracking_bn_stats(True)
        self.sed_teacher.tracking_bn_stats(True)
        self.log("selfsl/BYOL", byol_loss)
        self.log("selfsl/step", self.lr_schedulers()._step_count, prog_bar=False)
        # Feature stds are logged to monitor for representation collapse.
        self.log("selfsl/student_std", stu_feats_std, prog_bar=False)
        self.log("selfsl/teacher_std", tea_feats_std, prog_bar=False)

        return byol_loss

    def validation_step(self, batch, batch_idx):
        # No real validation in CLTrainer; log global_step as the monitored
        # metric so ModelCheckpoint (mode="max") keeps the latest epochs.
        self.log("val/metric", self.global_step, prog_bar=True)

    def on_validation_epoch_end(self):
        # No validation aggregation in CLTrainer.
        pass

class TrainCLI(BaseCLI):
    """CLI wiring for CLTrainer: registers checkpointing defaults."""

    def add_arguments_to_parser(self, parser: LightningArgumentParser) -> None:
        """Add ModelCheckpoint arguments plus the model-invariant ones.

        ``val/metric`` is logged as ``global_step`` by the trainer, so under
        ``mode="max"`` the most recent checkpoints rank highest.
        """
        parser.add_lightning_class_args(ModelCheckpoint, "model_checkpoint")
        model_checkpoint_defaults = {
            "model_checkpoint.filename": "epoch{epoch}_metric{val/metric:.4f}",
            "model_checkpoint.monitor": "val/metric",
            "model_checkpoint.mode": "max",
            "model_checkpoint.every_n_epochs": 1,
            # Keep the 5 best (i.e. most recent) checkpoints; use -1 to save all.
            "model_checkpoint.save_top_k": 5,
            "model_checkpoint.auto_insert_metric_name": False,
            "model_checkpoint.save_last": True,
        }
        parser.set_defaults(model_checkpoint_defaults)

        self.add_model_invariant_arguments_to_parser(parser)

if __name__ == '__main__':
    # Silence library warnings when launched directly as a script.
    import warnings

    warnings.filterwarnings("ignore")

    # Build and run the CLI: CLTrainer as the model, any LightningDataModule
    # subclass as the data module (selected via config), with the config
    # snapshot callback allowed to overwrite a previous run's copy.
    cli = TrainCLI(
        CLTrainer,
        pl.LightningDataModule,
        save_config_callback=MySaveConfigCallback,
        save_config_kwargs={'overwrite': True},
        subclass_mode_data=True,
    )