"""This trainer includes the fundamental validation/test steps of SED
"""
import torch
import random
import pytorch_lightning as pl

from trainer.augms.mixup import mixup
from trainer.SEDBaseTrainer import SEDBaseTrainer
from trainer.utils.base_cli import BaseCLI
from pytorch_lightning.cli import LightningArgumentParser
from pytorch_lightning.callbacks import ModelCheckpoint
from trainer.utils.save_config_callback import MySaveConfigCallback

class BaselineTrainer(SEDBaseTrainer):
    """Mean-teacher SED trainer implementing the baseline training step.

    Combines supervised losses on strongly/weakly labelled clips with a
    weighted consistency (self-supervised) loss between the student and the
    teacher predictions. Validation/test logic lives in ``SEDBaseTrainer``.
    """

    def training_step(self, batch, batch_idx):
        """Run one training step and return the total loss.

        Args:
            batch: tuple ``(feats, labels, params)``. ``feats`` is an
                indexable collection of input tensors (only ``feats[0]``,
                the CRNN input, is augmented); ``labels`` holds frame-level
                targets; ``params`` is a list of per-clip dicts carrying a
                ``"label_type"`` entry (assumed to contain exactly one of
                "strong", "weak" or "unlabeled" per clip — TODO confirm).
            batch_idx: batch index (required by Lightning, unused here).

        Returns:
            Scalar tensor: supervised loss + warmup-weighted consistency loss.

        Raises:
            ValueError: if the strong/weak/unlabelled masks do not cover the
                whole batch (i.e. the label types do not partition it).
        """
        feats, labels, params = batch

        # Partition the batch by annotation type.
        label_types = [x["label_type"] for x in params]
        strong_mask = torch.tensor(
            ["strong" in x for x in label_types], device=labels.device, dtype=torch.bool
        )
        weak_mask = torch.tensor(
            ["weak" in x for x in label_types], device=labels.device, dtype=torch.bool
        )
        unlabelled_mask = torch.tensor(
            ["unlabeled" in x for x in label_types], device=labels.device, dtype=torch.bool
        )
        # Explicit check instead of `assert`: asserts are stripped under -O,
        # which would let a malformed batch pass silently.
        n_covered = int(strong_mask.sum() + weak_mask.sum() + unlabelled_mask.sum())
        if n_covered != labels.shape[0]:
            raise ValueError(
                f"strong: {int(strong_mask.sum())}, weak: {int(weak_mask.sum())}, "
                f"unlabelled: {int(unlabelled_mask.sum())}, total: {labels.shape[0]}"
            )

        # Derive clip-level (weak) targets: a class is present if it is active
        # in any frame of the clip.
        labels_weak = (torch.sum(labels[weak_mask], -1) > 0).float()

        # Mixup augmentation applied to the whole batch with probability 0.5,
        # on the CRNN inputs (feats[0]) only. The slice assignments update the
        # feature tensor in place; unlabelled targets are discarded.
        if random.random() < 0.5:
            feats[0][weak_mask], labels_weak = mixup(feats[0][weak_mask], labels_weak)
            feats[0][strong_mask], labels[strong_mask] = mixup(feats[0][strong_mask], labels[strong_mask])
            feats[0][unlabelled_mask], _ = mixup(feats[0][unlabelled_mask], labels[unlabelled_mask])

        # Student forward pass and supervised losses on the labelled subsets.
        strong_preds_student, weak_preds_student = self.sed_student(feats)
        loss_strong = self.supervised_loss(strong_preds_student[strong_mask], labels[strong_mask])
        loss_weak = self.supervised_loss(weak_preds_student[weak_mask], labels_weak)
        tot_loss_supervised = loss_strong + loss_weak

        # Teacher forward under no_grad: the teacher is not trained by
        # backprop (presumably EMA-updated elsewhere — TODO confirm); its
        # losses are computed for logging only.
        with torch.no_grad():
            strong_preds_teacher, weak_preds_teacher = self.sed_teacher(feats)
            loss_strong_teacher = self.supervised_loss(strong_preds_teacher[strong_mask], labels[strong_mask])
            loss_weak_teacher = self.supervised_loss(weak_preds_teacher[weak_mask], labels_weak)

        # Hoisted: the scheduler object is needed twice below.
        # NOTE(review): relies on private scheduler internals
        # (_get_scaling_factor, _step_count) — confirm against the custom
        # scheduler implementation this project uses.
        scheduler = self.lr_schedulers()
        warmup = scheduler._get_scaling_factor()
        weight = 2 * warmup

        # Consistency losses on the FULL batch (all label types): student
        # predictions are pulled toward the detached teacher predictions,
        # ramped up by the warmup factor.
        strong_self_sup_loss = self.unsupervised_loss(strong_preds_student, strong_preds_teacher.detach())
        weak_self_sup_loss = self.unsupervised_loss(weak_preds_student, weak_preds_teacher.detach())
        tot_self_loss = (strong_self_sup_loss + weak_self_sup_loss) * weight

        step_num = scheduler._step_count
        tot_loss = tot_loss_supervised + tot_self_loss

        self.log("train/student/loss_strong", loss_strong)
        self.log("train/student/loss_weak", loss_weak)
        self.log("train/teacher/loss_strong", loss_strong_teacher)
        self.log("train/teacher/loss_weak", loss_weak_teacher)
        self.log("train/step", step_num, prog_bar=False)
        self.log("train/student/weak_self_sup_loss", weak_self_sup_loss)
        self.log("train/student/strong_self_sup_loss", strong_self_sup_loss)
        self.log("train/student/tot_self_loss", tot_self_loss, prog_bar=True)
        self.log("train/weight", weight)
        self.log("train/student/tot_supervised", tot_loss_supervised, prog_bar=True)

        return tot_loss


class TrainCLI(BaseCLI):
    """CLI entry point for the baseline trainer; wires up checkpoint defaults."""

    def add_arguments_to_parser(self, parser: LightningArgumentParser) -> None:
        # Expose ModelCheckpoint configuration under the "model_checkpoint" key.
        parser.add_lightning_class_args(ModelCheckpoint, "model_checkpoint")

        # Checkpointing policy: checkpoint every epoch, keep them all, and
        # track the (higher-is-better) validation metric.
        parser.set_defaults({
            "model_checkpoint.filename": "epoch{epoch}_metric{val/metric:.4f}",
            "model_checkpoint.monitor": "val/metric",
            "model_checkpoint.mode": "max",
            "model_checkpoint.every_n_epochs": 1,
            "model_checkpoint.save_top_k": -1,  # -1 => never prune checkpoints
            "model_checkpoint.auto_insert_metric_name": False,
            "model_checkpoint.save_last": True,
        })

        self.add_model_invariant_arguments_to_parser(parser)

if __name__ == "__main__":
    import warnings

    # Silence library warnings that would otherwise clutter the training log.
    warnings.filterwarnings("ignore")

    cli = TrainCLI(
        BaselineTrainer,
        pl.LightningDataModule,
        save_config_callback=MySaveConfigCallback,
        save_config_kwargs={"overwrite": True},
        subclass_mode_data=True,
    )