"""This file only includes the train step
"""
import torch
import random

import torch.nn as nn

from .sed_core import SEDCoreModule
from .data_augms import mixup



class SSSLTrainer(SEDCoreModule):
    """Student/teacher (mean-teacher style) semi-supervised SED trainer.

    Combines supervised losses on strongly and weakly labelled clips with an
    MSE consistency loss between student and teacher predictions on the
    unlabelled clips.  Each training batch is assumed to be a concatenation
    along dim 0 in the order ``[strong | weak | unlabelled]``, with subset
    sizes given by ``opt_configs["batch_sizes"]``.
    """

    def __init__(self, *args, **kwargs):
        super(SSSLTrainer, self).__init__(*args, **kwargs)
        # Consistency (self-supervised) loss between student and teacher.
        self.selfsup_loss = nn.MSELoss(reduction="mean")
        # Boundary index used with "switch" normalization; 0 disables it.
        # NOTE(review): not referenced in this module — presumably consumed
        # by the network / base class. Confirm before removing.
        self.id_index = (
            sum(self.opt_configs["batch_sizes"][:4])
            if self.opt_configs["net"]["normalization"] == "switch"
            else 0
        )

    def training_step(self, batch, batch_idx):
        """Run one semi-supervised training step.

        Args:
            batch: tuple ``(feats, labels, padded_indxs, _)`` where ``feats``
                is the batched input features and ``labels`` the frame-level
                (strong) targets. ``padded_indxs`` is unused here.
            batch_idx: index of the batch within the epoch (unused).

        Returns:
            Scalar total loss: supervised (strong + weak) plus the warmup-
            weighted student/teacher consistency loss.
        """
        feats, labels, padded_indxs, _ = batch

        # Boundaries of the [strong | weak | unlabelled] concatenation.
        indx_strong = sum(self.opt_configs["batch_sizes"][:2])
        indx_weak = self.opt_configs["batch_sizes"][2]

        # Boolean masks selecting each subset of the batch.
        batch_num = feats.shape[0]
        strong_mask = torch.zeros(batch_num, dtype=torch.bool, device=feats.device)
        weak_mask = torch.zeros(batch_num, dtype=torch.bool, device=feats.device)
        unlabelled_mask = torch.zeros(batch_num, dtype=torch.bool, device=feats.device)
        strong_mask[:indx_strong] = True
        weak_mask[indx_strong : indx_weak + indx_strong] = True
        unlabelled_mask[indx_weak + indx_strong :] = True

        # Derive clip-level (weak) labels: a class is active if it is active
        # in any frame of the strong labels.
        labels_weak = (torch.sum(labels[weak_mask], -1) > 0).float()

        # In-batch mixup augmentation on the labelled subsets, applied with
        # probability 0.5. The unlabelled subset is left untouched.
        if random.random() < 0.5:
            feats[weak_mask], labels_weak = mixup(feats[weak_mask], labels_weak)
            feats[strong_mask], labels[strong_mask] = mixup(feats[strong_mask], labels[strong_mask])

        # Student forward pass and supervised losses.
        strong_preds_student, weak_preds_student = self.sed_student(feats)
        loss_strong = self.supervised_loss(strong_preds_student[strong_mask], labels[strong_mask])
        loss_weak = self.supervised_loss(weak_preds_student[weak_mask], labels_weak)
        tot_loss_supervised = loss_strong + loss_weak

        # Teacher forward pass; its supervised losses are logged only and
        # must not produce gradients.
        with torch.no_grad():
            strong_preds_teacher, weak_preds_teacher = self.sed_teacher(feats)
            loss_strong_teacher = self.supervised_loss(strong_preds_teacher[strong_mask], labels[strong_mask])
            loss_weak_teacher = self.supervised_loss(weak_preds_teacher[weak_mask], labels_weak)

        # Ramp the consistency weight up with the scheduler's warmup factor.
        # NOTE(review): relies on the private scheduler APIs
        # `_get_scaling_factor` / `_step_count` — fragile across versions.
        warmup = self.lr_schedulers()._get_scaling_factor()
        weight = self.opt_configs["const_max"] * warmup

        # Student/teacher consistency on the unlabelled clips; teacher
        # predictions are detached so only the student receives gradients.
        strong_self_sup_loss = self.selfsup_loss(strong_preds_student[unlabelled_mask], strong_preds_teacher[unlabelled_mask].detach())
        weak_self_sup_loss = self.selfsup_loss(weak_preds_student[unlabelled_mask], weak_preds_teacher[unlabelled_mask].detach())

        tot_self_loss = (strong_self_sup_loss + weak_self_sup_loss) * weight

        step_num = self.lr_schedulers()._step_count

        tot_loss = tot_loss_supervised + tot_self_loss

        self.log("train/student/loss_strong", loss_strong)
        self.log("train/student/loss_weak", loss_weak)
        self.log("train/teacher/loss_strong", loss_strong_teacher)
        self.log("train/teacher/loss_weak", loss_weak_teacher)
        self.log("train/step", step_num, prog_bar=True)
        self.log("train/student/weak_self_sup_loss", weak_self_sup_loss)
        self.log("train/student/strong_self_sup_loss", strong_self_sup_loss)
        self.log("train/student/tot_self_loss", tot_self_loss, prog_bar=True)
        self.log("train/weight", weight)
        self.log("train/student/tot_supervised", tot_loss_supervised, prog_bar=True)

        return tot_loss