"""This file only includes the train step
"""
import torch
import random

import torch.nn as nn

from .sed_core import SEDCoreModule
from .self_trainer import ByolLoss
from .data_augms import mixup
from copy import deepcopy

class SSSLTrainer(SEDCoreModule):
    """Semi-/self-supervised SED trainer.

    Combines an in-domain mean-teacher objective (``in_domain_step``) with an
    out-of-domain BYOL-style self-supervised objective (``out_domain_step``),
    driven by manual optimization in ``training_step``.
    """

    def __init__(self, *args, **kwargs):
        super(SSSLTrainer, self).__init__(*args, **kwargs)
        # Keep handles to the full student/teacher wrapper modules first ...
        self.encoder_st = self.sed_student
        self.encoder_tea = self.sed_teacher
        # ... then rebind sed_student/sed_teacher to the bare encoders so the
        # in-domain step forwards through the encoder directly.  Order matters:
        # the two assignments above must run before these rebinds.
        self.sed_student = self.sed_student.encoder
        self.sed_teacher = self.sed_teacher.encoder
        # Mean-teacher consistency loss between student and teacher outputs.
        self.semisup_loss = nn.MSELoss(reduction="mean")
        # NOTE(review): appears to mark the boundary between in-domain and
        # out-of-domain samples when "switch" normalization is configured
        # (0 otherwise); not read anywhere in this file — confirm downstream use.
        self.id_index = sum(self.opt_configs["batch_sizes"][:4]) if self.opt_configs["net"]["normalization"] == "switch" else 0
        # BYOL-style loss for the out-of-domain self-supervised step.
        self.selfsup_loss = ByolLoss(symmetric=True)
        # Manual optimization: Lightning will not call backward/step for us.
        self.automatic_optimization = False


    def on_before_zero_grad(self, *args, **kwargs):
        """Refresh the EMA teacher weights from the student.

        Invoked explicitly from ``training_step`` (manual optimization), just
        before gradients are zeroed.
        """
        ema_factor = self.opt_configs["ema_factor"]
        global_step = self.lr_schedulers()._step_count
        self.update_ema(ema_factor, global_step, self.encoder_st, self.encoder_tea)

    # def grad_compensate(self, id_weight, od_weight):
    #     for i, param_group in enumerate(self.optimizers().param_groups):
    #         if i == 0:
    #             pass
    #         elif i in [1, 2]:
    #             for p in param_group["params"]:
    #                 p.grad.mul_(1 / id_weight)
    #         else:
    #             for p in param_group["params"]:
    #                 p.grad.mul_(1 / od_weight)

    def freeze_cnn_grad(self, freeze_cnn_layer):
        """Zero the gradients of selected CNN layers (and optionally the RNN).

        Called between the out-of-domain and in-domain backward passes so the
        out-of-domain loss does not update the frozen layers.

        Args:
            freeze_cnn_layer: iterable of CNN layer indices whose parameter
                gradients are zeroed; the sentinel ``-1`` freezes the RNN.
        """
        if -1 in freeze_cnn_layer:
            self.sed_student.rnn.zero_grad()
            print("freeze RNN")
        for i in freeze_cnn_layer:
            if i == -1:
                continue
            print("freeze CNN layer {}".format(i))
            # NOTE(review): substring matching means e.g. layer 1 also matches
            # parameter names containing "10"; confirm the CNN's parameter
            # naming scheme makes this unambiguous.
            for n, p in self.encoder_st.encoder.cnn.named_parameters():
                # Guard: parameters that received no gradient have p.grad None,
                # and the old `p.grad.data.zero_()` would raise AttributeError.
                if str(i) in n and p.grad is not None:
                    p.grad.zero_()


    def training_step(self, batch, batch_idx):
        """Run one manual-optimization training step.

        Ordering is deliberate: the out-of-domain loss is backpropagated
        first, the gradients of the configured frozen CNN/RNN layers are
        zeroed for that loss only, and then the in-domain loss is
        backpropagated on top, followed by a single optimizer/scheduler step.
        """
        opt = self.optimizers()
        sch = self.lr_schedulers()
        # Manual optimization: invoke the EMA-teacher update hook explicitly.
        self.on_before_zero_grad()
        opt.zero_grad()
        # manual optimization
        feats, od_feat, labels, _, _ = batch
        # od_feat is a sequence of tensors; concatenate along the batch dim.
        od_feat = torch.cat(od_feat, dim=0)
        id_loss = self.in_domain_step(feats, labels[:len(feats)])
        od_loss = self.out_domain_step(od_feat)

        self.manual_backward(od_loss)   # Gradient computation w.r.t. od_loss
        # Zero the frozen layers' grads so od_loss does not update them; the
        # id_loss backward below still accumulates gradients into them.
        self.freeze_cnn_grad(self.opt_configs["freeze_cnn_layer"])
        self.manual_backward(id_loss)   # Gradient computation w.r.t. id_loss
        total_loss = id_loss + od_loss

        self.log("train/total_loss", total_loss)
        opt.step()
        sch.step()
        return

    def in_domain_step(self, feats, labels, mode="id"):
        """Supervised + mean-teacher consistency step on in-domain data.

        Args:
            feats: input feature batch; the batch dimension is laid out as
                [strongly labelled | weakly labelled | unlabelled] according
                to ``opt_configs["batch_sizes"]``.
            labels: strong (frame-level) label tensor aligned with ``feats``.
            mode: forwarded to the student/teacher encoder forward pass.

        Returns:
            The total supervised loss.  The teacher-consistency term is
            computed and logged but deliberately excluded from the return
            value (see the note below).
        """
        batch_num = feats.shape[0]
        labels = labels[:batch_num]
        # Batch layout boundaries: [strong | weak | unlabelled].
        indx_strong = sum(self.opt_configs["batch_sizes"][:2])
        indx_weak = self.opt_configs["batch_sizes"][2]

        strong_mask = torch.zeros(batch_num).to(feats).bool()
        weak_mask = torch.zeros(batch_num).to(feats).bool()
        strong_mask[:indx_strong] = 1
        weak_mask[indx_strong : indx_weak + indx_strong] = 1

        # Derive clip-level (weak) labels: a class is present if it is active
        # in any frame of the clip.
        labels_weak = (torch.sum(labels[weak_mask], -1) > 0).float()

        # Mixup augmentation, applied to roughly half of the batches.
        if random.random() < 0.5:
            feats[weak_mask], labels_weak = mixup(feats[weak_mask], labels_weak)
            feats[strong_mask], labels[strong_mask] = mixup(feats[strong_mask], labels[strong_mask])

        # Student forward and supervised losses.
        strong_preds_student, weak_preds_student = self.sed_student(feats, mode=mode)
        # supervised loss on strong labels
        loss_strong = self.supervised_loss(strong_preds_student[strong_mask], labels[strong_mask])
        # supervised loss on weakly labelled
        loss_weak = self.supervised_loss(weak_preds_student[weak_mask], labels_weak)
        # total supervised loss
        tot_loss_supervised = loss_strong + loss_weak

        # Teacher forward is evaluation-only (EMA weights, no gradients).
        with torch.no_grad():
            strong_preds_teacher, weak_preds_teacher = self.sed_teacher(feats, mode=mode)
            loss_strong_teacher = self.supervised_loss(strong_preds_teacher[strong_mask], labels[strong_mask])
            loss_weak_teacher = self.supervised_loss(weak_preds_teacher[weak_mask], labels_weak)

        # Consistency weight ramps up with the scheduler's warmup factor.
        warmup = self.lr_schedulers()._get_scaling_factor()
        weight = self.opt_configs["const_max"] * warmup

        strong_self_sup_loss = self.semisup_loss(strong_preds_student, strong_preds_teacher.detach())
        weak_self_sup_loss = self.semisup_loss(weak_preds_student, weak_preds_teacher.detach())

        tot_self_loss = (strong_self_sup_loss + weak_self_sup_loss) * weight
        step_num = self.lr_schedulers()._step_count
        # NOTE(review): the consistency term is intentionally excluded from
        # the returned loss (it is still computed and logged above/below).
        tot_loss = tot_loss_supervised  # + tot_self_loss

        self.log("train/student/loss_strong", loss_strong)
        self.log("train/student/loss_weak", loss_weak)
        self.log("train/teacher/loss_strong", loss_strong_teacher)
        self.log("train/teacher/loss_weak", loss_weak_teacher)
        self.log("train/step", step_num, prog_bar=True)
        self.log("train/student/weak_self_sup_loss", weak_self_sup_loss)
        self.log("train/student/strong_self_sup_loss", strong_self_sup_loss)
        self.log("train/student/tot_self_loss", tot_self_loss, prog_bar=True)
        self.log("train/weight", weight)
        self.log("train/student/tot_supervised", tot_loss_supervised, prog_bar=True)
        return tot_loss
    
    def out_domain_step(self, feats, mode="od", unfreeze_cnn_layer=0):
        """Self-supervised BYOL-style step on out-of-domain features.

        The student encoder runs with gradients; the teacher forward is
        gradient-free.  Returns the frame-level self-supervised loss.
        """
        student_frames = self.encoder_st(feats, mode="student", encoder_mode=mode)
        with torch.no_grad():
            teacher_frames = self.encoder_tea(feats, mode="teacher", encoder_mode=mode)
        frame_loss, student_std, teacher_std = self.selfsup_loss(student_frames, teacher_frames)

        for tag, value in (
            ("selfsl/train_loss", frame_loss),
            ("selfsl/std_std", student_std),
            ("selfsl/std_tea", teacher_std),
        ):
            self.log(tag, value, prog_bar=True)
        return frame_loss