"""This file only includes the train step
"""
import torch
import random

import torch.nn as nn
import torch.nn.functional as F

from .sed_core import SEDCoreModule
from .data_augms import mixup
from .losses.temporal_contrast_loss import TemporalContrastiveLoss
from copy import deepcopy


class DomainAgnosticTrainer(SEDCoreModule):
    """Semi-supervised sound-event-detection trainer.

    Each training step combines three signals:
      * a supervised loss on strongly/weakly labelled in-domain clips,
      * a mean-teacher MSE consistency term (computed and logged, but
        currently excluded from the total loss — see ``training_step``),
      * a temporal-contrastive self-supervised loss on two augmented
        out-of-domain views.
    The teacher weights track the student via EMA (``on_before_zero_grad``).
    """

    def __init__(self, *args, **kwargs):
        """Forward all args to ``SEDCoreModule`` and re-wire encoder handles.

        After ``super().__init__`` builds ``sed_student``/``sed_teacher``,
        the full modules are kept as ``encoder_st``/``encoder_tea`` and
        ``sed_student``/``sed_teacher`` are rebound to their inner
        ``.encoder`` so the in-domain passes call the encoders directly.
        """
        super(DomainAgnosticTrainer, self).__init__(*args, **kwargs)
        self.encoder_st = self.sed_student   # full student module (EMA source; used in OD step)
        self.encoder_tea = self.sed_teacher  # full teacher module (EMA target)
        self.sed_student = self.sed_student.encoder
        self.sed_teacher = self.sed_teacher.encoder
        self.semisup_loss = nn.MSELoss(reduction="mean")
        # assumes batch_sizes[3] is the unlabeled-batch size — TODO confirm against config
        self.unlabeled_indx = self.opt_configs["batch_sizes"][3]
        self.selfsup_loss = TemporalContrastiveLoss(cnst_win=21) # TemporalContrastiveLoss(cnst_win=11)
        self.automatic_optimization = True
        # Confidence thresholds; currently only referenced by the
        # commented-out pseudo_label_rule below.
        self.high_confidence = 0.9
        self.low_confidence = 0.1

    # def freeze_cnn_grad(self, freeze_cnn_layer):
    #     if -1 in freeze_cnn_layer:
    #         self.sed_student.rnn.zero_grad()

    #     for i in freeze_cnn_layer:
    #         if i == -1:
    #             continue
    #         for n, p in self.encoder_st.encoder.cnn.named_parameters():
    #             if str(i) in n:
    #                 p.grad.data.zero_()
                    
    def on_before_zero_grad(self, *args, **kwargs):
        """Lightning hook: refresh the EMA teacher just before grads are zeroed.

        NOTE(review): reads the scheduler's private ``_step_count`` attribute —
        this relies on PyTorch internals; re-verify on scheduler upgrades.
        """
        # update EMA teacher
        self.update_ema(
            self.opt_configs["ema_factor"],
            self.lr_schedulers()._step_count,
            self.encoder_st,
            self.encoder_tea,
        )

    def training_step(self, batch, batch_idx):
        """One optimization step over a (supervised, unsupervised) batch pair.

        ``batch`` unpacks as:
          * sup_input:   (features, strong labels, _, filenames)
          * unsup_input: (features, (aug view 1, aug view 2, _), _, _,
                          filenames, in-domain boolean flags)

        Returns the scalar total loss (supervised + ramped self-supervised).
        """
        sup_input, unsup_input = batch
        sup_feats, sup_labels, _, sup_filenames = sup_input
        unsup_feats, (unsup_view_1, unsup_view_2, _), _, _, od_filenames, id_flags = unsup_input
        # Unlabeled clips flagged as in-domain are appended to the supervised
        # features for the in-domain (mean-teacher) pass.
        unsup_id_feats = unsup_feats[id_flags]
        id_feats = torch.concat([sup_feats, unsup_id_feats], dim=0)

        # ID loss
        supervised_loss, mt_loss = self.in_domain_step(id_feats, labels=sup_labels, mode="id", sup_index=sup_feats.shape[0])
        # OD loss: both augmented views go through the contrastive step together.
        unsup_od_feats_aug = torch.cat([unsup_view_1, unsup_view_2], dim=0)
        cl_loss = self.out_domain_step(unsup_od_feats_aug)
        # Add weights
        # Self-supervised weight ramps with the scheduler's interpolation
        # factor, squared. NOTE(review): ``_get_interpolate_factor`` is a
        # private method — confirm it exists on the configured scheduler.
        self_weight = self.lr_schedulers()._get_interpolate_factor() ** 2
        # od_loss = weight * od_loss
        # mt_loss is intentionally excluded here (see the commented-out term).
        unsup_loss = 0.5 * cl_loss * self_weight #  2 * mt_loss * (1 - self_weight) +
        
        total_loss = supervised_loss + unsup_loss
        step_num = self.lr_schedulers()._step_count
        
        self.log("train/total_loss", total_loss)
        self.log("train/weight", 1 - self_weight)
        self.log("train/self_weight", self_weight)
        self.log("train/step", step_num, prog_bar=True)
        
        return total_loss

    def in_domain_step(
        self, 
        feats,
        labels,
        mode="id",
        sup_index=0):
        """Supervised + mean-teacher consistency losses on in-domain data.

        Args:
            feats: feature batch; rows ``[:sup_index]`` are supervised
                (strongly labelled first, then weakly labelled), rows
                ``[sup_index:]`` are unlabeled in-domain clips.
            labels: strong (frame-level) labels for the supervised rows.
            mode: unused beyond its default — kept for interface parity.
            sup_index: number of supervised rows at the front of ``feats``.

        Returns:
            Tuple of (supervised loss, mean-teacher consistency loss).
        """
        ### In-domain data processing
        # Batch layout assumption: first sum(batch_sizes[:2]) supervised rows
        # are strongly labelled, the next batch_sizes[2] weakly — TODO confirm.
        indx_strong = sum(self.opt_configs["batch_sizes"][:2])
        indx_weak = self.opt_configs["batch_sizes"][2]

        strong_mask = torch.zeros(sup_index).to(feats).bool()
        weak_mask = torch.zeros(sup_index).to(feats).bool()
        strong_mask[:indx_strong] = 1
        weak_mask[indx_strong : indx_weak + indx_strong] = 1

        # deriving weak labels
        # (clip-level presence: a class is positive if any frame is active)
        labels_weak = (torch.sum(labels[weak_mask], -1) > 0).float()

        # 1-batch augmentation
        # Mixup is applied with 50% probability; slicing ``feats`` yields
        # views, so results are written back into the batch in place.
        if 0.5 > random.random():
            use_mixup = True
            feats[:sup_index][weak_mask], labels_weak = mixup(feats[:sup_index][weak_mask], labels_weak)    # sup weak mixup
            feats[:sup_index][strong_mask], labels[strong_mask] = mixup(feats[:sup_index][strong_mask], labels[strong_mask]) # sup strong mixup
            feats[sup_index:] = mixup(feats[sup_index:], None)  # unsup mixup
        else:
            use_mixup = False
                 
        # student forward over the whole batch (supervised + unlabeled rows)
        strong_preds_student, weak_preds_student = self.sed_student(feats)
        # supervised loss on strong labels
        loss_strong = self.supervised_loss(strong_preds_student[:sup_index][strong_mask], labels[strong_mask])
        # supervised loss on weakly labelled
        loss_weak = self.supervised_loss(weak_preds_student[:sup_index][weak_mask], labels_weak)
        # total supervised loss
        # NOTE: "suprevised" (sic) — local name kept unchanged in this pass.
        suprevised_loss = loss_strong + loss_weak * 0.5

        # Teacher forward is gradient-free; its losses are logged for
        # monitoring only and never back-propagated.
        with torch.no_grad():
            strong_preds_teacher, weak_preds_teacher = self.sed_teacher(feats)
            loss_strong_teacher = self.supervised_loss(strong_preds_teacher[:sup_index][strong_mask], labels[strong_mask])
            loss_weak_teacher = self.supervised_loss(weak_preds_teacher[:sup_index][weak_mask], labels_weak)

        # Mean-teacher consistency over all rows, supervised and unlabeled.
        # (.detach() is redundant under no_grad but harmless.)
        mt_weak = F.mse_loss(weak_preds_student, weak_preds_teacher.detach()) # id_mt_weak_loss.mean()
        mt_strong =  F.mse_loss(strong_preds_student, strong_preds_teacher.detach())
        mt_loss = mt_weak + mt_strong

        if use_mixup:
            self.log("train/student/loss_strong_w_mixup", loss_strong)
            self.log("train/student/loss_weak_w_mixup", loss_weak)
            self.log("train/teacher/loss_strong_w_mixup", loss_strong_teacher)
            self.log("train/teacher/loss_weak_w_mixup", loss_weak_teacher)
        else:
            self.log("train/student/loss_strong", loss_strong)
            self.log("train/student/loss_weak", loss_weak)
            self.log("train/teacher/loss_strong", loss_strong_teacher)
            self.log("train/teacher/loss_weak", loss_weak_teacher)            
        self.log("train/student/mt_weak_loss", mt_weak)
        self.log("train/student/mt_strong_loss", mt_strong)
        self.log("train/mt_loss", mt_loss)
        self.log("train/supervised_loss", suprevised_loss)
        
        return suprevised_loss, mt_loss

    # def pseudo_label_rule(self, psd_labels, mode):
    #     # Rule: Model confidence enough on the prediction
    #     if mode == "weak":
    #         # shape: (batch_size, num_classes)
    #         # For low-confident samples, all classes should be less than the threshold
    #         # low_mask = (psd_labels.max(-1)[0] < self.low_confidence)
    #         # Or, for high-confident samples, at least two class should be greater than the threshold
    #         high_mask = (psd_labels >= self.high_confidence).sum(-1) > 0

    #     return high_mask  # | low_mask

    def out_domain_step(self, feats):
        """Self-supervised temporal-contrastive loss on out-of-domain views.

        ``feats`` is the concatenation of two augmented views of the same
        clips (built in ``training_step``). Returns the frame-level
        BYOL-style loss from ``TemporalContrastiveLoss``.
        """
        # Out-domain processing
        stu_frm, mask = self.encoder_st(feats, mode="student")
        with torch.no_grad():
            # NOTE(review): this calls encoder_st with mode="teacher" while
            # encoder_tea is never used here — confirm this is intentional
            # (the student module may route to its own teacher branch).
            tea_frm = self.encoder_st(feats, mode="teacher")
        # Get masked sample only
        stu_frm_mask = stu_frm[mask.bool()]
        tea_frm_mask = tea_frm[mask.bool()]
        # Cat all frame-wise representations
        _, std_frm_stu, std_frm_tea, frame_byol_loss = self.selfsup_loss(stu_frm_mask, tea_frm_mask)
        
        self.log("selfsl/byol_loss", frame_byol_loss, prog_bar=True)
        self.log("selfsl/std_std", std_frm_stu, prog_bar=True)
        self.log("selfsl/std_tea", std_frm_tea, prog_bar=True)
        ### Warning on the loss
        return frame_byol_loss