"""This file only includes the train step
"""
import torch
import random

import torch.nn as nn
import torch.nn.functional as F

from .sed_core import SEDCoreModule
from .self_trainer import ByolLoss
from .data_augms import mixup
from copy import deepcopy

class SemiTrainer(SEDCoreModule):
    """Semi-supervised sound-event-detection trainer.

    Combines a supervised mean-teacher objective on in-domain (ID) data
    with a masked-reconstruction (MAE-style) self-supervised objective on
    out-of-domain (OD) data.  The two losses are backpropagated separately
    (manual optimization) so that selected CNN layers can be frozen with
    respect to the out-of-domain loss only.
    """

    def __init__(self, *args, **kwargs):
        super(SemiTrainer, self).__init__(*args, **kwargs)
        # Keep the full student/teacher wrappers, then rebind the
        # sed_student/sed_teacher names to their inner encoders so the
        # inherited supervised code paths operate on the encoders directly.
        self.encoder_st = self.sed_student
        self.encoder_tea = self.sed_teacher
        self.sed_student = self.sed_student.encoder
        self.sed_teacher = self.sed_teacher.encoder
        self.semisup_loss = nn.MSELoss(reduction="mean")
        # batch_sizes[3] / batch_sizes[4]: in-domain / out-of-domain
        # sub-batch sizes used to slice and reorder concatenated batches.
        self.id_index = self.opt_configs["batch_sizes"][3]
        self.od_index = self.opt_configs["batch_sizes"][4]
        self.selfsup_loss = ByolLoss(symmetric=True)
        # Two separate backward passes per step -> manual optimization.
        self.automatic_optimization = False
        # Average-pooled input features form the MAE reconstruction target.
        self.target_pool = nn.AvgPool1d(4)

    def freeze_cnn_grad(self, freeze_cnn_layer):
        """Zero the gradients of the listed student CNN layers (and of the
        RNN when -1 is present) so the backward pass already performed does
        not update them.

        Args:
            freeze_cnn_layer: iterable of layer indices; -1 targets the RNN.
        """
        if -1 in freeze_cnn_layer:
            self.sed_student.rnn.zero_grad()

        for i in freeze_cnn_layer:
            if i == -1:
                continue
            # NOTE(review): substring match — e.g. index 1 also matches
            # "10", "11"; confirm the CNN parameter naming keeps this unique.
            for n, p in self.encoder_st.encoder.cnn.named_parameters():
                # Guard against parameters that received no gradient:
                # p.grad.data.zero_() raised AttributeError on grad=None.
                if str(i) in n and p.grad is not None:
                    p.grad.zero_()

    def on_before_zero_grad(self, *args, **kwargs):
        """Update the EMA teacher from the student weights."""
        self.update_ema(
            self.opt_configs["ema_factor"],
            self.lr_schedulers()._step_count,
            self.encoder_st,
            self.encoder_tea,
        )

    def reorder_feats_by_domain(self, feats, id_index, od_index):
        """Regroup a [id | od | id | od] batch into [id, id | od, od]."""
        id_feats = torch.cat([feats[:id_index], feats[od_index + id_index:od_index + 2 * id_index]], dim=0)
        od_feats = torch.cat([feats[id_index:od_index + id_index], feats[od_index + 2 * id_index:]], dim=0)
        return torch.cat([id_feats, od_feats], dim=0)

    def reorder_feats_by_view(self, feats, id_index, od_index):
        """Swap the second in-domain chunk with the first out-of-domain
        chunk so each domain's two augmented views become adjacent.

        Supports tensors (concatenated on dim 0) and plain Python lists.

        Raises:
            NotImplementedError: for any other container type.
        """
        if isinstance(feats, torch.Tensor):
            feats = torch.cat([
                feats[:id_index],
                feats[id_index * 2: od_index + id_index * 2],
                feats[id_index: id_index * 2],
                feats[od_index + 2 * id_index:],
            ], dim=0)
        elif isinstance(feats, list):
            feats = feats[:id_index] + \
                feats[id_index * 2: od_index + id_index * 2] + \
                feats[id_index: id_index * 2] + \
                feats[od_index + 2 * id_index:]
        else:
            raise NotImplementedError("You did nothing!")
        return feats

    def training_step(self, batch, batch_idx):
        """One manual-optimization step.

        Backprops the warmup-weighted out-of-domain loss first, zeroes the
        gradients of the frozen CNN layers, then accumulates the in-domain
        gradients and steps optimizer and scheduler.
        """
        opt = self.optimizers()
        sch = self.lr_schedulers()
        self.on_before_zero_grad()
        # manual optimization
        feats, od_feats, labels, _, filenames, crop_pos = batch
        # Overlap of the two random crops, then that overlap expressed as
        # a ratio inside each crop: (start, end) for view 0 and view 1.
        overlap_area = torch.stack([
            torch.maximum(crop_pos[0][0], crop_pos[1][0]),
            torch.minimum(crop_pos[0][1], crop_pos[1][1]),
        ], dim=1)
        crop_ratio = torch.stack([
            (overlap_area[:, 0] - crop_pos[0][0]) / (crop_pos[0][1] - crop_pos[0][0]),
            (overlap_area[:, 1] - crop_pos[0][0]) / (crop_pos[0][1] - crop_pos[0][0]),
            (overlap_area[:, 0] - crop_pos[1][0]) / (crop_pos[1][1] - crop_pos[1][0]),
            (overlap_area[:, 1] - crop_pos[1][0]) / (crop_pos[1][1] - crop_pos[1][0]),
        ], dim=1)
        crop_ratio = torch.concat(crop_ratio.chunk(2, dim=1), dim=0)
        od_feats = torch.cat(od_feats, dim=0)
        # In-domain (supervised + mean-teacher) loss
        id_loss = self.in_domain_step(feats, labels=labels, mode="id")
        # Out-of-domain (self-supervised) loss, ramped up during warmup
        od_loss = self.out_domain_step(od_feats, crop_ratio=crop_ratio)
        warmup = self.lr_schedulers()._get_interpolate_factor()
        weight = warmup * self.opt_configs["ood_weight"]
        od_loss = weight * od_loss

        # Optimization: OOD backward first so the frozen layers' gradients
        # can be zeroed before the ID gradients are accumulated on top.
        opt.zero_grad()
        self.manual_backward(od_loss)  # gradients w.r.t. od_loss only
        self.freeze_cnn_grad(self.opt_configs["freeze_cnn_layer"])
        self.manual_backward(id_loss)  # id_loss gradients accumulate
        opt.step()
        sch.step()
        total_loss = id_loss + od_loss
        self.log("train/total_loss", total_loss)
        self.log("train/weight", weight)
        return

    def in_domain_step(
        self, 
        feats,
        labels,
        mode="id"):
        """Supervised + mean-teacher consistency loss on in-domain data.

        The batch is laid out as [strong | weak | unlabelled]; the split
        sizes come from opt_configs["batch_sizes"].

        Returns:
            Scalar tensor: supervised loss plus warmup-weighted
            student/teacher consistency loss.
        """
        batch_num = feats.shape[0]
        labels = labels[:batch_num]
        indx_strong = sum(self.opt_configs["batch_sizes"][:2])
        indx_weak = self.opt_configs["batch_sizes"][2]

        strong_mask = torch.zeros(batch_num).to(feats).bool()
        weak_mask = torch.zeros(batch_num).to(feats).bool()
        strong_mask[:indx_strong] = 1
        weak_mask[indx_strong : indx_weak + indx_strong] = 1

        # Derive clip-level weak labels from the frame-level annotations.
        labels_weak = (torch.sum(labels[weak_mask], -1) > 0).float()

        # In-batch mixup augmentation with probability 0.5 (in-place on feats).
        if 0.5 > random.random():
            feats[weak_mask], labels_weak = mixup(feats[weak_mask], labels_weak)
            feats[strong_mask], labels[strong_mask] = mixup(feats[strong_mask], labels[strong_mask])
        strong_preds_student, weak_preds_student = self.sed_student(feats, mode=mode)
        # supervised loss on strongly labelled clips
        loss_strong = self.supervised_loss(strong_preds_student[strong_mask], labels[strong_mask])
        # supervised loss on weakly labelled clips
        loss_weak = self.supervised_loss(weak_preds_student[weak_mask], labels_weak)
        # total supervised loss
        tot_loss_supervised = loss_strong + loss_weak

        # Teacher forward pass is monitored only; no gradients flow to it.
        with torch.no_grad():
            strong_preds_teacher, weak_preds_teacher = self.sed_teacher(feats, mode=mode)
            loss_strong_teacher = self.supervised_loss(strong_preds_teacher[strong_mask], labels[strong_mask])
            loss_weak_teacher = self.supervised_loss(weak_preds_teacher[weak_mask], labels_weak)
        # Mean-teacher consistency loss, ramped up by the scheduler warmup.
        warmup = self.lr_schedulers()._get_scaling_factor()
        weight = self.opt_configs["const_max"] * warmup
        strong_self_sup_loss = F.mse_loss(strong_preds_student[strong_mask], strong_preds_teacher[strong_mask].detach())
        weak_self_sup_loss = F.mse_loss(weak_preds_student[weak_mask], weak_preds_teacher[weak_mask].detach())

        tot_self_loss = (strong_self_sup_loss + weak_self_sup_loss) * weight

        step_num = self.lr_schedulers()._step_count
        tot_loss = tot_loss_supervised + tot_self_loss

        self.log("train/student/loss_strong", loss_strong)
        self.log("train/student/loss_weak", loss_weak)
        self.log("train/teacher/loss_strong", loss_strong_teacher)
        self.log("train/teacher/loss_weak", loss_weak_teacher)
        self.log("train/step", step_num, prog_bar=True)
        self.log("train/student/tot_supervised", tot_loss_supervised, prog_bar=True)
        return tot_loss

    def out_domain_step(self, feats, mode="od", crop_ratio=None):
        """Masked-reconstruction (MAE-style) loss on out-of-domain data.

        Args:
            feats: concatenated OOD features (two views per clip).
            mode: encoder mode flag forwarded to the student wrapper.
            crop_ratio: per-clip overlap ratios of the two random crops.

        Raises:
            RuntimeError: if the reconstruction loss becomes NaN.
        """
        # The 4x average-pooled input serves as the reconstruction target.
        mae_target = feats.clone()
        mae_target = self.target_pool(mae_target).transpose(1, 2)
        stu_frm, mask = self.encoder_st(feats, mode="student", encoder_mode=mode, sep_index=self.id_index * 2, crop_ratio=crop_ratio)
        # Per-dimension spread of normalized frames: collapse indicator.
        std_frm_stu = compute_var(F.normalize(stu_frm, dim=-1)).mean()
        loss_mae = F.mse_loss(stu_frm[mask.squeeze(-1).bool()], mae_target[mask.squeeze(-1).bool()])
        if torch.isnan(loss_mae):
            # Fail loudly: the previous debug hack printed the frames and
            # called os._exit(0), reporting *success* to the job scheduler.
            raise RuntimeError(
                f"NaN MAE loss; first student frames: {stu_frm[:, :10]}"
            )
        self.log("selfsl/train_loss", loss_mae, prog_bar=True)
        self.log("selfsl/std_std", std_frm_stu, prog_bar=True)
        return loss_mae
    
def compute_var(y):
    """Per-feature standard deviation of ``y`` across all leading axes.

    Args:
        y: tensor of shape (..., D); flattened to (N, D) before reduction.

    Returns:
        Tensor of shape (D,): unbiased standard deviation per feature
        dimension, with a 1e-6 epsilon for numerical stability.
    """
    # reshape (not view): also accepts non-contiguous inputs.
    y = y.reshape(-1, y.size(-1))
    # Plain Python int divisor — the original built a count tensor with a
    # hard-coded .cuda(), which broke CPU runs and forced a device transfer.
    n = y.size(0)
    zs = y.sum(dim=0)
    zss = (y ** 2).sum(dim=0)
    # Unbiased variance via the sum / sum-of-squares identity.
    var = zss / (n - 1) - (zs ** 2) / (n * (n - 1))
    return torch.sqrt(var + 1e-6)