"""This file only includes the train step
"""
import torch

import torch.nn as nn
import torch.nn.functional as F
import pytorch_lightning as pl
import numpy as np

from copy import deepcopy
from .losses.temporal_contrast_loss import TemporalContrastiveLoss


class SelfTrainer(pl.LightningModule):
    """Self-supervised trainer with a student encoder and an EMA teacher.

    The student encoder is optimized with a temporal contrastive loss
    against a gradient-free teacher whose weights are an exponential
    moving average (EMA) of the student's, refreshed once per optimizer
    step via the ``on_before_zero_grad`` hook.
    """

    def __init__(
            self, 
            opt_configs,
            sed_student,
            opt_n_schd,
            train_loader,
            val_loader,
            test_loader
        ):
        """Store training components and build the EMA teacher.

        Args:
            opt_configs: mapping of optimization settings; must provide
                ``"ema_factor"`` (EMA decay used for the teacher update).
            sed_student: student encoder module; the teacher is created as
                a deep copy of it.
            opt_n_schd: optimizer/scheduler configuration returned verbatim
                from ``configure_optimizers``.
            train_loader: dataloader returned by ``train_dataloader``.
            val_loader: dataloader returned by ``val_dataloader``.
            test_loader: dataloader returned by ``test_dataloader``.
        """
        super().__init__()
        self.opt_configs = opt_configs
        self.opt_n_schd = opt_n_schd
        self.train_loader = train_loader
        self.val_loader = val_loader
        self.test_loader = test_loader

        self.encoder_st = sed_student
        # Teacher starts as an exact copy of the student and is only ever
        # updated through the EMA rule in `update_ema` (never by gradients).
        self.encoder_tea = deepcopy(sed_student)
        self.self_loss = TemporalContrastiveLoss()

        # Currently unused; kept for backward compatibility with any
        # external code that inspects it.
        self.train_losses = []

    def update_ema(self, alpha, global_step, model, ema_model):
        """In-place EMA update: ``ema = alpha * ema + (1 - alpha) * model``.

        Args:
            alpha: EMA decay factor in [0, 1].
            global_step: currently unused; kept in the signature for
                interface compatibility (commonly used to ramp ``alpha``
                early in training).
            model: source of the fresh weights (the student).
            ema_model: module updated in place (the teacher).
        """
        # Iterate both models' parameters consistently; no_grad makes the
        # in-place update explicit to autograd.
        with torch.no_grad():
            for ema_params, params in zip(ema_model.parameters(), model.parameters()):
                ema_params.data.mul_(alpha).add_(params.data, alpha=1 - alpha)

    def on_before_zero_grad(self, *args, **kwargs):
        # Refresh the EMA teacher once per optimizer step, after the
        # optimizer has applied the student update.
        self.update_ema(
            self.opt_configs["ema_factor"],
            self.lr_schedulers()._step_count,
            self.encoder_st,
            self.encoder_tea,
        )

    def training_step(self, batch, batch_idx):
        """One self-supervised step: masked student vs. frozen teacher.

        Returns the BYOL-style loss used for backpropagation; the other
        loss terms and representation std-devs are logged for monitoring.
        """
        # Only the features are used here; label/metadata fields are ignored.
        feats, _, _, _, _ = batch
        # `feats` is assumed to be a tuple/list of tensors concatenated
        # along the batch dimension — TODO confirm against the dataloader.
        feats = torch.cat(feats, dim=0)
        stu_frm, mask = self.encoder_st(feats, mode="student")
        # Teacher provides targets only; no gradients flow into it.
        with torch.no_grad():
            tea_frm = self.encoder_tea(feats, mode="teacher")
        # Keep only the masked frames for both branches (same mask so the
        # student/teacher frames stay aligned).
        stu_frm = stu_frm[mask.squeeze(-1).bool()]
        tea_frm = tea_frm[mask.squeeze(-1).bool()]
        # Normalize both branches to flat (num_frames, dim) matrices.
        if isinstance(stu_frm, list):
            stu_frm = torch.cat(stu_frm, dim=0)
            tea_frm = torch.cat(tea_frm, dim=0)
        elif not isinstance(stu_frm, torch.Tensor):
            raise NotImplementedError("You did nothing!")
        # Single reshape covers both branches (the original reshaped the
        # tensor branch twice, a redundant no-op).
        stu_frm = stu_frm.reshape(-1, stu_frm.shape[-1])
        tea_frm = tea_frm.reshape(-1, tea_frm.shape[-1])
        loss_frm, std_frm_stu, std_frm_tea, byol_loss = self.self_loss(stu_frm, tea_frm)

        self.log("selfsl/train_loss", loss_frm, prog_bar=True)
        self.log("obj_metric", byol_loss, prog_bar=False)
        self.log("selfsl/std_std", std_frm_stu, prog_bar=True)
        self.log("selfsl/std_tea", std_frm_tea, prog_bar=True)
        self.log("selfsl/byol_loss", byol_loss, prog_bar=True)
        step_num = self.lr_schedulers()._step_count
        self.log("train/step", step_num, prog_bar=False)
        return byol_loss

    def validation_step(self, batch, batch_idx):
        """No-op: this self-supervised stage defines no validation objective."""
        return None

    def test_step(self, *args, **kwargs):
        """Defer to the LightningModule default; no test objective here."""
        return super().test_step(*args, **kwargs)

    def configure_optimizers(self):
        # Optimizer/scheduler were constructed by the caller; pass through.
        return self.opt_n_schd

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.val_loader

    def test_dataloader(self):
        return self.test_loader