"""This trainer includes the fundamental validation/test steps of SED
"""
from ast import List
import glob
import os

from regex import W
import scheduler
import torch
import sed_scores_eval

import numpy as np
import pandas as pd
import torchmetrics as tm
import pytorch_lightning as pl
import torch.nn as nn

from typing import *
from copy import deepcopy
from pprint import pprint
from pytorch_lightning.cli import LightningArgumentParser
from pytorch_lightning.callbacks import ModelCheckpoint
from loss.base_loss import Loss
from trainer.utils.base_cli import BaseCLI
from trainer.utils.save_config_callback import MySaveConfigCallback
from torchaudio.transforms import AmplitudeToDB
from data_loader.utils.io import TorchScaler
from trainer.sed_tools.pred_decoder import batched_decode_preds, val_decode_preds, batched_decode_scores
from evaluation import (
    MedianPool2d, 
    SEDMetrics,
    compute_psds_from_operating_points,
    compute_psds_from_scores,
    compute_per_intersection_macro_f1,
    log_sedeval_metrics,
)

class SEDBaseTrainer(pl.LightningModule):
    """Mean-teacher base trainer for sound event detection (SED).

    Wraps a student model together with a frozen EMA ("teacher") copy and
    implements the shared evaluation logic:

    * weak-label macro F1 (torchmetrics) for student and teacher;
    * strong-label intersection F1 via ``SEDMetrics`` and, every 10 epochs,
      PSDS scenarios 1/2 computed with ``sed_scores_eval``;
    * DCASE-style test evaluation (PSDS from operating points and from
      scores, event-based macro F1, per-intersection macro F1);
    * a predict loop that stores teacher predictions and the student/teacher
      mean-squared disagreement.

    ``training_step`` is intentionally a stub; subclasses must implement it.
    """

    def __init__(
            self,
            sed_model: torch.nn.Module,
            supervised_loss: Loss,
            unsupervised_loss: Loss,
            opts: Dict[str, Any],
            schs: Dict[str, Any],
            mean_teacher_ema: float = 0.999,
            exp_name: str = "Base",
            test_n_thresholds: int = 50,
            median_window: Optional[List[int]] = None,
            infer_avg_n_models: int = 10,
            val_psds: bool = True,
            ):
        """Build the student/teacher pair and all metric/score buffers.

        Args:
            sed_model: student network; must expose ``classifier.n_classes``.
            supervised_loss: loss applied to labelled (weak/strong) data.
            unsupervised_loss: consistency loss (stored for subclasses).
            opts: ``{"optimizer": <torch.optim class name>,
                "optimizer_params": {...}}``.
            schs: ``{"scheduler": <name in local scheduler module>,
                "scheduler_params": {...}}``.
            mean_teacher_ema: EMA decay used for the teacher update.
            exp_name: experiment name, also exposed as ``self.name``.
            test_n_thresholds: number of decision thresholds for test PSDS.
            median_window: per-class median-filter lengths; defaults to
                ``[7] * 10``. (``None`` replaces the previous mutable default
                argument; passing an explicit list behaves as before.)
            infer_avg_n_models: number of recent models averaged at inference.
            val_psds: whether to compute validation PSDS every 10 epochs.
        """
        super().__init__()
        # Materialise the default here instead of using a mutable default
        # argument (classic Python pitfall).
        if median_window is None:
            median_window = [7] * 10
        # Snapshot the constructor arguments *before* any other locals exist,
        # so only genuine parameters are copied onto ``self`` below.  (The
        # previous implementation captured locals() after the teacher loop and
        # accidentally stored the loop variable ``p`` as an attribute.)
        args = locals().copy()

        self.sed_student = sed_model
        # The teacher is a frozen EMA copy of the student; see update_ema().
        self.sed_teacher = deepcopy(sed_model)
        for p in self.sed_teacher.parameters():
            p.requires_grad = False

        # Save the remaining constructor arguments to 'self'.
        for k, v in args.items():
            if k == 'self' or k == '__class__' or hasattr(self, k) or k == 'sed_model':
                continue
            setattr(self, k, v)
        self.name = self.exp_name

        # Validation and evaluation state.
        # Average previous N models to smooth the evaluation curve.
        self.previous_models = {
            "student": [],
            "teacher": []
        }
        # Weak-label macro F1 for student and teacher.
        self.get_weak_student_f1 = tm.classification.MultilabelF1Score(
            self.sed_student.classifier.n_classes,
            average="macro"
        )
        self.get_weak_teacher_f1 = deepcopy(self.get_weak_student_f1)
        # Strong-label metrics and accumulated PSDS scores, one entry per
        # (data subset, model) pair.
        self.median_filter = MedianPool2d(median_window, same=True)
        strong_keys = (
            "real_stu", "real_tea",
            "synth_stu", "synth_tea",
            "eval_stu", "eval_tea",
        )
        self.strong_metrics = {k: SEDMetrics(intersection_thd=0.5) for k in strong_keys}
        self.strong_scores = {k: {} for k in strong_keys}
        # Test-time buffers: one decoded event list per decision threshold
        # (for operating-point PSDS) plus post-processed score frames.
        self.test_thresholds = np.arange(
            1 / (test_n_thresholds * 2), 1, 1 / test_n_thresholds
        )
        self.test_psds_buffer_student = {k: pd.DataFrame() for k in self.test_thresholds}
        self.test_psds_buffer_teacher = {k: pd.DataFrame() for k in self.test_thresholds}
        self.test_scores_weak_buffer_student = {}
        self.test_scores_weak_buffer_teacher = {}
        self.test_scores_postprocessed_buffer_student = {}
        self.test_scores_postprocessed_buffer_teacher = {}
        self.decoded_05_buffer_student = pd.DataFrame({})
        self.decoded_05_buffer_teacher = pd.DataFrame({})
        # Per-step loss placeholders.
        self.loss_weak_student = []
        self.loss_weak_teacher = []
        self.loss_synth_student = []
        self.loss_synth_teacher = []
        self.loss_real_student = []
        self.loss_real_teacher = []
        self.loss_test_student = []
        self.loss_test_teacher = []

    def configure_optimizers(self):
        """Instantiate the optimizer/scheduler pair described by ``self.opts``
        and ``self.schs``; the scheduler is stepped every training step."""
        opt = getattr(torch.optim, self.opts["optimizer"])(
            self.sed_student.parameters(), **self.opts["optimizer_params"]
        )
        sch = getattr(scheduler, self.schs["scheduler"])(opt, **self.schs["scheduler_params"])
        return {"optimizer": opt, "lr_scheduler": {"scheduler": sch, "interval": "step"}}

    def on_before_zero_grad(self, *args, **kwargs):
        """Refresh the EMA teacher right after every optimizer step."""
        self.update_ema(
            self.mean_teacher_ema,
            self.lr_schedulers()._step_count,
            self.sed_student,
            self.sed_teacher,
        )

    def update_ema(self, alpha, global_step, model, ema_model):
        """In-place EMA update: ema = alpha * ema + (1 - alpha) * model.

        ``global_step`` is currently unused; it is kept so subclasses may
        implement a step-dependent decay without changing the call site.
        """
        for ema_params, params in zip(ema_model.parameters(), model.parameters()):
            ema_params.data.mul_(alpha).add_(params.data, alpha=1 - alpha)

    def training_step(self, batch, batch_idx):
        """Training is defined by subclasses; the base class only evaluates."""
        pass

    def on_validation_epoch_start(self):
        # All validation datasets share one label encoder; take the first.
        self.label_encoder = self.trainer.val_dataloaders.dataset.datasets[0].encoder
        return super().on_validation_epoch_start()

    def strong_validation_step(self, preds_stu, preds_tea, labels, data_type, filenames):
        """Validate one batch slice of strongly-labelled clips.

        Logs student/teacher supervised losses, accumulates intersection F1,
        and (every 10 epochs, when ``val_psds`` is on) accumulates
        post-processed scores for PSDS.

        Args:
            preds_stu: frame-level student predictions.
            preds_tea: frame-level teacher predictions.
            labels: strong (frame-level) targets.
            data_type: "synth", "real" or "eval"; selects the metric buffers.
                "eval" clips are logged under the "eval/" prefix, the others
                under "val/".
            filenames: mixture paths used as clip identifiers for PSDS.
        """
        loss_student = self.supervised_loss(preds_stu, labels)
        loss_teacher = self.supervised_loss(preds_tea, labels)
        self.log(f"{'val' if data_type != 'eval' else 'eval'}/student/loss_{data_type}", loss_student, on_epoch=True, sync_dist=True, batch_size=len(labels))
        self.log(f"{'val' if data_type != 'eval' else 'eval'}/teacher/loss_{data_type}", loss_teacher, on_epoch=True, sync_dist=True, batch_size=len(labels))

        # Hard decisions at a single 0.5 threshold for the F1 metrics.
        decode_strong_student = val_decode_preds(preds_stu, [0.5], self.median_filter)
        decode_strong_teacher = val_decode_preds(preds_tea, [0.5], self.median_filter)

        self.strong_metrics[f"{data_type}_stu"].accm_macro_f1(decode_strong_student, labels)
        self.strong_metrics[f"{data_type}_tea"].accm_macro_f1(decode_strong_teacher, labels)
        if self.current_epoch % 10 == 0 and self.val_psds:
            # Accumulate post-processed scores for PSDS calculation.
            self.strong_scores[f"{data_type}_stu"].update(batched_decode_preds(preds_stu, filenames, self.label_encoder, median_filter=self.median_window)[1])
            self.strong_scores[f"{data_type}_tea"].update(batched_decode_preds(preds_tea, filenames, self.label_encoder, median_filter=self.median_window)[1])

    def weak_validation_step(self, preds_stu, preds_tea, labels):
        """Validate one batch slice of weakly-labelled clips.

        Logs the supervised losses and feeds clip-level predictions into the
        weak macro-F1 metrics.
        """
        loss_student = self.supervised_loss(preds_stu, labels)
        loss_teacher = self.supervised_loss(preds_tea, labels)
        self.log("val/student/loss_weak", loss_student, on_epoch=True, sync_dist=True, batch_size=len(labels))
        self.log("val/teacher/loss_weak", loss_teacher, on_epoch=True, sync_dist=True, batch_size=len(labels))
        # Accumulate F1 score for weak labels.
        self.get_weak_student_f1(
            preds_stu, labels.int()
        )
        self.get_weak_teacher_f1(
            preds_tea, labels.int()
        )

    def detect(self, model, feats):
        """Run ``model`` on ``feats``; returns (strong_preds, weak_preds)."""
        return model(feats)

    def validation_step(self, batch, batch_idx):
        """Route each clip of a mixed batch to the weak / synth / real / eval
        validation path according to its ``label_type``."""
        feats, labels, params = batch
        label_types = [x["label_type"] for x in params]
        # Student prediction.
        strong_preds_student, weak_preds_student = self.detect(self.sed_student, feats)
        # Teacher prediction.
        strong_preds_teacher, weak_preds_teacher = self.detect(self.sed_teacher, feats)

        mask_weak = torch.tensor([x == "weak" for x in label_types]).to(labels).bool()
        mask_synth = torch.tensor([x == "strong_synthetic" for x in label_types]).to(labels).bool()
        mask_real = torch.tensor([x == "strong_real" for x in label_types]).to(labels).bool()
        mask_eval = torch.tensor([x == "strong_eval" for x in label_types]).to(labels).bool()
        # Every clip must belong to exactly one of the four subsets.
        assert sum(mask_weak) + sum(mask_synth) + sum(mask_real) + sum(mask_eval) == labels.shape[0], \
            f"weak: {sum(mask_weak)}, synth: {sum(mask_synth)}, real: {sum(mask_real)}, eval: {sum(mask_eval)}, total: {labels.shape[0]}"

        if torch.any(mask_weak):
            # Collapse strong targets over time to clip-level presence labels.
            labels_weak = (torch.sum(labels[mask_weak], -1) >= 1).float()
            self.weak_validation_step(
                preds_stu=weak_preds_student[mask_weak],
                preds_tea=weak_preds_teacher[mask_weak],
                labels=labels_weak
            )

        if torch.any(mask_synth):
            self.strong_validation_step(
                preds_stu=strong_preds_student[mask_synth],
                preds_tea=strong_preds_teacher[mask_synth],
                labels=labels[mask_synth],
                data_type="synth",
                filenames=[x["mixture"] for x in params if x["label_type"] == "strong_synthetic"]
            )

        if torch.any(mask_real):
            self.strong_validation_step(
                preds_stu=strong_preds_student[mask_real],
                preds_tea=strong_preds_teacher[mask_real],
                labels=labels[mask_real],
                data_type="real",
                filenames=[x["mixture"] for x in params if x["label_type"] == "strong_real"]
            )

        if torch.any(mask_eval):
            self.strong_validation_step(
                preds_stu=strong_preds_student[mask_eval],
                preds_tea=strong_preds_teacher[mask_eval],
                labels=labels[mask_eval],
                data_type="eval",
                filenames=[x["mixture"] for x in params if x["label_type"] == "strong_eval"]
            )
        return

    def compute_val_psds(self):
        """Compute validation PSDS (scenarios 1 and 2) from accumulated scores.

        Ground-truth and duration files come from the validation datasets,
        whose order is fixed as [weak, synth, real, eval] (defined in the
        datasets). Audios without any event are dropped before scoring.
        """
        self.val_psds1 = {}
        self.val_psds2 = {}
        tsv_files = [x.tsv_file for x in self.trainer.val_dataloaders.dataset.datasets]
        dur_files = [x.dur_file for x in self.trainer.val_dataloaders.dataset.datasets]
        # Index 0 is the weak set, which has no strong ground truth.
        tsv_files = {
            "synth": tsv_files[1],
            "real": tsv_files[2],
            "eval": tsv_files[3]
        }
        dur_files = {
            "synth": dur_files[1],
            "real": dur_files[2],
            "eval": dur_files[3]
        }  # order defined in datasets

        for data_type in ["synth", "real", "eval"]:
            ground_truth = sed_scores_eval.io.read_ground_truth_events(tsv_files[data_type])
            audio_durations = sed_scores_eval.io.read_audio_durations(dur_files[data_type])
            # Drop audios without events and keep durations only for the rest.
            ground_truth = {
                audio_id: gt for audio_id, gt in ground_truth.items()
                if len(gt) > 0
            }
            audio_durations = {
                audio_id: audio_durations[audio_id]
                for audio_id in ground_truth.keys()
            }
            prefix = 'val' if data_type != 'eval' else 'eval'
            # Compute and log both scenarios for student and teacher alike.
            for short, full in (("stu", "student"), ("tea", "teacher")):
                scores = self.strong_scores[f"{data_type}_{short}"]
                self.val_psds1[f"{data_type}_{short}"] = self.psds1(
                    scores, ground_truth, audio_durations
                )
                self.val_psds2[f"{data_type}_{short}"] = self.psds2(
                    scores, ground_truth, audio_durations
                )
                self.log(f"{prefix}/{full}/{data_type}_psds1", self.val_psds1[f"{data_type}_{short}"], prog_bar=False, sync_dist=True)
                self.log(f"{prefix}/{full}/{data_type}_psds2", self.val_psds2[f"{data_type}_{short}"], prog_bar=False, sync_dist=True)

    def on_validation_epoch_end(self):
        """Aggregate epoch metrics, log them and reset the accumulators.

        The monitored objective ("val/metric") is
        ``0.5 * teacher strong-real loss + teacher weak loss``; when those
        losses were not logged (F1-only runs) it falls back to the constant 1.
        """
        # Weak-label F1.
        weak_student_f1 = self.get_weak_student_f1.compute()
        weak_teacher_f1 = self.get_weak_teacher_f1.compute()
        # Strong-label intersection F1.
        results = {k: metric.compute_macro_f1() for k, metric in self.strong_metrics.items()}
        # Strong PSDS, every 10 epochs.
        if self.current_epoch % 10 == 0 and self.val_psds:
            self.compute_val_psds()
        self.log("val/student/weak_f1", weak_student_f1, prog_bar=False, sync_dist=True)
        self.log("val/student/synth_f1", results["synth_stu"], prog_bar=False, sync_dist=True)
        self.log("val/student/real_f1", results["real_stu"], prog_bar=False, sync_dist=True)
        self.log("eval/student/f1", results["eval_stu"], prog_bar=False, sync_dist=True)

        self.log("val/teacher/weak_f1", weak_teacher_f1, prog_bar=False, sync_dist=True)
        self.log("val/teacher/synth_f1", results["synth_tea"], prog_bar=False, sync_dist=True)
        self.log("val/teacher/real_f1", results["real_tea"], prog_bar=False, sync_dist=True)
        self.log("eval/teacher/f1", results["eval_tea"], prog_bar=False, sync_dist=True)

        # Objective: weighted sum of the teacher's real and weak losses.
        # (Renamed from the previous strong_loss/real_loss, which mislabelled
        # the quantities they held; the unused synth loss fetch was removed.)
        real_loss = self.trainer.logged_metrics.get("val/teacher/loss_real")
        weak_loss = self.trainer.logged_metrics.get("val/teacher/loss_weak")
        if real_loss is None or weak_loss is None:
            # F1-only run: no validation losses were logged this epoch.
            average_loss = 1
        else:
            average_loss = real_loss * 0.5 + weak_loss
        self.log("val/teacher/avg_loss", average_loss, sync_dist=True)
        obj_metric = average_loss
        self.log("val/metric", obj_metric, prog_bar=True)
        # Reset weak metrics and accumulated scores for the next epoch.
        # NOTE(review): self.strong_metrics are not reset here — confirm that
        # SEDMetrics.compute_macro_f1 resets its internal state.
        self.get_weak_student_f1.reset()
        self.get_weak_teacher_f1.reset()
        for k in self.strong_scores:
            self.strong_scores[k] = {}

        return obj_metric

    def psds1(self, input, ground_truth, audio_durations):
        """PSDS scenario 1 (dtc/gtc 0.7, no cross-trigger cost)."""
        return compute_psds_from_scores(
            input,
            ground_truth,
            audio_durations,
            dtc_threshold=0.7,
            gtc_threshold=0.7,
            cttc_threshold=None,
            alpha_ct=0,
            alpha_st=1,
        )

    def psds2(self, input, ground_truth, audio_durations):
        """PSDS scenario 2 (dtc/gtc 0.1, cttc 0.3, alpha_ct 0.5)."""
        return compute_psds_from_scores(
            input,
            ground_truth,
            audio_durations,
            dtc_threshold=0.1,
            gtc_threshold=0.1,
            cttc_threshold=0.3,
            alpha_ct=0.5,
            alpha_st=1,
            )

    def set_label_encoder(self):
        """Take the label encoder from the (single) test dataset."""
        self.label_encoder = self.trainer.test_dataloaders.dataset.encoder

    def on_test_start(self, *args, **kwargs):
        self.set_label_encoder()
        return super().on_test_start(*args, **kwargs)

    def test_step(self, batch, batch_indx):
        """Accumulate test-time decodings for student and teacher.

        With ground truth available (dataset has a tsv_file) the predictions
        are decoded at every test threshold for operating-point PSDS; without
        it only the (post-processed) scores are buffered for later export.
        """
        feats, labels, params = batch
        filenames = [x["mixture"] for x in params]
        strong_preds_student, weak_preds_student = self.detect(self.sed_student, feats)
        strong_preds_teacher, weak_preds_teacher = self.detect(self.sed_teacher, feats)

        self.loss_test_student.append(
            self.supervised_loss(strong_preds_student, labels).item()
        )
        self.loss_test_teacher.append(
            self.supervised_loss(strong_preds_teacher, labels).item()
        )
        if self.trainer.test_dataloaders.dataset.tsv_file is not None:
            _, scores_postprocessed_student_strong, decoded_student = batched_decode_preds(
                strong_preds_student,
                filenames,
                self.label_encoder,
                median_filter=self.median_window,
                thresholds=self.test_thresholds,
            )
            _, scores_postprocessed_teacher_strong, decoded_teacher = batched_decode_preds(
                strong_preds_teacher,
                filenames,
                self.label_encoder,
                median_filter=self.median_window,
                thresholds=self.test_thresholds,
            )

            self.test_scores_postprocessed_buffer_student.update(
                scores_postprocessed_student_strong
            )
            self.test_scores_postprocessed_buffer_teacher.update(
                scores_postprocessed_teacher_strong
            )

            for thd in self.test_thresholds:
                self.test_psds_buffer_student[thd] = pd.concat([
                    self.test_psds_buffer_student[thd],
                    decoded_student[thd]
                ], ignore_index=True
                )
                self.test_psds_buffer_teacher[thd] = pd.concat([
                    self.test_psds_buffer_teacher[thd],
                    decoded_teacher[thd]
                ], ignore_index=True
                )

            # Middle threshold (~0.5) feeds the event-based / intersection F1.
            mid_thd = self.test_thresholds[len(self.test_thresholds) // 2]
            self.decoded_05_buffer_student = pd.concat([
                self.decoded_05_buffer_student,
                decoded_student[mid_thd]
            ], ignore_index=True
            )
            self.decoded_05_buffer_teacher = pd.concat([
                self.decoded_05_buffer_teacher,
                decoded_teacher[mid_thd]
            ], ignore_index=True
            )
        else:
            # NOTE(review): the other decode paths pass ``self.median_window``
            # as ``median_filter``; this branch passes the MedianPool2d module
            # instead — confirm batched_decode_scores accepts both forms.
            scores_student_strong = batched_decode_scores(
                strong_preds_student,
                filenames,
                self.label_encoder,
                median_filter=self.median_filter,
                thresholds=self.test_thresholds,
            )
            scores_teacher_strong = batched_decode_scores(
                strong_preds_teacher,
                filenames,
                self.label_encoder,
                median_filter=self.median_filter,
                thresholds=self.test_thresholds,
            )

            self.test_scores_postprocessed_buffer_student.update(
                scores_student_strong
            )
            self.test_scores_postprocessed_buffer_teacher.update(
                scores_teacher_strong
            )
            self.test_scores_weak_buffer_student.update(
                {k: v.detach().cpu().numpy() for k, v in zip(filenames, weak_preds_student)}
            )
            self.test_scores_weak_buffer_teacher.update(
                {k: v.detach().cpu().numpy() for k, v in zip(filenames, weak_preds_teacher)}
            )
        return

    def _test_metrics_for(self, model_name, psds_buffer, scores_buffer,
                          decoded_05, ground_truth, audio_durations, save_dir):
        """Compute all test metrics for one model ("student" or "teacher").

        Returns a 6-tuple: (psds1 from operating points, psds1 from scores,
        psds2 from operating points, psds2 from scores, event-based macro F1,
        per-intersection macro F1).
        """
        tsv_file = self.trainer.test_dataloaders.dataset.tsv_file
        dur_file = self.trainer.test_dataloaders.dataset.dur_file
        psds1_psds_eval = compute_psds_from_operating_points(
            psds_buffer,
            tsv_file,
            dur_file,
            dtc_threshold=0.7,
            gtc_threshold=0.7,
            alpha_ct=0,
            alpha_st=1,
            save_dir=os.path.join(save_dir, model_name, "scenario1"),
        )
        psds1_sed_scores_eval = compute_psds_from_scores(
            scores_buffer,
            ground_truth,
            audio_durations,
            dtc_threshold=0.7,
            gtc_threshold=0.7,
            cttc_threshold=None,
            alpha_ct=0,
            alpha_st=1,
            save_dir=os.path.join(save_dir, model_name, "scenario1"),
        )
        psds2_psds_eval = compute_psds_from_operating_points(
            psds_buffer,
            tsv_file,
            dur_file,
            dtc_threshold=0.1,
            gtc_threshold=0.1,
            cttc_threshold=0.3,
            alpha_ct=0.5,
            alpha_st=1,
            save_dir=os.path.join(save_dir, model_name, "scenario2"),
        )
        psds2_sed_scores_eval = compute_psds_from_scores(
            scores_buffer,
            ground_truth,
            audio_durations,
            dtc_threshold=0.1,
            gtc_threshold=0.1,
            cttc_threshold=0.3,
            alpha_ct=0.5,
            alpha_st=1,
            save_dir=os.path.join(save_dir, model_name, "scenario2"),
        )
        event_macro = log_sedeval_metrics(
            decoded_05,
            tsv_file,
            os.path.join(save_dir, model_name),
        )[0]
        intersection_f1_macro = compute_per_intersection_macro_f1(
            {"0.5": decoded_05},
            tsv_file,
            dur_file,
        )
        return (psds1_psds_eval, psds1_sed_scores_eval,
                psds2_psds_eval, psds2_sed_scores_eval,
                event_macro, intersection_f1_macro)

    def on_test_epoch_end(self):
        """Finalize the test run.

        Without ground truth (no tsv_file) the accumulated score buffers are
        dumped to .npz files. With ground truth, PSDS scenarios 1/2 (via both
        psds_eval operating points and sed_scores_eval), event-based macro F1
        and intersection F1 are computed for student and teacher and logged.
        """
        save_dir = self.logger.log_dir + "/metrics_test/"
        if self.trainer.test_dataloaders.dataset.tsv_file is None:
            # Unlabelled test set: just save the scores and return.
            os.makedirs(save_dir, exist_ok=True)
            print(f"Saving test scores...{save_dir}")
            np.savez(f"{save_dir}/scores_student.npz", **self.test_scores_postprocessed_buffer_student)
            np.savez(f"{save_dir}/scores_teacher.npz", **self.test_scores_postprocessed_buffer_teacher)
            np.savez(f"{save_dir}/scores_weak_student.npz", **self.test_scores_weak_buffer_student)
            np.savez(f"{save_dir}/scores_weak_teacher.npz", **self.test_scores_weak_buffer_teacher)
            return
        ground_truth = sed_scores_eval.io.read_ground_truth_events(self.trainer.test_dataloaders.dataset.tsv_file)
        audio_durations = sed_scores_eval.io.read_audio_durations(self.trainer.test_dataloaders.dataset.dur_file)
        # Drop audios without events.
        ground_truth = {
            audio_id: gt for audio_id, gt in ground_truth.items()
            if len(gt) > 0
        }
        audio_durations = {
            audio_id: audio_durations[audio_id]
            for audio_id in ground_truth.keys()
        }
        (psds1_student_psds_eval, psds1_student_sed_scores_eval,
         psds2_student_psds_eval, psds2_student_sed_scores_eval,
         event_macro_student, intersection_f1_macro_student) = self._test_metrics_for(
            "student",
            self.test_psds_buffer_student,
            self.test_scores_postprocessed_buffer_student,
            self.decoded_05_buffer_student,
            ground_truth,
            audio_durations,
            save_dir,
        )
        (psds1_teacher_psds_eval, psds1_teacher_sed_scores_eval,
         psds2_teacher_psds_eval, psds2_teacher_sed_scores_eval,
         event_macro_teacher, intersection_f1_macro_teacher) = self._test_metrics_for(
            "teacher",
            self.test_psds_buffer_teacher,
            self.test_scores_postprocessed_buffer_teacher,
            self.decoded_05_buffer_teacher,
            ground_truth,
            audio_durations,
            save_dir,
        )
        results = {
                "test/student/psds1_psds_eval": psds1_student_psds_eval,
                "test/student/psds1_sed_scores_eval": psds1_student_sed_scores_eval,
                "test/student/psds2_psds_eval": psds2_student_psds_eval,
                "test/student/psds2_sed_scores_eval": psds2_student_sed_scores_eval,
                "test/teacher/psds1_psds_eval": psds1_teacher_psds_eval,
                "test/teacher/psds1_sed_scores_eval": psds1_teacher_sed_scores_eval,
                "test/teacher/psds2_psds_eval": psds2_teacher_psds_eval,
                "test/teacher/psds2_sed_scores_eval": psds2_teacher_sed_scores_eval,
                "test/student/event_f1_macro": event_macro_student,
                "test/student/intersection_f1_macro": intersection_f1_macro_student,
                "test/teacher/event_f1_macro": event_macro_teacher,
                "test/teacher/intersection_f1_macro": intersection_f1_macro_teacher,
            }
        if self.logger is not None:
            self.logger.log_metrics(results)

        pprint(results)

    def on_predict_epoch_start(self):
        # Fresh buffers for teacher predictions and student/teacher MSE.
        self.teacher_preds = []
        self.mt_loss = []
        return super().on_predict_epoch_start()

    def predict_step(self, batch, batch_indx):
        """Collect teacher clip scores and the student/teacher disagreement
        (mean squared difference of weak predictions) for each batch."""
        feats, labels, params = batch
        strong_preds_student, weak_preds_student = self.detect(self.sed_student, feats)
        strong_preds_teacher, weak_preds_teacher = self.detect(self.sed_teacher, feats)
        self.mt_loss.append(((weak_preds_teacher - weak_preds_student) ** 2).mean(dim=-1))
        self.teacher_preds.append(weak_preds_teacher.sum(-1))

    def on_predict_epoch_end(self):
        """Save the accumulated teacher predictions and MT losses as .npz."""
        save_dir = self.logger.log_dir + "/predictions/"
        if not os.path.exists(save_dir):
            os.makedirs(save_dir, exist_ok=True)
        np.savez(f"{save_dir}/teacher_preds.npz",
                 **{f"pred_{i}": pred.detach().cpu().numpy() for i, pred in enumerate(self.teacher_preds)}
        )
        np.savez(f"{save_dir}/mt_loss.npz",
                 **{f"loss_{i}": loss.detach().cpu().numpy() for i, loss in enumerate(self.mt_loss)}
        )
        print(f"Saved predictions to {save_dir}")

class TrainCLI(BaseCLI):
    """CLI wiring for the SED trainer: registers a ModelCheckpoint callback
    with project defaults plus the model-invariant options."""

    def add_arguments_to_parser(self, parser: LightningArgumentParser) -> None:
        """Register the checkpoint callback and set its default behaviour."""
        parser.add_lightning_class_args(ModelCheckpoint, "model_checkpoint")
        # Checkpoint every epoch on "val/metric" (lower is better); keep the
        # 10 best checkpoints and always keep the last one.
        checkpoint_defaults = {
            "model_checkpoint.filename": "epoch{epoch}_metric{val/metric:.4f}",
            "model_checkpoint.monitor": "val/metric",
            "model_checkpoint.mode": "min",
            "model_checkpoint.every_n_epochs": 1,
            "model_checkpoint.save_top_k": 10,
            "model_checkpoint.auto_insert_metric_name": False,
            "model_checkpoint.save_last": True,
        }
        parser.set_defaults(checkpoint_defaults)
        self.add_model_invariant_arguments_to_parser(parser)

if __name__ == '__main__':
    import warnings

    # Silence noisy third-party deprecation warnings during training runs.
    warnings.filterwarnings("ignore")

    # Launch the Lightning CLI; the data module is chosen via config
    # (subclass_mode_data) and the resolved config is written out.
    cli = TrainCLI(
        SEDBaseTrainer,
        pl.LightningDataModule,
        save_config_callback=MySaveConfigCallback,
        save_config_kwargs={'overwrite': True},
        subclass_mode_data=True,
    )