"""This trainer includes the fundamental validation/test steps of SED
"""
import os
import torch
import sed_scores_eval

import numpy as np
import pandas as pd
import torchmetrics as tm
import pytorch_lightning as pl

from pathlib import Path
from copy import deepcopy
from pprint import pprint
from datasets.pred_decoder import batched_decode_preds, val_decode_preds
from evaluation import (
    MedianPool2d, 
    SEDMetrics,
    compute_psds_from_operating_points,
    compute_psds_from_scores,
    compute_per_intersection_macro_f1,
    log_sedeval_metrics,
)

class SEDCoreModule(pl.LightningModule):
    def __init__(
            self,
            data_paths,
            opt_configs,
            label_encoder,
            sed_model,
            opt_n_schd,
            train_loader,
            val_loader,
            test_loader,):
        """Set up the shared student/teacher SED trainer state.

        Args:
            data_paths: dict of dataset locations; this class reads
                "weak_folder", "synth_val_folder", "strong_folder",
                "test_tsv" and "test_dur".
            opt_configs: dict of options; this class reads "median_window",
                "n_test_thresholds" and "ema_factor".
            label_encoder: encoder exposing a ``labels`` sequence (sizes the
                weak F1 metrics) and passed to the prediction decoder.
            sed_model: SED network used as the student; the teacher is a
                ``deepcopy`` of it, updated via EMA in ``update_ema``.
            opt_n_schd: optimizer/scheduler configuration returned verbatim
                from ``configure_optimizers``.
            train_loader: dataloader returned by ``train_dataloader``.
            val_loader: dataloader returned by ``val_dataloader``.
            test_loader: dataloader returned by ``test_dataloader``.
        """
        super(SEDCoreModule, self).__init__()
        self.data_paths = data_paths
        self.opt_configs = opt_configs
        self.label_encoder = label_encoder
        self.sed_student = sed_model
        self.opt_n_schd = opt_n_schd
        self.train_loader = train_loader
        self.val_loader = val_loader
        self.test_loader = test_loader
        # teacher starts as an exact copy of the student
        self.sed_teacher = deepcopy(sed_model)
        self.supervised_loss = torch.nn.BCELoss()

        # validation and evaluation
        # weak: clip-level macro F1, accumulated across validation batches
        self.get_weak_student_f1 = tm.F1Score(
            task="multilabel",
            num_labels=len(self.label_encoder.labels),
            average="macro",
            # NOTE(review): compute_on_step was removed in torchmetrics >= 0.8
            # — confirm the pinned torchmetrics version still accepts it.
            compute_on_step=False,
        )
        # teacher metric must be a copy of the (already configured) student one
        self.get_weak_teacher_f1 = deepcopy(self.get_weak_student_f1)
        # strong: event-level metrics at intersection threshold 0.5
        self.median_filter = MedianPool2d(opt_configs["median_window"], same=True)
        self.strong_synth_metrics_student = SEDMetrics(intersection_thd=0.5)
        self.strong_synth_metrics_teacher = SEDMetrics(intersection_thd=0.5)
        self.strong_real_metrics_student = SEDMetrics(intersection_thd=0.5)
        self.strong_real_metrics_teacher = SEDMetrics(intersection_thd=0.5)
        # test: n operating points evenly spaced in (0, 1), centred on 0.5
        test_n_thresholds = opt_configs["n_test_thresholds"]
        self.test_thresholds = np.arange(
            1 / (test_n_thresholds * 2), 1, 1 / test_n_thresholds
        )
        # per-threshold decoded-event buffers, keyed by threshold value
        self.test_psds_buffer_student = {k: pd.DataFrame() for k in self.test_thresholds}
        self.test_psds_buffer_teacher = {k: pd.DataFrame() for k in self.test_thresholds}
        self.test_scores_postprocessed_buffer_student = {}
        self.test_scores_postprocessed_buffer_teacher = {}
        # decoded events at the ~0.5 threshold (see test_step)
        self.decoded_05_buffer_student = pd.DataFrame({})
        self.decoded_05_buffer_teacher = pd.DataFrame({})
        # placeholders: per-batch losses, averaged and cleared every epoch
        self.loss_weak_student = []
        self.loss_weak_teacher = []
        self.loss_synth_student = []
        self.loss_synth_teacher = []
        self.loss_real_student = []
        self.loss_real_teacher = []
        self.loss_test_student = []
        self.loss_test_teacher = []

    def configure_optimizers(self):
        """Return the optimizer/scheduler configuration supplied at init."""
        return self.opt_n_schd

    def train_dataloader(self):
        """Return the training dataloader supplied at init."""
        return self.train_loader
    
    def val_dataloader(self):
        """Return the validation dataloader supplied at init."""
        return self.val_loader
    
    def test_dataloader(self):
        """Return the test dataloader supplied at init."""
        return self.test_loader

    def on_before_zero_grad(self, *args, **kwargs):
        # Lightning hook: runs once per optimizer step, so the EMA teacher is
        # refreshed from the freshly updated student weights here.
        # NOTE(review): ``_step_count`` is a private attribute of the LR
        # scheduler — confirm it survives torch/lightning upgrades.
        self.update_ema(
            self.opt_configs["ema_factor"],
            self.lr_schedulers()._step_count,
            self.sed_student,
            self.sed_teacher,
        )

    def update_ema(self, alpha, global_step, model, ema_model):
        """Exponential-moving-average update of the teacher parameters.

        In place, for every parameter pair: ``ema = alpha * ema + (1 - alpha) * p``.

        Args:
            alpha: EMA decay factor in [0, 1].
            global_step: current optimizer step; currently unused but kept for
                interface compatibility (see NOTE below).
            model: source network (the student).
            ema_model: network updated in place (the teacher).
        """
        # NOTE: a warm-up that uses the true average until the EMA is reliable
        # would be ``alpha = min(1 - 1 / (global_step + 1), alpha)``; disabled.
        # The original zipped named_parameters() only to discard the names —
        # zip the parameter iterators directly instead.
        for ema_param, param in zip(ema_model.parameters(), model.parameters()):
            ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)

    def training_step(self, batch, batch_idx):
        """No-op here — presumably overridden by a concrete trainer; this base
        class only provides the shared validation/test steps (see the module
        docstring)."""
        pass

    def validation_step(self, batch, batch_idx):
        """Run student and teacher on one validation batch.

        A batch may mix files from the weak, synthetic-strong and real-strong
        validation folders; each subset is selected by a filename mask and
        contributes its own BCE loss and F1 statistics.

        Args:
            batch: tuple ``(feats, labels, _, filenames)``.
            batch_idx: batch index (unused, required by Lightning).
        """
        feats, labels, _, filenames = batch
        # student / teacher predictions
        strong_preds_student, weak_preds_student = self.sed_student(feats)
        strong_preds_teacher, weak_preds_teacher = self.sed_teacher(feats)

        def folder_mask(path_key):
            # Boolean mask over the batch: True where the file lives directly
            # inside the folder configured under ``data_paths[path_key]``.
            folder = str(Path(self.data_paths[path_key]))
            return (
                torch.tensor([str(Path(x).parent) == folder for x in filenames])
                .to(feats)
                .bool()
            )

        mask_weak = folder_mask("weak_folder")
        mask_synth = folder_mask("synth_val_folder")
        mask_real = folder_mask("strong_folder")
        # NOTE: the previous version re-filtered ``filenames`` inside the synth
        # and real branches; the filtered lists were never used and the second
        # filter operated on the already-filtered list — dead code, removed.

        if torch.any(mask_weak):
            # collapse frame-level labels to clip-level weak labels
            labels_weak = (torch.sum(labels[mask_weak], -1) >= 1).float()
            loss_weak_student = self.supervised_loss(
                weak_preds_student[mask_weak], labels_weak
            )
            loss_weak_teacher = self.supervised_loss(
                weak_preds_teacher[mask_weak], labels_weak
            )
            self.loss_weak_student.append(loss_weak_student.item())
            self.loss_weak_teacher.append(loss_weak_teacher.item())

            # accumulate f1 score for weak labels
            self.get_weak_student_f1(
                weak_preds_student[mask_weak], labels_weak.int()
            )
            self.get_weak_teacher_f1(
                weak_preds_teacher[mask_weak], labels_weak.int()
            )

        if torch.any(mask_synth):
            loss_synth_student = self.supervised_loss(
                strong_preds_student[mask_synth], labels[mask_synth]
            )
            loss_synth_teacher = self.supervised_loss(
                strong_preds_teacher[mask_synth], labels[mask_synth]
            )
            self.loss_synth_student.append(loss_synth_student.item())
            self.loss_synth_teacher.append(loss_synth_teacher.item())

            # decode at the fixed 0.5 threshold and accumulate event-level F1
            decode_strong_student = val_decode_preds(
                strong_preds_student[mask_synth], [0.5], self.median_filter
            )
            decode_strong_teacher = val_decode_preds(
                strong_preds_teacher[mask_synth], [0.5], self.median_filter
            )
            self.strong_synth_metrics_student.accm_macro_f1(
                decode_strong_student, labels[mask_synth]
            )
            self.strong_synth_metrics_teacher.accm_macro_f1(
                decode_strong_teacher, labels[mask_synth]
            )

        if torch.any(mask_real):
            loss_real_student = self.supervised_loss(
                strong_preds_student[mask_real], labels[mask_real]
            )
            loss_real_teacher = self.supervised_loss(
                strong_preds_teacher[mask_real], labels[mask_real]
            )
            self.loss_real_student.append(loss_real_student.item())
            self.loss_real_teacher.append(loss_real_teacher.item())

            decode_strong_student = val_decode_preds(
                strong_preds_student[mask_real], [0.5], self.median_filter
            )
            decode_strong_teacher = val_decode_preds(
                strong_preds_teacher[mask_real], [0.5], self.median_filter
            )
            self.strong_real_metrics_student.accm_macro_f1(
                decode_strong_student, labels[mask_real]
            )
            self.strong_real_metrics_teacher.accm_macro_f1(
                decode_strong_teacher, labels[mask_real]
            )
        return

    def on_validation_epoch_end(self):
        """Aggregate, log and reset the epoch's validation metrics.

        Returns:
            The teacher-based objective metric (also logged as "obj_metric").
        """

        def safe_mean(losses):
            # np.mean([]) emits a RuntimeWarning before returning nan; make
            # the empty case (subset absent from every batch) explicit.
            return np.mean(losses) if losses else float("nan")

        # weak (clip-level) F1
        weak_student_f1 = self.get_weak_student_f1.compute()
        weak_teacher_f1 = self.get_weak_teacher_f1.compute()
        # strong (event-level) F1
        synth_student_f1 = self.strong_synth_metrics_student.compute_macro_f1()
        synth_teacher_f1 = self.strong_synth_metrics_teacher.compute_macro_f1()
        real_student_f1 = self.strong_real_metrics_student.compute_macro_f1()
        real_teacher_f1 = self.strong_real_metrics_teacher.compute_macro_f1()

        # teacher metrics drive model selection
        obj_metric = (real_teacher_f1 + synth_teacher_f1) / 2 + weak_teacher_f1
        self.log("obj_metric", obj_metric, prog_bar=True, on_epoch=True)

        # same names, values and order as the previous hand-written log calls
        for name, value in (
            ("val/student/weak_f1", weak_student_f1),
            ("val/student/synth_f1", synth_student_f1),
            ("val/student/real_f1", real_student_f1),
            ("val/teacher/weak_f1", weak_teacher_f1),
            ("val/teacher/synth_f1", synth_teacher_f1),
            ("val/teacher/real_f1", real_teacher_f1),
            ("val/student/loss_weak", safe_mean(self.loss_weak_student)),
            ("val/student/loss_synth", safe_mean(self.loss_synth_student)),
            ("val/student/loss_real", safe_mean(self.loss_real_student)),
            ("val/teacher/loss_weak", safe_mean(self.loss_weak_teacher)),
            ("val/teacher/loss_synth", safe_mean(self.loss_synth_teacher)),
            ("val/teacher/loss_real", safe_mean(self.loss_real_teacher)),
        ):
            self.log(name, value, prog_bar=False, on_epoch=True)

        # reset per-epoch loss accumulators
        self.loss_weak_student = []
        self.loss_weak_teacher = []
        self.loss_synth_student = []
        self.loss_synth_teacher = []
        self.loss_real_student = []
        self.loss_real_teacher = []

        self.get_weak_student_f1.reset()
        self.get_weak_teacher_f1.reset()
        # strong metrics need no reset: compute_macro_f1() resets them

        return obj_metric

    def psds1(self, input, ground_truth, audio_durations):
        """PSDS with the scenario-1 settings: tight detection/ground-truth
        collars (0.7) and no cross-trigger cost."""
        scenario1 = {
            "dtc_threshold": 0.7,
            "gtc_threshold": 0.7,
            "cttc_threshold": None,
            "alpha_ct": 0,
            "alpha_st": 1,
        }
        return compute_psds_from_scores(
            input, ground_truth, audio_durations, **scenario1
        )
    
    def psds2(self, input, ground_truth, audio_durations):
        """PSDS with the scenario-2 settings: loose collars (0.1) and a
        penalized cross-trigger rate (cttc 0.3, alpha_ct 0.5)."""
        scenario2 = {
            "dtc_threshold": 0.1,
            "gtc_threshold": 0.1,
            "cttc_threshold": 0.3,
            "alpha_ct": 0.5,
            "alpha_st": 1,
        }
        return compute_psds_from_scores(
            input, ground_truth, audio_durations, **scenario2
        )

    def test_step(self, batch, batch_idx):
        """Run both models on one test batch and accumulate decoded events.

        Accumulates per-batch test losses and, for each model, merges the
        decoded events at every test threshold into the PSDS buffers plus the
        ~0.5-threshold buffer used for event-based/intersection F1.

        Args:
            batch: tuple ``(feats, labels, _, filenames)``.
            batch_idx: batch index (unused; renamed from ``batch_indx`` for
                consistency with the other hooks — Lightning calls positionally).
        """
        feats, labels, _, filenames = batch
        strong_preds_student, _ = self.sed_student(feats)
        strong_preds_teacher, _ = self.sed_teacher(feats)

        self.loss_test_student.append(
            self.supervised_loss(strong_preds_student, labels).item()
        )
        self.loss_test_teacher.append(
            self.supervised_loss(strong_preds_teacher, labels).item()
        )

        def accumulate(preds, psds_buffer, scores_buffer, decoded_05_buffer):
            # Decode one model's predictions at every test threshold and merge
            # them into that model's running buffers; returns the updated
            # ~0.5-threshold buffer (pd.concat does not work in place).
            _, scores_postprocessed, decoded = batched_decode_preds(
                preds,
                filenames,
                self.label_encoder,
                median_filter=self.opt_configs["median_window"],
                thresholds=self.test_thresholds,
            )
            scores_buffer.update(scores_postprocessed)
            for thd in self.test_thresholds:
                psds_buffer[thd] = pd.concat(
                    [psds_buffer[thd], decoded[thd]], ignore_index=True
                )
            # middle of the threshold sweep: exactly 0.5 when
            # n_test_thresholds is odd, 0.5 + 1/(2n) otherwise
            mid_thd = self.test_thresholds[len(self.test_thresholds) // 2]
            return pd.concat(
                [decoded_05_buffer, decoded[mid_thd]], ignore_index=True
            )

        self.decoded_05_buffer_student = accumulate(
            strong_preds_student,
            self.test_psds_buffer_student,
            self.test_scores_postprocessed_buffer_student,
            self.decoded_05_buffer_student,
        )
        self.decoded_05_buffer_teacher = accumulate(
            strong_preds_teacher,
            self.test_psds_buffer_teacher,
            self.test_scores_postprocessed_buffer_teacher,
            self.decoded_05_buffer_teacher,
        )
        return

    def on_test_epoch_end(self):
        """Compute, log and pretty-print the full test-set metric suite.

        For both student and teacher: PSDS scenario-1/2 (via psds_eval
        operating points and via sed_scores_eval scores), event-based macro F1
        and per-intersection macro F1 at the ~0.5 threshold.
        """
        # The previous version dereferenced ``self.logger`` unconditionally
        # here while guarding it for log_metrics below; fall back to the CWD
        # so a logger-less run no longer crashes.
        log_dir = self.logger.log_dir if self.logger is not None else "."
        save_dir = os.path.join(log_dir, "metrics_test")

        ground_truth = sed_scores_eval.io.read_ground_truth_events(
            self.data_paths["test_tsv"]
        )
        audio_durations = sed_scores_eval.io.read_audio_durations(
            self.data_paths["test_dur"]
        )
        # drop audios without any annotated events
        ground_truth = {
            audio_id: gt for audio_id, gt in ground_truth.items() if len(gt) > 0
        }
        audio_durations = {
            audio_id: audio_durations[audio_id] for audio_id in ground_truth
        }

        results = {}
        for model_name, psds_buffer, scores_buffer, decoded_05 in (
            (
                "student",
                self.test_psds_buffer_student,
                self.test_scores_postprocessed_buffer_student,
                self.decoded_05_buffer_student,
            ),
            (
                "teacher",
                self.test_psds_buffer_teacher,
                self.test_scores_postprocessed_buffer_teacher,
                self.decoded_05_buffer_teacher,
            ),
        ):
            results.update(
                self._test_metrics_for_model(
                    model_name,
                    os.path.join(save_dir, model_name),
                    psds_buffer,
                    scores_buffer,
                    decoded_05,
                    ground_truth,
                    audio_durations,
                )
            )

        if self.logger is not None:
            self.logger.log_metrics(results)

        pprint(results)

    def _test_metrics_for_model(
        self,
        model_name,
        model_dir,
        psds_buffer,
        scores_buffer,
        decoded_05,
        ground_truth,
        audio_durations,
    ):
        # Compute the six test metrics for one model ("student" or "teacher"),
        # returning them keyed as "test/<model_name>/<metric>".
        scenario1_dir = os.path.join(model_dir, "scenario1")
        scenario2_dir = os.path.join(model_dir, "scenario2")
        test_tsv = self.data_paths["test_tsv"]
        test_dur = self.data_paths["test_dur"]

        # scenario 1: tight collars (0.7), no cross-trigger cost
        psds1_psds_eval = compute_psds_from_operating_points(
            psds_buffer,
            test_tsv,
            test_dur,
            dtc_threshold=0.7,
            gtc_threshold=0.7,
            alpha_ct=0,
            alpha_st=1,
            save_dir=scenario1_dir,
        )
        psds1_sed_scores_eval = compute_psds_from_scores(
            scores_buffer,
            ground_truth,
            audio_durations,
            dtc_threshold=0.7,
            gtc_threshold=0.7,
            cttc_threshold=None,
            alpha_ct=0,
            alpha_st=1,
            save_dir=scenario1_dir,
        )
        # scenario 2: loose collars (0.1), cross-trigger penalized
        psds2_psds_eval = compute_psds_from_operating_points(
            psds_buffer,
            test_tsv,
            test_dur,
            dtc_threshold=0.1,
            gtc_threshold=0.1,
            cttc_threshold=0.3,
            alpha_ct=0.5,
            alpha_st=1,
            save_dir=scenario2_dir,
        )
        psds2_sed_scores_eval = compute_psds_from_scores(
            scores_buffer,
            ground_truth,
            audio_durations,
            dtc_threshold=0.1,
            gtc_threshold=0.1,
            cttc_threshold=0.3,
            alpha_ct=0.5,
            alpha_st=1,
            save_dir=scenario2_dir,
        )
        # event-based macro F1 at the ~0.5 operating point
        event_macro = log_sedeval_metrics(decoded_05, test_tsv, model_dir)[0]
        intersection_f1_macro = compute_per_intersection_macro_f1(
            {"0.5": decoded_05},
            test_tsv,
            test_dur,
        )

        prefix = "test/" + model_name
        return {
            prefix + "/psds1_psds_eval": psds1_psds_eval,
            prefix + "/psds1_sed_scores_eval": psds1_sed_scores_eval,
            prefix + "/psds2_psds_eval": psds2_psds_eval,
            prefix + "/psds2_sed_scores_eval": psds2_sed_scores_eval,
            prefix + "/event_f1_macro": event_macro,
            prefix + "/intersection_f1_macro": intersection_f1_macro,
        }