### The base class for abstract trainer class
import torch
import os
import time
import torch.distributed as dist
from datetime import timedelta

# from funcodec.iterators.sequence_iter_factory import SequenceIterFactory
from utils.utils import Logger

from .helper import dict_to_str, save
from utils.hinter import hint_once 
# from utils.postprocess import MaxLength
# from funcodec.bin.codec_inference import Speech2Token
from utils.dprint import dprint

import torch.nn.functional as F


def sisnr(x, s, eps=1e-8):
    """Scale-invariant SNR in dB between an estimate and a reference.

    Args:
        x: separated signal, N x S tensor
        s: reference signal, N x S tensor
    Returns:
        per-utterance SI-SNR, N tensor
    Raises:
        RuntimeError: if x and s have different shapes.
    """

    def vec_norm(mat, keepdim=False):
        # Euclidean norm along the last (time) axis
        return torch.norm(mat, dim=-1, keepdim=keepdim)

    if x.shape != s.shape:
        raise RuntimeError(
            "Dimention mismatch when calculate si-snr, {} vs {}".format(
                x.shape, s.shape))

    # remove per-utterance DC offset so the projection is scale-invariant
    x_centered = x - x.mean(dim=-1, keepdim=True)
    s_centered = s - s.mean(dim=-1, keepdim=True)

    # project the estimate onto the reference direction
    dot = torch.sum(x_centered * s_centered, dim=-1, keepdim=True)
    target = dot * s_centered / (vec_norm(s_centered, keepdim=True) ** 2 + eps)
    residual = x_centered - target

    return 20 * torch.log10(eps + vec_norm(target) / (vec_norm(residual) + eps))


def batchMean_sisnrLoss(est, clean, eps=1e-8):
    """Batch-averaged negative SI-SNR, suitable as a minimization loss."""
    return -sisnr(est, clean, eps).mean()




def get_avg_result(res: dict):
    """Average each scalar tensor in `res` across every distributed rank.

    Returns a new dict mapping the same keys to plain Python floats.
    """
    world_size = dist.get_world_size()
    averaged = {}
    for key, value in res.items():
        # gather this rank's tensor from all processes
        gathered = [torch.zeros_like(value) for _ in range(world_size)]
        dist.all_gather(gathered, value)
        averaged[key] = sum(t.item() for t in gathered) / world_size
    return averaged


class Trainer:
    """Distributed trainer for a multi-output target-speaker separation model.

    Training minimizes a weighted multi-stage SI-SNR loss plus a speaker
    classification cross-entropy term. Rank 0 performs all logging and
    checkpointing; evaluation metrics are averaged across all ranks.
    """

    def __init__(
        self,
        model,
        tr_data,
        cv_data,
        optim,
        scheduler,
        config,
        ckpt_dir,
        rank,
        logger: Logger,
        resume: str
    ):
        """
        Args:
            model: DDP-wrapped model (``model.module`` is used when saving/loading).
            tr_data: training data loader (has a distributed ``sampler``).
            cv_data: validation data loader.
            optim: optimizer.
            scheduler: LR scheduler; stepped with the eval metric every epoch.
            config: namespace providing log_interval, max_ckpt, best_field,
                best_save_type, grad_clip and epoch.
            ckpt_dir: directory where checkpoints are written.
            rank: distributed rank of this process.
            logger: logger used on rank 0.
            resume: checkpoint path to resume from, or "" to start fresh.
        """
        self.model = model
        self.tr_data = tr_data
        self.cv_data = cv_data
        self.config = config
        self.epoch_start = 0
        self.step = 0
        self.step_left = 0  # initialized properly at the start of train()
        self.optim = optim
        self.rank = rank
        self.log_interval = config.log_interval
        self.logger = logger
        self.max_ckpt = config.max_ckpt
        self.best_field = config.best_field
        self.best_value = None
        self.best_save_type = config.best_save_type
        self.grad_clip = config.grad_clip
        self.ckpt_dir = ckpt_dir
        self.scheduler = scheduler
        self.cv_log = {}
        self.epoch_duration = None  # wall-clock duration of the last epoch

        if resume != "":
            self._log(f"loading model from {resume}...")
            ckpt = torch.load(resume, map_location="cpu")
            self.model.module.load_state_dict(ckpt["model_state_dict"])
            # fix: the optimizer state was previously loaded twice
            self.optim.load_state_dict(ckpt["optim"])
            self.epoch_start = ckpt["epoch"] + 1
            self.step = ckpt["step"]
            self.cv_log = ckpt["cv_log"]
            self.best_value = ckpt[self.best_field]
            self.scheduler = ckpt["scheduler"]

    def _forward_losses(self, data):
        """Run the model on one batch; return (snr_loss, spk_pred, spk_id, mix)."""
        mix, clean, regi, spk_id, _, _ = data
        mix, clean, regi, spk_id = (
            mix.cuda(), clean.cuda(), regi.cuda(), spk_id.cuda()
        )
        # assumes every enrollment utterance in the batch has the same length
        regi_len = torch.full((regi.size(0),), regi.size(1), device=regi.device)  # [B]
        ests, ests2, ests3, spk_pred = self.model(mix, regi, regi_len)
        # weighted multi-stage SI-SNR loss, averaged over the batch
        snr_loss = (
            -0.8 * torch.sum(sisnr(ests, clean))
            - 0.1 * torch.sum(sisnr(ests2, clean))
            - 0.1 * torch.sum(sisnr(ests3, clean))
        ) / mix.size(0)
        return snr_loss, spk_pred, spk_id, mix

    def _train_one_batch(self, batch: int, data, optim, if_log, epoch) -> dict:
        """Run one optimization step; return a stats dict when if_log, else None."""
        snr_loss, spk_pred, spk_id, _ = self._forward_losses(data)
        ce_loss = F.cross_entropy(spk_pred, spk_id)
        loss = snr_loss + 0.5 * ce_loss

        loss.backward()
        torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_clip)
        optim.step()
        optim.zero_grad()

        if if_log:
            return {"loss": loss, "snr": snr_loss, "ce": ce_loss}
        return None

    def _eval_one_batch(self, data) -> dict:
        """Compute validation losses for one batch (no gradient step)."""
        snr_loss, spk_pred, spk_id, mix = self._forward_losses(data)

        if (spk_id == -1).any():
            # -1 marks an unknown speaker label: skip the CE term for this batch
            ce_loss = torch.tensor(0.0, device=mix.device)
        else:
            ce_loss = F.cross_entropy(spk_pred, spk_id)

        loss = snr_loss + 0.5 * ce_loss
        return {"loss": loss, "snr": snr_loss, "ce": ce_loss}

    def _log(self, msg):
        """Log msg on rank 0 only."""
        if self.rank == 0:
            self.logger.info(msg)

    def _save(self, model, cv_log, epoch, optim, path, step, save_best: bool):
        """Write a checkpoint on rank 0; optionally also copy it to best.pth."""
        if self.rank != 0:
            return
        self._log(f"saving model... for epoch {epoch}")
        content = {
            "epoch": epoch,
            "step": step,
            "model_state_dict": model.module.state_dict(),
            "optim": optim.state_dict(),
            "cv_log": cv_log,
            "scheduler": self.scheduler,
            self.best_field: self.best_value,
        }
        save(path, content, epoch, self.max_ckpt)
        if save_best:
            self._log(f"saving the best model of epoch {epoch}")
            torch.save(content, path.replace(f"epoch{epoch}.pth", "best.pth"))

    def _train(self, optim, tr_data, epoch):
        """Run one training epoch, logging every log_interval batches."""
        self.model.train()
        start_time = time.time()
        _epoch_start_time = time.time()
        for batch, data in enumerate(tr_data):
            if_log = batch % self.log_interval == 0
            res = self._train_one_batch(batch, data, optim, if_log, epoch)
            if if_log:
                res["epoch"] = f"{epoch}/{self.config.epoch}"
                time_per_batch = (time.time() - start_time) / self.log_interval
                if self.epoch_duration is None:
                    # first epoch: estimate ETA from the recent per-batch time
                    eta = timedelta(seconds=self.step_left * time_per_batch)
                else:
                    # later epochs: estimate ETA from the measured epoch duration
                    eta = timedelta(
                        seconds=self.step_left * (self.epoch_duration / len(tr_data))
                    )
                res["p"] = f"[{self.step}/{self.step + self.step_left}|({str(eta)})]"
                res["time/batch"] = f"{time_per_batch}s"
                start_time = time.time()
                self._log(f"tr, {dict_to_str(res)}")
            self.step += 1
            self.step_left -= 1
        self.epoch_duration = time.time() - _epoch_start_time

    def _eval(self, cv_data, epoch):
        """Evaluate on cv_data, average across ranks, return the best-field value."""
        self.model.eval()
        result = None
        if self.rank == 0:
            print(f"evaluating on cv_data of len {len(cv_data)}")
        with torch.no_grad():
            for data in cv_data:
                res = self._eval_one_batch(data)
                if result is None:
                    result = res
                else:
                    for key in result:
                        result[key] += res[key]
        if result is None:
            # previously this fell through and crashed with an AttributeError
            raise ValueError("cv_data yielded no batches; cannot evaluate")
        for key in result:
            result[key] = result[key] / len(cv_data)
        # average the per-rank means over all processes
        result = get_avg_result(result)
        self._log(f"eval epoch {epoch} {dict_to_str(result)}")
        if epoch != -1:
            self.cv_log[epoch] = result
        return result[self.best_field]

    def train(self):
        """Main loop: baseline eval, then train / eval / checkpoint each epoch."""
        self._eval(self.cv_data, -1)  # baseline evaluation before any training

        for epoch in range(self.epoch_start, self.config.epoch):
            self._log(f"...epoch {epoch}...")
            tr_data = self.tr_data
            cv_data = self.cv_data
            tr_data.sampler.set_epoch(epoch)  # reshuffle for the distributed sampler

            # total optimization steps remaining (used for ETA reporting)
            if epoch == self.epoch_start:
                self.step_left = int(
                    (self.config.epoch - self.epoch_start) * len(tr_data)
                )

            self._train(self.optim, tr_data, epoch)
            result = self._eval(cv_data, epoch)

            # track the best checkpoint according to best_field / best_save_type
            if self.best_value is None:
                save_best = True
                self.best_value = result
            else:
                save_best = (
                    result > self.best_value
                    if self.best_save_type == "ascend"
                    else result < self.best_value
                )
                if save_best:
                    self.best_value = result

            self._save(
                self.model,
                self.cv_log,
                epoch,
                self.optim,
                os.path.join(self.ckpt_dir, f"epoch{epoch}.pth"),
                self.step,
                save_best,
            )
            self.scheduler.step(result)
            dist.barrier()  # keep all ranks in lockstep across epochs
