from data.hdf5_dataset import HDF5Dataset
from time import time
import numpy as np
import torch
import scipy
from pathlib import Path

from typing import Tuple, Optional, List
from torch import Tensor


def load_chunk_of_data(dataset: HDF5Dataset, chunk_size: int, allowed_labels: Optional[List[int]]=None, time_encoded=True,
                       sample_normalization=False, balanced=False, validation=False) -> Tuple[Tensor, ...]:
    """Draw a random chunk of samples from ``dataset``.

    Args:
        dataset: HDF5-backed dataset supporting ``len()``, fancy indexing
            (``dataset[indices]`` returning ``(X_ecg, X_cwt, rr, y)``) and a
            ``y`` label array.
        chunk_size: total number of samples to draw.
        allowed_labels: labels to draw from; required when ``balanced=True``.
        time_encoded: if False, overwrite the last channel of each sample with
            its 0..T-1 position index (padded timesteps stay at -1).
        sample_normalization: if True, min-max scale the non-padded part of
            each sample's value channels to [0, 1] in place.
        balanced: draw ``chunk_size // len(allowed_labels)`` samples per label
            instead of sampling uniformly over the whole dataset.
        validation: when balanced, always sample with replacement.

    Returns:
        Tuple ``(X_ecg, X_cwt, rr, y)`` as produced by ``dataset[indices]``.

    Raises:
        ValueError: if ``balanced=True`` but ``allowed_labels`` is None.
    """
    print(f"Loading chunk of size {chunk_size:,}...")
    ts = time()
    if balanced:
        if allowed_labels is None:
            raise ValueError("allowed_labels must be provided when balanced=True")
        y = dataset.y[:]
        selected_indices = []
        sample_size = chunk_size // len(allowed_labels)
        for label in allowed_labels:
            label_indices = np.where(y == label)[0]
            # BUG FIX: the original wrote `len(label_indices < sample_size)` -- the length of a
            # boolean array, which is truthy whenever the label occurs at all -- so sampling was
            # effectively always done with replacement. Replacement is only needed when validating
            # or when the label has fewer samples than requested.
            replace = validation or len(label_indices) < sample_size
            selected_indices.append(np.random.choice(label_indices, sample_size, replace=replace))
        chunk_indices = np.concatenate(selected_indices)
    else:
        chunk_indices = np.sort(np.random.choice(len(dataset), chunk_size))
    X_ecg, X_cwt, rr, y = dataset[chunk_indices]
    if not time_encoded:
        # Replace the time-encoding channel with plain 0..T-1 positions, keeping -1 on padding.
        # NOTE(review): the padding mask is derived from X_ecg but applied to X_cwt as well --
        # assumes both tensors share the same padding layout; confirm against the dataset format.
        mask = X_ecg[:, :, -1] >= 0
        X_ecg[:, :, -1] = mask * torch.arange(0, X_ecg.shape[1]) + mask.logical_not() * -1
        X_cwt[:, :, -1] = mask * torch.arange(0, X_cwt.shape[1]) + mask.logical_not() * -1
    if sample_normalization:
        for i in range(len(y)):
            ecg_sample = X_ecg[i]
            cwt_sample = X_cwt[i]
            # The first negative value in the last channel marks the start of padding;
            # normalize only the real (non-padded) timesteps.
            nonzero = (ecg_sample[:, -1] < .0).nonzero()
            index = nonzero[0] if len(nonzero) > 0 else len(ecg_sample)
            ecg_sample_min, ecg_sample_max = ecg_sample[:index, :-1].min(), ecg_sample[:index, :-1].max()
            X_ecg[i, :index, :-1] -= ecg_sample_min
            X_ecg[i, :index, :-1] /= ecg_sample_max - ecg_sample_min
            cwt_sample_min, cwt_sample_max = cwt_sample[:index, :-1].min(), cwt_sample[:index, :-1].max()
            X_cwt[i, :index, :-1] -= cwt_sample_min
            X_cwt[i, :index, :-1] /= cwt_sample_max - cwt_sample_min
    print(f"Loading took {time() - ts:.2f}s\n")
    return X_ecg, X_cwt, rr, y


class NoamOpt:
    """Optimizer wrapper implementing the Noam learning-rate schedule.

    The learning rate rises linearly for ``warmup`` steps, then decays with
    the inverse square root of the step number, scaled by ``factor`` and
    ``model_size ** -0.5``.
    """

    def __init__(self, model_size, factor, warmup, optimizer):
        self.optimizer = optimizer
        self._step = 0
        self.warmup = warmup
        self.factor = factor
        self.model_size = model_size
        self._rate = 0

    def step(self):
        """Advance one step: refresh the learning rate, then step the optimizer."""
        self._step += 1
        lr = self.rate()
        self._rate = lr
        for group in self.optimizer.param_groups:
            group['lr'] = lr
        self.optimizer.step()

    def rate(self, step=None):
        """Return the scheduled learning rate for ``step`` (current step if None)."""
        if step is None:
            step = self._step
        scale = self.model_size ** (-0.5)
        schedule = min(step ** (-0.5), step * self.warmup ** (-1.5))
        return self.factor * (scale * schedule)


def create_log_file(path: Path):
    """Create a fresh ``log.csv`` in *path* containing only the header row.

    Columns are space-separated, matching the format appended by ``log_results``.
    """
    header = ['chunk_id', 'epoch', 'train_loss_mean', 'train_loss_std', 'train_acc_mean', 'train_acc_std', 'val_loss_mean', 'val_loss_std', 'val_acc_mean', 'val_acc_std']
    with open(path / 'log.csv', 'w') as log_file:
        log_file.write(' '.join(header) + '\n')


def log_results(log_path: Path, chunk_id: int, epoch: int,
                train_loss_mean: float, train_loss_std: float, train_acc_mean: float, train_acc_std: float,
                val_loss_mean: float, val_loss_std: float,  val_acc_mean: float, val_acc_std: float):
    """Append one space-separated row of train/validation statistics to ``log.csv`` in *log_path*."""
    row = (chunk_id, epoch, train_loss_mean, train_loss_std, train_acc_mean, train_acc_std,
           val_loss_mean, val_loss_std, val_acc_mean, val_acc_std)
    with open(log_path / 'log.csv', 'a') as log_file:
        log_file.write(' '.join(str(value) for value in row) + '\n')