from collections import defaultdict
import functools
import h5py
import numpy as np


def groupby(iterable, key):
    """Partition *iterable* into lists bucketed by ``key(item)``.

    Unlike :func:`itertools.groupby`, the input does not need to be
    sorted. Returns a ``defaultdict(list)`` mapping each key to the
    items that produced it, in encounter order.
    """
    grouped = defaultdict(list)
    for element in iterable:
        grouped[key(element)].append(element)
    return grouped


def read_from_h5_file(h5_file_path, with_cache):
    """Return a reader ``path -> ndarray`` over the HDF5 file at *h5_file_path*.

    The file is opened read-only once and stays open for the lifetime of
    the returned reader (it is never explicitly closed). When
    *with_cache* is true, results are memoized per dataset path with an
    unbounded ``lru_cache``.
    """
    h5 = h5py.File(h5_file_path, 'r')

    def reader(path):
        # ``[...]`` materializes the whole dataset as an in-memory array.
        return h5[path][...]

    return functools.lru_cache(None)(reader) if with_cache else reader


# Convenience specializations of read_from_h5_file with the caching
# behavior pre-bound; callers supply only the file path.
read_from_h5_file_with_cache = functools.partial(read_from_h5_file, with_cache=True)
read_from_h5_file_without_cache = functools.partial(read_from_h5_file, with_cache=False)


def read_partly_from_h5(h5_file_path, with_cache):
    """Return a reader for rectangular windows of datasets in an HDF5 file.

    The returned function takes ``(dataset, starts, stops)`` where
    *starts*/*stops* give per-axis window bounds. Bounds may fall
    outside the dataset: out-of-range regions are filled by replicating
    the dataset's edge values (``numpy.pad`` mode ``'edge'``), so the
    result always has shape ``stops - starts``.

    The file is opened read-only once and stays open for the lifetime
    of the returned reader. When *with_cache* is true, results are
    memoized with an unbounded ``lru_cache`` — the argument must then be
    hashable (tuples, not lists).
    """
    h5_file_obj = h5py.File(h5_file_path, 'r')

    def helper(dataset_and_slice):
        dataset, starts, stops = dataset_and_slice
        target_shape = tuple(e - b for b, e in zip(starts, stops))
        dataset_shape = h5_file_obj[dataset].shape
        # Padding needed on the low/high side of each axis wherever the
        # requested window extends past the dataset bounds; in-bounds
        # axes come out negative and are clipped to zero below.
        pad_width = np.asarray(
            [(0 - b, e - s) for b, e, s in zip(starts, stops, dataset_shape)],
            'int')
        pad_width[pad_width < 0] = 0
        # Read only the in-bounds part of the window. Slicing clamps an
        # oversized stop automatically; only negative starts need clipping.
        # Built per-axis so this works for datasets of any rank, not just 3-D.
        in_bounds = tuple(slice(max(b, 0), e) for b, e in zip(starts, stops))
        raw = np.asarray(h5_file_obj[dataset][in_bounds])
        padded = np.pad(raw, pad_width, 'edge')
        assert padded.shape == target_shape
        return padded

    if with_cache:
        return functools.lru_cache(None)(helper)
    else:
        return helper


def normalization(vmin, vmax):
    """Return a function clipping its input to [vmin, vmax] and rescaling to [0, 1]."""
    span = vmax - vmin

    def scale(values):
        clipped = np.clip(np.asarray(values), vmin, vmax)
        return (clipped - vmin) / span

    return scale


def append_axis():
    """Return a function that appends a trailing length-1 axis to its input."""
    def expand(values):
        return np.asarray(values)[..., None]

    return expand


def astype(dtype):
    """Return a converter that casts its input to *dtype* as a numpy array."""
    def convert(values):
        return np.asarray(values, dtype=dtype)

    return convert
