"""

Array_tools.

"""
import numpy as np
import torch

def one_hot(x, dim):
    """
    Transform x into its one-hot representation.

    Args:
    - x: :numpy.ndarray: (n,) the input data (integer class indices).
    - dim: :math: the dimension of the one-hot representation.

    Returns:
    - onehot: :numpy.ndarray: (n, dim) the one-hot representation matrix.
    """
    count = len(x)
    encoded = np.zeros((count, dim))
    # Fancy indexing: row i gets a 1 at column x[i].
    encoded[np.arange(count), x] = 1
    return encoded


# The code below comes from the PyTorch Forums.
# It rewrites the collate_fn for our DataLoader.
def pad_tensor(vec, pad, dim):
    """
    Pad a tensor with zeros along one dimension.

    args:
        vec - tensor (or numpy array, which is converted) to pad
        pad - the size to pad to along dimension 'dim'
        dim - dimension to pad

    return:
        a new tensor zero-padded to size 'pad' in dimension 'dim',
        with the same dtype and device as the input
    """
    if not isinstance(vec, torch.Tensor):
        vec = torch.from_numpy(vec)
    pad_size = list(vec.shape)
    pad_size[dim] = pad - vec.size(dim)
    # Match the input's dtype/device: the default float32 zeros would make
    # torch.cat fail for integer tensors (e.g. LongTensor token ids) and
    # for tensors living on a non-CPU device.
    padding = torch.zeros(*pad_size, dtype=vec.dtype, device=vec.device)
    return torch.cat([vec, padding], dim=dim)


class PadCollate:
    """
    A variant of collate_fn that pads every sequence in a batch to the
    length of the longest sequence before stacking.
    """

    def __init__(self, dim=0):
        """
        args:
            dim - the dimension to be padded (dimension of time in sequences)
        """
        self.dim = dim

    def pad_collate(self, batch):
        """
        args:
            batch - list of (tensor, label) pairs

        return:
            xs - a tensor of all examples in 'batch' after padding,
                 stacked along a new leading dimension
            ys - a LongTensor of all labels in the batch
        """
        # Longest sequence length along the padding dimension.
        target_len = max(sample[0].shape[self.dim] for sample in batch)
        # Pad each example up to target_len, keeping its label.
        padded = [(pad_tensor(seq, pad=target_len, dim=self.dim), label)
                  for seq, label in batch]

        # Stack examples and collect labels.
        xs = torch.stack([seq for seq, _ in padded], dim=0)
        ys = torch.LongTensor([label for _, label in padded])
        return xs, ys

    def __call__(self, batch):
        return self.pad_collate(batch)
    
