import numpy as np
import scipy
import scipy.signal
import torch
import torch.nn.functional as F
from torch.distributions import Categorical

def combined_shape(length, shape=None):
    """Build a buffer shape: `length` leading dim plus the per-item shape.

    Returns (length,) when shape is None, (length, shape) for a scalar
    shape, and (length, *shape) for a tuple/list shape.
    """
    if shape is None:
        return (length,)
    if np.isscalar(shape):
        return (length, shape)
    return (length, *shape)

def discount_cumsum(x, discount):
    """
    Magic from rllab for computing discounted cumulative sums of vectors.

    input:
        vector x,
        [x0,
         x1,
         x2]
    output:
        [x0 + discount * x1 + discount^2 * x2,
         x1 + discount * x2,
         x2]

    Requires `import scipy.signal` (a bare `import scipy` does not load
    the `signal` subpackage).
    """
    # lfilter with a = [1, -discount] realizes y[t] = x[t] + discount * y[t-1].
    # Running it over the reversed sequence and reversing the result turns that
    # forward recursion into the reverse-time discounted-return recursion.
    return scipy.signal.lfilter([1], [1, float(-discount)], x[::-1], axis=0)[::-1]

def scores_stacking(scores):
    """Stack a list of 1-D score tensors into a single 2-D tensor.

    When the tensors have differing lengths (ragged action spaces), each
    one is right-padded with -inf up to the longest length before
    stacking; -inf acts as a mask value for downstream softmax/argmax.
    Equal-length inputs are stacked directly.
    """
    distinct_sizes = {len(t) for t in scores}
    if len(distinct_sizes) > 1:
        widest = max(distinct_sizes)
        rows = [
            F.pad(t, (0, widest - len(t)), "constant", -torch.inf)
            for t in scores
        ]
        return torch.stack(rows)
    return torch.stack(scores)