import numpy as np
from scipy.stats import truncnorm


class CEM:
    """Cross-entropy method (CEM) optimizer used inside MPC.

    Repeatedly samples N action sequences from a truncated-Gaussian
    distribution, scores them with a learned dynamics model (``fake_env``),
    and refits the sampling distribution to the highest-return ("elite")
    sequences.
    """

    def __init__(self, n_sequence, elite_ratio, fake_env, upper_bound, lower_bound, alpha=0.25):
        # Number N of action sequences sampled per CEM iteration.
        self.n_sequence = n_sequence
        # Per-dimension action bounds; used to constrain the sampling variance
        # so that ~2-sigma samples stay inside the valid action range.
        self.upper_bound = upper_bound
        self.lower_bound = lower_bound
        # Fraction of sampled sequences kept as elites each iteration.
        self.elite_ratio = elite_ratio
        # Learned model with a propagate(states, actions) -> returns method.
        self.fake_env = fake_env
        # Smoothing factor: new distribution = alpha*old + (1-alpha)*elite fit.
        self.alpha = alpha

    def optimize(self, state, init_mean, init_var):
        """Optimize the action-sequence distribution from ``state``.

        Parameters
        ----------
        state : array-like, shape (state_dim,)
            Current environment state; tiled so every sampled sequence is
            rolled out from the same start state.
        init_mean, init_var : np.ndarray
            Initial mean and variance of the Gaussian over (flattened)
            action sequences.

        Returns
        -------
        np.ndarray
            The optimized mean of the action-sequence distribution.
        """
        # Assume the action distribution is Gaussian.
        mean, var = init_mean, init_var
        # Standard normal truncated to [-2, 2]; rescaled each iteration so
        # samples lie within +/- 2 sigma of the current mean.
        X = truncnorm(-2, 2, loc=np.zeros_like(mean), scale=np.ones_like(var))
        # Tile the state so each of the N sequences can be evaluated at once.
        state = np.tile(state, (self.n_sequence, 1))

        for _ in range(5):
            # Constrain the variance so that 2-sigma samples respect the
            # action bounds: sigma <= min(dist-to-lower, dist-to-upper) / 2.
            lb_dist, ub_dist = mean - self.lower_bound, self.upper_bound - mean
            constrained_var = np.minimum(
                np.minimum(np.square(lb_dist / 2), np.square(ub_dist / 2)), var)
            # Draw N sequences: standard truncated-normal samples scaled and
            # shifted to the current distribution. Build an ndarray explicitly
            # (the original relied on implicit list -> array coercion).
            samples = np.array([X.rvs() for _ in range(self.n_sequence)])
            action_sequences = samples * np.sqrt(constrained_var) + mean
            # Estimated cumulative reward of each sequence under the model.
            returns = self.fake_env.propagate(state, action_sequences)[:, 0]
            # Keep the top elite_ratio fraction of sequences by return.
            elites = action_sequences[np.argsort(returns)][-int(self.elite_ratio * self.n_sequence):]
            new_mean = np.mean(elites, axis=0)
            new_var = np.var(elites, axis=0)
            # Exponentially smoothed refit toward the elite statistics.
            # BUG FIX: the mixing weights must sum to 1. The original used
            # alpha*old + alpha*new, which scales the distribution by
            # 2*alpha (= 0.5 by default) every iteration and collapses it
            # toward zero instead of converging on the elites.
            mean = self.alpha * mean + (1 - self.alpha) * new_mean
            var = self.alpha * var + (1 - self.alpha) * new_var
        return mean
