import numpy as np
from scipy.stats import truncnorm

class CEM:
    """Cross-entropy method (CEM) shooting optimizer over action sequences.

    Candidate action sequences are drawn from a truncated normal
    distribution, scored by a learned model (``fake_env``), and the
    sampling distribution is refit to the elite (highest-return)
    candidates each iteration.
    """

    def __init__(self, n_sequence, elite_ratio, fake_env, upper_bound,
                 lower_bound, n_iter=5, smooth=0.1):
        """
        Args:
            n_sequence: number of candidate action sequences per iteration.
            elite_ratio: fraction of candidates kept as elites (0, 1].
            fake_env: model exposing ``propagate(state, actions)`` whose
                returned array's first column is each candidate's return.
            upper_bound: per-dimension upper action limit.
            lower_bound: per-dimension lower action limit.
            n_iter: number of CEM refinement iterations (default 5,
                matching the previous hard-coded value).
            smooth: weight kept on the previous mean/var when updating
                the distribution (default 0.1, matching the previous
                hard-coded 0.1/0.9 split).
        """
        self.n_sequence = n_sequence
        self.elite_ratio = elite_ratio
        self.upper_bound = upper_bound
        self.lower_bound = lower_bound
        self.fake_env = fake_env
        self.n_iter = n_iter
        self.smooth = smooth

    def optimize(self, state, init_mean, init_var):
        """Run CEM from ``state`` and return the optimized mean sequence.

        Args:
            state: start state; tiled so every candidate rolls out from it.
            init_mean: initial mean of the action-sequence distribution.
            init_var: initial variance of the action-sequence distribution.

        Returns:
            The final mean action sequence (same shape as ``init_mean``).
        """
        mean, var = init_mean, init_var
        # Standard normal truncated to (-2, 2); samples are scaled by
        # sqrt(var) and shifted by mean below, so draws stay within two
        # standard deviations of the current mean.
        X = truncnorm(-2, 2)
        # Replicate the start state so all n_sequence candidates begin
        # from the same state.
        state = np.tile(state, (self.n_sequence, 1))

        for _ in range(self.n_iter):
            # Distance from the current mean to each bound.
            lb_dist, ub_dist = mean - self.lower_bound, self.upper_bound - mean
            # Clip the variance so mean +/- 2*std stays inside the bounds
            # (the sampler is truncated at 2 standard deviations).
            constrained_var = np.minimum(
                np.minimum(np.square(lb_dist / 2), np.square(ub_dist / 2)),
                var)
            # One vectorized draw of shape (n_sequence, *mean.shape)
            # instead of a Python-level loop of per-sequence rvs() calls.
            samples = X.rvs(size=(self.n_sequence,) + np.shape(mean))
            action_sequences = samples * np.sqrt(constrained_var) + mean
            # First column of propagate() holds each candidate's return.
            returns = self.fake_env.propagate(state, action_sequences)[:, 0]
            # Keep the top elite_ratio fraction of candidates by return
            # (argsort is ascending, so elites are at the end).
            n_elite = int(self.elite_ratio * self.n_sequence)
            elites = action_sequences[np.argsort(returns)][-n_elite:]

            new_mean = np.mean(elites, axis=0)
            new_var = np.var(elites, axis=0)
            # Smoothed distribution update: keep `smooth` of the old
            # statistics, move the rest toward the elite statistics.
            mean = self.smooth * mean + (1 - self.smooth) * new_mean
            var = self.smooth * var + (1 - self.smooth) * new_var
        return mean
