# encoding:UTF-8

import numpy as np
import random
import gym


class Agent(object):
    """REINFORCE-style policy-gradient agent.

    Runs `envs_number` gym environments in lockstep, records per-environment
    (observation, action, reward) histories, and on each episode's end folds
    that episode's gradient into an accumulation buffer. The policy network's
    parameters are updated once every `batch_size` finished episodes.
    """

    def __init__(self, policy_net, game_name="CartPole-v0", processes_number=1, envs_number=1024, batch_size=50, gamma=0.99):
        """
        Args:
            policy_net: project-defined policy network exposing
                `action_prob`, `grad_buffer`, `new_grads`, `update_grads`.
            game_name: gym environment id.
            processes_number: intended number of parallel processes
                (stored only; not otherwise used in this class).
            envs_number: number of environments stepped in lockstep.
            batch_size: finished episodes per parameter update.
            gamma: discount factor for returns.
        """
        self.p_net = policy_net

        self.episode_number = 0  # episodes finished so far
        self.reward_sum = 0      # total reward accumulated over the current batch
        self.count = 0           # consecutive batches with average reward > 200
        self.batch_size = batch_size
        self.gamma = gamma

        self.processes_number = processes_number  # stored for reference; unused here
        self.envs_number = envs_number
        self.game_name = game_name
        self.envs_list = [gym.make(game_name) for _ in range(self.envs_number)]
        self.history_observations = [[] for _ in range(self.envs_number)]  # per-env observations
        self.history_actions = [[] for _ in range(self.envs_number)]  # per-env actions
        self.history_rewards = [[] for _ in range(self.envs_number)]  # per-env rewards

        self.current_observation = []
        for i, env in enumerate(self.envs_list):
            observation = env.reset()  # the initial state is assumed non-terminal
            self.current_observation.append(observation)
            self.history_observations[i].append(observation)

        # Fetch the network's trainable-variable values as numpy arrays,
        # zero them all, and reuse them as the gradient accumulation buffer.
        self.gradBuffer = self.p_net.grad_buffer()
        for ix, grad in enumerate(self.gradBuffer):
            self.gradBuffer[ix] = grad * 0

        self.test_env = gym.make(game_name)

    def discount_rewards(self, r):
        """Take a 1D float array of rewards and compute the discounted return
        for each timestep of one episode, standardized to zero mean and
        (where possible) unit variance to reduce gradient-estimator variance.

        Args:
            r: numpy float array of per-step rewards for a single episode.

        Returns:
            Array of the same shape as `r` holding the normalized returns.
        """
        discounted_r = np.zeros_like(r)
        running_add = 0
        # Walk backwards so each entry accumulates gamma-discounted future rewards.
        for t in reversed(range(r.size)):
            running_add = running_add * self.gamma + r[t]
            discounted_r[t] = running_add

        # Standardize. Guard the division: a single-step episode has zero
        # standard deviation and would otherwise produce NaN returns that
        # poison the accumulated gradients.
        discounted_r -= np.mean(discounted_r)
        std = np.std(discounted_r)
        if std > 0:
            discounted_r /= std
        return discounted_r

    def step(self):
        """Advance every environment by one action; train on finished episodes.

        Returns:
            False once the task is considered solved (ten consecutive batches
            averaging > 200 reward and `test()` reporting solved), else True.
        """
        # Probability of choosing action 0 for each environment's current state.
        act_prob_numpy = self.p_net.action_prob(np.vstack(self.current_observation))
        self.current_observation[:] = []
        # Sample actions: 1.0 when the uniform draw exceeds P(action=0), else 0.0.
        current_action = (np.random.uniform(size=[self.envs_number, 1]) > act_prob_numpy) * 1.0

        for i, env in enumerate(self.envs_list):
            action = current_action[i]
            self.history_actions[i].append(action)

            # NOTE(review): `action` is a length-1 float array; gym's Discrete
            # spaces generally expect an int — confirm the gym version in use
            # accepts this before changing it.
            observation, reward, done, info = env.step(action)
            self.history_rewards[i].append(reward)

            # Hard cap on episode length. NOTE(review): 210 here vs the
            # 200-step cap used in test() — looks inconsistent, confirm intent.
            if len(self.history_rewards[i]) == 210:
                done = True

            if done:
                self.episode_number += 1
                # The fresh initial state is assumed non-terminal.
                observation = env.reset()
                # Episode return; with CartPole's +1-per-step reward this
                # equals the episode length.
                self.reward_sum += len(self.history_rewards[i])

                exp_observation_numpy = np.vstack(self.history_observations[i])
                exp_action_numpy = np.vstack(self.history_actions[i])
                exp_reward_numpy = np.vstack(self.history_rewards[i])
                self.history_observations[i][:], self.history_actions[i][:], self.history_rewards[i][:] = [], [], []

                # Discounted, normalized returns for this episode.
                exp_reward_numpy = self.discount_rewards(exp_reward_numpy)
                # Gradient for this single episode.
                tGrad = self.p_net.new_grads(exp_observation_numpy, exp_action_numpy, exp_reward_numpy)
                # Accumulate per-episode gradients; parameters are updated
                # only after batch_size episodes have been summed in.
                for ix, grad in enumerate(tGrad):
                    self.gradBuffer[ix] += grad

                # Apply the accumulated gradients once per batch_size episodes.
                if self.episode_number % self.batch_size == 0:
                    self.p_net.update_grads(self.gradBuffer)
                    # Reset the accumulator after the update.
                    for ix, grad in enumerate(self.gradBuffer):
                        self.gradBuffer[ix] = grad * 0
                    print('Average reward for episode %d : %f.' % (self.episode_number, self.reward_sum / self.batch_size))
                    if self.reward_sum / self.batch_size > 200:
                        self.count += 1
                        if self.count >= 10:
                            if not self.test():
                                print("Task solved in", self.episode_number, 'episodes!')
                                return False
                    else:
                        self.count = 0
                    self.reward_sum = 0
            # Record the (possibly reset) current state.
            self.current_observation.append(observation)
            self.history_observations[i].append(observation)
        # BUG FIX: this return previously sat inside the loop body, so only
        # the first environment was ever stepped and current_observation
        # collapsed to a single entry. It must run after ALL envs advance.
        return True

    def test(self):
        """Run 10 evaluation episodes on the dedicated test environment.

        Returns:
            False when every episode reaches the 200-step cap (solved),
            True otherwise.
        """
        observation = self.test_env.reset()
        episodes = 0
        all_episodes = []

        while episodes < 10:
            episodes += 1
            reward_list = []
            while True:
                act_prob_numpy = self.p_net.action_prob(np.vstack([observation]))
                # Sample action 0 with probability act_prob_numpy, else action 1.
                if random.random() < act_prob_numpy:
                    action = 0
                else:
                    action = 1
                observation, reward, done, info = self.test_env.step(action)
                reward_list.append(reward)

                if len(reward_list) == 200:
                    done = True

                if done:
                    observation = self.test_env.reset()
                    all_episodes.append(len(reward_list))
                    break
        if np.all(np.array(all_episodes) >= 200):
            return False
        else:
            return True