# encoding:UTF-8

from multiprocessing import Process, Queue
import numpy as np
import random
import gym


class Env(Process):
    """Worker process running a batch of gym environments in lock-step.

    Queue protocol:
      * pulls one batched action array from ``action_queue`` per step;
      * pushes ``(index, stacked_observations)`` with the current
        observations of every environment after each step;
      * pushes ``(observations, actions, discounted_rewards)`` once for
        every finished episode.
    """

    def __init__(self, env_name="CartPole-v0", index=0, action_queue=None,
                 result_queue=None, gamma=0.99, envs_number=128*2,
                 max_episode_steps=210):
        super().__init__()
        self.action_queue = action_queue  # incoming: actions to execute next
        self.result_queue = result_queue  # outgoing: observations / finished episodes
        self.index = index                # index of this worker process
        self.envs_number = envs_number    # number of game environments to run
        self.max_episode_steps = max_episode_steps  # hard cap on episode length

        self.gamma = gamma
        self.his_observations = [[] for _ in range(envs_number)]  # per-env observation history
        self.his_actions = [[] for _ in range(envs_number)]       # per-env action history
        self.his_rewards = [[] for _ in range(envs_number)]       # per-env reward history
        self.envs_list = [gym.make(env_name) for _ in range(envs_number)]
        self.current_observations = []

        # Reset every environment and publish the initial batched observation.
        # NOTE: this runs in the parent process, before start().
        for i, env in enumerate(self.envs_list):
            observation = env.reset()
            self.his_observations[i].append(observation)
            self.current_observations.append(observation)
        self.result_queue.put((self.index, np.vstack(self.current_observations)))
        self.current_observations = []

    def discount_rewards(self, r):
        """Return standardized discounted returns for one episode.

        Takes a float array of per-step rewards and computes the discounted
        cumulative return at every step, then normalizes to zero mean and
        (when possible) unit variance to help control the variance of the
        gradient estimator.
        """
        # float accumulator so integer reward arrays are not truncated
        discounted_r = np.zeros_like(r, dtype=np.float64)
        running_add = 0
        for t in reversed(range(r.size)):
            running_add = running_add * self.gamma + r[t]
            discounted_r[t] = running_add

        discounted_r -= np.mean(discounted_r)
        std = np.std(discounted_r)
        # guard: a constant-return episode (e.g. a single step) has std == 0
        # and would otherwise produce NaN (0/0) and poison the gradients
        if std > 0:
            discounted_r /= std
        return discounted_r

    def run(self):
        exp_observation_list, exp_action_list, exp_reward_list = [], [], []
        while True:
            # batched action chosen by the policy network
            action = self.action_queue.get()

            for env_index, env in enumerate(self.envs_list):
                self.his_actions[env_index].append(action[env_index])

                observation, reward, done, info = env.step(action[env_index])
                self.his_rewards[env_index].append(reward)

                # force-terminate episodes that exceed the step cap
                if len(self.his_rewards[env_index]) == self.max_episode_steps:
                    done = True

                if done:
                    exp_observation = np.vstack(self.his_observations[env_index])
                    exp_action = np.vstack(self.his_actions[env_index])
                    exp_reward = np.vstack(self.his_rewards[env_index])
                    exp_reward = self.discount_rewards(exp_reward)

                    exp_observation_list.append(exp_observation)
                    exp_action_list.append(exp_action)
                    exp_reward_list.append(exp_reward)
                    self.his_observations[env_index], self.his_actions[env_index], self.his_rewards[env_index] = [], [], []
                    observation = env.reset()
                # record the (possibly freshly reset) observation in the history
                self.his_observations[env_index].append(observation)
                # and in the current batched state
                self.current_observations.append(observation)
            # publish the batched current state
            self.result_queue.put((self.index, np.vstack(self.current_observations)))
            self.current_observations = []

            if exp_observation_list:
                # hand each finished episode back for training
                for exp_o, exp_a, exp_r in zip(exp_observation_list, exp_action_list, exp_reward_list):
                    self.result_queue.put((exp_o, exp_a, exp_r))
                exp_observation_list, exp_action_list, exp_reward_list = [], [], []


class Agent(object):
    """Coordinates the worker processes and the policy network.

    Samples batched actions for each ``Env`` worker, accumulates policy
    gradients from finished episodes, applies them every ``batch_size``
    episodes, and probes for task completion with a separate test env.
    """

    def __init__(self, policy_net, game_name="CartPole-v0", envs_number=128*2,
                 processes_number=8, batch_size=50, gamma=0.99):
        self.p_net = policy_net
        self.batch_size = batch_size
        self.envs_number = envs_number
        self.processes_number = processes_number
        self.episode_number = 0  # total episodes consumed so far
        self.reward_sum = 0      # step count (== reward on CartPole) in the current batch
        self.count = 0           # consecutive batches whose average reward exceeded 200

        # one action queue per worker process, one shared result queue
        self.all_action_queue = [Queue() for _ in range(self.processes_number)]
        self.all_result_queue = Queue()

        # spawn the parallel environment workers as daemons
        self.envs_list = [
            Env(game_name, i, self.all_action_queue[i], self.all_result_queue,
                gamma, self.envs_number)
            for i in range(self.processes_number)
        ]
        for env in self.envs_list:
            env.daemon = True
            env.start()

        # gradient accumulator: one array per trainable variable, zeroed out
        self.gradBuffer = self.p_net.grad_buffer()
        for ix, grad in enumerate(self.gradBuffer):
            self.gradBuffer[ix] = grad * 0

        self.test_env = gym.make(game_name)

    def step(self):
        """Consume one message from the result queue.

        A 2-tuple is a batched-observation request: sample actions and send
        them back.  A 3-tuple is a finished episode: accumulate gradients and
        update the network every ``batch_size`` episodes.  Returns False once
        the task is considered solved, True otherwise.
        """
        msg = self.all_result_queue.get()

        if len(msg) == 2:
            idx, observation = msg
            prob_of_zero = self.p_net.action_prob(observation)  # P(action == 0) per env
            noise = np.random.uniform(size=[prob_of_zero.size, 1])
            # action is 1 where the uniform draw exceeds P(action == 0)
            self.all_action_queue[idx].put((noise > prob_of_zero) * 1.0)
            return True

        exp_observation, exp_action, exp_reward = msg
        self.episode_number += 1
        self.reward_sum += exp_reward.size  # CartPole: one reward unit per step

        # accumulate this episode's gradients; the network parameters are only
        # updated once batch_size episodes' worth of gradients are summed
        for ix, grad in enumerate(self.p_net.new_grads(exp_observation, exp_action, exp_reward)):
            self.gradBuffer[ix] += grad

        if self.episode_number % self.batch_size == 0:
            # apply the accumulated batch gradient, then clear the buffer
            self.p_net.update_grads(self.gradBuffer)
            for ix, grad in enumerate(self.gradBuffer):
                self.gradBuffer[ix] = grad * 0
            print('Average reward for episode %d : %f.' % (
                self.episode_number, self.reward_sum / self.batch_size))

            if self.reward_sum / self.batch_size > 200:
                self.count += 1
                # after 50 strong batches in a row, confirm with an evaluation run
                if self.count >= 50:
                    if not self.test():
                        print("Task solved in", self.episode_number, 'episodes!')
                        return False
                    self.count = 0
            else:
                self.count = 0
            # start accumulating the next batch's reward total
            self.reward_sum = 0
        return True

    def test(self):
        """Run 50 evaluation episodes (each capped at 200 steps).

        Returns False when every episode reached the 200-step cap (solved),
        True otherwise.
        """
        observation = self.test_env.reset()
        all_episodes = []

        for _ in range(50):
            steps = 0
            while True:
                prob_of_zero = self.p_net.action_prob(np.vstack([observation]))
                action = 0 if random.random() < prob_of_zero else 1
                observation, reward, done, info = self.test_env.step(action)
                steps += 1

                if steps == 200:
                    done = True

                if done:
                    observation = self.test_env.reset()
                    all_episodes.append(steps)
                    break

        print(all_episodes)
        return not np.all(np.array(all_episodes) >= 200)


