# encoding:UTF-8

from multiprocessing import Process, Queue
import numpy as np
import random
import gym


class Env(Process):
    """Worker process that owns one gym environment.

    It consumes pre-selected actions from ``action_queue`` and publishes
    ``(index, observation, done, exp_observation, exp_action, exp_reward)``
    tuples on the shared ``result_queue``; the ``exp_*`` fields are ``None``
    until an episode finishes, at which point they carry the stacked
    episode history for the learner.
    """

    def __init__(self, env_name="CartPole-v0", index=0, action_queue=None, result_queue=None, gamma=0.99):
        super().__init__()
        self.index = index
        self.action_queue = action_queue  # incoming: next action to execute
        self.result_queue = result_queue  # outgoing: observation/action/reward results

        self.gamma = gamma
        self.his_observation = []  # observations of the current episode
        self.his_action = []  # actions of the current episode
        self.his_reward = []  # rewards of the current episode
        self.env = gym.make(env_name)

        # NOTE: this runs in the PARENT process (before start()); the agent
        # relies on each worker's initial observation being queued here.
        observation = self.env.reset()
        self.result_queue.put((index, observation, False, None, None, None))
        self.his_observation.append(observation)

    def discount_rewards(self, r):
        """Return the discounted cumulative reward for every step of an episode.

        Args:
            r: float array of per-step rewards (iterated by ``r.size``).

        Returns:
            Array of the same shape: discounted returns, standardized to zero
            mean and (when possible) unit variance. The variance division is
            skipped when the returns are constant — e.g. a length-1 episode —
            which in the original code produced a 0/0 NaN.
        """
        discounted_r = np.zeros_like(r)
        running_add = 0
        for t in reversed(range(r.size)):
            running_add = running_add * self.gamma + r[t]
            discounted_r[t] = running_add

        # Standardize the returns (helps control gradient-estimator variance).
        discounted_r -= np.mean(discounted_r)
        std = np.std(discounted_r)
        if std > 0:  # guard against division by zero for constant returns
            discounted_r /= std
        return discounted_r

    def step(self):
        """Execute one queued action in the environment and publish the result."""
        action = self.action_queue.get()  # blocking: wait for the agent's choice
        self.his_action.append(action)

        observation, reward, done, info = self.env.step(action)
        self.his_reward.append(reward)

        exp_observation, exp_action, exp_reward = None, None, None

        # Hard cap on episode length so an episode cannot run forever.
        if len(self.his_reward) == 210:
            done = True

        if done:
            # Package the finished episode for the learner, then reset.
            exp_observation = np.vstack(self.his_observation)
            exp_action = np.vstack(self.his_action)
            exp_reward = np.vstack(self.his_reward)
            exp_reward = self.discount_rewards(exp_reward)
            self.his_observation, self.his_action, self.his_reward = [], [], []
            observation = self.env.reset()
        self.his_observation.append(observation)
        self.result_queue.put((self.index, observation, done, exp_observation, exp_action, exp_reward))
        # TODO(review): consider whether the observation history itself should
        # also be pushed through the result queue.

    def run(self):
        # Process entry point: serve actions until the daemon is terminated.
        while True:
            self.step()


class Agent(object):
    """Policy-gradient learner driving ``processes_number`` parallel Env workers.

    Actions are dispatched to each worker through its private queue; results
    come back through one shared queue. Gradients from finished episodes are
    accumulated and applied every ``batch_size`` episodes.
    """

    def __init__(self, policy_net, game_name="CartPole-v0", processes_number=32, batch_size=50, gamma=0.99):
        self.p_net = policy_net
        self.batch_size = batch_size
        self.number = processes_number
        self.episode_number = 0  # total finished episodes so far
        self.reward_sum = 0  # reward accumulated over the current batch
        self.count = 0  # consecutive batches with average reward above 200

        # One private action queue per worker, one shared result queue.
        self.all_action_queue = [Queue() for _ in range(processes_number)]
        self.all_result_queue = Queue()

        # Spawn the parallel environments (daemons die with the main process).
        self.envs_list = [
            Env(game_name, i, self.all_action_queue[i], self.all_result_queue, gamma)
            for i in range(processes_number)]
        for env in self.envs_list:
            env.daemon = True
            env.start()

        # Gradient accumulator: one buffer per trainable variable, zeroed out.
        # Episode gradients are summed here and applied once per batch, so the
        # network is effectively updated on batch_size episodes at a time.
        self.gradBuffer = self.p_net.grad_buffer()
        for ix, grad in enumerate(self.gradBuffer):
            self.gradBuffer[ix] = grad * 0

        self.test_env = gym.make(game_name)

    def step(self):
        """Drain results, dispatch new actions, and learn from finished episodes.

        Returns:
            False once the task is considered solved (see ``test``),
            True otherwise.
        """
        current_observation = []
        current_observation_index = []
        current_result = []
        # Drain whatever is already queued (qsize() is approximate for
        # multiprocessing queues), then do one final blocking get so that we
        # always process at least one result per call.
        for _ in range(self.all_result_queue.qsize() - 1):
            index, observation, done, exp_observation, exp_action, exp_reward = self.all_result_queue.get()
            current_observation.append(observation)
            current_observation_index.append(index)
            current_result.append((done, exp_observation, exp_action, exp_reward))
        index, observation, done, exp_observation, exp_action, exp_reward = self.all_result_queue.get()
        current_observation.append(observation)
        current_observation_index.append(index)
        current_result.append((done, exp_observation, exp_action, exp_reward))

        # Probability of choosing action 0 for every collected observation.
        act_prob_numpy = self.p_net.action_prob(np.vstack(current_observation))
        # Sample: 1.0 when the uniform draw exceeds P(action=0), else 0.0.
        current_action = (np.random.uniform(size=[act_prob_numpy.size, 1]) > act_prob_numpy) * 1.0
        for i, action in enumerate(current_action):
            self.all_action_queue[current_observation_index[i]].put(action)

        for done, exp_observation, exp_action, exp_reward in current_result:
            if done:
                self.episode_number += 1
                self.reward_sum += exp_reward.size
                newGrad = self.p_net.new_grads(exp_observation, exp_action, exp_reward)
                # Sum this episode's gradients into gradBuffer; parameters are
                # only updated once batch_size episodes have been accumulated.
                for ix, grad in enumerate(newGrad):
                    self.gradBuffer[ix] += grad

                # A full batch of episodes has been collected: update the net.
                if self.episode_number % self.batch_size == 0:
                    self.p_net.update_grads(self.gradBuffer)
                    # After applying the batch gradient, zero the accumulator.
                    for ix, grad in enumerate(self.gradBuffer):
                        self.gradBuffer[ix] = grad * 0
                    print('Average reward for episode %d : %f.' % (
                        self.episode_number, self.reward_sum / self.batch_size))

                    if self.reward_sum / self.batch_size > 200:
                        self.count += 1
                        # After 10 strong batches in a row, confirm via test().
                        if self.count >= 10:
                            if not self.test():
                                print("Task solved in", self.episode_number, 'episodes!')
                                return False
                    else:
                        self.count = 0
                    # Start accumulating the next batch's reward total.
                    self.reward_sum = 0
        # BUG FIX: this return used to sit inside the loop above, so only the
        # first drained result was ever examined — finished episodes from the
        # other workers were silently dropped.
        return True

    def test(self):
        """Run 10 evaluation episodes on a dedicated environment.

        Returns:
            False when every episode reaches the 200-step cap (task solved),
            True otherwise — note the inverted convention relied on by step().
        """
        observation = self.test_env.reset()
        episodes = 0
        all_episodes = []  # length of each evaluation episode

        while episodes < 10:
            episodes += 1
            reward_list = []
            while True:
                act_prob_numpy = self.p_net.action_prob(np.vstack([observation]))
                # Sample the action from P(action=0).
                if random.random() < act_prob_numpy:
                    action = 0
                else:
                    action = 1
                observation, reward, done, info = self.test_env.step(action)
                reward_list.append(reward)

                # Cap evaluation episodes at 200 steps.
                if len(reward_list) == 200:
                    done = True

                if done:
                    observation = self.test_env.reset()
                    all_episodes.append(len(reward_list))
                    break
        # Solved (return False) only if every episode hit the 200-step cap.
        return not bool(np.all(np.array(all_episodes) >= 200))
