# encoding:UTF-8
import gym
import time
import numpy as np
import tensorflow as tf
import os

from network_module.network_module import Policy_Net

# Command-line configuration via the TF1 flags API (help strings kept verbatim).
flags = tf.app.flags
flags.DEFINE_integer('D', 4, '输入层维度')
flags.DEFINE_integer('H', 50, '隐藏层维度')
flags.DEFINE_integer('batch_size', 25, 'batchsize: 分别可以取 25, 50, 100, 300, 500')
flags.DEFINE_integer('mode', 0, '计算模式: 分别可以取 0, 1, 2, 3，其中，0为默认啥也不加，1为加gamma，2为加奖励变形，3为加奖励变形和gamma')
flags.DEFINE_float('learning_rate', 1e-3, '学习率')
config = flags.FLAGS
D = config.D  # input-layer dimensionality (CartPole observation size)

# Force CPU-only execution by hiding all CUDA devices from TensorFlow.
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

# Training environment
env = gym.make('CartPole-v0')
# Hyper-parameters
# batch_size: number of episodes whose gradients are accumulated per parameter update
batch_size = config.batch_size  # every how many episodes to do a param update?
gamma = 0.99  # discount factor for reward
# Running state: per-episode experience buffers and episode counters
exp_observation, exp_action, exp_reward = [], [], []
reward_sum = 0  # total reward collected across the episodes of the current batch
current_episode = 0  # index of the episode currently being played
total_episodes = 100 * 10000  # hard cap on the number of episodes to run


def discount_rewards_0(r):
    """Compute per-step cumulative discounted returns for one episode.

    Walks the reward array backwards so that
    ``discounted_r[t] = r[t] + gamma * r[t+1] + gamma**2 * r[t+2] + ...``
    using the module-level discount factor ``gamma``.

    Args:
        r: numpy array of per-step rewards (1-D, or the (T, 1) column
           produced by ``np.vstack`` in the training loop).

    Returns:
        Float array of the same shape holding the discounted return at
        each timestep.
    """
    # Accumulate in float64 explicitly: plain np.zeros_like(r) inherits r's
    # dtype, so an integer reward array would silently truncate every
    # discounted value toward zero.
    discounted_r = np.zeros_like(r, dtype=np.float64)
    running_add = 0.0
    for t in reversed(range(r.size)):
        running_add = running_add * gamma + r[t]
        discounted_r[t] = running_add
    return discounted_r

def discount_rewards_1(discounted_r):
    """Weight entry t by gamma**t, discounting returns back to episode start.

    Mutates ``discounted_r`` in place and returns it for chaining.
    """
    t = 0
    while t < discounted_r.size:
        discounted_r[t] *= gamma ** t
        t += 1
    return discounted_r

def discount_rewards_2(discounted_reward_numpy):
    """Standardize rewards to zero mean and (where possible) unit variance.

    This keeps the advantage estimates roughly unit-normal, which helps
    control the variance of the policy-gradient estimator.

    Args:
        discounted_reward_numpy: float numpy array of discounted returns;
            modified in place.

    Returns:
        The same array, centered and scaled.
    """
    discounted_reward_numpy -= np.mean(discounted_reward_numpy)
    # Guard against a zero standard deviation (constant-reward or length-1
    # episode): the original unconditional division produced NaN/Inf
    # advantages that poison the gradient update. Leave the centered values
    # unscaled in that case.
    std = np.std(discounted_reward_numpy)
    if std > 0:
        discounted_reward_numpy /= std
    return discounted_reward_numpy

def discount_rewards_01(r):
    """Pipeline for mode 1: cumulative discounted returns, then gamma**t weighting."""
    return discount_rewards_1(discount_rewards_0(r))

def discount_rewards_02(r):
    """Pipeline for mode 2: cumulative discounted returns, then standardization."""
    return discount_rewards_2(discount_rewards_0(r))

def discount_rewards_012(r):
    """Pipeline for mode 3: discounted returns, gamma**t weighting, then standardization."""
    return discount_rewards_2(discount_rewards_1(discount_rewards_0(r)))


# Select the reward-processing pipeline for the configured mode:
# 0 -> plain discounted returns, 1 -> + gamma**t weighting,
# 2 -> + standardization, any other value -> both extras.
_MODE_TO_PIPELINE = {
    0: discount_rewards_0,
    1: discount_rewards_01,
    2: discount_rewards_02,
}
discount_rewards = _MODE_TO_PIPELINE.get(config.mode, discount_rewards_012)


if __name__ == "__main__":
    # Build the policy network from the command-line configuration.
    p_net = Policy_Net(config)

    # Snapshot of every trainable variable, zeroed out and reused as the
    # accumulator for per-episode gradients over one batch.
    gradBuffer = p_net.vars_buffer()
    for ix, grad in enumerate(gradBuffer):
        gradBuffer[ix] = grad * 0

    # Reset the environment before the first episode.
    observation = env.reset()

    a = time.time()
    while current_episode <= total_episodes:  # current_episode counts finished episodes
        # Stack into a 2-D batch of one so the network input shape matches.
        observation = np.vstack([observation])
        # act_prob is the probability of choosing action 0; action 1 is
        # chosen with probability 1 - act_prob.
        act_prob = p_net.action_prob(observation)

        # Sample the action from the Bernoulli policy.
        if np.random.uniform() < act_prob:
            action = 0
        else:
            action = 1

        next_observation, reward, done, info = env.step(action)
        # Record this transition in the episode's experience buffers.
        exp_observation.append(observation)
        exp_action.append(action)
        exp_reward.append(reward)
        reward_sum += reward

        if done:
            current_episode += 1
            exp_observation_numpy = np.vstack(exp_observation)
            exp_action_numpy = np.vstack(exp_action)
            exp_reward_numpy = np.vstack(exp_reward)
            exp_observation, exp_action, exp_reward = [], [], []

            # Discounted / shaped returns according to the selected mode.
            discounted_reward_numpy = discount_rewards(exp_reward_numpy)

            # Gradients of the policy-gradient loss for this episode, i.e.
            # loglik = tf.log(input_y * (input_y - probability) + (1 - input_y) * (input_y + probability))
            # loss = -tf.reduce_mean(loglik * advantages)
            tGrad = p_net.new_grads(exp_observation_numpy, exp_action_numpy, discounted_reward_numpy)

            # Accumulate this episode's gradients; network parameters are
            # only updated once batch_size episodes have been summed, so the
            # update effectively uses samples from batch_size episodes.
            for ix, grad in enumerate(tGrad):
                gradBuffer[ix] += grad

            # After batch_size episodes, apply the accumulated gradients.
            if current_episode % batch_size == 0:
                p_net.update_grads(gradBuffer)
                # Clear the accumulator for the next batch.
                for ix, grad in enumerate(gradBuffer):
                    gradBuffer[ix] = grad * 0
                print('Average reward for episode %d : %f.' % (current_episode, reward_sum / batch_size))
                # NOTE(review): CartPole-v0 caps episode reward at 200, so a
                # strict "> 200" batch average can never trigger — confirm
                # whether ">= 200" (or a lower threshold) was intended.
                if reward_sum / batch_size > 200:
                    print("Task solved in", current_episode, 'episodes!')
                    break
                reward_sum = 0
            # Start the next episode from a fresh environment state.
            next_observation = env.reset()
        observation = next_observation

    # Timing report. Moved inside the __main__ guard: at module level these
    # lines ran on import and raised NameError because `a` is only bound
    # when the script actually executes.
    b = time.time()
    print(b - a)


# batch_size
# reward shape: (mean, std)方式,
# reward: gamma*reward: 折扣概率下的折扣奖励
'''
def discount_rewards_1(r):
    """ take 1D float array of rewards and compute discounted reward """
    """ 计算一个episode内不同state下所选动作的累计折扣回报 """
    discounted_r = np.zeros_like(r)
    running_add = 0
    for t in reversed(range(r.size)):
        running_add = running_add * gamma + r[t]
        discounted_r[t] = running_add

    for t in range(discounted_r.size):
        discounted_r[t] = (gamma**t)*discounted_r[t]

    # size the rewards to be unit normal (helps control the gradient estimator variance)
    discounted_r -= np.mean(discounted_r)
    discounted_r /= np.std(discounted_r)

    return discounted_r
'''
