"""
这个module是对神经网络的定义
"""
import tensorflow as tf

class Policy_Net(object):
    """Policy-gradient (REINFORCE) network for a binary-action task.

    A two-layer MLP maps a D-dimensional observation to the probability of
    choosing action 0 (action 1 then has probability 1 - p). The graph also
    exposes ops to compute per-episode gradients and to apply externally
    accumulated gradients in a batch via Adam.
    """

    def __init__(self, config):
        # config.learning_rate: Adam step size (e.g. 1e-2)
        self.learning_rate = config.learning_rate
        # config.D: observation vector dimension (e.g. 4)
        self.D = config.D
        # config.H: hidden-layer width (e.g. 50)
        self.H = config.H
        # Build the model in its own graph so several nets can coexist.
        self.graph = tf.Graph()
        # allow_growth makes TF allocate GPU memory on demand instead of
        # grabbing the whole device up front (it is NOT a hard memory cap).
        gpu_options = tf.GPUOptions(allow_growth=True)
        self.sess = tf.Session(graph=self.graph, config=tf.ConfigProto(gpu_options=gpu_options))

        self.net_build()             # construct the graph ops
        self.sess.run(self.op_init)  # initialize all global variables

    def net_build(self):
        """Construct the policy network and its training ops inside self.graph."""
        with self.graph.as_default():
            # Batch of observed states, shape [batch, D].
            observations = tf.placeholder(tf.float32, [None, self.D], name="input_states")
            # Actions actually taken, encoded as 0.0 / 1.0, shape [batch, 1].
            actions = tf.placeholder(tf.float32, [None, 1], name="input_actions")
            # Discounted cumulative reward for each step, shape [batch, 1].
            advantages = tf.placeholder(tf.float32, [None, 1], name="reward_signal")

            # Two-layer MLP producing `probability` = P(action 0 | state);
            # P(action 1 | state) = 1 - probability.
            w1 = tf.get_variable("w1", shape=[self.D, self.H],
                                 initializer=tf.contrib.layers.xavier_initializer())
            b1 = tf.get_variable("b1", shape=[self.H], initializer=tf.constant_initializer(0.0))
            layer1 = tf.nn.relu(tf.add(tf.matmul(observations, w1), b1))
            w2 = tf.get_variable("w2", shape=[self.H, 1],
                                 initializer=tf.contrib.layers.xavier_initializer())
            b2 = tf.get_variable("b2", shape=[1], initializer=tf.constant_initializer(0.0))
            score = tf.add(tf.matmul(layer1, w2), b2)
            probability = tf.nn.sigmoid(score)

            # Log-likelihood of the action taken (verify by substitution):
            #   actions == 0 -> log(probability)
            #   actions == 1 -> log(1 - probability)
            loglik = tf.log(actions * (actions - probability) + (1 - actions) * (actions + probability))
            # REINFORCE loss: mean over the episode of -log pi(a|s) * advantage.
            loss = -tf.reduce_mean(loglik * advantages)

            # Snapshot the trainable variables once and reuse the same list
            # everywhere below so placeholder/gradient/variable order matches.
            trainable_vars = tf.trainable_variables()
            # One placeholder per trainable variable; gradients accumulated
            # over several episodes are fed through these at update time.
            batchGrad = []
            for var in trainable_vars:
                var_name = "batch_" + var.name.split(":")[0]
                batchGrad.append(tf.placeholder(tf.float32, name=var_name))

            # Gradients of the loss w.r.t. every trainable variable.
            newGrads = tf.gradients(loss, trainable_vars)
            adam = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
            # Apply the externally accumulated gradients with Adam.
            updateGrads = adam.apply_gradients(zip(batchGrad, trainable_vars))

            self.op_init = tf.global_variables_initializer()
            self.observations = observations
            self.actions = actions
            self.advantages = advantages
            self.batchGrad = batchGrad
            self.probability = probability
            self.trainable_vars = trainable_vars
            self.newGrads = newGrads
            self.updateGrads = updateGrads

    def grad_buffer(self):
        """Return current values of all trainable variables.

        Callers use these arrays to shape/zero an external gradient
        accumulation buffer.
        """
        return self.sess.run(self.trainable_vars)

    def action_prob(self, input_x):
        """Return P(action 0) for each observation row in input_x."""
        return self.sess.run(self.probability, feed_dict={self.observations: input_x})

    def new_grads(self, observations, actions, discounted_rewards):
        """Return the loss gradients for one episode's transitions.

        The returned list is ordered like self.trainable_vars.
        """
        return self.sess.run(self.newGrads,
                             feed_dict={self.observations: observations,
                                        self.actions: actions,
                                        self.advantages: discounted_rewards})

    def update_grads(self, input_gradBuffer):
        """Feed accumulated gradients (ordered like self.trainable_vars)
        into the batch placeholders and apply one Adam update."""
        self.sess.run(self.updateGrads, feed_dict=dict(zip(self.batchGrad, input_gradBuffer)))