# NOTE: TF_CPP_MIN_LOG_LEVEL must be set *before* tensorflow is imported,
# which is why `import os` and the env assignment precede the TF import.
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # suppress TensorFlow's C++ log output

import tensorflow as tf
import numpy as np
from tensorflow.keras import Model, layers, optimizers, models

class PolicyGradient:
    """Monte-Carlo policy-gradient (REINFORCE) agent built on tf.keras.

    Usage: call ``store_transition`` for every step of an episode, then
    ``learn`` once at episode end; ``learn`` performs a single gradient
    update using discounted, normalized returns as the advantage signal
    and clears the episode buffers.
    """

    def __init__(
            self,
            n_actions,
            n_features,
            learning_rate=0.01,
            reward_decay=0.95,
            output_graph=False
    ):
        # Environment dimensions: number of discrete actions / observation size.
        self.n_actions = n_actions
        self.n_features = n_features
        self.lr = learning_rate
        self.gamma = reward_decay  # discount factor for future rewards

        # Per-episode buffers: observations, actions, rewards.
        self.ep_obs, self.ep_as, self.ep_rs = [], [], []

        self._build_net()

        if output_graph:
            # Graph export was never implemented; the flag is kept so
            # existing callers that pass it keep working.
            pass

    # Build the policy-gradient network
    def _build_net(self):
        """Build the policy network mapping observations to softmax action
        probabilities, and its Adam optimizer."""
        s = tf.keras.Input([None, self.n_features], name='s')
        # Policy network: one hidden ReLU layer, softmax output over actions.
        x = tf.keras.layers.Dense(20, activation=tf.keras.activations.relu, name='l1')(s)
        x = tf.keras.layers.Dense(self.n_actions, activation=tf.keras.activations.softmax, name='l2')(x)
        self.eval_net = tf.keras.Model(inputs=s, outputs=x)
        # Network optimizer
        self.optimizer = optimizers.Adam(learning_rate=self.lr)

    # Choose an action
    def choose_action(self, observation):
        """Sample an action index from the policy's probability distribution
        over actions for a single observation (1-D array of n_features)."""
        observation = np.asarray(observation)
        prob_weights = self.eval_net(observation[np.newaxis, :]).numpy()
        # Renormalize in float64: the float32 softmax output may not sum to
        # exactly 1.0, which makes np.random.choice raise ValueError.
        p = prob_weights[0].astype(np.float64)
        p /= p.sum()
        action = np.random.choice(self.n_actions, p=p)
        return action

    # Store one episode transition
    def store_transition(self, observation, action, reward):
        """Append one (observation, action, reward) step to the episode buffers."""
        self.ep_obs.append(observation)
        self.ep_as.append(action)
        self.ep_rs.append(reward)

    # Learn / update parameters
    def learn(self):
        """Run one REINFORCE update over the stored episode.

        Returns the discounted, normalized episode returns used as the
        advantage, then empties the episode buffers.
        """
        discounted_ep_rs_norm = self._discount_and_norm_reward()

        obs = np.vstack(self.ep_obs).astype(np.float32)
        actions_one_hot = tf.one_hot(np.array(self.ep_as), self.n_actions)
        # Cast advantages to float32: the network outputs float32, and
        # multiplying by a float64 numpy array raises a dtype error in TF2.
        advantages = tf.convert_to_tensor(discounted_ep_rs_norm, dtype=tf.float32)

        # Train on the whole episode.
        with tf.GradientTape() as tape:
            probs = self.eval_net(obs)
            # -log pi(a_t | s_t) for the action actually taken at each step.
            neg_log_prob = tf.reduce_sum(-tf.math.log(probs) * actions_one_hot, axis=1)
            loss = tf.reduce_mean(neg_log_prob * advantages)

        # Gradients are computed after the tape context exits (the original
        # called tape.gradient inside tape.stop_recording(), and had a
        # `graps` typo).
        grads = tape.gradient(loss, self.eval_net.trainable_variables)
        self.optimizer.apply_gradients(zip(grads, self.eval_net.trainable_variables))

        self.ep_obs, self.ep_as, self.ep_rs = [], [], []  # empty episode data
        return discounted_ep_rs_norm

    # Discount the episode's rewards
    def _discount_and_norm_reward(self):
        """Return the discounted episode returns, normalized to zero mean
        and (where possible) unit standard deviation.

        The buffer is explicitly float: ``np.zeros_like`` on an int reward
        list would truncate returns and make the in-place division fail.
        """
        # discount episode rewards (backward pass: G_t = r_t + gamma * G_{t+1})
        discounted_ep_rs = np.zeros(len(self.ep_rs), dtype=np.float64)
        running_add = 0.0
        for t in reversed(range(len(self.ep_rs))):
            running_add = running_add * self.gamma + self.ep_rs[t]
            discounted_ep_rs[t] = running_add

        # normalize episode rewards
        discounted_ep_rs -= np.mean(discounted_ep_rs)
        std = np.std(discounted_ep_rs)
        if std > 0:  # avoid NaN on single-step / constant-return episodes
            discounted_ep_rs /= std
        return discounted_ep_rs
