import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # 屏蔽tensorflow的日志输出

import tensorflow as tf
import numpy as np
from tensorflow.keras import layers, optimizers
import gym

# 超参数的设置
OUTPUT_GRAPH = False
MAX_EPISODE = 3000
DISPLAY_REWARD_THRESHOLD = 200  # 当奖励超过阈值显示图形化界面
MAX_EP_STEPS = 1000  # 在一幕中最大步数
RENDER = False
GAMMA = 0.9  # 折扣因子
'''
Critic之所有比Actor的学习率大是因为 Critic指导Actor的学习过程
'''
LR_A = 0.001  # Actor网络的学习率
LR_C = 0.01  # Critic网络的学习率

env = gym.make('CartPole-v0')
env.seed(1)
env = env.unwrapped  # 去除环境限制

N_F = env.observation_space.shape[0]
N_A = env.action_space.n


# Actor network
class Actor(object):
    """Policy network: maps a state to a probability distribution over actions."""

    def __init__(self, Feature_n, Actor_n, lr=0.001):
        """Build the policy model and its optimizer.

        Args:
            Feature_n: dimensionality of the state vector.
            Actor_n: number of discrete actions.
            lr: learning rate for the Adam optimizer.
        """
        s = tf.keras.Input([None, Feature_n], name='s')
        # Prediction model: one hidden layer, softmax over actions.
        x = layers.Dense(20, activation=tf.keras.activations.relu, name='l1')(s)
        x = layers.Dense(Actor_n, activation=tf.keras.activations.softmax, name='l2')(x)
        self.Actor_net = tf.keras.Model(inputs=s, outputs=x)
        # Optimizer
        self.optimizer = optimizers.Adam(learning_rate=lr)

    def learn(self, s, a, td):
        """Perform one policy-gradient update step.

        Args:
            s: current state, shape (Feature_n,).
            a: index of the action that was taken in state ``s``.
            td: TD error from the Critic, used as the advantage signal.

        Returns:
            The scalar policy loss for this step.
        """
        # Add a batch axis to the state.
        s = s[np.newaxis, :]
        with tf.GradientTape() as tape:
            # Single-step update: log-probability of the action actually taken.
            # (The original named this `neg_log_prob`, but it is the positive log prob.)
            log_prob = tf.math.log(self.Actor_net(s)[0, a])
            # Maximize log_prob * td  <=>  minimize -log_prob * td.
            loss = tf.reduce_mean(-log_prob * td)
        # Compute and apply gradients after recording has finished — the
        # original nested this inside `tape.stop_recording()` within the
        # tape context, which is fragile; the math is identical.
        grads = tape.gradient(loss, self.Actor_net.trainable_variables)
        self.optimizer.apply_gradients(zip(grads, self.Actor_net.trainable_variables))
        return loss

    def choose_action(self, s):
        """Sample an action from the current policy for state ``s``."""
        s = s[np.newaxis, :]
        prob_weights = self.Actor_net(s).numpy()
        action = np.random.choice(range(prob_weights.shape[1]), p=prob_weights[0, :])
        return action


class Critic(object):
    """Value network: estimates V(s) and supplies the TD error to the Actor."""

    def __init__(self, n_feature, n_action, lr=0.001):
        """Build the value model and its optimizer.

        Args:
            n_feature: dimensionality of the state vector.
            n_action: number of output units (kept for interface compatibility;
                a state-value head would conventionally output a single scalar).
            lr: learning rate for the Adam optimizer.
        """
        s = tf.keras.Input([None, n_feature], name='s')
        x = layers.Dense(20, activation=tf.keras.activations.relu, name='l1')(s)
        x = layers.Dense(n_action, activation=None, name='l2')(x)

        # Typo in the original attribute name (`Ctitic_net`) fixed; the
        # attribute is only referenced inside this class.
        self.Critic_net = tf.keras.Model(inputs=s, outputs=x)

        self.optimizer = optimizers.Adam(learning_rate=lr)

    def learn(self, s, r, s_):
        """Perform one semi-gradient TD(0) update.

        Args:
            s: current state, shape (n_feature,).
            r: reward received for the transition.
            s_: successor state, shape (n_feature,).

        Returns:
            The TD error r + GAMMA * V(s') - V(s) as a scalar tensor.
        """
        s, s_ = s[np.newaxis, :], s_[np.newaxis, :]
        # Bootstrap target: evaluate V(s') OUTSIDE the tape so that no
        # gradient flows through the target (semi-gradient TD). The original
        # recorded v_ on the tape, incorrectly back-propagating through the
        # bootstrap target as well.
        v_ = self.Critic_net(s_)
        with tf.GradientTape() as tape:  # forward pass must be recorded by the tape
            v = self.Critic_net(s)
            td_error = tf.reduce_mean(r + GAMMA * v_ - v)
            loss = tf.square(td_error)
        grads = tape.gradient(loss, self.Critic_net.trainable_variables)
        self.optimizer.apply_gradients(zip(grads, self.Critic_net.trainable_variables))
        return td_error


actor = Actor(N_F, N_A, lr=LR_A)
critic = Critic(N_F, N_A, lr=LR_C)

# Exponential moving average of the episode return; initialized explicitly
# instead of the original `'running_reward' not in globals()` check.
running_reward = None

for i_episode in range(MAX_EPISODE):
    s = env.reset()
    t = 0
    track_r = []  # per-step rewards of the current episode

    while True:
        if RENDER:
            env.render()
        a = actor.choose_action(s)
        s_, r, done, _ = env.step(a)
        if done:
            r = -20  # large penalty when the episode terminates (pole fell)
        track_r.append(r)

        # Critic evaluates the transition; its TD error drives the Actor update.
        td_error = critic.learn(s, r, s_)
        actor.learn(s, a, td_error)

        s = s_
        t += 1
        if done or t >= MAX_EP_STEPS:
            ep_rs_sum = sum(track_r)

            if running_reward is None:
                running_reward = ep_rs_sum
            else:
                running_reward = running_reward * 0.95 + ep_rs_sum * 0.05
            if running_reward > DISPLAY_REWARD_THRESHOLD:
                RENDER = True  # start rendering once performance is good enough
            print("episode:", i_episode, "  reward:", int(running_reward))
            break