import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import tensorflow as tf
import numpy as np
import gym
from tensorflow.keras import layers, optimizers, activations


# hyperparameter
OUTPUT_GRAPH = False             # whether to export a TensorBoard graph (not used below)
MAX_EPISODE = 1000               # number of training episodes
MAX_EP_STEPS = 200               # intended per-episode step cap
DISPLAY_REWARD_THRESHOLD = -100  # renders environment if total episode reward is greater than this threshold
RENDER = False  # rendering wastes time
GAMMA = 0.9     # discount factor for the TD target
LR_A = 0.001  # learning rate for actor
LR_C = 0.01  # learning rate for critic

env = gym.make('Pendulum-v0')  # continuous-action control task
env.seed(1)                    # fix the environment RNG for reproducibility
env = env.unwrapped            # strip wrappers (e.g. TimeLimit) so the script controls episode length
N_s = env.observation_space.shape[0]  # dimensionality of the state vector
A_BOUND = env.action_space.high       # upper action limit; actions lie in [-A_BOUND, A_BOUND]


class Net(tf.keras.Model):
    """Actor network for a Gaussian policy.

    One shared hidden layer feeds two heads: the action mean ``mu``
    (tanh-bounded to (-1, 1)) and the standard deviation ``sigma``
    (softplus, so strictly positive).
    """

    def __init__(self):
        super(Net, self).__init__()
        self.layer_1 = layers.Dense(30, activation=activations.relu)      # shared hidden layer
        self.layer_2 = layers.Dense(1, activation=activations.tanh)       # mu head
        self.layer_3 = layers.Dense(1, activation=activations.softplus)   # sigma head

    def call(self, inputs):
        """Return (mu, sigma) for a batch of states."""
        x = self.layer_1(inputs)
        mu = self.layer_2(x)
        # Bug fix: sigma is computed from the shared hidden features x.
        # Previously it was fed the 1-dim tanh-squashed mu, which collapsed
        # the sigma head's input to a single bounded scalar and tied the
        # exploration width directly to the action mean.
        sigma = self.layer_3(x)
        return mu, sigma


class Actor(object):
    """Gaussian-policy actor: samples continuous actions and performs
    policy-gradient updates using the critic's TD error as the advantage."""

    def __init__(self, n_features, lr):
        # n_features is accepted for interface symmetry with Critic; the
        # Dense layers of Net infer their input size on the first call.
        self.Actor_net = Net()
        self.optimizer = optimizers.Adam(learning_rate=lr)

    def lean(self, s, td_error, action_bound, a):
        """One policy-gradient step.

        s: 1-D state vector; td_error: critic's TD error (advantage);
        action_bound: [low, high] action limits; a: action actually taken.
        (Name kept for backward compatibility; see the `learn` alias.)
        """
        # Bug fix: the batch dimension was previously added twice (once
        # before the tape and once inside), giving shape (1, 1, n).
        s = s[np.newaxis, :]
        with tf.GradientTape() as tape:
            mu_, sigma_ = self.Actor_net(s)
            sigma = tf.squeeze(sigma_ + 0.1)  # keep sigma away from zero
            # Scale the tanh-bounded mean to the action range instead of the
            # previously hard-coded factor 2 (Pendulum-specific).
            mu = tf.squeeze(mu_ * action_bound[1])
            normal_dist = tf.compat.v1.distributions.Normal(mu, sigma)
            log_prob = normal_dist.log_prob(a)
            loss = -log_prob * td_error
            # Entropy bonus encourages exploration, so it must *reduce* the
            # loss (bug fix: it was previously added to the loss, which
            # penalized exploration instead).
            loss -= 0.01 * normal_dist.entropy()
        # Take gradients after the tape context closes; the previous
        # tape.stop_recording() workaround is unnecessary.
        grads = tape.gradient(loss, self.Actor_net.trainable_variables)
        self.optimizer.apply_gradients(zip(grads, self.Actor_net.trainable_variables))

    # Correctly-spelled alias; existing callers using `lean` keep working.
    learn = lean

    def choose_action(self, s, action_bound):
        """Sample one action from N(mu, sigma), clipped to action_bound."""
        s = s[np.newaxis, :]
        mu_, sigma_ = self.Actor_net(s)
        sigma = tf.squeeze(sigma_ + 0.1)
        mu = tf.squeeze(mu_ * action_bound[1])
        normal_dist = tf.compat.v1.distributions.Normal(mu, sigma)
        return tf.clip_by_value(normal_dist.sample(1), action_bound[0], action_bound[1])


class Critic(object):
    """State-value critic: learns V(s) with TD(0) and returns the TD error
    used as the advantage signal by the actor."""

    def __init__(self, n_feature, lr):
        # NOTE(review): Input([None, n_feature]) declares a rank-3 input
        # (batch, None, n_feature) while learn() feeds rank-2 states —
        # kept as-is for compatibility, but likely meant Input([n_feature]).
        s = tf.keras.Input([None, n_feature], name='s')
        x = layers.Dense(30, activation=tf.keras.activations.relu, name='l1')(s)
        x = layers.Dense(1, activation=None, name='l2')(x)

        self.Critic_net = tf.keras.Model(inputs=s, outputs=x)
        # Backward-compatible alias for the original (misspelled) attribute.
        self.Ctitic_net = self.Critic_net

        self.optimizer = optimizers.Adam(learning_rate=lr)

    def learn(self, s, r, s_):
        """One TD(0) update. Returns the TD error r + GAMMA * V(s') - V(s)."""
        s, s_ = s[np.newaxis, :], s_[np.newaxis, :]
        with tf.GradientTape() as tape:
            v = self.Critic_net(s)
            # Bug fix: V(s') is only a bootstrap target; semi-gradient TD
            # must not propagate gradients through it.
            v_ = tf.stop_gradient(self.Critic_net(s_))
            td_error = tf.reduce_mean(r + GAMMA * v_ - v)
            loss = tf.square(td_error)
        # Take gradients after the tape context closes; the previous
        # tape.stop_recording() workaround is unnecessary.
        grads = tape.gradient(loss, self.Critic_net.trainable_variables)
        self.optimizer.apply_gradients(zip(grads, self.Critic_net.trainable_variables))
        self.td_error = td_error  # attribute kept for backward compatibility
        return td_error


actor = Actor(N_s, LR_A)
critic = Critic(N_s, LR_C)

# Main training loop: one critic + one actor update per environment step.
for i_episode in range(MAX_EPISODE):
    s = env.reset()
    t = 0
    ep_rs = []
    while True:
        # Bug fix: render() was previously called unconditionally, making
        # the RENDER flag (and DISPLAY_REWARD_THRESHOLD) dead code.
        if RENDER:
            env.render()

        a = actor.choose_action(s, [-A_BOUND, A_BOUND])

        s_, r, done, _ = env.step(a)
        r /= 10  # scale the reward down to stabilize learning

        td_error = critic.learn(s, r, s_)
        actor.lean(s, td_error, [-A_BOUND, A_BOUND], a)

        s = s_
        t += 1
        ep_rs.append(r)

        # Bug fix: episodes previously ended at t > MAX_EPISODE (1000)
        # rather than the intended per-episode cap MAX_EP_STEPS (200);
        # the environment's `done` signal was also ignored.
        if done or t >= MAX_EP_STEPS:
            ep_rs_sum = sum(ep_rs)
            # Exponential moving average of episode returns for logging.
            if 'running_reward' not in globals():
                running_reward = ep_rs_sum
            else:
                running_reward = running_reward * 0.9 + ep_rs_sum * 0.1
            if running_reward > DISPLAY_REWARD_THRESHOLD:
                RENDER = True  # start rendering once performance is good enough
            print("episode:", i_episode, "  reward:", int(running_reward))
            break