import matplotlib
from matplotlib import pyplot as plt

# Global plot configuration: larger text, a 9x7 canvas, a Chinese-capable
# font (KaiTi), and correct minus-sign rendering with non-ASCII fonts.
matplotlib.rcParams.update(
    {
        "font.size": 18,
        "figure.titlesize": 18,
        "figure.figsize": [9, 7],
        "font.family": ["KaiTi"],
        "axes.unicode_minus": False,
    }
)

plt.figure()

import gym, os
import numpy as np
from tensorflow.keras import layers, optimizers, losses
import tensorflow as tf
from tensorflow.keras import Model
from collections import namedtuple
import collections



# Fix random seeds for reproducibility of sampling and initialization.
tf.random.set_seed(2222)
np.random.seed(2222)
# NOTE(review): TF_CPP_MIN_LOG_LEVEL is set AFTER `import tensorflow` above,
# which is usually too late to silence the C++ startup logs — confirm intent.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
assert tf.__version__.startswith("2.")


gamma = 0.98  # reward discount factor
epsilon = 0.2  # PPO clipping hyperparameter: ratio clipped to [0.8, 1.2]
batch_size = 32  # batch size (unused in the code visible here)


# Create the game environment; .unwrapped strips the TimeLimit wrapper, so
# episodes are bounded only by the step cap in main().
env = gym.make("CartPole-v1").unwrapped
# One sampled transition. NOTE(review): despite the name, `a_log_prob` is
# filled with the raw action probability by select_action — confirm.
Transition = namedtuple(
    "Transition", ["state", "action", "a_log_prob", "reward", "next_state"]
)


class Actor(Model):
    """Policy (actor) network: maps a state to an action distribution pi(a|s)."""

    def __init__(self):
        super(Actor, self).__init__()
        # Two dense layers: a 100-unit hidden layer and a 2-way output head.
        self.fc1 = layers.Dense(100, kernel_initializer="he_normal")
        self.fc2 = layers.Dense(2, kernel_initializer="he_normal")

    def call(self, inputs):
        # Hidden representation with ReLU activation.
        hidden = self.fc1(inputs)
        hidden = tf.nn.relu(hidden)
        # Raw action scores, normalized into probabilities over the 2 actions.
        logits = self.fc2(hidden)
        return tf.nn.softmax(logits, axis=1)

class Critic(Model):
    """Value (critic) network: maps a state to a scalar baseline estimate v(s)."""

    def __init__(self):
        super(Critic, self).__init__()
        # 100-unit hidden layer followed by a single-unit value head.
        self.fc1 = layers.Dense(100, kernel_initializer="he_normal")
        self.fc2 = layers.Dense(1, kernel_initializer="he_normal")

    def call(self, inputs):
        # ReLU hidden layer, then the unbounded scalar value output.
        activated = tf.nn.relu(self.fc1(inputs))
        return self.fc2(activated)

class PPO:
    """PPO agent: actor/critic networks, action sampling, transition buffering,
    and model persistence. (No training step is present in this file's visible
    code; the optimizers are created but not used here.)
    """

    def __init__(self, load=True):
        """Build or restore the actor/critic pair.

        Args:
            load: if True, restore both networks from model_data/; otherwise
                create fresh, untrained networks.
        """
        if load:
            self.load_models()
        else:
            self.actor = Actor()    # policy network
            self.critic = Critic()  # value (baseline) network
        self.buffer = []  # buffer of sampled Transition tuples
        self.actor_optimizer = optimizers.Adam(1e-3)   # actor optimizer
        self.critic_optimizer = optimizers.Adam(3e-3)  # critic optimizer

    def load_models(self):
        """Restore the full actor/critic models previously written by save()."""
        self.actor = tf.keras.models.load_model("model_data/actor_model")
        self.critic = tf.keras.models.load_model("model_data/critic_model")

    def select_action(self, s):
        """Sample an action for state `s` from the current policy.

        Args:
            s: state vector (length-4 CartPole observation).

        Returns:
            (action, probability): the sampled action index (int) and the
            policy's probability of that action (float).
        """
        s = tf.constant(s, dtype=tf.float32)
        s = tf.expand_dims(s, axis=0)  # [4] -> [1, 4]
        prob = self.actor(s)  # action distribution, shape [1, 2]
        # Sample one action index from the categorical distribution.
        a = tf.random.categorical(tf.math.log(prob), 1)[0]
        a = int(a)  # tensor -> Python int
        return a, float(prob[0][a])

    def get_value(self, s):
        """Return the critic's scalar value estimate v(s) for state `s`."""
        s = tf.constant(s, dtype=tf.float32)
        s = tf.expand_dims(s, axis=0)  # [4] -> [1, 4]
        v = self.critic(s)[0]
        return float(v)

    def store_transition(self, transition):
        """Append one sampled Transition to the buffer."""
        self.buffer.append(transition)

    def save(self):
        """Save both full models (architecture + weights) in SavedModel format."""
        self.actor.save(r"model_data/actor_model")
        self.critic.save(r"model_data/critic_model")

    def save_weights(self, path):
        """Save only the network weights to `<path>actor.h5` / `<path>critic.h5`.

        Bug fix: this method previously called `Model.save`, which writes a
        full model rather than the weights its name (and the matching
        `load_weights` counterpart) promises.
        """
        self.actor.save_weights(path + "actor.h5")
        self.critic.save_weights(path + "critic.h5")

def main():
    """Run episodes with the restored PPO policy and log 20-episode averages."""
    agent = PPO(load=True)  # restore the trained actor/critic from disk
    returns = []  # record of 20-episode average returns
    total = 0  # reward accumulated over the current 20-episode window
    for i_epoch in range(200):  # number of evaluation episodes
        state = env.reset()
        # Newer gym versions return (obs, info) from reset(); keep only obs.
        if len(state) == 2:
            state = state[0]
        for t in range(200):  # cap each episode at 200 steps
            # Interact with the environment using the current policy.
            action, action_prob = agent.select_action(state)
            # New gym step API: (obs, reward, terminated, truncated, info).
            next_state, reward, done, _, _ = env.step(action)
            total += reward  # accumulate episode reward
            state = next_state
            env.render()
            if done:  # episode over (pole fell / cart out of bounds)
                break

        # Report once per completed 20-episode window. Bug fix: the original
        # checked `i_epoch % 20 == 0`, which fired at epoch 0 — averaging a
        # single episode over 20 and misaligning every later window.
        if (i_epoch + 1) % 20 == 0:
            returns.append(total / 20)
            total = 0
            print(i_epoch, returns[-1])


if __name__ == "__main__":
    main()
