import gym
import random
import numpy as np
import matplotlib.pyplot as plt
from tqdm import trange


class LinearModel:
    """Linear Q-function approximator for a discrete-action environment.

    Q(s, a) = w_a^T s, with one weight row per action. Trained online with
    the Q-learning TD update (see ``fit``).
    """

    def __init__(self, lr, epsilon, gamma, action_space_bounds, observation_space_bounds):
        """
        Parameters
        ----------
        lr :
            Learning rate (alpha) for the TD update.
        epsilon :
            Exploration rate for the epsilon-greedy policy in ``explore``.
        gamma :
            Discount factor for future rewards.
        action_space_bounds :
            Number of discrete actions (rows of the weight matrix).
        observation_space_bounds :
            Dimensionality of an observation (columns of the weight matrix).
        """
        self.action_space_bounds = action_space_bounds
        self.observation_space_bounds = observation_space_bounds
        # Previously trained weights that reliably reached the full 500 steps
        # every episode (kept for reference):
        # [[-2.69659019, -2.55405540, -0.506003425, -1.64488959],
        #  [-2.77111630, -2.30174780, -0.0561821950, -1.44448637]]
        self.weights = np.zeros(
            (self.action_space_bounds, self.observation_space_bounds))  # start from all-zero weights
        self.lr = lr            # learning rate (alpha)
        self.epsilon = epsilon  # epsilon exploration rate
        self.gamma = gamma      # gamma discount factor

    def play(self, observation):
        """Greedy policy: pi(s) = argmax_a w_a^T s.

        Parameters
        ----------
        observation :
            Current environment observation (vector of length
            ``observation_space_bounds``).

        Returns
        -------
        int
            Index of the action with the highest estimated Q-value.
        """
        q_values = self.weights @ observation
        return np.argmax(q_values)

    def explore(self, observation):
        """Epsilon-greedy action selection.

        Same contract as ``play``, but with probability ``epsilon`` a
        uniformly random action is returned instead of the greedy one.
        """
        if random.random() > self.epsilon:
            return np.argmax(self.weights @ observation)
        return np.random.randint(self.action_space_bounds)

    def fit(self, next_observation, observation, reward, action):
        """Apply one Q-learning update for a single transition.

        Parameters
        ----------
        next_observation :
            s' — observation after taking the action.
        observation :
            s — observation before taking the action.
        reward :
            r — reward received for the transition.
        action :
            a — action that was taken.

        Returns
        -------
        None.

        Notes
        -----
        Update rule: w_a <- w_a + alpha * (r + gamma * max_a' Q(s', a') - Q(s, a)) * s
        """
        # max over actions of Q(s', a') — equals dotting with the argmax row.
        best_next_q = np.dot(self.weights, next_observation).max()
        td_error = (reward + self.gamma * best_next_q
                    - np.dot(self.weights[action], observation))
        self.weights[action] += self.lr * td_error * observation


if __name__ == "__main__":
    env = gym.make("CartPole-v1")  # create the environment
    model = LinearModel(0.01, 0.1, 0.99, env.action_space.n,
                        env.observation_space.shape[0])  # linear Q-model sized to the env

    history = []     # steps survived in each episode
    training = True  # whether to run the TD updates after each episode
    env.seed(0)      # fix the RNG seed (legacy gym seeding API)

    # Play a number of episodes.
    for episode in range(100):
        obs_trace, reward_trace, action_trace = [], [], []
        obs = env.reset()
        steps = 0
        done = False
        while not done:
            steps += 1
            env.render()

            # Record the trajectory as we go.
            obs_trace.append(obs)
            if training:
                action = model.explore(obs)
            else:
                action = model.play(obs)
            obs, reward, done, info = env.step(action)
            action_trace.append(action)
            reward_trace.append(reward)

        # The terminal state must have action-value 0. Storing an all-zero
        # observation guarantees it: any weight row dotted with zeros is 0.
        obs_trace.append(obs * 0)

        print(f"坚持了{steps}步。\n权重：{model.weights}")
        history.append(steps)

        # Replay the episode's transitions through the TD update.
        if training:
            for i in trange(steps, desc="训练进度"):
                model.fit(obs_trace[i + 1], obs_trace[i],
                          reward_trace[i], action_trace[i])

    # Plot the learning curve.
    plt.title("车杆平衡游戏学习记录")
    plt.xlabel("局数")
    plt.ylabel("坚持的步数")
    plt.plot(history, color='g')
    plt.savefig("CartPole_0.svg", format='svg')

    # Close the render window.
    env.close()