#%%

import time

import gym
import paddle
import parl
import pygame.event
from paddle import nn
import paddle.nn.functional as F
import numpy as np
from parl.algorithms import PolicyGradient
from q_table.Envs.DQNMazeEnv import MazeEnv

LEARNING_RATE = 0.1  # learning rate (NOTE(review): unusually high for policy gradient — confirm intended)
GAMMA = 0.999  # reward discount factor; typically chosen between 0.9 and 0.999


#%%

class CartpoleModel(parl.Model):
    """Policy network for CartPole: maps an observation to an action
    probability distribution.

    One hidden layer of 128 units with tanh activation; the output layer is
    passed through softmax so each row sums to 1 (usable directly as the
    `p` argument of a categorical sampler).
    """

    def __init__(self, obs_dim, act_dim):
        """obs_dim: size of the observation vector; act_dim: number of discrete actions."""
        super(CartpoleModel, self).__init__()
        hid1_size = 128  # hidden-layer width (removed unused hid2_size)
        self.fc1 = nn.Linear(obs_dim, hid1_size)
        self.fc2 = nn.Linear(hid1_size, act_dim)

    def forward(self, obs):
        h1 = F.tanh(self.fc1(obs))
        # dtype='float64' keeps the probabilities in double precision so they
        # sum to 1 closely enough for np.random.choice's validation.
        prob = F.softmax(self.fc2(h1), dtype='float64')
        return prob


#%%

class MazeAgent(parl.Agent):
    """Policy-gradient agent: samples actions from the model's output
    distribution during training and acts greedily during evaluation.
    """

    def __init__(self, algorithm, act_dim, e_greed=0.1, e_greed_decrement=0.):
        """
        Args:
            algorithm: a parl algorithm (PolicyGradient here) wrapping the model.
            act_dim (int): number of discrete actions.
            e_greed: initial exploration epsilon (see NOTE in sample()).
            e_greed_decrement: amount subtracted from e_greed per sample() call.
        """
        super(MazeAgent, self).__init__(algorithm)
        assert isinstance(act_dim, int)
        self.act_dim = act_dim

        # NOTE(review): DQN leftovers — neither field is used by this agent.
        self.global_step = 0
        self.update_target_steps = 2000

        self.e_greed = e_greed
        self.e_greed_decrement = e_greed_decrement

    def sample(self, obs):
        """Sample an action from the policy distribution (training-time exploration)."""
        obs = paddle.to_tensor(obs, dtype='float32')
        # BUGFIX: np.random.choice expects a 1-D numpy array of probabilities;
        # convert the paddle Tensor explicitly instead of relying on implicit
        # array coercion.
        prob = self.alg.predict(obs).numpy()
        act = np.random.choice(range(self.act_dim), p=prob)
        # The epsilon-greedy branch was removed, but epsilon is still decayed
        # here so the training log keeps reporting a shrinking value.
        self.e_greed = max(0.01, self.e_greed - self.e_greed_decrement)
        return act

    def predict(self, obs):
        """Return the greedy (highest-probability) action for evaluation."""
        obs = paddle.to_tensor(obs, dtype='float32')
        prob = self.alg.predict(obs)
        act = prob.argmax().numpy()[0]  # index of the most probable action
        return act

    def learn(self, obs, act, reward):
        """One policy-gradient update over a whole episode.

        Args:
            obs: batch of observations, shape (T, obs_dim).
            act: batch of chosen actions, shape (T,).
            reward: batch of discounted returns, shape (T,).

        Returns:
            The scalar training loss as a Python float/numpy scalar.
        """
        act = np.expand_dims(act, -1)
        reward = np.expand_dims(reward, -1)

        obs = paddle.to_tensor(obs, dtype='float32')
        act = paddle.to_tensor(act, dtype='int32')
        reward = paddle.to_tensor(reward, dtype='float32')
        loss = self.alg.learn(obs, act, reward)
        return loss.numpy()[0]


#%%

# Run one training episode (sample actions, then learn from the trajectory)
def run_episode(env, agent, render=False):
    """Collect one episode with the stochastic policy, then train on it.

    Returns:
        The sum of the (in-place discounted) reward list used for learning.
    """
    obs_list, action_list, reward_list = [], [], []
    obs = env.reset()
    done = False
    while not done:
        # Keep the pygame window responsive while training.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
        obs_list.append(obs)
        action = agent.sample(obs)  # stochastic: every action can be tried
        action_list.append(action)
        next_obs, reward, done, _ = env.step(action)
        reward_list.append(reward)
        # BUGFIX: advance the observation. This assignment was missing, so the
        # agent sampled from (and recorded) the initial observation for the
        # whole episode.
        obs = next_obs
        if render:
            env.gameMaze.draw_maze(env.maze, env.cur_pos)

    batch_obs = np.array(obs_list)
    batch_action = np.array(action_list)
    # Turn immediate rewards into discounted returns, back to front:
    # G_t = r_t + GAMMA * G_{t+1}
    for i in range(len(reward_list) - 2, -1, -1):
        reward_list[i] += GAMMA * reward_list[i + 1]

    batch_reward = np.array(reward_list)
    agent.learn(batch_obs, batch_action, batch_reward)

    return np.sum(reward_list)


# Evaluate the agent: run ONE greedy episode and return its total reward
# (the function does not average over 5 episodes as previously stated).
def evaluate(env, agent, render=False):
    """Play one episode greedily (no exploration) and return its total reward."""
    episode_total = 0
    step_count = 0  # retained from the original; not used for termination
    obs = env.reset()
    done = False
    while not done:
        # Drain window events so the UI does not freeze during evaluation.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
        action = agent.predict(obs)  # greedy action only
        obs, reward, done, _ = env.step(action)
        episode_total += reward
        step_count += 1
        if render:
            time.sleep(0.2)
            env.render()
    # np.mean over a single-episode list mirrors the original's return value.
    return np.mean([episode_total])

#%%

# env = MazeEnv(
#     minsize=2,
#     maxsize=2,
# )
# action_dim = env.action_space.n
# obs_shape = (env.size ** 2 + 1,)
# obs_dim = obs_shape[0]
# Build the CartPole environment, model, algorithm and agent.
env = gym.make('CartPole-v0')
obs_dim = env.observation_space.shape[0]
action_dim = env.action_space.n
model = CartpoleModel(obs_dim=obs_dim, act_dim=action_dim)
algorithm = PolicyGradient(model, lr=LEARNING_RATE)
agent = MazeAgent(
    algorithm,
    act_dim=action_dim,
    e_greed=1,  # exploration epsilon (NOTE(review): not used for action choice — the epsilon branch in sample() is commented out)
    e_greed_decrement=1e-6  # decay subtracted from e_greed on every sample() call
)

#%%

# env.gameMaze.start_game()
# Initialise pygame so run_episode/evaluate can poll the event queue.
pygame.init()


#%%

max_episode = 5000  # number of training episodes to run

#%%

# Training loop: run max_episode training episodes (evaluation is not counted).
episode = 0
max_reward = float('-inf')  # best single-episode reward seen so far (was a magic -1e18 literal)
average_reward = 0
while episode < max_episode:
    # train part
    total_reward = run_episode(env, agent, render=False)
    if total_reward > max_reward:
        max_reward = total_reward
    episode += 1
    # Incremental running mean of total_reward over all episodes so far.
    average_reward = (average_reward * (episode - 1) + total_reward) / episode
    print(
        f'\r#####episode: {episode}, reward: {total_reward},max_reward: {max_reward}, average_reward: {average_reward}, e_greed: {agent.e_greed}#####',
        end="")


#%%

# Final evaluation with rendering enabled to watch the learned policy.
eval_reward = evaluate(env, agent, render=True)  # render=True to display the run
print()
print('episode:{}    e_greed:{}   test_reward:{}'.format(
    episode, agent.e_greed, eval_reward))
print(agent.e_greed)

#%%

# obs = [1, 1, 1, 1, 1, 1, 1, 1, 1,
#        0, 2, 1, 0, 1, 0, 1, 0, 1,
#        1, 2, 1, 0, 1, 0, 1, 0, 1,
#        1, 2, 2, 2, 2, 2, 2, 2, 1,
#        1, 1, 1, 0, 1, 1, 1, 2, 1,
#        1, 0, 0, 0, 1, 0, 1, 0, 1,
#        1, 1, 1, 0, 1, 0, 1, 0, 1,
#        1, 0, 0, 0, 1, 0, 0, 0, 0,
#        1, 1, 1, 1, 1, 1, 1, 1, 1]
# obs = 72, 0
# t = paddle.to_tensor(obs, dtype='float32')
# print(agent.alg.model(t))
# NOTE(review): resets the exploration epsilon back to 1 — presumably so the
# training cell above can be re-run in this notebook; confirm this is intentional.
agent.e_greed = 1

