# %%

import time

import numpy as np
import paddle
import paddle.nn.functional as F
import parl
import pygame.event
from paddle import nn
from parl.algorithms import PolicyGradient

from q_table.Envs.PGMazeEnv import MazeEnv

LEARNING_RATE = 0.01  # learning rate for the policy-gradient optimizer
GAMMA = 0.999  # reward discount factor; typically chosen between 0.9 and 0.999


class CartpoleModel(parl.Model):
    """Policy network: three fully-connected layers mapping an observation
    vector to a softmax probability distribution over the discrete actions.
    """

    def __init__(self, obs_dim, act_dim):
        super(CartpoleModel, self).__init__()
        hidden = 128  # both hidden layers share the same width
        self.fc1 = nn.Linear(obs_dim, hidden)
        self.fc2 = nn.Linear(hidden, hidden)
        self.fc3 = nn.Linear(hidden, act_dim)

    def forward(self, obs):
        x = F.tanh(self.fc1(obs))
        x = F.tanh(self.fc2(x))
        # float64 softmax keeps the probabilities summing to 1 precisely
        # enough for np.random.choice(p=...) downstream
        return F.softmax(self.fc3(x), dtype='float64')


class MazeAgent(parl.Agent):
    """Policy-gradient agent with epsilon-greedy exploration layered on top
    of the stochastic policy.

    Args:
        algorithm: a PARL algorithm (here PolicyGradient) wrapping the model.
        act_dim (int): number of discrete actions.
        e_greed (float): initial probability of taking a uniformly random
            action instead of sampling from the policy.
        e_greed_decrement (float): amount subtracted from ``e_greed`` after
            every sampled action; exploration is floored at 0.01.
    """

    def __init__(self, algorithm, act_dim, e_greed=0.1, e_greed_decrement=0.):
        super(MazeAgent, self).__init__(algorithm)
        assert isinstance(act_dim, int)
        self.act_dim = act_dim
        self.e_greed = e_greed  # probability of a random exploratory action
        self.e_greed_decrement = e_greed_decrement  # per-step annealing amount

    def sample(self, obs):
        """Sample a training action: with probability ``e_greed`` pick
        uniformly at random, otherwise draw from the policy distribution."""
        if np.random.rand() < self.e_greed:
            act = np.random.randint(0, self.act_dim)
        else:
            obs = paddle.to_tensor(obs, dtype='float32')
            pred_q = self.alg.predict(obs)
            # BUG FIX: np.random.choice expects a 1-D ndarray of
            # probabilities for ``p``; convert the paddle Tensor explicitly
            # instead of passing it through directly.
            act = np.random.choice(range(self.act_dim), p=pred_q.numpy())
        # anneal exploration, but never below 1%
        self.e_greed = max(0.01, self.e_greed - self.e_greed_decrement)
        return act

    def predict(self, obs):
        """Greedy action: argmax of the policy's action probabilities."""
        obs = paddle.to_tensor(obs, dtype='float32')
        pred_q = self.alg.predict(obs)
        act = pred_q.argmax().numpy()[0]  # index of the most probable action
        return act

    def learn(self, obs, act, reward):
        """Run one policy-gradient update on a whole-episode batch and
        return the scalar loss value."""
        act = np.expand_dims(act, -1)
        reward = np.expand_dims(reward, -1)

        obs = paddle.to_tensor(obs, dtype='float32')
        act = paddle.to_tensor(act, dtype='int32')
        reward = paddle.to_tensor(reward, dtype='float32')
        loss = self.alg.learn(obs, act, reward)
        return loss.numpy()[0]


# Run one training episode: collect a trajectory, convert rewards to
# discounted returns, and perform one policy-gradient update on the agent.
def run_episode(env, agent, render=False):
    """Return the mean discounted return over the episode's steps."""
    obs_list, action_list, reward_list = [], [], []
    obs = env.reset()
    while True:
        # keep the pygame window responsive while training
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
        obs_list.append(obs)
        action = agent.sample(obs)  # stochastic policy: every action can be tried
        action_list.append(action)
        next_obs, reward, done, _ = env.step(action)
        reward_list.append(reward)
        # BUG FIX: advance the observation. Previously next_obs was
        # discarded, so the agent sampled from the initial observation on
        # every step and learned from a trajectory of identical states.
        obs = next_obs
        if render:
            # time.sleep(1)
            env.gameMaze.draw_maze(env.maze, env.cur_pos)
        if done:
            break
    batch_obs = np.array(obs_list)
    batch_action = np.array(action_list)
    # in-place backward pass turns per-step rewards into reward-to-go returns
    for i in range(len(reward_list) - 2, -1, -1):
        reward_list[i] += GAMMA * reward_list[i + 1]

    batch_reward = np.array(reward_list)
    agent.learn(batch_obs, batch_action, batch_reward)

    return np.mean(reward_list)


# Evaluate the agent greedily for a single episode (capped at just over
# env.size * 3 steps) and return its total reward.
def evaluate(env, agent, render=False):
    episode_rewards = []
    obs = env.reset()
    reward_sum = 0
    steps = 0
    while True:
        # keep the pygame window responsive during evaluation
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
        act = agent.predict(obs)  # greedy: always the most probable action
        obs, r, done, _ = env.step(act)
        reward_sum += r
        steps += 1
        if render:
            time.sleep(0.1)
            env.gameMaze.draw_maze(env.maze, env.cur_pos)
        # stop on terminal state or when the step budget is exhausted
        if done or steps > env.size * 3:
            break
    episode_rewards.append(reward_sum)
    return np.mean(episode_rewards)


# --- environment / model / agent setup --------------------------------------
env = MazeEnv(
    minsize=2,
    maxsize=2,
)
action_dim = env.action_space.n
obs_shape = (env.size ** 2,)  # presumably a one-hot vector over maze cells — see one-hot build below
obs_dim = obs_shape[0]
model = CartpoleModel(obs_dim=obs_dim, act_dim=action_dim)
algorithm = PolicyGradient(model, lr=LEARNING_RATE)
agent = MazeAgent(
    algorithm,
    act_dim=action_dim,
    e_greed=0.3,  # initial probability of exploring with a random action
    e_greed_decrement=1e-6
)

env.gameMaze.start_game()
env.reset()
env.gameMaze.draw_maze(env.maze, env.cur_pos)
# maze = []
# for row in env.maze:
#     tmp_row = []
#     for cell in row:
#         if cell.type == 1:
#             tmp_row.append(cell.type)
#         else:
#             tmp_row.append(0)
#     maze.append(tmp_row)
# print(maze)

max_episode = 500000

# --- training loop ----------------------------------------------------------
episode = 0
max_reward = -1000000000000000000  # effectively -inf; tracks the best episode return
average_reward = 0
while episode < max_episode:  # train for max_episode episodes; evaluation runs are not counted
    # train part: 5 episodes between evaluations
    for i in range(0, 5):
        total_reward = run_episode(env, agent, render=False)
        if total_reward > max_reward:
            max_reward = total_reward
        episode += 1
        # running mean over all episodes so far
        average_reward = (average_reward * (episode - 1) + total_reward) / episode
        print(
            f'\r#####episode: {episode}, reward: {total_reward},max_reward: {max_reward}, average_reward: {average_reward}, e_greed: {agent.e_greed}#####',
            end="")

    eval_reward = evaluate(env, agent, render=True)  # render=True shows the maze window
    print()
    print('episode:{}    e_greed:{}   test_reward:{}'.format(
        episode, agent.e_greed, eval_reward))
    # print the current greedy policy as an arrow per walkable maze cell
    actions = ['↑', '↓', '→', '←']
    for row in env.maze:
        for cell in row:
            if cell.type == 1:
                print('█', end='')
            else:
                state = cell.loc[0] * env.size + cell.loc[1]
                # encode the cell index as a one-hot observation
                state_one_hot = [0] * env.size * env.size
                state_one_hot[state] = 1
                # state_one_hot.append(0)
                print(actions[
                          agent.alg.predict(paddle.to_tensor(state_one_hot, dtype='float32')).argmax().numpy()[
                              0]], end='')
        print()
    # print(agent.alg.predict(paddle.to_tensor(state_one_hot, dtype='float32')))