# %%

import time

import gym
import numpy as np
import paddle
import paddle.nn.functional as F
import parl
from paddle import nn
from parl.algorithms import DQN
from parl.utils import ReplayMemory

from Envs.DQNMazeEnv import MazeEnv

LEARN_FREQ = 1  # training frequency: learn every LEARN_FREQ env steps rather than every step
MEMORY_SIZE = 20000  # capacity of the replay memory; larger uses more RAM
MEMORY_WARMUP_SIZE = 200  # number of transitions to pre-collect before training starts
BATCH_SIZE = 32  # transitions sampled from the replay memory per learn() call
LEARNING_RATE = 0.01  # optimizer learning rate
GAMMA = 0.999  # reward discount factor, typically between 0.9 and 0.999


# %%
class CartpoleModel(parl.Model):
    """Fully connected Q-network: obs -> 128 -> 128 -> one Q-value per action."""

    def __init__(self, obs_dim, act_dim):
        super(CartpoleModel, self).__init__()
        hidden = 128
        self.fc1 = nn.Linear(obs_dim, hidden)
        self.fc2 = nn.Linear(hidden, hidden)
        self.fc3 = nn.Linear(hidden, act_dim)

    def forward(self, obs):
        x = F.relu(self.fc1(obs))
        x = F.relu(self.fc2(x))
        return self.fc3(x)


# %%
class MazeAgent(parl.Agent):
    """DQN agent with epsilon-greedy exploration.

    Args:
        algorithm: PARL DQN algorithm wrapping the Q-network.
        act_dim (int): number of discrete actions.
        e_greed (float): initial probability of taking a random action.
        e_greed_decrement (float): amount epsilon decays after each sample().
    """

    def __init__(self, algorithm, act_dim, e_greed=0.1, e_greed_decrement=0.):
        super(MazeAgent, self).__init__(algorithm)
        assert isinstance(act_dim, int)
        self.act_dim = act_dim

        self.global_step = 0
        # Copy the online network's parameters into the target network
        # every `update_target_steps` calls to learn().
        self.update_target_steps = 200

        self.e_greed = e_greed  # exploration probability
        self.e_greed_decrement = e_greed_decrement  # linear decay per sample()

    def sample(self, obs):
        """Epsilon-greedy action selection used while collecting experience."""
        if np.random.random() < self.e_greed:
            # Explore: uniformly random action.
            act = np.random.randint(0, self.act_dim)
        else:
            # Exploit: greedy action from the Q-network.
            act = self.predict(obs)
        # FIX: the original used max(0.1, e_greed - decrement * global_step);
        # with e_greed starting at 0.1 that floor meant epsilon could never
        # decrease, so exploration never decayed. Decay linearly per call,
        # floored at 0.01.
        self.e_greed = max(0.01, self.e_greed - self.e_greed_decrement)
        return act

    def predict(self, obs):
        """Return the greedy action (argmax of predicted Q-values) for one observation."""
        obs = paddle.to_tensor(obs, dtype='float32')
        pred_q = self.alg.predict(obs)
        act = pred_q.argmax().numpy()[0]
        return act

    def learn(self, obs, act, reward, next_obs, terminal):
        """Run one DQN update on a batch of transitions; return the scalar loss."""
        # Periodically sync the online network into the target network.
        # FIX: removed the time.sleep(1) that stalled training for a second
        # after every sync (including the very first step).
        if self.global_step % self.update_target_steps == 0:
            self.alg.sync_target()
        self.global_step += 1

        # DQN expects act/reward/terminal as column vectors.
        act = np.expand_dims(act, -1)
        reward = np.expand_dims(reward, -1)
        terminal = np.expand_dims(terminal, -1)

        obs = paddle.to_tensor(obs, dtype='float32')
        act = paddle.to_tensor(act, dtype='int32')
        reward = paddle.to_tensor(reward, dtype='float32')
        next_obs = paddle.to_tensor(next_obs, dtype='float32')
        terminal = paddle.to_tensor(terminal, dtype='float32')
        loss = self.alg.learn(obs, act, reward, next_obs, terminal)
        return loss.numpy()[0]

# %%
# Run one training episode.
def run_episode(env, agent, rpm, render=False):
    """Collect one episode with epsilon-greedy actions, storing every
    transition in the replay memory and training the agent along the way.

    Args:
        env: environment with gym-style reset()/step() returning (obs, reward, done, info).
        agent: agent providing sample() (exploratory) and learn().
        rpm: replay memory with append() and sample_batch().
        render (bool): render the environment after each step.

    Returns:
        The episode's total (undiscounted) reward.
    """
    total_reward = 0
    obs = env.reset()
    step = 0
    while True:
        step += 1
        # Sample (not greedy) so every action keeps some probability of being tried.
        action = agent.sample(obs)
        next_obs, reward, done, _ = env.step(action)
        rpm.append(obs, action, reward, next_obs, done)
        if done:
            print('done')
        # Train once the warm-up buffer is filled, every LEARN_FREQ steps.
        if (len(rpm) > MEMORY_WARMUP_SIZE) and (step % LEARN_FREQ == 0):
            (batch_obs, batch_action, batch_reward, batch_next_obs,
             batch_done) = rpm.sample_batch(BATCH_SIZE)
            # Loss is not needed here, so the return value is ignored.
            agent.learn(batch_obs, batch_action, batch_reward,
                        batch_next_obs, batch_done)  # s, a, r, s', done
        if render:
            env.render()
        total_reward += reward
        obs = next_obs
        if done:
            break
    return total_reward


# Evaluate the agent: run 5 episodes and average the total reward.
def evaluate(env, agent, render=False):
    """Run 5 greedy episodes and return the mean total reward.

    Uses agent.predict() only (no exploration); optionally renders each step.
    """
    rewards = []
    for _ in range(5):
        obs = env.reset()
        episode_total = 0
        done = False
        while not done:
            # Greedy action only during evaluation.
            action = agent.predict(obs)
            obs, reward, done, _ = env.step(action)
            episode_total += reward
            if render:
                env.render()
        rewards.append(episode_total)
    return np.mean(rewards)


# %%
env = MazeEnv(
    minsize=5,
    maxsize=5,
)
action_dim = env.action_space.n
obs_shape = env.observation_space.shape
obs_dim = obs_shape[0]
# Replay buffer for DQN experience.
rpm = ReplayMemory(MEMORY_SIZE, obs_dim=obs_dim, act_dim=0)
model = CartpoleModel(obs_dim=obs_dim, act_dim=action_dim)
algorithm = DQN(model, gamma=GAMMA, lr=LEARNING_RATE)
agent = MazeAgent(
    algorithm,
    act_dim=action_dim,
    e_greed=0.1,  # initial probability of a random (exploratory) action
    e_greed_decrement=1e-6  # epsilon decays slowly as training converges
)
# %%
# Pre-fill the replay memory so the first training batches are diverse enough.
while len(rpm) < MEMORY_WARMUP_SIZE:
    run_episode(env, agent, rpm, render=False)
print('experience collection finished')

# %%
max_episode = 2000
# Main loop: train 50 episodes, then run one evaluation pass.
# Evaluation episodes do not count toward `episode`.
episode = 0
while episode < max_episode:
    # Training phase.
    for _ in range(50):
        total_reward = run_episode(env, agent, rpm, render=False)
        print('episode: ', episode, ' reward: ', total_reward)
        episode += 1
    time.sleep(1)
    # Evaluation phase (render=True to watch the agent).
    eval_reward = evaluate(env, agent, render=True)
    print('episode:{}    e_greed:{}   test_reward:{}'.format(
        episode, agent.e_greed, eval_reward))
