import time

import numpy as np
import paddle
import paddle.nn.functional as F
import parl
import pygame.event
from paddle import nn
from parl.algorithms import DQN
from parl.utils import ReplayMemory

from Envs.DQNMazeEnv import MazeEnv

LEARN_FREQ = 5  # learn every N env steps (not every step): batch up fresh experience for efficiency
MEMORY_SIZE = 200000  # replay memory capacity; larger values use more RAM
MEMORY_WARMUP_SIZE = 200  # pre-fill the replay memory with this many transitions before training starts
BATCH_SIZE = 32  # number of transitions sampled from the replay memory per learn() call
LEARNING_RATE = 0.0001  # optimizer learning rate
GAMMA = 0.999  # reward discount factor, typically between 0.9 and 0.999


class CartpoleModel(parl.Model):
    """Small MLP Q-network: maps an observation vector to one Q-value per action.

    Architecture: obs_dim -> 128 -> 128 -> act_dim, ReLU on both hidden
    layers, linear output (raw Q-values, no activation).
    """

    def __init__(self, obs_dim, act_dim):
        super(CartpoleModel, self).__init__()
        hidden = 128  # width of both hidden layers
        self.fc1 = nn.Linear(obs_dim, hidden)
        self.fc2 = nn.Linear(hidden, hidden)
        self.fc3 = nn.Linear(hidden, act_dim)

    def forward(self, obs):
        """Return Q-value estimates for every action given *obs*."""
        features = F.relu(self.fc2(F.relu(self.fc1(obs))))
        return self.fc3(features)


class MazeAgent(parl.Agent):
    """Epsilon-greedy DQN agent for the maze environment.

    Wraps a PARL DQN algorithm: ``sample`` explores with probability
    ``e_greed``, ``predict`` acts greedily, and ``learn`` performs one
    gradient step, periodically syncing the target network.
    """

    def __init__(self, algorithm, act_dim, e_greed=0.1, e_greed_decrement=0.):
        super(MazeAgent, self).__init__(algorithm)
        assert isinstance(act_dim, int)
        self.act_dim = act_dim

        self.global_step = 0
        # Copy the online model into the target model every this many
        # learn() calls.
        self.update_target_steps = 200

        self.e_greed = e_greed  # probability of picking a random action
        self.e_greed_decrement = e_greed_decrement  # per-sample epsilon decay

    def sample(self, obs):
        """Pick an action epsilon-greedily, then decay epsilon (floored at 0.1)."""
        if np.random.random() < self.e_greed:
            act = np.random.randint(0, self.act_dim)  # explore
        else:
            act = self.predict(obs)  # exploit
        self.e_greed = max(0.1, self.e_greed - self.e_greed_decrement)
        return act

    def predict(self, obs):
        """Return the greedy (argmax-Q) action index for *obs*."""
        q_values = self.alg.predict(paddle.to_tensor(obs, dtype='float32'))
        return q_values.argmax().numpy()[0]

    def learn(self, obs, act, reward, next_obs, terminal):
        """Run one DQN update on a batch of transitions; return the scalar loss."""
        # Sync the target network every `update_target_steps` calls — the
        # check precedes the increment, so the very first call also syncs.
        if self.global_step % self.update_target_steps == 0:
            self.alg.sync_target()
        self.global_step += 1

        obs = paddle.to_tensor(obs, dtype='float32')
        act = paddle.to_tensor(np.expand_dims(act, -1), dtype='int32')
        reward = paddle.to_tensor(np.expand_dims(reward, -1), dtype='float32')
        next_obs = paddle.to_tensor(next_obs, dtype='float32')
        terminal = paddle.to_tensor(np.expand_dims(terminal, -1), dtype='float32')

        loss = self.alg.learn(obs, act, reward, next_obs, terminal)
        return loss.numpy()[0]


# Run one training episode with epsilon-greedy exploration.
def run_episode(env, agent, rpm, render=False):
    """Play a single episode and return its total (undiscounted) reward.

    Transitions are appended to the replay memory every step; once the
    memory holds more than MEMORY_WARMUP_SIZE transitions, the agent
    learns from a random batch every LEARN_FREQ steps.

    Args:
        env: maze environment with a gym-like reset()/step() interface.
        agent: MazeAgent used for sampling actions and learning.
        rpm: PARL ReplayMemory holding (obs, act, reward, next_obs, done).
        render: when True, redraw the maze each step (sleeps 1 s/step).

    Returns:
        Sum of rewards collected over the episode.
    """
    total_reward = 0
    obs = env.reset()
    step = 0
    while True:
        # Drain the pygame event queue so the window stays responsive.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
        step += 1
        action = agent.sample(obs)  # epsilon-greedy: every action can be tried
        next_obs, reward, done, _ = env.step(action)
        rpm.append(obs, action, reward, next_obs, done)  # store the transition

        # Learn only after the warm-up phase, and only every LEARN_FREQ steps.
        if (len(rpm) > MEMORY_WARMUP_SIZE) and (step % LEARN_FREQ == 0):
            (batch_obs, batch_action, batch_reward, batch_next_obs,
             batch_done) = rpm.sample_batch(BATCH_SIZE)
            agent.learn(batch_obs, batch_action, batch_reward,
                        batch_next_obs, batch_done)  # s, a, r, s', done

        if render:
            time.sleep(1)
            env.gameMaze.draw_maze(env.maze, env.cur_pos)
        total_reward += reward
        obs = next_obs
        if done:
            break
    return total_reward


# Evaluate the agent greedily on a single episode and return its reward.
def evaluate(env, agent, render=False):
    """Run one purely greedy episode (no exploration) and return its reward.

    The episode is cut off once the step count exceeds env.size**2 so a
    bad policy cannot loop forever.
    """
    episode_rewards = []
    obs = env.reset()
    reward_sum = 0
    steps_taken = 0
    while True:
        # Drain the pygame event queue so the window stays responsive.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
        action = agent.predict(obs)  # always take the greedy action
        obs, reward, done, _ = env.step(action)

        reward_sum += reward
        steps_taken += 1
        if render:
            time.sleep(0.02)
            env.gameMaze.draw_maze(env.maze, env.cur_pos)
        if done or steps_taken > env.size ** 2:
            break
    episode_rewards.append(reward_sum)
    return np.mean(episode_rewards)


# --- Environment, replay memory, model, algorithm, agent setup ---------------
env = MazeEnv(
    minsize=3,
    maxsize=3,
)
action_dim = env.action_space.n
# Observation is a one-hot position vector of length size**2 plus one extra slot.
obs_shape = (env.size ** 2 + 1,)
obs_dim = obs_shape[0]
rpm = ReplayMemory(MEMORY_SIZE, obs_dim=obs_dim, act_dim=0)  # DQN replay buffer (act_dim=0 -> scalar discrete action)
model = CartpoleModel(obs_dim=obs_dim, act_dim=action_dim)
algorithm = DQN(model, gamma=GAMMA, lr=LEARNING_RATE)
agent = MazeAgent(
    algorithm,
    act_dim=action_dim,
    e_greed=0.1,  # probability of picking a random action (exploration)
    e_greed_decrement=1e-6  # epsilon decays slowly as training converges
)

env.gameMaze.start_game()
env.reset()
env.gameMaze.draw_maze(env.maze, env.cur_pos)
# Dump the maze layout as a grid of cell types for debugging.
tmp = [[cell.type for cell in row] for row in env.maze]
print(tmp)

# Pre-fill the replay memory so the first training batches are diverse enough.
while len(rpm) < MEMORY_WARMUP_SIZE:
    run_episode(env, agent, rpm, render=True)
print('experience collection finished')

# %%

max_episode = 1000

# %%

# Training loop: max_episode training episodes; evaluation runs are not counted.
episode = 0
max_reward = float('-inf')
average_reward = 0
while episode < max_episode:
    # train part: 100 episodes per chunk, then one rendered evaluation
    for i in range(0, 100):
        total_reward = run_episode(env, agent, rpm, render=False)
        max_reward = max(max_reward, total_reward)
        episode += 1
        # Incremental running mean over all training episodes so far.
        average_reward = (average_reward * (episode - 1) + total_reward) / episode
        print(
            f'\r#####episode: {episode}, reward: {total_reward},max_reward: {max_reward}, average_reward: {average_reward}, e_greed: {agent.e_greed}#####',
            end="")
        if i == 99:
            eval_reward = evaluate(env, agent, render=True)  # render=True to watch the run
            print()
            print('episode:{}    e_greed:{}   test_reward:{}'.format(
                episode, agent.e_greed, eval_reward))