"""
View more, visit my tutorial page: https://mofanpy.com/tutorials/
My Youtube Channel: https://www.youtube.com/user/MorvanZhou
More about Reinforcement learning: https://mofanpy.com/tutorials/machine-learning/reinforcement-learning/

Dependencies:
torch: 0.4
gym: 0.8.1
numpy
"""
import os

import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import gym
from tqdm import tqdm
from bdtime import tt
import pandas as pd


# Hyper Parameters
BATCH_SIZE = 128
# LR = 2.5e-4                   # learning rate
LR = 0.01                   # learning rate
EPSILON = 1               # greedy policy (choose_action defaults to exploring with prob 1 - EPSILON)
GAMMA = 0.99                 # reward discount
TARGET_REPLACE_ITER = 500   # target update frequency
train_frequency = 1        # training frequency (run a gradient step every N calls to DQN.learn)

MEMORY_CAPACITY = 2000
total_timesteps = 50000

MAX_EPISODE = 100000  # progress is judged mainly by step count, not by episode count
RENDER = 0
load_memory_from_csv = 1
save_memory_to_csv = 1      # persist the randomly-collected replay memory to CSV so warm-up sampling need not be redone every run


class args:
    """Namespace of run configuration: RNG seed and the epsilon-greedy annealing schedule."""
    seed = 1

    start_e = 1                     # initial exploration epsilon
    end_e = 0.05                    # final exploration epsilon
    exploration_fraction = 0.1      # fraction of total_timesteps over which epsilon is annealed
    total_timesteps = total_timesteps


# env = gym.make('CartPole-v0')
env = gym.make('CartPole-v1')
# if 1:
#     env = gym.make('CartPole-v1')
#     env = gym.wrappers.RecordEpisodeStatistics(env)
#     env.single_observation_space
env = env.unwrapped                         # access the raw env (presumably to drop the TimeLimit cap — TODO confirm)
N_ACTIONS = env.action_space.n              # number of discrete actions
N_STATES = env.observation_space.shape[0]   # dimensionality of the observation vector
ENV_A_SHAPE = 0 if isinstance(env.action_space.sample(), int) else env.action_space.sample().shape     # to confirm the shape
NET_NUMBER = 30                             # hidden-layer width used by Net
# learning_starts = 10000


# TRY NOT TO MODIFY: seeding
# Seed every RNG source once so runs are reproducible.
# (The original called np.random.seed twice in a row; the duplicate was removed.)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
env.seed(args.seed)
env.action_space.seed(args.seed)
env.observation_space.seed(args.seed)


class Net(nn.Module):
    """Two-layer MLP mapping a state vector to one Q-value per action.

    Architecture: Linear(N_STATES -> NET_NUMBER) -> ReLU -> Linear(NET_NUMBER -> N_ACTIONS).
    (Dead `if 0:` code and a commented-out alternative network were removed.)
    """

    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(N_STATES, NET_NUMBER)
        self.fc1.weight.data.normal_(0, 0.1)   # small-variance Gaussian initialization
        self.out = nn.Linear(NET_NUMBER, N_ACTIONS)
        self.out.weight.data.normal_(0, 0.1)   # small-variance Gaussian initialization

    def forward(self, x):
        """Return action values of shape (batch, N_ACTIONS) for states x of shape (batch, N_STATES)."""
        x = self.fc1(x)
        x = F.relu(x)
        actions_value = self.out(x)
        return actions_value


def linear_schedule(start_e: float, end_e: float, duration: int, t: int):
    """Linearly anneal from ``start_e`` toward ``end_e`` over ``duration`` steps.

    Returns the interpolated value at step ``t``, clamped so it never passes
    ``end_e`` (i.e. after ``duration`` steps the schedule holds at ``end_e``).
    """
    step = (end_e - start_e) / duration
    current = step * t + start_e
    if current < end_e:
        return end_e
    return current


class QNetwork(nn.Module):
    """Alternative, deeper Q-network: N_STATES -> 120 -> 84 -> N_ACTIONS with ReLU activations."""

    def __init__(self, env=None):
        super().__init__()
        hidden_a, hidden_b = 120, 84
        layers = [
            nn.Linear(N_STATES, hidden_a),
            nn.ReLU(),
            nn.Linear(hidden_a, hidden_b),
            nn.ReLU(),
            nn.Linear(hidden_b, N_ACTIONS),
        ]
        self.network = nn.Sequential(*layers)

    def forward(self, x):
        """Return Q-values for a batch of states."""
        return self.network(x)


class DQN(object):
    """Classic DQN agent: eval/target networks, a ring-buffer replay memory,
    and optional CSV persistence of the replay memory.

    Memory row layout: [s (N_STATES), a, r, done, s_ (N_STATES)].
    """

    def __init__(self):
        self.eval_net, self.target_net = Net(), Net()

        self.learn_step_counter = 0                                     # for target updating
        self.memory_counter = 0                                         # total transitions stored so far
        self.memory = np.zeros((MEMORY_CAPACITY, N_STATES * 2 + 3))     # ring-buffer replay memory
        self.optimizer = torch.optim.Adam(self.eval_net.parameters(), lr=LR)
        self.loss_func = nn.MSELoss()

        self.default_path = "tempdir/dqn/dqn_memory.csv"
        os.makedirs(os.path.dirname(self.default_path), exist_ok=True)
        self.is_need_first_save_memory = True   # save the memory once, right before the first learn() step

    def choose_action(self, x, epsilon=None):
        """Epsilon-greedy action for one (unbatched) state ``x``.

        ``epsilon`` is the probability of a random action; defaults to 1 - EPSILON.
        """
        if epsilon is None:
            epsilon = 1 - EPSILON

        x = torch.unsqueeze(torch.FloatTensor(x), 0)
        # input only one sample
        if np.random.random() > epsilon:
            # exploit: argmax_a Q(s, a) under the eval network
            actions_value = self.eval_net.forward(x)
            action = torch.max(actions_value, 1)[1].data.numpy()
            action = action[0] if ENV_A_SHAPE == 0 else action.reshape(ENV_A_SHAPE)  # return the argmax index
        else:   # explore: uniform random action
            action = np.random.randint(0, N_ACTIONS)
            action = action if ENV_A_SHAPE == 0 else action.reshape(ENV_A_SHAPE)
        return action

    def store_transition(self, s, a, r, done, s_):
        """Write one transition into the ring buffer, overwriting the oldest entry when full."""
        transition = np.hstack((s, [a, r, done], s_))
        index = self.memory_counter % MEMORY_CAPACITY
        self.memory[index, :] = transition
        self.memory_counter += 1

    def save_memory_to_csv(self, path=None):
        """Dump the whole replay memory to CSV (defaults to ``self.default_path``)."""
        if path is None:
            path = self.default_path

        df = pd.DataFrame(self.memory)
        df.to_csv(path, index=False)

        self.is_need_first_save_memory = False

    def load_memory_from_csv(self, path=None):
        """Restore the replay memory from CSV; silently no-op if the file does not exist.

        The buffer is always restored to exactly MEMORY_CAPACITY rows so that
        store_transition (which indexes modulo MEMORY_CAPACITY) stays in bounds:
        larger files are randomly subsampled, smaller files are zero-padded.
        """
        if path is None:
            path = self.default_path

        if not os.path.exists(path):
            # First run: nothing saved yet — keep the empty memory instead of crashing.
            return

        loaded = pd.read_csv(path).to_numpy()
        rows = loaded.shape[0]

        if rows > MEMORY_CAPACITY:
            # keep a random subsample of exactly MEMORY_CAPACITY rows
            sample_index = np.random.choice(rows, MEMORY_CAPACITY)
            self.memory = loaded[sample_index, :]
            self.memory_counter = MEMORY_CAPACITY
            self.is_need_first_save_memory = False
        else:
            # copy into a full-capacity buffer (zero-padded when rows < MEMORY_CAPACITY)
            # so the ring-buffer indexing in store_transition cannot go out of bounds
            self.memory = np.zeros((MEMORY_CAPACITY, N_STATES * 2 + 3))
            self.memory[:rows, :] = loaded
            self.memory_counter = rows

    def learn(self):
        """One DQN update: sync the target net periodically, sample a batch, minimize the TD error."""
        # persist the warm-up memory once, right before the first learning step
        if save_memory_to_csv and self.is_need_first_save_memory:
            self.save_memory_to_csv()

        # target parameter update every TARGET_REPLACE_ITER calls
        if self.learn_step_counter % TARGET_REPLACE_ITER == 0:
            self.target_net.load_state_dict(self.eval_net.state_dict())

        self.learn_step_counter += 1
        if self.learn_step_counter % train_frequency != 0:  # learning frequency
            return

        # sample a batch of transitions (uniformly, with replacement)
        sample_index = np.random.choice(MEMORY_CAPACITY, BATCH_SIZE)
        b_memory = self.memory[sample_index, :]
        b_s = torch.FloatTensor(b_memory[:, :N_STATES])
        b_a = torch.LongTensor(b_memory[:, N_STATES:N_STATES+1].astype(int))
        b_r = torch.FloatTensor(b_memory[:, N_STATES+1:N_STATES+2])
        b_done = torch.FloatTensor(b_memory[:, N_STATES+2:N_STATES+3])  # stored but unused by the active update (see NOTE)
        b_s_ = torch.FloatTensor(b_memory[:, -N_STATES:])

        # q_eval w.r.t. the action taken in the experience
        q_eval = self.eval_net(b_s).gather(1, b_a)  # shape (batch, 1)
        with torch.no_grad():
            q_next = self.target_net(b_s_).detach()     # detach from graph, don't backpropagate
            # NOTE(review): terminal states are deliberately NOT masked with (1 - b_done) here;
            # the author found that masking hurt convergence and instead the env loop assigns
            # a negative reward on done (see main). Confirm before "fixing" this.
            q_target = b_r + GAMMA * q_next.max(1)[0].view(BATCH_SIZE, 1)   # shape (batch, 1)

        loss = self.loss_func(q_eval, q_target)

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()


def main():
    """Train the DQN agent on CartPole until the solve threshold is reached or the step budget runs out."""
    dqn = DQN()

    if load_memory_from_csv:
        dqn.load_memory_from_csv()

    tq = tqdm(total=total_timesteps, desc=f"use random action collecting {MEMORY_CAPACITY} experience...")

    rewards = []            # per-episode returns (appended only when an episode ends)
    max_reward_value = 0    # best episode return seen so far
    max_reward_i = -1       # episode index of the best return

    global_step = 0
    i_episode = 0
    # print('\nCollecting experience...')
    # for i_episode in range(MAX_EPISODE):
    # for global_step in range(total_timesteps):
    exist_flag = False      # NOTE(review): likely meant "exit_flag" — set when the solve threshold is hit
    while True:
        i_episode += 1

        s = env.reset()

        ep_r = 0                # accumulated reward for this episode
        _exist_flag = False     # per-episode flag: episode ended (done) or threshold reached
        while global_step < total_timesteps:
            global_step += 1
            tq.update(1)

            if RENDER:
                env.render()

            # anneal epsilon linearly over the first exploration_fraction of the step budget
            epsilon = linear_schedule(args.start_e, args.end_e, args.exploration_fraction * args.total_timesteps,
                                      global_step)
            a = dqn.choose_action(s, epsilon)
            # a = dqn.choose_action(s)

            # take action
            s_, r, done, info = env.step(a)

            if done:
                r = -3  # penalize termination (instead of masking terminal targets in DQN.learn)

            # # modify the reward
            # x, x_dot, theta, theta_dot = s_
            # r1 = (env.x_threshold - abs(x)) / env.x_threshold - 0.8
            # r2 = (env.theta_threshold_radians - abs(theta)) / env.theta_threshold_radians - 0.5
            # r = r1 + r2

            dqn.store_transition(s, a, r, done, s_)

            ep_r += r
            ep_r = int(ep_r)

            # only start learning once the replay memory has been filled
            if dqn.memory_counter > MEMORY_CAPACITY:
                dqn.learn()

                if done:
                    tq.desc = f"i_episode: {i_episode}, epsilon: {round(epsilon, 4)} | global_step: {global_step}, Ep_r: {ep_r} | max_value_i: {max_reward_value} --- {max_reward_i}"

            if done:
                _exist_flag = True

            # stop training as soon as one episode reaches the environment's solve threshold
            if ep_r >= env.spec.reward_threshold:
                tt.sleep(1)
                print('------')
                print(f'\n\n== 第{i_episode}局的得分达到了阈值{env.spec.reward_threshold}!\n\n')
                tt.sleep(1)
                tq.update(1)

                exist_flag = _exist_flag = True

            if _exist_flag:
                # episode finished: record its return and track the best one
                if ep_r > max_reward_value:
                    max_reward_value = ep_r
                    max_reward_i = i_episode
                rewards.append(ep_r)
                break

            s = s_
        if exist_flag or global_step >= total_timesteps:
            break


# Script entry point.
if __name__ == '__main__':
    main()





