"""
View more, visit my tutorial page: https://morvanzhou.github.io/tutorials/
My Youtube Channel: https://www.youtube.com/user/MorvanZhou
More about Reinforcement learning: https://morvanzhou.github.io/tutorials/machine-learning/reinforcement-learning/
Dependencies:
torch: 0.4
gym: 0.8.1
numpy
"""
import collections
import time

import gym
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as fun
import matplotlib.pyplot as plt

# Select the compute device: use the GPU when CUDA is available, else CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# device = 'cpu'
print("训练平台为: ", device)

# Hyper Parameters
BATCH_SIZE = 32  # transitions sampled from replay memory per learning step
LR = 0.01  # learning rate
EPSILON = 0.9  # greedy policy
GAMMA = 0.9  # reward discount
TARGET_REPLACE_ITER = 100  # target update frequency
MEMORY_CAPACITY = 2000  # replay-memory size (rows); learning starts once full
TEST_FREQUENCY = 50  # run an evaluation episode every N training episodes
EPISODE = 5000  # total number of training episodes

# Environment
env = gym.make('CartPole-v0')
env = env.unwrapped  # unwrap to drop the built-in episode step limit
N_ACTIONS = env.action_space.n  # size of the discrete action space
N_STATES = env.observation_space.shape[0]  # length of the observation vector
# to confirm the shape
ENV_A_SHAPE = 0 if isinstance(env.action_space.sample(), int) else env.action_space.sample().shape


class Net(nn.Module):
    """Two-layer MLP mapping an observation vector to one Q-value per action."""

    def __init__(self):
        super(Net, self).__init__()
        # Hidden layer: N_STATES inputs -> 50 units, weights ~ N(0, 0.1).
        self.fc1 = nn.Linear(N_STATES, 50)
        self.fc1.weight.data.normal_(0, 0.1)
        # Output layer: 50 units -> one Q-value per action.
        self.out = nn.Linear(50, N_ACTIONS)
        self.out.weight.data.normal_(0, 0.1)

    def forward(self, x):
        """Return action-value estimates for a batch of states `x`."""
        hidden = fun.relu(self.fc1(x))
        return self.out(hidden)


class Agent(object):
    """DQN / Double-DQN agent with an online net, a target net and replay memory."""

    def __init__(self):
        # Online net is trained every step; target net is a periodic copy of it.
        self.online_net, self.target_net = Net().to(device), Net().to(device)
        self.learn_step_counter = 0  # counts learn() calls, drives target sync
        self.memory_counter = 0  # total transitions stored (may exceed capacity)
        # One row per transition: (state, action, reward, next_state, not_done).
        self.memory = np.zeros((MEMORY_CAPACITY, N_STATES * 2 + 2 + 1))
        self.optimizer = torch.optim.Adam(self.online_net.parameters(), lr=LR)
        self.loss_func = nn.MSELoss()

    def action(self, state):
        """Return an epsilon-greedy action for a single `state`."""
        state = torch.unsqueeze(torch.FloatTensor(state).to(device), 0)
        if np.random.uniform() < EPSILON:  # exploit: argmax of Q-values
            actions_value = self.online_net.forward(state)
            action = torch.max(actions_value, 1)[1].data.to('cpu').numpy()
            action = action[0] if ENV_A_SHAPE == 0 else action.reshape(ENV_A_SHAPE)
        else:  # explore: uniformly random action
            action = np.random.randint(0, N_ACTIONS)
            action = action if ENV_A_SHAPE == 0 else action.reshape(ENV_A_SHAPE)
        return action

    def store(self, state, action, reward, next_state, done):
        """Store one transition; the last column is 0 for terminal steps, 1 otherwise."""
        not_done = 0 if done else 1
        transition = np.hstack((state, [action, reward], next_state, not_done))
        # Replace the oldest memory with the new transition (ring buffer).
        index = self.memory_counter % MEMORY_CAPACITY
        self.memory[index, :] = transition
        self.memory_counter += 1

    def learn(self, algorithm="DQN"):
        """Sample a batch from replay memory and take one gradient step.

        algorithm: "DQN" (vanilla) or "DDQN" (double DQN).
        Raises ValueError for any other value (the original silently trained
        toward a scalar-zero target).
        """
        # Periodically sync the target network with the online network.
        if self.learn_step_counter % TARGET_REPLACE_ITER == 0:
            self.target_net.load_state_dict(self.online_net.state_dict())
        self.learn_step_counter += 1

        # Sample batch transitions (uniformly, with replacement).
        sample_index = np.random.choice(MEMORY_CAPACITY, BATCH_SIZE)
        batch_memory = self.memory[sample_index, :]
        batch_state = torch.FloatTensor(batch_memory[:, :N_STATES]).to(device)
        batch_action = torch.LongTensor(batch_memory[:, N_STATES:N_STATES + 1].astype(int)).to(device)
        batch_reward = torch.FloatTensor(batch_memory[:, N_STATES + 1:N_STATES + 2]).to(device)
        batch_next_state = torch.FloatTensor(batch_memory[:, -N_STATES - 1:-1]).to(device)
        # Slice with -1: (not -1) to keep shape (batch, 1): a flat (batch,) mask
        # would broadcast (batch, 1) * (batch,) into (batch, batch) and corrupt
        # every target in the batch.
        batch_done = torch.FloatTensor(batch_memory[:, -1:]).to(device)

        # Q(s, a) of the actions actually taken, shape (batch, 1).
        q_eval = self.online_net(batch_state).gather(1, batch_action)

        # Bootstrapped target; the not-done mask (0 at terminal states) cuts
        # off the future return so terminal transitions use the reward alone.
        if algorithm == "DQN":
            q_next = self.target_net(batch_next_state).detach()  # no grad through target
            q_target = batch_reward + GAMMA * q_next.max(1)[0].view(BATCH_SIZE, 1) * batch_done
        elif algorithm == "DDQN":
            # Double DQN: the online net selects the next action, the target
            # net evaluates it.
            next_action = torch.unsqueeze(torch.max(self.online_net(batch_next_state), 1)[1], 1)
            q_next = self.target_net(batch_next_state).gather(1, next_action).detach()
            q_target = batch_reward + GAMMA * q_next * batch_done
        else:
            raise ValueError("unknown algorithm: %r (expected 'DQN' or 'DDQN')" % algorithm)

        # Back-propagate the TD error and update the online network only.
        loss = self.loss_func(q_eval, q_target)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

    def save(self, episode):
        """Save both networks, tagged with the episode number and as 'best'."""
        torch.save(self.online_net.state_dict(), './model/online_network_%d.pkl' % episode)
        torch.save(self.target_net.state_dict(), './model/target_network_%d.pkl' % episode)
        torch.save(self.online_net.state_dict(), './model/online_network_best.pkl')
        torch.save(self.target_net.state_dict(), './model/target_network_best.pkl')
        print('=====================')
        print('%d episode model has been save...' % episode)


def test_best_network(agent):
    """Load the saved best networks into `agent` and run one rendered episode.

    Prints the episode's shaped reward and wall-clock duration.
    """
    # Restore the best checkpoints into both of the agent's networks.
    agent.online_net.load_state_dict(torch.load('./model/online_network_best.pkl'))
    agent.target_net.load_state_dict(torch.load('./model/target_network_best.pkl'))

    # Reset the environment and the accumulators.
    state = env.reset()
    test_reward = 0
    test_start = time.perf_counter()

    done = False
    while not done:
        env.render()
        chosen_action = agent.action(state)

        # Step the environment with the chosen action.
        observation, reward, done, info = env.step(chosen_action)

        # Shaped reward: larger when the cart is near the track centre (r1)
        # and the pole is near upright (r2).
        x, x_dot, theta, theta_dot = observation
        r1 = (env.x_threshold - abs(x)) / env.x_threshold - 0.8
        r2 = (env.theta_threshold_radians - abs(theta)) / env.theta_threshold_radians - 0.5

        # Advance to the next state and accumulate the shaped reward.
        state = observation
        test_reward += r1 + r2

    env.close()
    test_end = time.perf_counter()

    # Report the evaluation result.
    print('test_reward: {}'.format(round(test_reward, 3)))
    print('test_time: {}'.format(round(test_end - test_start, 3)))


def _shaped_reward(observation):
    """Reward shaping for CartPole: higher when the cart is near the track
    centre and the pole near upright."""
    x, _, theta, _ = observation
    r1 = (env.x_threshold - abs(x)) / env.x_threshold - 0.8
    r2 = (env.theta_threshold_radians - abs(theta)) / env.theta_threshold_radians - 0.5
    return r1 + r2


def _run_test_episode(agent):
    """Run one evaluation episode (no learning) and return its shaped reward."""
    state = env.reset()
    test_reward = 0
    while True:
        action = agent.action(state)
        observation, reward, done, info = env.step(action)
        test_reward += _shaped_reward(observation)
        state = observation
        if done:
            env.close()
            return test_reward


def train(agent):
    """Train `agent` on the global env for EPISODE episodes.

    Appends each episode's shaped reward to ./model/reward.txt, evaluates the
    agent every TEST_FREQUENCY episodes, and saves the networks whenever the
    running mean of recent evaluation rewards improves.
    """
    mean_test = collections.deque(maxlen=100)  # sliding window of test rewards
    best_reward = 0
    for i_episode in range(EPISODE):
        start = time.perf_counter()
        state = env.reset()
        total_reward = 0

        while True:
            # Choose and execute an action.
            action = agent.action(state)
            observation, reward, done, info = env.step(action)
            redefine_reward = _shaped_reward(observation)

            # Store the transition in replay memory.
            agent.store(state, action, redefine_reward, observation, done)

            # Advance the state and accumulate the shaped reward.
            state = observation
            total_reward += redefine_reward

            # Start learning only once the replay memory is full.
            # (The original printed memory_counter on every step, flooding
            # stdout and slowing training; removed.)
            if agent.memory_counter > MEMORY_CAPACITY:
                agent.learn("DQN")

            if done:
                break

        # Log the episode reward to stdout and append it to disk.
        print('episode: {} , total_reward: {}'.format(i_episode, round(total_reward, 3)))
        with open('./model/reward.txt', 'a+') as f:
            f.write(str(total_reward) + '\n')

        # Estimate the remaining training time from this episode's duration.
        train_time = time.perf_counter() - start
        remain_time = train_time * (EPISODE - 1 - i_episode)
        h, ss = divmod(remain_time, 3600)
        m, s = divmod(ss, 60)
        print("train time：%d second" % train_time)
        print("remain time：%d hour, %d minute, %d second" % (h, m, s))

        # Periodically evaluate and checkpoint on improvement.
        if i_episode > 0 and i_episode % TEST_FREQUENCY == 0:
            test_reward = _run_test_episode(agent)

            # Report the evaluation and track it in the sliding window.
            print('episode: {} , test_reward: {}'.format(i_episode, round(test_reward, 3)))
            mean_test.append(test_reward)

            # Save when the running mean of recent test rewards improves.
            if np.mean(mean_test) > best_reward:
                best_reward = np.mean(mean_test)
                agent.save(i_episode)


def darw_reward(file):
    """Plot the per-episode rewards stored one-per-line in `file`.

    Saves the figure to reward.svg in the working directory.
    (Name kept for backward compatibility with existing callers.)
    """
    # Read one float per line; skip blank lines (the original crashed on a
    # trailing newline and looped over indices instead of lines).
    with open(file, "r") as f:
        reward = [float(line) for line in f if line.strip()]

    # Plot reward against episode index. np.arange gives unit spacing;
    # the original linspace(0, n, n) stretched the axis by n / (n - 1).
    x = np.arange(len(reward))
    plt.plot(x, reward)
    # Axis labels.
    plt.xlabel("Step", fontproperties='Times New Roman', size=10.5)
    plt.ylabel("reward", fontproperties='Times New Roman', size=10.5)
    # Grid.
    plt.grid()
    # Legend (single series).
    plt.legend(["delay"], loc="lower right", frameon=False)
    # Title.
    plt.title("The Reward Variation", fontproperties='SimSun', size=10.5)
    # Save the figure.
    plt.savefig("reward.svg")





if __name__ == '__main__':
    # program_start = time.perf_counter()
    # Build the agent (networks, optimizer, replay memory).
    my_agent = Agent()
    # Train the agent (disabled; enable to retrain from scratch).
    # train(my_agent)
    # Evaluate the best saved checkpoint (loads ./model/*_best.pkl).
    test_best_network(my_agent)
    # # print the program run time
    # program_end = time.perf_counter()
    # print("program run time:%d" % (program_end - program_start))
    # darw_reward('./model/reward.txt')
