#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2021/4/24 14:38
# @Author  : LiShan
# @Email   : lishan_1997@126.com
# @File    : dqn.py
# @Note    : this is dqn algorithm and network model
"""
refer：
[1]https://github.com/MorvanZhou/PyTorch-Tutorial/blob/master/tutorial-contents/405_DQN_Reinforcement_learning.py
[2]https://github.com/GAOYANGAU/DRLPytorch/blob/master/nature-DQN.py
[3]https://github.com/catziyan/DRLPytorch-/tree/master/08
"""

import numpy as np
from torch import cuda, optim, FloatTensor, unsqueeze, max, save, LongTensor
from torch.nn import functional, Module, Linear, MSELoss

# Run device: use the GPU when one is available, otherwise fall back to CPU
device = 'cuda' if cuda.is_available() else 'cpu'

# Hyper Parameters
BATCH_SIZE = 32  # transitions sampled from replay memory per learning step
LR = 0.01  # learning rate for the Adam optimizer
EPSILON = 0.9  # probability of choosing the greedy action (epsilon-greedy policy)
GAMMA = 0.9  # discount factor applied to bootstrapped future rewards
TARGET_REPLACE_ITER = 100  # number of learn() calls between target-network syncs
MEMORY_CAPACITY = 2000  # replay-memory size (rows in the ring buffer)

# Action and State Space
N_ACTIONS = 120  # size of the discrete action space
N_STATES = 24  # dimensionality of the state vector
ENV_A_SHAPE = 0  # 0 when actions are plain ints; otherwise the array shape to reshape actions into


class Net(Module):
    """Two-layer fully-connected Q-network mapping N_STATES -> 200 -> N_ACTIONS."""

    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = Linear(N_STATES, 200)
        self.out = Linear(200, N_ACTIONS)
        # initialise both weight matrices from a small Gaussian
        for layer in (self.fc1, self.out):
            layer.weight.data.normal_(0, 0.1)

    def forward(self, x):
        """Return one Q-value per action for the input state batch x."""
        hidden = functional.relu(self.fc1(x))
        return self.out(hidden)


class Agent(object):
    """DQN / Double-DQN agent with a ring-buffer replay memory and a
    periodically synchronised target network."""

    def __init__(self):
        # online net is trained every step; target net is a frozen copy
        # refreshed every TARGET_REPLACE_ITER learn() calls
        self.online_net, self.target_net = Net().to(device), Net().to(device)
        self.learn_step_counter = 0  # counts learn() calls, drives target sync
        self.memory_counter = 0  # total transitions stored (may exceed capacity)
        # each row: state | action | reward | next_state | not-done mask
        self.memory = np.zeros((MEMORY_CAPACITY, N_STATES * 2 + 2 + 1))
        self.optimizer = optim.Adam(self.online_net.parameters(), lr=LR)
        self.loss_func = MSELoss()

    def action(self, state):
        """Epsilon-greedy action selection.

        With probability EPSILON pick argmax_a Q(state, a) from the online
        net; otherwise pick a uniformly random action.
        """
        state = unsqueeze(FloatTensor(state).to(device), 0)  # add batch dim; already on device
        if np.random.uniform() < EPSILON:  # greedy
            actions_value = self.online_net(state)
            action = max(actions_value, 1)[1].data.to('cpu').numpy()
            action = action[0] if ENV_A_SHAPE == 0 else action.reshape(ENV_A_SHAPE)
        else:  # explore
            action = np.random.randint(0, N_ACTIONS)
            action = action if ENV_A_SHAPE == 0 else action.reshape(ENV_A_SHAPE)
        return action

    def store(self, state, action, reward, next_state, done):
        """Append one transition (S, A, R, S', not-done) to replay memory."""
        # last column is a "not done" mask: 0 on terminal transitions so the
        # bootstrapped future value is zeroed out in learn()
        not_done = 0 if done else 1
        transition = np.hstack((state, [action, reward], next_state, not_done))
        index = self.memory_counter % MEMORY_CAPACITY  # ring buffer: overwrite oldest
        self.memory[index, :] = transition
        self.memory_counter += 1

    def learn(self, algorithm="DQN"):
        """One optimisation step. algorithm: "DQN" (default) or "DDQN"."""
        # target parameter update
        if self.learn_step_counter % TARGET_REPLACE_ITER == 0:
            self.target_net.load_state_dict(self.online_net.state_dict())
        self.learn_step_counter += 1

        # FIX: sample only from slots actually filled; the original sampled
        # the whole buffer and could draw all-zero rows before it filled up
        filled = min(self.memory_counter, MEMORY_CAPACITY)
        sample_index = np.random.choice(filled, BATCH_SIZE)
        batch_memory = self.memory[sample_index, :]
        batch_state = FloatTensor(batch_memory[:, :N_STATES]).to(device)
        batch_action = LongTensor(batch_memory[:, N_STATES:N_STATES + 1].astype(int)).to(device)
        batch_reward = FloatTensor(batch_memory[:, N_STATES + 1:N_STATES + 2]).to(device)
        batch_next_state = FloatTensor(batch_memory[:, -N_STATES - 1:-1]).to(device)
        # FIX: keep the mask as shape (batch, 1); the original 1-D (batch,)
        # tensor broadcast (batch, 1) * (batch,) into a (batch, batch) matrix,
        # corrupting q_target and the MSE loss
        batch_not_done = FloatTensor(batch_memory[:, -1:]).to(device)

        # Q(s, a) of the online net for the actions actually taken
        q_eval = self.online_net(batch_state).gather(1, batch_action)  # (batch, 1)

        if algorithm == "DDQN":
            # Double DQN: online net selects the action, target net evaluates it
            next_action = self.online_net(batch_next_state).detach().max(1)[1].view(-1, 1)
            q_next = self.target_net(batch_next_state).detach().gather(1, next_action)
        else:
            # vanilla DQN: target net both selects and evaluates.
            # FIX: any unrecognised algorithm string previously left
            # q_target = 0, silently regressing all Q-values toward zero
            q_next = self.target_net(batch_next_state).detach().max(1)[0].view(-1, 1)
        # FIX: zero the bootstrap term on terminal transitions in BOTH
        # branches (the original DQN branch ignored the done flag entirely)
        q_target = batch_reward + GAMMA * q_next * batch_not_done  # (batch, 1)

        # back propagation and network parameter update
        loss = self.loss_func(q_eval, q_target)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

    def save(self, episode):
        """Checkpoint both networks: per-episode files plus rolling 'best' copies."""
        import os
        # FIX: first call crashed with FileNotFoundError when ./model was missing
        os.makedirs('./model', exist_ok=True)
        save(self.online_net.state_dict(), './model/online_network_%d.pkl' % episode)
        save(self.target_net.state_dict(), './model/target_network_%d.pkl' % episode)
        save(self.online_net.state_dict(), './model/online_network_best.pkl')
        save(self.target_net.state_dict(), './model/target_network_best.pkl')
        print('=====================')
        print('%d episode model has been saved...' % episode)