import numpy as np
import os, sys
import random
import torch
import time
import torch.nn as nn
import torch.nn.functional as F
from network.rainbow_q_network import RainbowQNetwork
from common.replay_buffer import ReplayBUffer, NStepReplayBUffer, PrioritizedReplayBuffer


class RainbowDQNAgent():
    """Rainbow-style DQN agent.

    Combines several DQN extensions, toggled via constructor flags:
    double DQN, dueling network heads, n-step returns, and prioritized
    experience replay (PER). Networks and batches live on ``args.device``.
    """

    def __init__(self, state_dim, action_dim, args, double_dqn = False, dueling_dqn = False, n_step_dqn = False, prioritized_buffer = False):
        """Build networks, optimizer and the requested replay buffer.

        Args:
            state_dim: dimensionality of the (flat) observation vector.
            action_dim: number of discrete actions.
            args: config namespace; must provide gamma, tau, lr_q, batch_size,
                epsilon, epsilon_min, epsilon_delta, device (and n_step when
                n-step / PER is enabled).
            double_dqn: use Double DQN target computation in learn().
            dueling_dqn: forwarded to RainbowQNetwork to enable dueling heads.
            n_step_dqn: use the n-step replay buffer and n-step discounting.
            prioritized_buffer: use PER (ignored if n_step_dqn is set — n-step
                takes priority in this branch order).
        """
        self.args = args
        # Buffer selection: n-step wins over PER when both flags are set.
        if n_step_dqn:
            self.replay_buffer = NStepReplayBUffer(args)
        elif prioritized_buffer:
            self.replay_buffer = PrioritizedReplayBuffer(args)
        else:
            self.replay_buffer = ReplayBUffer(args)
        self.n_step_dqn = n_step_dqn
        self.prioritized_buffer = prioritized_buffer
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.gamma = args.gamma                  # discount factor
        self.tau = args.tau                      # soft-update rate for the target net
        self.learning_rate = args.lr_q
        self.batch_size = args.batch_size
        self.epsilon = args.epsilon              # current exploration rate
        self.epsilon_min = args.epsilon_min      # epsilon decay floor
        self.epsilon_delta = args.epsilon_delta  # per-action epsilon decrement
        self.double_dqn = double_dqn
        self.device = args.device

        # Move both networks to the configured device; previously they stayed
        # on CPU while batches were sent to self.device, crashing on CUDA.
        self.q_network = RainbowQNetwork(self.state_dim, self.action_dim, args, dueling_dqn).to(self.device)
        self.target_q_network = RainbowQNetwork(self.state_dim, self.action_dim, args, dueling_dqn).to(self.device)
        # Start the target net as an exact copy of the online net so the first
        # soft updates do not chase an unrelated random initialization.
        self.target_q_network.load_state_dict(self.q_network.state_dict())
        self.optimizer = torch.optim.Adam(self.q_network.parameters(), lr=self.learning_rate)
        self.loss_function = nn.MSELoss()
        # Target net is only ever written via soft updates; freeze its params.
        for p in self.target_q_network.parameters():
            p.requires_grad = False

        # makedirs(exist_ok=True) replaces os.mkdir, which raised both when
        # the directory already existed and when the 'model/' parent did not.
        os.makedirs('model/{}'.format("RainbowDQN"), exist_ok=True)

    def e_greedy_action(self,state):
        """Select an action epsilon-greedily and decay epsilon by one step.

        Args:
            state: array-like observation, flattened to shape (1, state_dim).

        Returns:
            int: chosen action index.
        """
        with torch.no_grad():
            state = torch.FloatTensor(state.reshape(1, -1)).to(self.device)
            if np.random.rand() < self.epsilon:
                action = np.random.randint(0,self.action_dim)
            else:
                action = self.q_network(state).argmax().item()
        # Linear epsilon decay, clipped at epsilon_min.
        self.epsilon = max(self.epsilon_min, self.epsilon-self.epsilon_delta)
        return action

    def action(self,state):
        """Select the greedy (argmax-Q) action; used for evaluation."""
        with torch.no_grad():
            state = torch.FloatTensor(state.reshape(1, -1)).to(self.device)
            action = self.q_network(state).argmax().item()
        return action

    def learn(self):
        """Run one TD-learning step on a sampled batch.

        Softly updates the target network, computes the (double/n-step/PER)
        TD target, updates PER priorities when enabled, and takes one
        optimizer step.

        Returns:
            float: scalar training loss for logging.
        """
        # Soft (Polyak) update of the target network parameters.
        for param, target_param in zip(self.q_network.parameters(), self.target_q_network.parameters()):
            target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)

        if self.prioritized_buffer:
            state_batch, action_batch, reward_batch, next_state_batch, done_batch, batch_idx, IS_weights = self.replay_buffer.sample(self.batch_size)
            IS_weights = torch.FloatTensor(IS_weights).to(self.device).view(self.batch_size,1)
        else:
            state_batch, action_batch, reward_batch, next_state_batch, done_batch = self.replay_buffer.sample(self.batch_size)
        state_batch = torch.FloatTensor(state_batch).to(self.device)
        action_batch = torch.LongTensor(action_batch).to(self.device).view(self.batch_size,1)
        reward_batch = torch.FloatTensor(reward_batch).to(self.device).view(self.batch_size,1)
        next_state_batch = torch.FloatTensor(next_state_batch).to(self.device)
        done_batch = torch.FloatTensor(done_batch).to(self.device).view(self.batch_size,1)

        with torch.no_grad():
            if self.double_dqn:
                # Double DQN: the online net selects the next action, the
                # target net evaluates it — reduces overestimation bias.
                argmax_a = self.q_network(next_state_batch).argmax(dim=1).unsqueeze(-1)
                next_q_value = self.target_q_network(next_state_batch).gather(1,argmax_a)
            else:
                next_q_value = self.target_q_network(next_state_batch).max(1)[0].view(self.batch_size,1)

        # Q(s, a) of the actions actually taken: gather picks, per row, the
        # column given by action_batch from the (batch, action_dim) output.
        q_value = self.q_network(state_batch).gather(1,action_batch)
        # The PER buffer here stores n-step transitions (n-step is not
        # decoupled from PER in this codebase), so both use gamma**n_step.
        if self.n_step_dqn or self.prioritized_buffer:
            target_q_value = reward_batch + (1 - done_batch) * (self.gamma ** self.args.n_step) * next_q_value
        else:
            target_q_value = reward_batch + (1 - done_batch) * self.gamma * next_q_value
        # .cpu() before .numpy(): Tensor.numpy() raises on CUDA tensors.
        abs_td_errors = torch.abs(target_q_value - q_value).detach().cpu().numpy()
        if self.prioritized_buffer:
            # Refresh each sample's priority with its new absolute TD error.
            self.replay_buffer.update_priorities(batch_idx, abs_td_errors)
            # Element-wise loss weighted per sample by the importance-sampling
            # weights. The previous code multiplied the *mean* MSE by the
            # weights, which applied every weight to the same scalar and
            # mis-weighted all samples.
            loss = (IS_weights * F.mse_loss(q_value, target_q_value, reduction='none')).mean()
        else:
            loss = self.loss_function(q_value,target_q_value)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        return loss.item()

    def save(self,episodes):
        """Save the online network's weights under model/RainbowDQN/.

        The filename embeds the current timestamp and the episode count.
        """
        time_now = time.strftime('%y%m_%d%H%M')
        save_dir = 'model/{}'.format("RainbowDQN")
        # makedirs also creates the missing 'model/' parent, which the old
        # os.mkdir fallback could not.
        os.makedirs(save_dir, exist_ok=True)
        path = save_dir + '/{}_{}episodes.pth'.format(time_now,episodes)
        torch.save(self.q_network.state_dict(), path)

    def load(self, path):
        """Load online-network weights from *path* onto the agent's device."""
        # map_location lets a GPU-trained checkpoint load on a CPU-only host.
        self.q_network.load_state_dict(torch.load(path, map_location=self.device))
        print('q_network load successed')


