import random

import numpy as np
import torch

from agents.base_agent import BaseAgent
from network.dueling_net import DuelQNet
from network.q_net import QNet
from utilities.replay_buffer import ReplayBuffer
from utilities.prioritized_replay_buffer import PrioritizedReplayBuffer


class DQN(BaseAgent):
    """A deep Q-learning agent.

    Optional extensions, each toggled via ``config``:
      - Double DQN (``use_double``): the online net selects the greedy next
        action, the target net evaluates it (reduces overestimation bias).
      - Dueling network (``use_dueling``): uses ``DuelQNet`` instead of ``QNet``.
      - Prioritized replay (``use_per``): importance-weighted minibatches.
      - Soft vs. hard target updates (``use_soft_update``), linear learning-rate
        decay (``use_lr_decay``), and gradient clipping (``grad_clip``).
    """
    agent_name = 'DQN'

    def __init__(self, config):
        super().__init__(config)
        self.batch_size = self.config['batch_size']
        self.gamma = self.config['gamma']
        self.target_update_freq = self.config['target_update_freq']
        self.lr = self.config['lr']
        self.tau = self.config['tau']  # soft-update interpolation factor
        # Linear epsilon-greedy schedule: epsilon_init -> epsilon_final over
        # epsilon_decay_steps action selections.
        self.epsilon_init = self.config['epsilon_init']
        self.epsilon_final = self.config['epsilon_final']
        self.epsilon_decay_steps = self.config['epsilon_decay_steps']
        self.epsilon_decay = (self.epsilon_init - self.epsilon_final) / self.epsilon_decay_steps
        self.epsilon = self.epsilon_init
        self.grad_clip = self.config['grad_clip']
        self.use_double = self.config['use_double']
        self.use_dueling = self.config['use_dueling']
        self.use_lr_decay = self.config['use_lr_decay']
        self.use_soft_update = self.config['use_soft_update']
        # Online net stays on CPU for action selection and is moved to
        # self.device inside train(); the target net lives on self.device.
        net_cls = DuelQNet if self.use_dueling else QNet
        self.q_net = net_cls(self.state_size, self.action_size)
        self.q_net_target = net_cls(self.state_size, self.action_size).to(self.device)
        self.copy_model(from_model=self.q_net, target_model=self.q_net_target)
        self.optimizer = torch.optim.Adam(self.q_net.parameters(), lr=self.lr)
        self.use_per = self.config['use_per']
        if self.use_per:
            self.replay_buffer = PrioritizedReplayBuffer(self.config, self.device)
        else:
            self.replay_buffer = ReplayBuffer(self.config, self.device)

    def run(self):
        """Interact with the environment and train until ``max_steps``.

        Returns:
            list: the periodic evaluation results accumulated in
            ``self.evaluations``.
        """
        episode_step = 0
        episode_reward = 0
        # Seed only the very first reset; re-seeding on every episode would
        # make the environment replay the exact same stochastic trajectory
        # each episode.
        state, _ = self.env.reset(seed=self.config['seed'])
        while self.total_step < self.max_steps:
            action = self.select_action(state)
            next_state, reward, terminated, truncated, _ = self.env.step(action)
            done = terminated or truncated
            episode_reward += reward
            # The "mask" stored in the buffer already carries gamma: 0 on true
            # termination (no bootstrapping), gamma otherwise — including
            # time-limit truncation, where bootstrapping is still valid.
            mask = 0 if terminated else self.gamma
            self.replay_buffer.store(state, action, reward, next_state, mask)
            state = next_state
            episode_step += 1
            self.total_step += 1
            if done:
                state, _ = self.env.reset()
                self.episode += 1
                self.print_results(episode_step, episode_reward)
                episode_step = 0
                episode_reward = 0
            if len(self.replay_buffer) > self.batch_size:
                self.train()
                self.train_step += 1
            if self.total_step % self.eval_freq == 0:
                self.evaluations.append(self.eval_model(self.q_net))
                self.save_model(self.q_net)
        return self.evaluations

    def select_action(self, state):
        """Pick an action epsilon-greedily and decay epsilon one step.

        Args:
            state: raw environment observation (array-like).

        Returns:
            int: the chosen action index.
        """
        # Action selection runs on CPU; train() moves the net back to device.
        self.q_net.to('cpu')
        state = torch.tensor(state).float().unsqueeze(0)
        q_values = self.q_net(state).detach()
        if random.random() > self.epsilon:
            action = torch.argmax(q_values).item()
        else:
            action = np.random.randint(0, q_values.shape[1])
        self.epsilon = max(self.epsilon - self.epsilon_decay, self.epsilon_final)
        return action

    def evaluate(self, state):
        """Pick the greedy action (no exploration); used during evaluation."""
        state = torch.tensor(state).float().unsqueeze(0)
        q_values = self.q_net(state)
        action = torch.argmax(q_values).item()
        return action

    def train(self):
        """Sample a minibatch and take one gradient step on the TD loss."""
        self.q_net.to(self.device)
        if self.use_per:
            states, actions, rewards, next_states, masks, is_idx, is_weight = self.replay_buffer.sample()
        else:
            states, actions, rewards, next_states, masks = self.replay_buffer.sample()
            is_idx, is_weight = None, None
        # Compute the bootstrap target without tracking gradients so that no
        # gradient ever flows into (or accumulates on) the target network.
        with torch.no_grad():
            if self.use_double:
                # Double DQN: online net selects, target net evaluates.
                max_action_indexes = torch.argmax(self.q_net(next_states), dim=1)
                q_target_next = self.q_net_target(next_states).gather(1, max_action_indexes.unsqueeze(1))
            else:
                q_target_next = self.q_net_target(next_states).max(dim=1)[0].unsqueeze(1)
            # masks already contain gamma (or 0 on termination), see run().
            q_target = rewards + masks * q_target_next
        q_current = self.q_net(states).gather(1, actions.long())
        td_errors = (q_target - q_current).squeeze(1)  # shape: (batch_size,)
        if self.use_per:
            # Importance-sampling weights correct the bias introduced by
            # prioritized sampling; new priorities come from the TD errors.
            loss = ((td_errors ** 2) * is_weight).mean()
            self.replay_buffer.update_td_errors_for_per(is_idx, td_errors.detach().cpu().numpy())
        else:
            loss = (td_errors ** 2).mean()
        self.take_optimization(self.optimizer, loss, self.grad_clip, self.q_net)
        if self.use_soft_update:
            self.soft_update_of_target_network(self.q_net, self.q_net_target)
        else:
            self.hard_update_of_target_network(self.q_net, self.q_net_target)
        if self.use_lr_decay:
            self.lr_decay()

    def hard_update_of_target_network(self, from_model, target_model):
        """Copy online weights into the target net every ``target_update_freq`` train steps."""
        if self.train_step % self.target_update_freq == 0:
            for target_param, from_param in zip(target_model.parameters(), from_model.parameters()):
                target_param.detach().copy_(from_param.detach())

    def soft_update_of_target_network(self, from_model, target_model):
        """Polyak averaging: target <- tau * online + (1 - tau) * target."""
        for target_param, from_param in zip(target_model.parameters(), from_model.parameters()):
            target_param.detach().copy_(self.tau * from_param.detach() + (1 - self.tau) * target_param.detach())

    def lr_decay(self):
        """Linearly anneal the learning rate from ``lr`` to 0 over ``max_steps``."""
        lr_now = self.lr * (1 - self.total_step / self.max_steps)
        for p in self.optimizer.param_groups:
            p['lr'] = lr_now





