import numpy as np
import torch
import torch.nn.functional as F

from agents.base_agent import BaseAgent
from network.ddpg_net import Critic, Actor
from utilities.replay_buffer import ReplayBuffer
from utilities.prioritized_replay_buffer import PrioritizedReplayBuffer


class DDPG(BaseAgent):
    """Deep Deterministic Policy Gradient agent (Lillicrap et al., 2016).

    Maintains an actor/critic pair plus slowly-tracking target copies,
    explores with additive Gaussian action noise, and learns off-policy
    from a uniform or prioritized replay buffer.
    """
    agent_name = 'DDPG'

    def __init__(self, config):
        super().__init__(config)
        self.batch_size = self.config['batch_size']
        # Environment steps collected (with random actions) before learning starts.
        self.start_train_steps = self.config['start_train_steps']
        self.expl_noise_std = self.config['expl_noise_std']
        self.gamma = self.config['gamma']
        # Polyak averaging coefficient for the target-network soft updates.
        self.tau = self.config['tau']
        # Assumes a symmetric box action space; scalar bound taken from dim 0.
        self.max_action = float(self.env.action_space.high[0])
        self.actor = Actor(self.state_size, self.action_size, self.max_action).to(self.device)
        self.actor_target = Actor(self.state_size, self.action_size, self.max_action).to(self.device)
        self.critic = Critic(self.state_size, self.action_size).to(self.device)
        self.critic_target = Critic(self.state_size, self.action_size).to(self.device)
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=self.config['actor_lr'])
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=self.config['critic_lr'])
        # Start the targets as exact copies of the online networks.
        self.copy_model(from_model=self.actor, target_model=self.actor_target)
        self.copy_model(from_model=self.critic, target_model=self.critic_target)
        self.use_per = self.config['use_per']
        if self.use_per:
            self.replay_buffer = PrioritizedReplayBuffer(self.config, self.device)
        else:
            self.replay_buffer = ReplayBuffer(self.config, self.device)

    def run(self):
        """Interact with the environment for up to ``max_steps`` steps.

        Returns:
            list: periodic evaluation scores collected every ``eval_freq`` steps.
        """
        episode_step = 0
        episode_reward = 0
        # Seed only the very first reset. Re-seeding on every episode would
        # reinitialize the env RNG to the same state each time, making every
        # episode draw identical environment randomness.
        state, _ = self.env.reset(seed=self.seed)
        while self.total_step < self.max_steps:
            action = self.select_action(state)
            next_state, reward, terminated, truncated, _ = self.env.step(action)
            done = terminated or truncated
            episode_reward += reward
            # Fold the discount into the stored mask: 0 on true termination
            # (no bootstrap), gamma otherwise — including time-limit
            # truncation, where bootstrapping is still correct.
            mask = 0 if terminated else self.gamma
            self.replay_buffer.store(state, action, reward, next_state, mask)
            state = next_state
            self.total_step += 1
            episode_step += 1
            if done:
                state, _ = self.env.reset()
                self.episode += 1
                self.print_results(episode_step, episode_reward)
                episode_step = 0
                episode_reward = 0
            if len(self.replay_buffer) > self.start_train_steps and not self.goal_achieved:
                self.train()
            if self.total_step % self.eval_freq == 0:
                self.evaluations.append(self.eval_model(self.actor))
                print(f"Evaluation at {self.total_step} steps: {self.evaluations[-1]:.2f}")
                self.save_model(self.actor)
        return self.evaluations

    def select_action(self, state):
        """Return an exploratory action for ``state``.

        Uniform random actions during the warmup phase; afterwards the
        actor's deterministic output plus clipped Gaussian noise.
        """
        if self.total_step < self.start_train_steps:
            return self.env.action_space.sample()
        # Keep the actor on self.device permanently; move the input instead
        # of bouncing the network between CPU and device every step.
        state = torch.tensor(state, dtype=torch.float32, device=self.device)
        with torch.no_grad():
            action = self.actor(state).cpu().numpy()
        noise = np.random.normal(0, self.max_action * self.expl_noise_std, size=self.action_size)
        return (action + noise).clip(-self.max_action, self.max_action)

    def evaluate(self, state):
        """Return the deterministic (noise-free) action for ``state``."""
        # Input must live on the same device as the actor, otherwise
        # evaluation crashes when training runs on CUDA.
        state = torch.tensor(state, dtype=torch.float32, device=self.device)
        with torch.no_grad():
            action = self.actor(state).cpu().numpy().flatten()
        return action

    def train(self):
        """Run one gradient step on critic and actor from a replay minibatch."""
        if self.use_per:
            states, actions, rewards, next_states, masks, is_idx, is_weight = self.replay_buffer.sample()
        else:
            states, actions, rewards, next_states, masks = self.replay_buffer.sample()
            is_idx, is_weight = None, None
        # Bootstrap target; ``masks`` already carries the discount
        # (0 on terminal transitions, gamma otherwise).
        with torch.no_grad():
            target_q = rewards + masks * self.critic_target(next_states, self.actor_target(next_states))
        current_q = self.critic(states, actions)
        td_errors = (target_q - current_q).squeeze(1)
        if self.use_per:
            # Importance-sampling weights correct PER's non-uniform sampling bias.
            critic_loss = ((td_errors ** 2) * is_weight).mean()
            self.replay_buffer.update_td_errors_for_per(is_idx, td_errors.detach().cpu().numpy())
        else:
            critic_loss = (td_errors ** 2).mean()
        self.take_optimization(self.critic_optimizer, critic_loss)
        # Deterministic policy gradient: ascend the critic's value of the
        # actor's own action (hence the negated mean as a loss).
        actor_loss = -self.critic(states, self.actor(states)).mean()
        self.take_optimization(self.actor_optimizer, actor_loss)
        self.soft_update_of_target_network(self.critic, self.critic_target)
        self.soft_update_of_target_network(self.actor, self.actor_target)
        self.train_step += 1

    def soft_update_of_target_network(self, from_model, target_model):
        """Polyak-average ``from_model`` parameters into ``target_model``."""
        with torch.no_grad():
            for target_param, from_param in zip(target_model.parameters(), from_model.parameters()):
                target_param.copy_(self.tau * from_param + (1 - self.tau) * target_param)

