import torch
import torch.nn.functional as F

from agents.ddpg import DDPG
from network.td3_net import Actor, TwinCritic


class TD3(DDPG):
    """Twin Delayed Deep Deterministic Policy Gradient (TD3) agent.

    Extends DDPG with the three TD3 mechanisms:
      * clipped double-Q learning — twin critics, min of the two targets,
      * target policy smoothing — clipped Gaussian noise on target actions,
      * delayed policy updates — actor and target networks updated only
        every ``policy_update_freq`` critic updates.
    """
    agent_name = 'TD3'

    def __init__(self, config):
        """Build actor/critic networks, their target copies, and optimizers.

        ``config`` must provide ``policy_noise_std``, ``noise_clip``,
        ``policy_update_freq``, ``actor_lr`` and ``critic_lr`` in addition
        to whatever the DDPG base class consumes.
        """
        super().__init__(config)
        self.policy_noise_std = self.config['policy_noise_std']
        self.noise_clip = self.config['noise_clip']
        self.policy_update_freq = self.config['policy_update_freq']
        self.actor = Actor(self.state_size, self.action_size, self.max_action).to(self.device)
        self.actor_target = Actor(self.state_size, self.action_size, self.max_action).to(self.device)
        self.critic = TwinCritic(self.state_size, self.action_size).to(self.device)
        self.critic_target = TwinCritic(self.state_size, self.action_size).to(self.device)
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=self.config['actor_lr'])
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=self.config['critic_lr'])
        # Targets start as exact copies so soft updates track from parity.
        self.copy_model(from_model=self.actor, target_model=self.actor_target)
        self.copy_model(from_model=self.critic, target_model=self.critic_target)

    def train(self):
        """Run one TD3 training step on a sampled replay minibatch."""
        # NOTE(review): the actor is already moved to self.device in
        # __init__; this looks redundant — kept in case acting code moves
        # it off-device between train calls. Confirm and remove if not.
        self.actor.to(self.device)

        if self.use_per:
            states, actions, rewards, next_states, masks, is_idx, is_weight = self.replay_buffer.sample()
        else:
            states, actions, rewards, next_states, masks = self.replay_buffer.sample()
            is_idx, is_weight = None, None

        # The TD target needs no gradients: no_grad() skips building an
        # autograd graph through the target networks (the original built
        # the graph and detach()ed afterwards — same values, wasted work).
        with torch.no_grad():
            # Target policy smoothing: clipped Gaussian noise on the
            # target action regularizes the critic's value estimate.
            noises = (torch.randn_like(actions) * self.policy_noise_std).clamp(-self.noise_clip, self.noise_clip)
            next_actions = (self.actor_target(next_states) + noises).clamp(-self.max_action, self.max_action)
            # Clipped double-Q: take the pessimistic (element-wise min)
            # of the twin target critics to curb overestimation bias.
            target_q1, target_q2 = self.critic_target(next_states, next_actions)
            target_q = rewards + masks * torch.min(target_q1, target_q2)

        current_q1, current_q2 = self.critic(states, actions)
        td_errors1 = (target_q - current_q1).squeeze(1)
        td_errors2 = (target_q - current_q2).squeeze(1)
        if self.use_per:
            # Importance-sampling weights correct for PER's non-uniform
            # sampling distribution.
            critic_loss = ((td_errors1 ** 2 + td_errors2 ** 2) * is_weight).mean()
            # NOTE(review): PER priorities are conventionally |TD error|;
            # this signed mean can cancel to ~0 for disagreeing critics —
            # verify update_td_errors_for_per applies abs() internally.
            self.replay_buffer.update_td_errors_for_per(is_idx, ((td_errors1 + td_errors2) / 2).detach().cpu().numpy())
        else:
            critic_loss = (td_errors1 ** 2 + td_errors2 ** 2).mean()
        self.take_optimization(self.critic_optimizer, critic_loss)

        # Delayed policy update: the actor and both target networks lag
        # the critic, updating only every policy_update_freq steps.
        if self.train_step % self.policy_update_freq == 0:
            actor_loss = -self.critic.get_q1(states, self.actor(states)).mean()
            self.take_optimization(self.actor_optimizer, actor_loss)
            self.soft_update_of_target_network(self.critic, self.critic_target)
            self.soft_update_of_target_network(self.actor, self.actor_target)
        self.train_step += 1

