import numpy as np
import torch
import torch.nn.functional as F

from agents.ppo_discrete import PPODiscrete
from network.ppo_continuous_net import Actor


class PPOContinuous(PPODiscrete):
    """A PPO agent for continuous action spaces.

    Extends PPODiscrete, replacing the categorical policy with a Gaussian
    actor (see network.ppo_continuous_net.Actor). Implementation tricks:
    1. GAE
    2. mini-batch update
    3. state normalization
    4. advantage normalization
    5. policy entropy loss
    6. Adam lr linear decay
    7. L2 grad norm
    8. orthogonal init
    9. tanh
    """
    agent_name = 'PPOContinuous'

    def __init__(self, config):
        super().__init__(config)
        # Action range is assumed symmetric: [-max_action, max_action]
        # — TODO confirm against the envs used with this agent.
        self.max_action = float(self.env.action_space.high[0])
        self.actor = Actor(self.state_size, self.action_size, self.max_action, self.net_hyper).to(self.device)
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=self.actor_lr, eps=1e-5)

    def select_action(self, state):
        """Sample a (clipped) action from the current policy for rollout.

        The actor is kept on CPU during single-state inference to avoid
        per-step host<->device transfers; train() moves it back to
        self.device before the update.
        """
        self.actor.to('cpu')
        # Inference only — no autograd graph needed.
        with torch.no_grad():
            state = torch.tensor(state).unsqueeze(0).float()
            dist = self.actor.get_dist(state)
            action = dist.sample()
            # Samples from an unbounded Gaussian may exceed the valid range.
            action = torch.clamp(action, -self.max_action, self.max_action)
        return action.numpy().flatten()

    def evaluate(self, state):
        """Return the deterministic (mean) action for evaluation.

        Moves the actor to CPU first, consistent with select_action();
        otherwise the CPU state tensor would hit a CUDA-resident actor
        when evaluate() is called right after train().
        """
        self.actor.to('cpu')
        with torch.no_grad():
            state = torch.tensor(state).unsqueeze(0).float()
            action = self.actor(state)
        return action.numpy().flatten()

    def train(self):
        """Run one PPO update (several epochs of shuffled mini-batches)
        over the rollout currently stored in the replay buffer."""
        self.actor.to(self.device)
        states, actions, rewards, next_states, masks, dones = self.replay_buffer.sample()
        values = self.critic(states).detach()
        next_values = self.critic(next_states).detach()
        # One-step TD residuals feed GAE; masks zero the bootstrap term at terminals.
        td_errors = rewards + masks * next_values - values
        advantages, returns = self.compute_advantage(values, td_errors, dones)
        dist = self.actor.get_dist(states)
        # Sum per-dimension log-probs -> joint log-prob of the action vector.
        old_log_probs = dist.log_prob(actions).sum(1, keepdim=True).detach()
        batch_size = states.shape[0]
        # Ceil division: include the trailing partial mini-batch instead of
        # dropping it (the slice below already clamps to batch_size). Floor
        # division would also skip training entirely when
        # batch_size < mini_batch_size.
        iter_num = -(-batch_size // self.mini_batch_size)
        for _ in range(self.epochs):
            # Fresh shuffle each epoch decorrelates mini-batches.
            perm = np.arange(batch_size)
            np.random.shuffle(perm)
            perm = torch.LongTensor(perm).to(self.device)
            states = states[perm].clone()
            actions = actions[perm].clone()
            advantages = advantages[perm].clone()
            returns = returns[perm].clone()
            old_log_probs = old_log_probs[perm].clone()
            for i in range(iter_num):
                ind = slice(i * self.mini_batch_size, min((i + 1) * self.mini_batch_size, batch_size))
                states_b = states[ind]
                actions_b = actions[ind]
                advantages_b = advantages[ind]
                returns_b = returns[ind]
                old_log_probs_b = old_log_probs[ind]
                dist = self.actor.get_dist(states_b)
                log_probs_b = dist.log_prob(actions_b).sum(1, keepdim=True)
                # Importance ratio in log space for numerical stability.
                ratio = torch.exp(log_probs_b - old_log_probs_b)
                surr1 = ratio * advantages_b
                surr2 = torch.clamp(ratio, 1 - self.clip_epsilon, 1 + self.clip_epsilon) * advantages_b
                entropy = dist.entropy().sum(1, keepdim=True)
                # Clipped surrogate objective plus entropy bonus (maximized,
                # hence the negations inside the mean).
                actor_loss = (-torch.min(surr1, surr2) - self.entropy_coef * entropy).mean()
                self.take_optimization(self.actor_optimizer, actor_loss, self.grad_clip, self.actor)
                critic_loss = F.mse_loss(self.critic(states_b), returns_b)
                self.take_optimization(self.critic_optimizer, critic_loss, self.grad_clip, self.critic)
                self.train_step += 1
        if self.use_lr_decay:
            self.lr_decay()





