from light_rl.agent.agent import AgentBase
from light_rl.model.model import Model
from light_rl.utils.utils import load_config, get_device
from light_rl.utils.dict_wrapper import DictWrapper
from light_rl.utils.buffer import create_reply_buffer
import torch
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter

import numpy as np
from typing import Union
import os
import json
from copy import deepcopy


class AgentSAC(AgentBase):
    """Soft Actor-Critic agent (V-critic variant).

    Uses a stochastic actor, a state-value critic with a soft-updated
    target copy, and a single Q critic (the original paper uses two Q
    networks; this implementation deliberately uses one — see learn()).
    """

    def __init__(self, actor: Model, v_critic: Model, q_critic: Model, config_path: str, writer: SummaryWriter = None,
                 model_dir: str = ""):
        """Build the agent.

        Args:
            actor: policy network; called with a state tensor it must
                return a torch distribution over pre-squash actions.
            v_critic: state-value network V(s).
            q_critic: action-value network Q(s, a); input is [state, action]
                concatenated along the feature dimension.
            config_path: path to the agent/buffer configuration file.
            writer: optional TensorBoard writer for logging.
            model_dir: directory used by resume_model() to restore weights.
        """
        super().__init__(config_path, writer, model_dir)
        self.actor = actor
        self.v_critic = v_critic
        self.q_critic = q_critic
        # Target copy of the value critic; soft-updated with tau in learn().
        self.target_v_critic = deepcopy(v_critic)

        # Numerical floor for log(1 - tanh(z)^2) so it never hits log(0).
        # 0-dim CPU tensor: broadcasts as a scalar even against CUDA tensors.
        self.min_Val = torch.tensor(1e-7).float()

        self.buffer = create_reply_buffer(config_path)

        self.resume_model()

    def react(self, state: np.ndarray, train: bool = True) -> Union[np.ndarray, int]:
        """Select a tanh-squashed action for `state`.

        Args:
            state: environment observation.
            train: when True, sample from the policy (exploration); when
                False, act deterministically via the distribution mean.

        Returns:
            The squashed action as a numpy array in (-1, 1).
        """
        state = torch.tensor(state, dtype=torch.float32, device=self.device)
        with torch.no_grad():
            dist = self.actor(state)
            # Bug fix: `train` was previously ignored and the action was
            # always sampled; evaluation is now deterministic.
            z = dist.sample() if train else dist.mean
            return torch.tanh(z).cpu().numpy()

    def predict_act_log_prob(self, state):
        """Sample an action and its log-probability with the tanh
        change-of-variables correction.

        Returns:
            (action, log_prob) where action = tanh(z) for z ~ pi(.|state).

        NOTE(review): log_prob is kept per action dimension here; standard
        SAC sums it over the action dimension — confirm against the rest of
        the framework before changing.
        """
        dist = self.actor(state)
        z = dist.sample()
        action = torch.tanh(z)
        # SimpleRL-style correction: log pi(a|s) = log N(z) - log(1 - tanh(z)^2)
        log_prob = dist.log_prob(z) - torch.log(1 - action.pow(2) + self.min_Val)
        # OpenAI SpinningUp-style (numerically stabler) alternative:
        # log_prob = dist.log_prob(z)
        # log_prob -= (2*(np.log(2) - z - F.softplus(-2*z)))
        return action, log_prob

    def learn(self, state: np.ndarray, action: np.ndarray, reward, next_state: np.ndarray, done: bool):
        """Store a transition and run `update_times` SAC updates once the
        buffer holds at least `start_learn` transitions.

        Args:
            state, action, reward, next_state, done: a single environment
                transition, pushed to the replay buffer before learning.
        """
        super().learn(state, action, reward, next_state, done)
        self.buffer.push(state, action, reward, next_state, done)

        # Wait until enough experience has accumulated before training.
        if len(self.buffer) < self.config.start_learn:
            return
        for j in range(self.config.update_times):
            states, actions, rewards, next_states, dones = self.buffer.sample(self.config.batch_size)
            dones = dones.float()
            rewards = rewards.float()

            # Q target: r + gamma * (1 - done) * V_target(s')
            target_value = self.target_v_critic(next_states)
            next_q_value = rewards + (1 - dones) * self.config.gamma * target_value.squeeze()

            excepted_value = self.v_critic(states).squeeze()
            states_action = torch.cat([states, actions], dim=1)
            excepted_q = self.q_critic(states_action).squeeze()

            # Actions here are sampled from the CURRENT policy,
            # not taken from the replay buffer (as in the original paper).
            sample_action, log_prob = self.predict_act_log_prob(states)
            log_prob = log_prob.squeeze()
            sample_states_action = torch.cat([states, sample_action], dim=1)
            excepted_sample_q = self.q_critic(sample_states_action).squeeze()
            # V target: Q(s, a~pi) - log pi(a|s)  (entropy-regularized value)
            next_value = excepted_sample_q - log_prob

            v_loss = self.v_critic.criterion(excepted_value, next_value.detach())  # J_V

            # Single Q net — this differs from the original paper's twin-Q setup!
            q_loss = self.q_critic.criterion(excepted_q, next_q_value.detach())  # J_Q

            # Likelihood-ratio policy gradient target (advantage-like term).
            log_policy_target = excepted_sample_q - excepted_value

            pi_loss = log_prob * (log_prob - log_policy_target).detach()
            pi_loss = pi_loss.mean()

            # Mini-batch gradient descent on each network.
            self.v_critic.update(v_loss)
            self.q_critic.update(q_loss)
            self.actor.update(pi_loss)

            # Polyak/soft update of the target value critic.
            self.soft_update(self.target_v_critic, self.v_critic, self.config.tau)
