from config.cfg import Config
from nets.BaseNet import ActorBase, CriticBase
from nets.utils import build_mlp, layer_init_with_orthogonal
from torch.distributions import Normal
import torch


class ActorSAC(ActorBase):
    """SAC actor: a squashed-Gaussian stochastic policy.

    Encodes the state, predicts the mean and log-std of a diagonal
    Gaussian, samples with the reparameterization trick, and squashes
    the sample through tanh so actions lie in (-1, 1).
    """

    def __init__(self, dims: list, obs_dim: int, action_dim: int):
        """
        Args:
            dims: hidden-layer widths of the shared state encoder.
            obs_dim: dimensionality of the observation vector.
            action_dim: dimensionality of the action vector.
        """
        super().__init__(obs_dim, action_dim)
        # Shared state encoder; is_raw_out=False keeps the final activation.
        self.net_s = build_mlp(dims=[obs_dim, *dims], is_raw_out=False)
        # Linear heads for the Gaussian mean and log-std
        # (self.std outputs the LOG of the std, exponentiated in forward).
        self.mu = build_mlp(dims=[dims[-1], action_dim])
        self.std = build_mlp(dims=[dims[-1], action_dim])
        layer_init_with_orthogonal(self.mu[-1], std=0.1)
        layer_init_with_orthogonal(self.std[-1], std=0.1)

    def forward(self, state):
        """Sample an action and its log-probability for the given state.

        Returns:
            action_tanh: tanh-squashed action in (-1, 1),
                shape (batch, action_dim).
            log_prob: per-sample log-probability with the tanh
                change-of-variables correction, summed over the action
                dimension, shape (batch, 1).
        """
        # state = self.state_norm(state)
        s_enc = self.net_s(state)
        mu = self.mu(s_enc)
        std_log = self.std(s_enc)
        # Clamp the log-std for numerical stability before exponentiating.
        std = std_log.clamp(-16, 2).exp()

        dist = Normal(mu, std)
        # rsample() keeps gradients flowing through the sampling step.
        normal_sample = dist.rsample()
        action_tanh = normal_sample.tanh()

        # Tanh change-of-variables correction:
        # log pi(a|s) = log N(u|mu,std) - log(1 - tanh(u)^2);
        # the 1e-7 epsilon guards against log(0) at saturation.
        log_prob = dist.log_prob(normal_sample)
        log_prob -= torch.log(1 - action_tanh.pow(2) + 1e-7)
        return action_tanh, torch.sum(log_prob, dim=1, keepdim=True)


class CriticSAC(CriticBase):
    """SAC twin-Q critic: one MLP whose 2-unit head yields two Q-values."""

    def __init__(self, dims: list, state_dim: int, action_dim: int):
        """
        Args:
            dims: hidden-layer widths of the Q-network.
            state_dim: dimensionality of the state vector.
            action_dim: dimensionality of the action vector.
        """
        super().__init__(state_dim, action_dim)
        # A single network with two output units implements both critics
        # (clipped double-Q); input is the concatenated state-action pair.
        self.net = build_mlp(dims=[state_dim+action_dim, *dims, 2])
        layer_init_with_orthogonal(self.net[-1], std=0.5)

    def forward(self, state, action):
        """Return the two Q-value estimates (q1, q2) for (state, action).

        Both returned tensors have shape (batch,).
        """
        # state = self.state_norm(state)
        cat = torch.cat([state, action], dim=1)
        value = self.net(cat)
        # NOTE(review): value_re_norm is inherited from CriticBase —
        # presumably a value re-normalization/denormalization step;
        # confirm against the base class.
        value = self.value_re_norm(value)
        return value[:, 0], value[:, 1]
