import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal

# NOTE(review): TEN aliases the torch.tensor *factory function*, not the
# torch.Tensor class — using it as a type annotation below is misleading.
TEN = torch.tensor


def build_net(layer_shape, hidden_activation, output_activation):
	"""Build an MLP as nn.Sequential from a list of layer widths.

	Each consecutive pair in `layer_shape` becomes a Linear layer followed by
	an activation: `hidden_activation` for all but the last pair, and
	`output_activation` after the final Linear. Activations are passed as
	classes (e.g. nn.ReLU), instantiated here.
	"""
	modules = []
	last_idx = len(layer_shape) - 2
	for idx, (n_in, n_out) in enumerate(zip(layer_shape[:-1], layer_shape[1:])):
		activation = output_activation if idx == last_idx else hidden_activation
		modules.append(nn.Linear(n_in, n_out))
		modules.append(activation())
	return nn.Sequential(*modules)


class Actor(nn.Module):
	"""Gaussian policy with tanh-squashed actions bounded to [-1, +1]."""

	def __init__(self, state_dim: int, action_dim: int, net_dims: list[int]):
		super().__init__()
		layers = [state_dim] + net_dims
		# Shared trunk; mu and log_std heads branch off its last feature layer.
		self.share_net = build_net(layers, nn.ReLU, nn.ReLU)
		self.mu_layer = nn.Linear(layers[-1], action_dim)
		self.log_std_layer = nn.Linear(layers[-1], action_dim)

		# Clamp range for log_std, i.e. std in [exp(-20), exp(2)].
		self.LOG_STD_MAX = 2
		self.LOG_STD_MIN = -20

	def forward(self, state: torch.Tensor, eval: bool):
		"""Sample an action with enforced bounds and return its log-probability.

		Args:
			state: batch of states, shape (batch, state_dim).
			eval: if True, return the deterministic mean action; otherwise draw
				a reparameterized sample (rsample keeps gradients flowing).

		Returns:
			action: tanh-squashed actions in [-1, +1], shape (batch, action_dim).
			logp_pi_a: log-probability of each action, shape (batch, 1).
		"""
		share_feature = self.share_net(state)
		mu = self.mu_layer(share_feature)
		log_std = self.log_std_layer(share_feature)
		# We learn log_std rather than std so that exp(log_std) is always > 0.
		# (Trick: to learn a strictly positive parameter, learn its log.)
		log_std = torch.clamp(log_std, self.LOG_STD_MIN, self.LOG_STD_MAX)
		std = torch.exp(log_std)
		dist = Normal(mu, std)
		# NOTE: parameter name `eval` shadows the builtin; kept for caller compatibility.
		a = mu if eval else dist.rsample()

		'''↓↓↓ Enforcing Action Bounds, see Page 16 of https://arxiv.org/pdf/1812.05905.pdf ↓↓↓'''
		action = torch.tanh(a)

		# Change of variables: logp(action) = log N(a; mu, std) - sum_i log(1 - tanh(a_i)^2).
		# The correction is rewritten as 2*(log 2 - a - softplus(-2a)), which is
		# algebraically equal but avoids tanh entirely — less gradient vanishing,
		# more numerical stability. (Fixed: torch uses `dim=`, not numpy's `axis=`.)
		logp_pi_a = (dist.log_prob(a) - 2 * (np.log(2) - a - F.softplus(-2 * a))).sum(dim=1, keepdim=True)
		return action, logp_pi_a  # action in [-1, +1]


class Double_Q_Critic(nn.Module):
	"""Twin Q-networks (clipped double-Q style) sharing one forward pass."""

	def __init__(self, state_dim: int, action_dim: int, net_dims: list[int]):
		super().__init__()
		# Each critic maps the concatenated (state, action) to a scalar Q-value.
		layers = [state_dim + action_dim] + net_dims + [1]

		self.Q1 = build_net(layers, nn.ReLU, nn.Identity)
		self.Q2 = build_net(layers, nn.ReLU, nn.Identity)

	def forward(self, state: torch.Tensor, action: torch.Tensor):
		"""Return both Q-value estimates, each of shape (batch, 1).

		Args:
			state: batch of states, shape (batch, state_dim).
			action: batch of actions, shape (batch, action_dim).
		"""
		sa = torch.cat([state, action], dim=1)
		q1 = self.Q1(sa)
		q2 = self.Q2(sa)
		return q1, q2
