from agents import *
import torch.nn.functional as F
import numpy as np
import torch
import copy
from my_utils import Config
from buffer_memory import ReplayBuffer
import os

# Type aliases used in annotations throughout this module.
ARR = np.ndarray
# NOTE: torch.Tensor (the class), not torch.tensor (the factory function) —
# a type alias must name a type so annotations and isinstance checks work.
TEN = torch.Tensor


class SAC_Agent():
	"""Soft Actor-Critic agent (Haarnoja et al., 2018).

	Uses twin Q-critics with a Polyak-averaged target copy, a stochastic
	actor, and (optionally) automatic tuning of the entropy temperature
	``alpha`` via gradient descent on ``log_alpha``.
	"""

	def __init__(self, config: Config):
		self.config = config
		self.tau = getattr(config, "tau", 0.005)  # Polyak averaging coefficient for the target critic
		self.alpha = getattr(config, "SAC_alpha", 1.0)  # entropy temperature
		self.adaptive_alpha = getattr(config, "SAC_adaptive_alpha", True)
		self.device = config.device

		# actor
		self.actor = Actor(self.config.state_dim, self.config.action_dim, self.config.net_dims).to(self.device)
		self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=self.config.actor_lr)

		# critic (twin Q-networks) and its target copy
		self.q_critic = Double_Q_Critic(self.config.state_dim, self.config.action_dim, self.config.net_dims).to(self.device)
		self.q_critic_optimizer = torch.optim.Adam(self.q_critic.parameters(), lr=self.config.critic_lr)
		self.q_critic_target = copy.deepcopy(self.q_critic)
		# Freeze target networks with respect to optimizers (only update via polyak averaging)
		for p in self.q_critic_target.parameters():
			p.requires_grad = False

		# buffer
		self.replay_buffer = ReplayBuffer(self.config.state_dim, self.config.action_dim, max_size=self.config.buffer_size, dvc=self.device)

		if self.adaptive_alpha:
			# Target Entropy = -dim(A) (e.g. -6 for HalfCheetah-v2) as given in the paper.
			# It is a fixed hyper-parameter — it is never optimized, so it must NOT require grad.
			self.target_entropy = torch.tensor(-self.config.action_dim, dtype=torch.float, device=self.device)
			# We learn log_alpha instead of alpha to ensure alpha > 0.
			self.log_alpha = torch.tensor(np.log(self.alpha), dtype=torch.float, requires_grad=True, device=self.device)
			self.alpha_optim = torch.optim.Adam([self.log_alpha], lr=self.config.SAC_alpha_lr)

	def get_action(self, state: ARR, eval: bool):
		"""Select an action for a single (un-batched) state.

		`state` is a 1-D numpy observation; it is batched, fed through the
		actor, and the resulting action is returned as a 1-D numpy array.
		`eval` is forwarded to the actor (presumably deterministic-vs-sampled
		action selection — confirm against the Actor implementation).
		"""
		with torch.no_grad():
			state = torch.from_numpy(state).unsqueeze(0).to(self.device)
			a, _ = self.actor(state, eval)
			return a.cpu().squeeze(0).numpy()

	def get_vec_env_action(self, state: ARR, eval: bool):
		"""Select actions for an already-batched observation from a vectorized env."""
		with torch.no_grad():
			state = torch.from_numpy(state).to(self.device)
			a, _ = self.actor(state, eval)
			return a.cpu().numpy()

	def update_agent(self):
		"""Run one SAC gradient step: critic, actor, (optionally) alpha, then target net."""
		s, a, r, s_next, dw = self.replay_buffer.sample(self.config.critic_batch_size)
		self._update_critic(s, a, r, s_next, dw)
		log_pi_a = self._update_actor(s)
		if self.adaptive_alpha:
			self._update_alpha(log_pi_a)
		self._soft_update_target()

	def _update_critic(self, s, a, r, s_next, dw):
		"""Regress both Q-heads onto the soft Bellman target."""
		with torch.no_grad():
			a_next, log_pi_a_next = self.actor(s_next, eval=False)
			target_Q1, target_Q2 = self.q_critic_target(s_next, a_next)
			# Clipped double-Q: take the pessimistic head, then add the entropy bonus.
			target_Q = torch.min(target_Q1, target_Q2)
			target_Q = r + (~dw) * self.config.gamma * (target_Q - self.alpha * log_pi_a_next)  # dw masks bootstrapping at terminal states

		current_Q1, current_Q2 = self.q_critic(s, a)
		q_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)
		self.q_critic_optimizer.zero_grad()
		q_loss.backward()
		self.q_critic_optimizer.step()

	def _update_actor(self, s):
		"""Maximize the soft Q-value of the actor's actions; returns log pi(a|s)."""
		# Freeze critic so the actor backward pass doesn't waste effort on critic grads.
		for params in self.q_critic.parameters():
			params.requires_grad = False

		a, log_pi_a = self.actor(s, eval=False)  # log_pi_a = log pi(a|s) = -entropy
		current_Q1, current_Q2 = self.q_critic(s, a)
		Q = torch.min(current_Q1, current_Q2)

		a_loss = - (Q - self.alpha * log_pi_a).mean()  # a_loss = -Q_soft: actor maximizes the soft Q-value
		self.actor_optimizer.zero_grad()
		a_loss.backward()
		self.actor_optimizer.step()

		for params in self.q_critic.parameters():
			params.requires_grad = True
		return log_pi_a

	def _update_alpha(self, log_pi_a):
		"""One gradient step on log_alpha toward the target entropy."""
		# Objective: max over alpha of alpha * (target entropy - current entropy).
		alpha_loss = - (self.log_alpha * (log_pi_a + self.target_entropy).detach()).mean()
		self.alpha_optim.zero_grad()
		alpha_loss.backward()
		self.alpha_optim.step()
		# Detach: otherwise self.alpha stays attached to log_alpha's graph and the
		# NEXT iteration's actor loss backward would push spurious grads into log_alpha.
		self.alpha = self.log_alpha.exp().detach()

	def _soft_update_target(self):
		"""Polyak-average the online critic into the target critic."""
		for param, target_param in zip(self.q_critic.parameters(), self.q_critic_target.parameters()):
			target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)

	def save_agent(self, file_name: str = None):
		"""Save actor and critic state_dicts under config.cwd (created if missing)."""
		cwd = getattr(self.config, "cwd", ".")
		os.makedirs(cwd, exist_ok=True)  # exist_ok makes this safe for "." too
		# os.path.join instead of f"{cwd}/..." for cross-platform paths.
		if file_name is not None:
			actor_file_path = os.path.join(cwd, f"{file_name}__actor.pth")
			critic_file_path = os.path.join(cwd, f"{file_name}__critic.pth")
		else:
			actor_file_path = os.path.join(cwd, "actor__model.pth")
			critic_file_path = os.path.join(cwd, "critic__model.pth")
		# save model
		torch.save(self.actor.state_dict(), actor_file_path)
		torch.save(self.q_critic.state_dict(), critic_file_path)
