from typing import Optional

import torch


class Config:
	"""Hyper-parameter container for the RL training pipeline.

	Groups environment, device, agent (PPO-style), actor/critic optimizer,
	SAC, training-loop, and evaluation settings. Fields defaulting to
	``None`` (state/action dims, net dims, horizon, schedules) are
	env-dependent and must be assigned before training starts.
	"""

	def __init__(self):
		# env param (filled in once the environment is known)
		self.state_dim: Optional[int] = None
		self.action_dim: Optional[int] = None
		self.max_action: Optional[float] = None

		# device param
		self.device: torch.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

		# agent param
		self.net_dims: Optional[list] = None  # hidden-layer sizes of the networks
		self.actor_dist: str = "GS_ms"  # one of "Beta", "GS_ms", "GS_m"
		self.gae_lambda: float = 0.95
		self.adv_normalization: bool = False
		self.ppo_clip_rate: float = 0.25
		self.entropy_coef: float = 0.01  # entropy regularization, typical range 0.01~0.05
		self.entropy_decay_rate: float = 0.99

		# actor
		self.actor_gradient_clip: float = 40  # max gradient-norm clip for the actor
		self.actor_batch_size: int = 64
		self.actor_lr: float = 2e-4
		self.actor_lr_init: float = 5e-4
		self.actor_lr_end: float = 1e-5
		self.actor_lr_decay_steps: Optional[int] = None
		self.explore_noise: float = 0.01  # exploration noise variance
		self.explore_noise_decay: float = 0.998  # decay factor for exploration variance

		# critic
		self.critic_gradient_clip: float = 40  # max gradient-norm clip for the critic (comment previously said "actor" — copy-paste)
		self.critic_batch_size: int = 64
		self.critic_l2_reg: float = 1e-3  # L2 regularization to prevent overfitting
		self.critic_lr: float = 2e-4
		self.critic_lr_init: float = 5e-4
		self.critic_lr_end: float = 1e-5
		self.critic_lr_decay_steps: Optional[int] = None

		# SAC
		self.SAC_alpha: float = 1.0
		self.SAC_H_mean: int = 0
		self.SAC_adaptive_alpha: bool = True
		self.SAC_alpha_lr: float = 1e-4

		# train param
		self.cwd: str = './train_logs'  # working directory for logs/checkpoints
		self.break_steps: int = int(1e6)  # stop training after this many env steps
		self.seed: int = 0
		self.tau: float = 0.005  # soft target-network update rate
		self.random_steps: int = int(1e4)  # warm-up steps with random actions

		self.gamma: float = 0.99
		self.horizon_len: Optional[int] = None
		self.buffer_size: int = int(5e5)
		self.update_gap: Optional[int] = None
		self.update_every: Optional[int] = None
		self.repeat_times: int = 6

		# eval
		self.eval_times: int = 3
		self.eval_gap: int = int(100)



def evaluate_policy(env, agent, max_action: float, turns=3):
	"""Run `turns` evaluation episodes and return the mean episode return.

	The agent is queried with ``eval=True`` so it acts deterministically
	(outputs mu rather than sampling from its distribution); actions are
	already scaled to [-1, 1]. Note: ``max_action`` is accepted for
	interface compatibility but is not used inside this function.
	"""
	score_sum = 0
	for _ in range(turns):
		state, _info = env.reset()
		episode_over = False
		while not episode_over:
			# Deterministic action at test time.
			action = agent.get_action(state, eval=True)
			next_state, reward, terminated, truncated, _info = env.step(action)
			episode_over = terminated or truncated
			score_sum += reward
			state = next_state
	return float(score_sum / turns)
