import einops
import numpy as np
import torch
from config.cfg import Config
from nets.SACNet import ActorSAC, CriticSAC
from utils.misc import disable_gradients, enable_gradients, hard_update
from torch.optim import AdamW


class SACAgent:
    """Soft Actor-Critic agent: one actor/critic pair plus target copies.

    Each agent owns its own optimizers. Checkpoint dictionary keys are
    namespaced by the agent id so several agents can share one saved dict.
    """

    def __init__(self, id: int, policy_dims: list, in_dim: int, out_dim: int,
                 critic_dims: list, state_dim: int, action_dim_sum: int, args: Config):
        """Build online/target actor and critic networks and their optimizers.

        Args:
            id: agent index, used to namespace checkpoint keys.
            policy_dims: hidden-layer sizes for the actor network.
            in_dim: actor observation dimension.
            out_dim: actor action dimension.
            critic_dims: hidden-layer sizes for the critic network.
            state_dim: critic (joint) state dimension.
            action_dim_sum: critic (joint) action dimension.
            args: run configuration (device, actor_lr, critic_lr, ...).
        """
        # Agent id and the keys used when (de)serializing this agent's params.
        self.id = id
        self.policy_key = "policy_" + str(self.id)
        self.target_key = "target_" + str(self.id)
        # NOTE(review): missing '_' before the id, unlike the keys above; kept
        # byte-identical so previously saved checkpoints still load.
        self.policy_opt_key = "policy_optimizer" + str(self.id)

        self.args = args
        self.policy = ActorSAC(
            dims=policy_dims,
            obs_dim=in_dim,
            action_dim=out_dim
        ).to(args.device)
        self.target_policy = ActorSAC(
            dims=policy_dims,
            obs_dim=in_dim,
            action_dim=out_dim
        ).to(args.device)
        self.critic = CriticSAC(
            dims=critic_dims, state_dim=state_dim, action_dim=action_dim_sum).to(device=self.args.device)
        self.target_critic = CriticSAC(
            dims=critic_dims, state_dim=state_dim, action_dim=action_dim_sum).to(device=self.args.device)
        # Synchronize online and target weights at start.
        # NOTE(review): confirm hard_update's parameter order — if its
        # signature is (target, source) these calls copy in the opposite
        # direction; harmless here (both nets freshly initialized) but wrong
        # anywhere else.
        hard_update(self.critic, self.target_critic)
        hard_update(self.policy, self.target_policy)

        self.policy_optimizer = AdamW(
            self.policy.parameters(), lr=args.actor_lr)
        self.critic_optimizer = AdamW(
            self.critic.parameters(), lr=args.critic_lr)

    def step(self, obs):
        """Run the online policy on an already-batched tensor observation.

        Returns whatever ActorSAC.forward returns (used elsewhere as an
        (action, log_prob)-style pair — see take_action's unpacking).
        """
        return self.policy(obs)

    def _infer_action(self, net, obs) -> np.ndarray:
        """Batch a raw observation, run `net` without autograd, return a flat numpy action.

        `obs` is an array-like single observation (NOT a batched tensor);
        it is converted to float and given a leading batch dimension.
        """
        obs_t = torch.as_tensor(np.asarray(obs), dtype=torch.float).unsqueeze(0).to(
            device=self.args.device)
        # Inference only — no_grad avoids building a useless autograd graph
        # (output identical to the previous detach()-based version).
        with torch.no_grad():
            action, _ = net(obs_t)
        return action.cpu().numpy().flatten()

    def take_action(self, obs) -> np.ndarray:
        """Sample an action for a single observation from the online policy."""
        # Fixed misleading annotation: obs is a raw observation, not a Tensor.
        return self._infer_action(self.policy, obs)

    def take_target_action(self, obs) -> np.ndarray:
        """Sample an action for a single observation from the target policy."""
        return self._infer_action(self.target_policy, obs)

    def get_params(self) -> dict:
        """Return this agent's serializable state under id-namespaced keys.

        NOTE(review): critic, target_critic and critic_optimizer state are
        not saved — confirm that is intentional (load_params cannot restore
        them).
        """
        return {
            self.policy_key: self.policy.state_dict(),
            self.target_key: self.target_policy.state_dict(),
            self.policy_opt_key: self.policy_optimizer.state_dict(),
        }

    def load_params(self, params: dict):
        """Restore actor, target actor and actor-optimizer state from `params`.

        Raises KeyError if any of this agent's namespaced keys is missing.
        """
        self.policy.load_state_dict(params[self.policy_key])
        self.target_policy.load_state_dict(params[self.target_key])
        self.policy_optimizer.load_state_dict(params[self.policy_opt_key])