'''
@Author: JiBingyu
@Date: 2024/04/06
@Description: 单独控制每个模型的运行，通过输入输出的方式进行模型的调用
实际使用的时候:
from separated_model import Separated_model
model = Separated_model(config, id)  # id in [0, 1, 2]
LOOP:
    actions = model.step(separated_obs)
    separated_obs, ... = env.step(actions)
'''
import sys, os
import torch
from gym import spaces
import numpy as np
import math

current_dir = os.path.dirname(__file__)
parent_dir = os.path.abspath(os.path.join(current_dir, os.pardir))
sys.path.append(parent_dir)

from algorithms.algorithm.rMAPPOPolicy import RMAPPOPolicy as Policy
from algorithms.algorithm.r_mappo import RMAPPO as TrainAlgo
from config_env import get_env_config
parser_ev = get_env_config()

def _t2n(x):
    return x.detach().cpu().numpy()

class Separated_model:
    """Standalone inference wrapper for one agent's trained rMAPPO actor.

    Usage:
        model = Separated_model(config, id)   # id in [0, 1, 2]
        actions = model.step(separated_obs)   # call once per env step

    Loads the actor checkpoint for agent ``id`` from ``model_dir`` and maps
    per-thread observations to deterministic actions, carrying the recurrent
    hidden state across successive ``step()`` calls.
    """

    def __init__(self, config, id):
        """
        Args:
            config: dict with keys "all_args" (parsed run arguments),
                "device" (torch device) and "num_agents".
            id: index of the agent whose actor checkpoint is loaded.
        """
        self.all_args = config["all_args"]
        self.n_eval_rollout_threads = self.all_args.n_eval_rollout_threads
        self.device = config["device"]
        self.recurrent_N = self.all_args.recurrent_N
        self.hidden_size = self.all_args.hidden_size
        self.model_dir = self.all_args.model_dir
        self.ID = id

        self.num_agents = config["num_agents"]

        # Single-agent observation space: a flat vector of obs_dim features.
        obs_space = spaces.Box(
            low=-np.inf,
            high=+np.inf,
            shape=(parser_ev["obs_dim"],),
            dtype=np.float32,
        )
        # Centralized (shared) observation spaces: one entry per agent, each
        # the concatenation of all agents' observations.
        share_obs_space = [
            spaces.Box(
                low=-np.inf,
                high=+np.inf,
                shape=(parser_ev["obs_dim"] * parser_ev["agent_num"],),
                dtype=np.float32,
            )
            for _ in range(parser_ev["agent_num"])
        ]
        # Continuous action space bounded to [-pi, +pi] per action dimension.
        act_space = spaces.Box(
            low=-math.pi,
            high=+math.pi,
            shape=(parser_ev["action_dim"],),
            dtype=np.float32,
        )
        self.policy = Policy(
            self.all_args,
            obs_space,
            share_obs_space,
            act_space,
            device=self.device,
            is_separated=True,
        )
        self.trainer = TrainAlgo(self.all_args, self.policy, device=self.device, is_separated=True)

        # Recurrent hidden state and episode masks, persisted across step() calls.
        self.eval_rnn_states = np.zeros(
            (self.n_eval_rollout_threads, self.recurrent_N, self.hidden_size),
            dtype=np.float32,
        )
        self.eval_masks = np.ones((self.n_eval_rollout_threads, 1), dtype=np.float32)
        self.restore()

    def restore(self):
        """Load this agent's actor weights from ``<model_dir>/actor_agent<ID>.pt``.

        Raises whatever ``torch.load`` / ``load_state_dict`` raise on a missing
        or incompatible checkpoint.
        """
        actor_path = os.path.join(str(self.model_dir), "actor_agent" + str(self.ID) + ".pt")
        # map_location keeps checkpoints saved on GPU loadable on CPU-only hosts.
        policy_actor_state_dict = torch.load(actor_path, map_location=self.device)
        self.policy.actor.load_state_dict(policy_actor_state_dict)

    @torch.no_grad()
    def step(self, eval_obs):
        """Compute deterministic actions for one environment step.

        Args:
            eval_obs: per-thread observations; convertible to an array of
                shape [n_eval_rollout_threads, obs_dim].

        Returns:
            A list of length n_eval_rollout_threads; each entry is a
            one-element list holding that thread's action array
            (i.e. the historical [envs, agents, dim] layout).
        """
        self.trainer.prep_rollout()
        eval_action, eval_rnn_state = self.trainer.policy.act(
            np.array(list(eval_obs)),  # [n_threads, obs_dim]
            self.eval_rnn_states,      # [n_threads, recurrent_N, hidden_size]
            self.eval_masks,           # [n_threads, 1]
            deterministic=True,
        )
        # Carry the recurrent state forward to the next call.
        self.eval_rnn_states = _t2n(eval_rnn_state)

        eval_action = _t2n(eval_action)  # [n_threads, action_dim]
        # Preserve the original nested output structure: one single-agent
        # action list per rollout thread.
        return [[eval_action[i]] for i in range(self.n_eval_rollout_threads)]
    
