'''
@Author: JiBingyu
@Date: 2024/04/06
@Description: Smoke test for try_new_env.py — drives a single consolidated
model, for the case where several agents are controlled centrally from one
Python script.
'''
import sys
import os
import numpy as np

# Make the repository root importable so that `train` and `envs` resolve
# no matter which directory this script is launched from.
current_dir = os.path.dirname(__file__)
parent_dir = os.path.abspath(os.path.join(current_dir, os.pardir))
if parent_dir not in sys.path:  # guard: avoid piling up duplicate entries on re-runs
    sys.path.append(parent_dir)
from train.try_new_env import Get_Run
from envs.env_wrappers import DummyVecEnv

def make_eval_env(env_size=None):
    """Build a single-environment DummyVecEnv for evaluation.

    Args:
        env_size: configuration forwarded to ContinuousActionEnv,
            e.g. ``["Circle", [0, 0], 10]``. Defaults to an empty list.
            (``None`` sentinel avoids the shared-mutable-default pitfall
            of the previous ``env_size=[]`` signature.)

    Returns:
        A DummyVecEnv wrapping exactly one seeded ContinuousActionEnv.
    """
    if env_size is None:
        env_size = []

    def get_env_fn(rank):
        def init_env(env_size):
            # Imported lazily so this module stays importable even when
            # the environment package is unavailable.
            from envs.env_continuous import ContinuousActionEnv
            env = ContinuousActionEnv(env_size)
            # Deterministic, rank-distinct seed.
            env.seed(1 + rank * 1000)
            return env
        return init_env
    # Exactly one evaluation environment (rank 0).
    return DummyVecEnv([get_env_fn(i) for i in range(1)], env_size)


# Restore a trained policy and roll it for two evaluation steps, printing
# the action and the resulting observation at every step.
model = Get_Run("D:/Reinforce/light_mappo-main/light_mappo-main/run41/models/")
env = make_eval_env(["Circle", [0, 0], 10])
images = []  # reserved for optional frame collection (see NOTE below)

# Fixed initial observation: one env batch of 3 agents x 8 features each.
obs = np.array([
                [[29, 14, 0, 0, -7.75, 14, 0, 0],
                [-34, -40, 0, 0, -7.75, 14, 0, 0],
                [26, 21, 0, 0, -7.75, 14, 0, 0]]
                ])
# NOTE(review): to render a whole episode instead, start from
# obs = env.reset(), append the `image` returned by step_eval to `images`
# each step, and save with imageio.mimsave(<path>, images, 'GIF', duration=1).

for _ in range(2):
    eval_actions_env, image = model.step_eval(obs)
    print("eval_actions_env", eval_actions_env)
    obs, eval_communicate_mask, eval_rewards, eval_dones, eval_infos = env.step(eval_actions_env)
    print("obs", obs)
'''
obs ([
    [[29, 14, 0, 0, -7.75, 14, 0, 0],
    [-34, -40, 0, 0, -7.75, 14, 0, 0],
    [26, 21, 0, 0, -7.75, 14, 0, 0]]
    ])
# obs = env.reset()
eval_actions_env 
[[array([ -1.0349708, -14.515792 ], dtype=float32), array([ 0.5673896, 17.67145  ], dtype=float32), array([-0.54076904, -2.8276782 ], 
dtype=float32)]]
obs [[[-58.         -23.          -1.03497076 -14.51579189  10.
     0.           0.99958339  92.86478898]
  [ -3.         -12.           0.56738961  17.67144966  10.
     0.           0.99958339  92.86478898]
  [-32.          31.          -0.54076904  -2.8276782   10.
     0.           0.99958339  92.86478898]]]

     
eval_actions_env [[array([  0.13822578, -13.115194  ], dtype=float32), array([ 0.11770868, 11.686722  ], dtype=float32), array([-0.33184803, 14.483552 
 ], dtype=float32)]]
obs [[[-46.         -17.           0.13822578 -13.11519432   9.95004165
     0.99833417   0.99958339  98.59436693]
  [  7.          -7.           0.11770868  11.6867218    9.95004165
     0.99833417   0.99958339  98.59436693]
  [-24.          18.          -0.33184803  14.48355198   9.95004165
     0.99833417   0.99958339  98.59436693]]]
     
'''