import numpy as np
import time, utils
from z_config import GlobalConfig

def check_dones(dones):
    """Assert that every per-agent done flag agrees (all True or all False).

    Debug helper for vectorized multi-agent environments: an episode is
    expected to terminate for all agents simultaneously, so a mixed done
    vector indicates a bug upstream. An empty vector is also rejected,
    matching the original XOR-style check.
    """
    dones = np.asarray(dones, dtype=bool)
    # Exactly one of "contains True" / "contains False" must hold, i.e.
    # the array is non-empty and all its elements are equal.
    assert dones.size > 0 and (dones.all() or not dones.any())

'''
'''
class EnvWithRay(object):
    """Single-environment worker intended to run inside a ray remote actor.

    Wraps the environment built by ``utils.make_env`` and auto-resets it
    whenever an episode ends, so the parent vectorized wrapper always
    receives a fresh observation after termination.
    """

    def __init__(self, env_args_dict):
        """Build the environment for one worker process.

        env_args_dict: dict with keys 'args' (experiment arguments) and
        'proc_index' (this worker's index, also used for seeding).
        """
        args = env_args_dict['args']
        proc_index = env_args_dict['proc_index']
        self.pid = proc_index
        env_init_fn = utils.make_env(args.env_name, args.seed, proc_index, args.num_agents, args.dist_threshold, args.arena_size, args.identity_size)
        # (removed a leftover debug branch: `if self.pid == 1: assert True`
        # had no effect)
        self.env = env_init_fn()
        self.observation_space = self.env.observation_space
        self.action_space = self.env.action_space
        print('EnvWithRay process_index:', proc_index, ' seed', args.seed)

    def step(self, act):
        """Step the env; auto-reset when the episode terminates."""
        ob, reward, done, info = self.env.step(act)
        check_dones(done)  # for debug, all done items should be the same
        if np.any(done):
            # Episode finished: reset and hand back the first observation
            # of the next episode (outer-environment reset).
            ob = self.env.reset()
        return (ob, reward, done, info)

    def reset(self):
        return self.env.reset()

    def render(self):
        return self.env.render()

    def close(self):
        # Nothing to release here; actor-process teardown closes the env.
        return None

    def get_act_space(self):
        return self.action_space

    def get_obs_space(self):
        return self.observation_space

'''
    This class uses the ray framework to control a bundle of environments
    so that they step in sync.

    Example usage with a super-actor array:

    for index, super_actor in enumerate(self.super_actor_array):
            super_actor.add_sub.remote('gSubEnv', lambda: GroupSubspace(N_group=self.group_num, N_agent=self.num_agents, thread_index=index))

    g_sobs, g_dobs = ray.get(
                self.super_actor_array[procindex].do_sub.remote(name='gSubEnv', dowhat='reset')
            )
'''
class SubprocVecEnv(object):
    """Vectorized-environment front-end driven through GlobalConfig.SmartPool.

    Spawns one EnvWithRay worker per entry of ``env_args_dict_list`` and
    exposes the usual vec-env interface (reset / step_async / step_wait).
    """

    def __init__(self, env_args_dict_list, spaces=None):
        self.SuperPool = GlobalConfig.SmartPool
        self.num_envs = len(env_args_dict_list)
        self.SuperPool.add_target(name='env', lam=EnvWithRay, args_list=env_args_dict_list)

        # Keep the per-environment space lists so we can compare them.
        obs_spaces = self.SuperPool.exec_target(name='env', dowhat='get_obs_space')
        act_spaces = self.SuperPool.exec_target(name='env', dowhat='get_act_space')
        self.observation_space = obs_spaces[0]
        self.action_space = act_spaces[0]

        # This program has not yet considered different action spaces in
        # parallel environments; just be careful if this assert is hit.
        # NOTE(review): the original indexed *into* env 0's space
        # (self.observation_space[0] == self.observation_space[-1]) and so
        # never compared the environments to each other; comparing the
        # first and last workers' spaces matches the stated intent —
        # confirm against how the spaces are structured.
        if self.num_envs >= 2:
            assert obs_spaces[0] == obs_spaces[-1]
            assert act_spaces[0] == act_spaces[-1]
        self.ob_rms = None
        return

    def step_async(self, actions):
        # Stash the actions; the actual step happens in step_wait().
        self.actions = actions
        return

    def step_wait(self):
        """Execute the previously submitted actions on every worker."""
        results = self.SuperPool.exec_target(name='env', dowhat='step', args_list=self.actions)
        # results = [(obs, reward, done, info), ...] — one tuple per env
        obs, rews, dones, infos = zip(*results)
        return np.stack(obs), np.stack(rews), np.stack(dones), infos

    def reset(self):
        """Reset every worker and stack their initial observations."""
        results = self.SuperPool.exec_target(name='env', dowhat='reset')
        return np.stack(results)