import numpy as np
import ray, time
from Config import LearnerPara, EnvControl

def check_dones(dones):
    """Debug guard: every entry of *dones* must carry the same boolean value.

    The parallel sub-environments are expected to terminate in lockstep, so a
    mixed vector (some True, some False) indicates a synchronization bug.

    Parameters
    ----------
    dones : array-like of bool
        Per-environment done flags (a scalar or 1-D vector).

    Raises
    ------
    AssertionError
        If the flags disagree with each other. An empty vector passes
        (vacuously all-same), unlike the previous XOR-of-any() formulation
        which wrongly failed on it.
    """
    dones = np.asarray(dones, dtype=bool)  # normalize ints/lists to bools
    # "all the same" == "all True" or "none True"
    assert dones.all() or not dones.any()

'''
    The raw environment must be wrapped in a class decorated with @ray.remote
    so that Ray can instantiate and drive it as a remote actor.
'''
@ray.remote
class EnvWithRay(object):
    """Ray actor that wraps one raw environment instance.

    Mirrors the wrapped env's observation/action spaces and auto-resets the
    env whenever an episode terminates inside step().
    """

    def __init__(self, createEnvHandle, process_index=0):
        # Build the underlying env here, inside the actor process, and
        # cache its space descriptors for remote queries.
        self.env = createEnvHandle(process_index=process_index)
        self.observation_space = self.env.observation_space
        self.action_space = self.env.action_space

    def step(self, act):
        observation, rew, done_flags, extra = self.env.step(act)
        check_dones(done_flags)  # debug guard: all done items should agree
        if np.any(done_flags):
            # Episode finished -> restart the wrapped (outer) environment.
            observation = self.env.reset()
        return (observation, rew, done_flags, extra)

    def reset(self):
        return self.env.reset()

    def render(self):
        return self.env.render()

    def close(self):
        # Intentionally a no-op: the wrapped env is never closed here.
        return None

    def get_act_space(self):
        # Remote accessor for the cached action space.
        return self.action_space

    def get_obs_space(self):
        # Remote accessor for the cached observation space.
        return self.observation_space

'''
    Uses the Ray framework to drive a bundle of environments in
    synchronized (lockstep) steps.
'''
class RayParallelEnv(object):
    """Vectorized environment that steps LearnerPara.num_thread EnvWithRay
    actors in lockstep through Ray.

    Exposes a gym-VecEnv-like interface: num_envs, observation_space,
    action_space, step(), reset().
    """

    def __init__(self, createEnvHandle):
        # ray.init() raises RuntimeError when Ray is already running in this
        # process; guard so the wrapper can be constructed more than once.
        if not ray.is_initialized():
            ray.init()
        self.ray_env_vector = [
            EnvWithRay.remote(createEnvHandle=createEnvHandle, process_index=thread)
            for thread in range(LearnerPara.num_thread)
        ]
        self.num_envs = LearnerPara.num_thread
        # One space descriptor per actor, fetched remotely.
        self.observation_space = ray.get([env.get_obs_space.remote() for env in self.ray_env_vector])
        self.action_space = ray.get([env.get_act_space.remote() for env in self.ray_env_vector])
        # Heterogeneous spaces across parallel envs are not supported; verify
        # that EVERY env agrees (previously only first vs last were compared,
        # so a mismatched middle env could slip through).
        if self.num_envs >= 2:
            assert all(s == self.observation_space[0] for s in self.observation_space)
            assert all(s == self.action_space[0] for s in self.action_space)
        # Placeholder for observation running-mean/std statistics; unused here.
        self.ob_rms = None

    def step(self, act_vector):
        """Step every env with its own action; returns stacked
        (obs, rewards, dones, infos) where infos stays a tuple."""
        results = ray.get([env.step.remote(act) for (env, act) in zip(self.ray_env_vector, act_vector)])
        # results = [(obs, reward, done, info), ...] -> transpose per field
        obs, rews, dones, infos = zip(*results)
        return np.stack(obs), np.stack(rews), np.stack(dones), infos

    def reset(self):
        """Reset every env and return the stacked initial observations."""
        results = ray.get([env.reset.remote() for env in self.ray_env_vector])
        return np.stack(results)