from abc import ABC, abstractmethod
from collections import deque
import queue
import numpy as np
import random
from threading import local
import time
import numpy as np
from isaacgym import gymutil
from isaacgym import gymapi
#from isaacgym import gymtorch
from math import sqrt
import math
# from omegaconf import read_write
from sympy import false, re
import torch
import cv2
import gym
from gym import spaces

import os

from draw import *

from pcgworker.PCGWorker import *

from stable_baselines3.common.env_checker import check_env

N_DISCRETE_ACTIONS = 6
N_CHANNELS = 3
HEIGHT = 84
WIDTH = 84

class PCGVecEnv(ABC):
    """Vectorized Isaac Gym environment over procedurally generated (WFC) maps.

    Follows the classic gym.Env interface conventions while simulating
    ``num_matrix_envs`` sub-environments inside a single PhysX simulation.
    """

    # Supported render modes.  NOTE: the original code assigned ``metadata``
    # twice, and the second (empty) assignment silently discarded the
    # 'human'/'rgb_array' modes declared first; the two are merged here.
    metadata = {"render.modes": ["human", "rgb_array"]}
    reward_range = (-float("inf"), float("inf"))
    spec = None

    # Placeholders per the gym.Env contract; the ``action_space`` /
    # ``observation_space`` properties defined later in this class override
    # them and serve the spaces built in ``__init__``.
    action_space = None
    observation_space = None


    def __init__(self, headless_: bool = True, seed_pth: str = "blank_wave.json", render_indicator: bool = True,
                compute_device_id = 0, graphics_device_id = 0):
        """Build the vectorized simulation.

        Sets up gym spaces, PhysX sim parameters, the ground plane, loads the
        PCG block assets, generates the initial WFC seed maps, creates all
        actors (building blocks, agent capsule, food capsule, camera,
        attractors) for every sub-environment, arranges the landscapes and
        snapshots the initial rigid-body state.

        Args:
            headless_: when True, no viewer window is created.
            seed_pth: JSON file holding the initial WFC wave used for all maps.
            render_indicator: stored flag, presumably consumed by render logic
                outside this view — confirm.
            compute_device_id: PhysX compute device index.
            graphics_device_id: graphics device index for camera sensors.
        """

        #os.environ['CUDA_VISIBLE_DEVICES']='0,1,2,3,4,5,6,7'

        # Define action and observation space
        self._action_space = spaces.Discrete(N_DISCRETE_ACTIONS)
        self._obs_space = spaces.Box(low=0, high=255, shape=(N_CHANNELS, HEIGHT, WIDTH), dtype=np.uint8)

        # Fixed torch seed so any torch-side sampling is reproducible.
        torch.manual_seed(3407)

        self.timer0 = time.time()

        '''
        simulatiom parameters
        '''
        # number of parallel sub-environments simulated in one sim
        self.num_matrix_envs = 16
        self.headless = headless_
        # NOTE(review): presumably an agent movement-speed multiplier — confirm
        # against the step logic outside this view.
        self.spd = 10

        self.render_indicator = render_indicator

        # spawn height for agent/food capsules (z of initial pose)
        self.initial_height = 2.05

        # gravity is scaled up; frictions below are divided by the same factor
        # to keep effective surface behavior roughly constant
        gravity_scale = 4.5

        # get default set of parameters
        sim_params = gymapi.SimParams()
        # args = gymutil.parse_arguments()
        sim_params.physx.solver_type = 1
        sim_params.physx.num_position_iterations = 2
        sim_params.physx.num_velocity_iterations = 1
        sim_params.physx.num_threads = 0
        sim_params.physx.use_gpu = True
        sim_params.use_gpu_pipeline = False
        sim_params.substeps = 2
        sim_params.dt = 1.0 / 30.0
        # setting up the Z-up axis
        sim_params.up_axis = gymapi.UP_AXIS_Z
        sim_params.gravity = gymapi.Vec3(0.0, 0.0, -9.8 * gravity_scale)

        # sim_params.physx.max_gpu_contact_pairs = sim_params.physx.max_gpu_contact_pairs*20

        # configure the ground plane
        plane_params = gymapi.PlaneParams()
        plane_params.normal = gymapi.Vec3(0, 0, 1) # z-up!
        plane_params.distance = 1
        plane_params.static_friction = 1 / gravity_scale
        plane_params.dynamic_friction = 1 / gravity_scale
        plane_params.restitution = 0.1

        # set up the env grid
        num_envs = 1
        spacing = 20
        env_lower = gymapi.Vec3(-spacing, -spacing, -spacing)
        env_upper = gymapi.Vec3(spacing, spacing, spacing)

        # Attractor setup (template; per-env attractors are built in the loop
        # below with these same values)
        attractor_properties = gymapi.AttractorProperties()
        attractor_properties.stiffness = 5e10
        attractor_properties.damping = 0
        attractor_properties.axes = gymapi.AXIS_ROTATION
        attractor_pose = gymapi.Transform()
        attractor_pose.p = gymapi.Vec3(0, 0.0, 0.0)
        attractor_pose.r = gymapi.Quat(0,1,0,1)

        # create procedural asset
        asset_options = gymapi.AssetOptions()
        asset_options.density = 1.0
        asset_options.fix_base_link = False
        asset_options.linear_damping = 0.1
        # options for the static (fixed-base) building blocks
        asset_options1 = gymapi.AssetOptions()
        asset_options1.density = 0.02
        asset_options1.fix_base_link = True
        asset_options1.linear_damping = 0.1

        color_red = gymapi.Vec3(1,0,0)
        color_green = gymapi.Vec3(0,1,0)

        # load PCG asset
        asset_root = "./assets"
        pt_gray_cube = "PCG/gray_cube.urdf"
        pt_blue_cube = "PCG/blue_cube.urdf"
        pt_red_cube = "PCG/red_cube.urdf"
        pt_white_cube = "PCG/white_cube.urdf"
        pt_yellow_cube = "PCG/yellow_cube.urdf"
        pt_blue_ramp = "PCG/blue_ramp.urdf"
        pt_red_ramp = "PCG/red_ramp.urdf"
        pt_white_ramp = "PCG/white_ramp.urdf"
        pt_yellow_ramp = "PCG/yellow_ramp.urdf"
        pt_blue_corner = "PCG/blue_corner.urdf"
        pt_red_corner = "PCG/red_corner.urdf"
        pt_white_corner = "PCG/white_corner.urdf"
        pt_yellow_corner = "PCG/yellow_corner.urdf"

        # camera sensor properties
        camera_properties = gymapi.CameraProperties()
        camera_properties.width = WIDTH
        camera_properties.height = HEIGHT
        camera_properties.enable_tensors = True

        # rigid shape material properties
        # shape_props  -> ramps, shape_props1 -> corners, shape_props2 -> capsules
        shape_props = [gymapi.RigidShapeProperties()]
        # NOTE(review): negative friction values below look suspicious; PhysX
        # normally expects non-negative coefficients — confirm this is an
        # intentional engine-specific trick and not a sign error.
        shape_props[0].friction = -1 / gravity_scale
        shape_props[0].rolling_friction = 1 / gravity_scale
        shape_props[0].torsion_friction = 1 / gravity_scale
        shape_props[0].compliance = 0
        shape_props[0].restitution = 0
        shape_props1 = [gymapi.RigidShapeProperties()]
        shape_props1[0].friction = 0.2 / gravity_scale
        shape_props1[0].rolling_friction = -1 / gravity_scale
        shape_props1[0].torsion_friction = -1 / gravity_scale
        shape_props1[0].compliance = 0
        shape_props1[0].restitution = 0
        shape_props2 = [gymapi.RigidShapeProperties()]
        shape_props2[0].friction = 1 / gravity_scale
        shape_props2[0].rolling_friction = -1 / gravity_scale
        shape_props2[0].torsion_friction = -1 / gravity_scale
        shape_props2[0].compliance = 0
        shape_props2[0].restitution = 0

        # single world camera image
        self.camera_sensor_image = np.zeros((HEIGHT,WIDTH, 3), np.uint8)
        # human control
        self.interaction = 0

        # proximity threshold for reward calculation
        self.proximity_threshold = 1.5
        self.max_steps = 1000

        self.show_action = False

        # per-env reward/done/observation buffers (and backups for pause/resume)
        self.all_rews = np.zeros(self.num_matrix_envs, dtype=np.float32)
        # NOTE(review): np.bool8 is a deprecated alias (removed in NumPy 2.0);
        # np.bool_ is the supported spelling.
        self.all_dones = np.zeros(self.num_matrix_envs, dtype=np.bool8)
        self.vec_obs = np.zeros((self.num_matrix_envs, N_CHANNELS, HEIGHT, WIDTH), dtype=np.uint8)
        self.all_rews_backup = self.all_rews.copy()
        self.all_dones_backup = self.all_dones.copy()

        # reward fifo of max length 100
        # NOTE(review): comment says 100 but maxlen is 300 — confirm intent.
        self.reward_fifo = deque(maxlen=300)
        self.best_performance = 0
        self.reward_archive = []
        self.rew_avg = []
        for i in range(self.num_matrix_envs):
            self.reward_archive.append(deque(maxlen=100))
            self.rew_avg.append(0)

        # scratch buffers for RGBA/RGB camera frames
        self.obs_buffer4 = np.zeros((HEIGHT,WIDTH, 4), np.uint8)
        self.obs_buffer3 = np.zeros((HEIGHT,WIDTH, 3), np.uint8)
        # self.obs_buffer3t = np.zeros((HEIGHT,WIDTH, 3), np.uint8)

        '''
        generate WFC maps
        '''
        # 9x9 wave-function-collapse worker; the initial wave comes from file
        self.PCGWorker_ = PCGWorker(9,9)

        seed = self.PCGWorker_.from_file(filename = seed_pth)
        # seed0 = self.PCGWorker_.from_file(filename = "blank_wave.json")
        # seed = PCGWorker_.mutate(seed_,81)
        # PCGWorker_.to_file(seed,filename = "seed.json")

        # WFC map workspace: one seed per env, plus a bounded archive of
        # distinct seeds seen so far
        self.seeds = [seed]
        self.seeds_height_maps = []
        self.seeds_collection = deque(maxlen=64)

        # every env starts from a deep copy of the same initial seed
        for i in range(0,self.num_matrix_envs-1):
            # seed_ = PCGWorker_.mutate(self.seeds[-1],81)
            seed_ = copy.deepcopy(seed)
            self.append_seed(seed_)

        # for i in range(0,3):
        #     # seed_ = PCGWorker_.mutate(self.seeds[-1],81)
        #     seed_ = copy.deepcopy(seed)
        #     self.append_seed(seed_)
        # for i in range(3,15):
        #     seed_ = copy.deepcopy(seed0)
        #     self.append_seed(seed_)

        # print("seeds_collection : ",len(self.seeds_collection))

        # height maps
        for i in range(self.num_matrix_envs):
            height_map = self.PCGWorker_.render(self.seeds[i], output=True, border=False)
            self.seeds_height_maps.append(copy.deepcopy(height_map))

        # print("seeds generated : ",type(seeds))

        self.gym = gymapi.acquire_gym()

        self.sim = self.gym.create_sim(compute_device_id, graphics_device_id, gymapi.SIM_PHYSX, sim_params)

        # NOTE(review): the two labels below are swapped relative to the
        # values printed (compute id is printed under the graphics label and
        # vice versa) — fix when behavior changes are permitted.
        print("graphics_device_id : ",compute_device_id)
        print("compute_device_id : ",graphics_device_id)

        # create the ground plane
        self.gym.add_ground(self.sim, plane_params)

        # capsule used for both the agent and the food actor
        capsule_asset = self.gym.create_capsule(self.sim, 1, 1, asset_options)
        # capsule_asset = self.gym.create_box(self.sim, 1, 1, 1, asset_options)

        asset_gray_cube = self.gym.load_asset(self.sim, asset_root, pt_gray_cube,asset_options1)
        asset_blue_cube = self.gym.load_asset(self.sim, asset_root, pt_blue_cube,asset_options1)
        asset_red_cube = self.gym.load_asset(self.sim, asset_root, pt_red_cube,asset_options1)
        asset_white_cube = self.gym.load_asset(self.sim, asset_root, pt_white_cube,asset_options1)
        asset_yellow_cube = self.gym.load_asset(self.sim, asset_root, pt_yellow_cube,asset_options1)
        asset_blue_ramp = self.gym.load_asset(self.sim, asset_root, pt_blue_ramp,asset_options1)
        asset_red_ramp = self.gym.load_asset(self.sim, asset_root, pt_red_ramp,asset_options1)
        asset_white_ramp = self.gym.load_asset(self.sim, asset_root, pt_white_ramp,asset_options1)
        asset_yellow_ramp = self.gym.load_asset(self.sim, asset_root, pt_yellow_ramp,asset_options1)
        asset_blue_corner = self.gym.load_asset(self.sim, asset_root, pt_blue_corner,asset_options1)
        asset_red_corner = self.gym.load_asset(self.sim, asset_root, pt_red_corner,asset_options1)
        asset_white_corner = self.gym.load_asset(self.sim, asset_root, pt_white_corner,asset_options1)
        asset_yellow_corner = self.gym.load_asset(self.sim, asset_root, pt_yellow_corner,asset_options1)

        if not self.headless:
            cam_props = gymapi.CameraProperties()
            self.viewer = self.gym.create_viewer(self.sim, cam_props)

        self.envs = []
        self.camera_handles = []
        self.actor_handles = []
        self.food_handles = []
        self.attractor_handles = []

        '''
        actor handles for all building blocks
        [
            env1:(l1_active_id,[l1_handles]),
            env2:(l1_active_id,[l1_handles]),
            ...
        ]
        '''
        self.gray_cube_handles = []         
        self.blue_cube_handles = []         
        self.blue_corner_handles = []       
        self.blue_ramp_handles = []             
        self.red_cube_handles = []
        self.red_corner_handles = []
        self.red_ramp_handles = []
        self.white_cube_handles = []
        self.white_corner_handles = []
        self.white_ramp_handles = []
        self.yellow_cube_handles = []
        self.yellow_corner_handles = []
        self.yellow_ramp_handles = []

        self.actor_scales = []

        # pre-allocated actor pool sizes per block type per env
        self.cube_capacity_per_level = 9*9
        self.corner_capacity_per_level = 9*3        # to be tested
        self.ramp_capacity_per_level = 9*3

        # scale applied to "hidden" pooled blocks so they are invisible /
        # physically irrelevant until placed by set_landscape_
        self.vanish_scale = 0.000001

        '''
        0. build all envs
        '''
        # for n in range(self.num_matrix_envs):

        '''
        1. build all actors
        '''
        for i in range(0,self.num_matrix_envs):
            # create environments
            env_ = self.gym.create_env(self.sim, env_lower, env_upper, 4)
            self.envs.append(env_)

            pose = gymapi.Transform()
            pose.p = gymapi.Vec3(0, 0, 0)
            pose.r = gymapi.Quat(0,0,0,1)
            l1_cubes = []
            l2_cubes = []
            l3_cubes = []
            l4_cubes = []
            l5_cubes = []
            l2_corners = []
            l3_corners = []
            l4_corners = []
            l5_corners = []
            l2_ramps = []
            l3_ramps = []
            l4_ramps = []
            l5_ramps = []
            # CUBE: create the full pool, hidden at vanish scale
            for j in range(0,self.cube_capacity_per_level):
                gc1 = self.gym.create_actor(self.envs[i], asset_gray_cube, pose, "gray_cube" + str(i) + str(j), i, 0)
                gc2 = self.gym.create_actor(self.envs[i], asset_blue_cube, pose, "blue_cube" + str(i) + str(j), i, 0)
                gc3 = self.gym.create_actor(self.envs[i], asset_yellow_cube, pose, "yellow_cube" + str(i) + str(j), i, 0)
                gc4 = self.gym.create_actor(self.envs[i], asset_red_cube, pose, "red_cube" + str(i) + str(j), i, 0)
                gc5 = self.gym.create_actor(self.envs[i], asset_white_cube, pose, "white_cube" + str(i) + str(j), i, 0)
                l1_cubes.append(gc1)
                l2_cubes.append(gc2)
                l3_cubes.append(gc3)
                l4_cubes.append(gc4)
                l5_cubes.append(gc5)
                self.gym.set_actor_scale(self.envs[i], gc1, self.vanish_scale)
                self.gym.set_actor_scale(self.envs[i], gc2, self.vanish_scale)
                self.gym.set_actor_scale(self.envs[i], gc3, self.vanish_scale)
                self.gym.set_actor_scale(self.envs[i], gc4, self.vanish_scale)
                self.gym.set_actor_scale(self.envs[i], gc5, self.vanish_scale)
            # CORNER
            for j in range(0,self.corner_capacity_per_level):
                gc2 = self.gym.create_actor(self.envs[i], asset_blue_corner, pose, "blue_corner" + str(i) + str(j), i, 0)
                gc3 = self.gym.create_actor(self.envs[i], asset_yellow_corner, pose, "yellow_corner" + str(i) + str(j), i, 0)
                gc4 = self.gym.create_actor(self.envs[i], asset_red_corner, pose, "red_corner" + str(i) + str(j), i, 0)
                gc5 = self.gym.create_actor(self.envs[i], asset_white_corner, pose, "white_corner" + str(i) + str(j), i, 0)
                l2_corners.append(gc2)
                l3_corners.append(gc3)
                l4_corners.append(gc4)
                l5_corners.append(gc5)
                self.gym.set_actor_scale(self.envs[i], gc2, self.vanish_scale)
                self.gym.set_actor_scale(self.envs[i], gc3, self.vanish_scale)
                self.gym.set_actor_scale(self.envs[i], gc4, self.vanish_scale)
                self.gym.set_actor_scale(self.envs[i], gc5, self.vanish_scale)
                self.gym.set_actor_rigid_shape_properties(self.envs[i], gc2, shape_props1)
                self.gym.set_actor_rigid_shape_properties(self.envs[i], gc3, shape_props1)
                self.gym.set_actor_rigid_shape_properties(self.envs[i], gc4, shape_props1)
                self.gym.set_actor_rigid_shape_properties(self.envs[i], gc5, shape_props1)
            # RAMP
            for j in range(0,self.ramp_capacity_per_level):
                gc2 = self.gym.create_actor(self.envs[i], asset_blue_ramp, pose, "blue_ramp" + str(i) + str(j), i, 0)
                gc3 = self.gym.create_actor(self.envs[i], asset_yellow_ramp, pose, "yellow_ramp" + str(i) + str(j), i, 0)
                gc4 = self.gym.create_actor(self.envs[i], asset_red_ramp, pose, "red_ramp" + str(i) + str(j), i, 0)
                gc5 = self.gym.create_actor(self.envs[i], asset_white_ramp, pose, "white_ramp" + str(i) + str(j), i, 0)
                l2_ramps.append(gc2)
                l3_ramps.append(gc3)
                l4_ramps.append(gc4)
                l5_ramps.append(gc5)
                self.gym.set_actor_scale(self.envs[i], gc2, self.vanish_scale)
                self.gym.set_actor_scale(self.envs[i], gc3, self.vanish_scale)
                self.gym.set_actor_scale(self.envs[i], gc4, self.vanish_scale)
                self.gym.set_actor_scale(self.envs[i], gc5, self.vanish_scale)
                self.gym.set_actor_rigid_shape_properties(self.envs[i], gc2, shape_props)
                self.gym.set_actor_rigid_shape_properties(self.envs[i], gc3, shape_props)
                self.gym.set_actor_rigid_shape_properties(self.envs[i], gc4, shape_props)
                self.gym.set_actor_rigid_shape_properties(self.envs[i], gc5, shape_props)
            # store actors as (active-pointer, handle-list) pairs per env
            self.gray_cube_handles.append(([0],l1_cubes))
            self.blue_cube_handles.append(([0],l2_cubes))
            self.blue_corner_handles.append(([0],l2_corners))
            self.blue_ramp_handles.append(([0],l2_ramps))
            self.yellow_cube_handles.append(([0],l3_cubes))
            self.yellow_corner_handles.append(([0],l3_corners))
            self.yellow_ramp_handles.append(([0],l3_ramps))
            self.red_cube_handles.append(([0],l4_cubes))
            self.red_corner_handles.append(([0],l4_corners))
            self.red_ramp_handles.append(([0],l4_ramps))
            self.white_cube_handles.append(([0],l5_cubes))
            self.white_corner_handles.append(([0],l5_corners))
            self.white_ramp_handles.append(([0],l5_ramps))

            # Agent and Food and Camera

            # generate random x,y coordinates in range [0,18] for the actor
            # NOTE(review): comment says [0,18] but the sampled range is [0,16].
            x = random.uniform(0,16)
            y = random.uniform(0,16)
            # random initial heading in multiples of 45 degrees (0.25*pi steps)
            facing_ = 0.25 * (float)(random.randint(0,8))
            r_ = gymapi.Quat(0,1,0,1) * gymapi.Quat.from_axis_angle(gymapi.Vec3(1, 0, 0), facing_*math.pi)

            # create capsule_asset actor in the environment
            pose = gymapi.Transform()
            pose.p = gymapi.Vec3(x, y, self.initial_height)
            pose.r = gymapi.Quat(0,1,0,1)
            cap_handle = self.gym.create_actor(self.envs[i], capsule_asset, pose, 'capsule_asset', i, 0)
            self.gym.set_actor_rigid_shape_properties(self.envs[i], cap_handle, shape_props2)
            self.gym.set_actor_scale(self.envs[i], cap_handle, 0.4)
            self.actor_handles.append(cap_handle)
            self.gym.set_rigid_body_color(self.envs[i], cap_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, color_red)

            # set random position for food
            x = random.uniform(0,16)
            y = random.uniform(0,16)
            # create capsule_asset food in the environment
            pose = gymapi.Transform()
            pose.p = gymapi.Vec3(x, y, self.initial_height)
            pose.r = gymapi.Quat(0,1,0,1)
            food_handle = self.gym.create_actor(self.envs[i], capsule_asset, pose, 'food_asset', i, 0)
            self.gym.set_actor_rigid_shape_properties(self.envs[i], food_handle, shape_props2)
            self.gym.set_actor_scale(self.envs[i], food_handle, 0.6)
            self.food_handles.append(food_handle)
            self.gym.set_rigid_body_color(self.envs[i], food_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, color_green)

            # rotation-only attractor that pins the agent's orientation
            att_pose = gymapi.Transform()
            att_pose.p = gymapi.Vec3(x,y, self.initial_height)
            att_pose.r = r_
            attractor_properties_ = gymapi.AttractorProperties()
            attractor_properties_.stiffness = 5e10
            attractor_properties_.damping = 0
            attractor_properties_.axes = gymapi.AXIS_ROTATION
            attractor_properties_.target = att_pose
            attractor_properties_.rigid_handle = cap_handle
            attractor_handle_ = self.gym.create_rigid_body_attractor(self.envs[i], attractor_properties_)

            # rotation-only attractor pinning the food's orientation
            attractor_properties_food = gymapi.AttractorProperties()
            attractor_properties_food.stiffness = 5e10
            attractor_properties_food.damping = 0
            attractor_properties_food.axes = gymapi.AXIS_ROTATION
            attractor_properties_food.target = pose
            attractor_properties_food.rigid_handle = food_handle
            attractor_handle_food = self.gym.create_rigid_body_attractor(self.envs[i], attractor_properties_food)
            
            # first-person camera attached behind the agent body
            h1 = self.gym.create_camera_sensor(self.envs[i], camera_properties)
            camera_offset = gymapi.Vec3(-2, 0, 0)
            camera_rotation = gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 0, 1), 0.4*math.pi)
            body_handle = self.gym.get_actor_rigid_body_handle(self.envs[i], cap_handle, 0)
            self.gym.attach_camera_to_body(h1, self.envs[i], body_handle, gymapi.Transform(camera_offset, camera_rotation), gymapi.FOLLOW_TRANSFORM)
            self.camera_handles.append(h1)
            self.attractor_handles.append(attractor_handle_)

        '''
        2. arrange blocks in the environment
        '''
        for n in range(self.num_matrix_envs):
            self.set_landscape_(n,self.seeds[n])

        # a must-have operation 
        self.gym.prepare_sim(self.sim)

        # step count for each environment
        self._step_counts = np.zeros(self.num_matrix_envs)
        self._step_counts_backup = self._step_counts.copy()
        # Used for logging/debugging
        self._episode_rewards = np.zeros(self.num_matrix_envs)

        if not self.headless:
            cam_pos = gymapi.Vec3(0.0, -15.0, 15.0)
            cam_target = gymapi.Vec3(4, 4, 0)
            self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)

        # time test
        self.start_time = time.time()
        self.last_frame_cnt = 0
        self.step_count = 0

        # Create helper geometry used for visualization
        # Create an wireframe axis
        self.axes_geom = gymutil.AxesGeometry(2)

        # per-env heading, in units of ``facing_step`` * pi
        self.facings = []
        for i in range(self.num_matrix_envs):
            self.facings.append(0.0)
        self.facing_step = 0.25
        self.facings_backup = copy.deepcopy(self.facings)

        # local coordinate system
        self.verts = np.empty((3, 1), gymapi.Vec3.dtype)
        self.verts[0][0] = (1, 0, 0)
        self.verts[1][0] = (0, 1, 0)
        self.verts[2][0] = (0, 0, 1)

        # store initial status for all envs
        self.initial_state = np.copy(self.gym.get_sim_rigid_body_states(self.sim, gymapi.STATE_ALL))

        # per-env rigid body state snapshots (for selective resets)
        self.sim_status = []
        for i in range(self.num_matrix_envs):
            env_status = np.copy(self.gym.get_env_rigid_body_states(self.envs[i], gymapi.STATE_ALL))
            self.sim_status.append(env_status)

    @property
    def num_envs(self):
        """Number of parallel sub-environments in the matrix."""
        return self.num_matrix_envs

    @property
    def action_space(self):
        """Discrete action space (shared by every sub-environment)."""
        return self._action_space

    @property
    def observation_space(self):
        """uint8 image observation space of shape (N_CHANNELS, HEIGHT, WIDTH)."""
        return self._obs_space

    @property
    def step_counts(self):
        """Per-env step counters, returned as a defensive copy."""
        return self._step_counts.copy()

    @property
    def episode_rewards(self):
        """Per-env accumulated episode rewards, returned as a defensive copy."""
        return self._episode_rewards.copy()

    @property
    def auto_reset_after_done(self):
        # NOTE(review): ``_auto_reset_after_done`` is never assigned in the
        # visible ``__init__`` — presumably set elsewhere in this class;
        # confirm before relying on this property.
        return self._auto_reset_after_done

    def append_seed(self, seed):
        # 1. add new seed
        self.seeds.append(seed)
        # 2. add new collection
        landscape_new = True
        for s in self.seeds_collection:
            if s == seed:
                landscape_new = False
                break
        if landscape_new:
            self.seeds_collection.append(copy.deepcopy(seed))

    def in_collection(self,seed):
        for s in self.seeds_collection:
            if s == seed:
                return True
        return False

    def generate_decendent(self):

        new_seed = self.PCGWorker_.mutate(self.seeds_collection[-1])
        while new_seed == self.seeds_collection[-1] or self.in_collection(new_seed)==True:
            new_seed = self.PCGWorker_.mutate(self.seeds_collection[-1])

        # update collection
        self.seeds_collection.append(new_seed)

        # # convert len(self.seeds_collection) to a file name
        # file_name = str(len(self.seeds_collection)) + ".json"
        # self.PCGWorker_.to_file(new_seed, file_name)

        return copy.deepcopy(new_seed)

    # used for ray parallel processing
    def accept_decendent(self, new_seed):
        """Archive a descendant seed produced by an external (ray) worker."""
        # update collection
        self.seeds_collection.append(new_seed)

    def update_collection(self, seeds_collection):
        """Replace the whole seed archive with a deep copy of *seeds_collection*."""
        self.seeds_collection = copy.deepcopy(seeds_collection)

    def seed_distribution(self):
        
        distribution_table = [
            [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1],
            [0,0,0,0,0,1,1,1,1,1,2,2,2,2,2,2],
            [0,0,0,1,1,1,2,2,2,2,2,3,3,3,3,3],
            [0,0,0,1,1,1,2,2,2,3,3,3,4,4,4,4],
            [0,0,1,1,2,2,3,3,3,4,4,4,5,5,5,5],
            [0,1,1,2,2,3,3,4,4,5,5,5,6,6,6,6],
            [0,1,2,2,3,3,4,4,5,5,6,6,7,7,7,7],
            [0,1,2,3,4,4,5,5,6,6,7,7,8,8,8,8],
            [0,1,2,3,4,5,6,6,7,7,8,8,9,9,9,9],
            [0,1,2,3,4,5,6,7,8,8,9,9,10,10,10,10],
            [0,1,2,3,4,5,6,7,8,9,10,10,11,11,11,11],
            [0,1,2,3,4,5,6,7,8,9,10,11,12,12,12,12]
        ]
        collection_size = len(self.seeds_collection)

        if collection_size < 13:
            distribution = copy.deepcopy(distribution_table[collection_size-1])
            arrangement = []
            for i in distribution:
                arrangement.append(-(collection_size-i))
        else:
            distribution = copy.deepcopy(distribution_table[12])
            arrangement = []
            for i in distribution:
                arrangement.append((collection_size-13)-(collection_size-i))
        collection_ = []
        for i in arrangement:
            collection_.append(copy.deepcopy(self.seeds_collection[i]))

        # alter landscape
        for i in range(len(collection_)):
            self.set_landscape(i, collection_[i], update_collection = False)

    def revert_map_collection(self):
        """Drop the most recently archived seed (undo the last accept/mutation)."""
        self.seeds_collection.pop()

    # site reservation
    def pause(self):
        """Snapshot the full sim and bookkeeping state so ``resume`` can restore it.

        Captures rigid-body states, per-actor scales, seeds/height maps,
        facing angles, step/reward/done buffers, attractor targets and the
        reward FIFO into ``*_backup`` attributes (and ``self.initial_state``).
        """
        # save physical states
        self.initial_state = np.copy(self.gym.get_sim_rigid_body_states(self.sim, gymapi.STATE_ALL))
        # save scales (per env, in actor-index order, so resume can re-apply them)
        self.actor_scales.clear()
        for i in range(self.num_matrix_envs):
            env_actor_scales = []
            env_actor_count = self.gym.get_actor_count(self.envs[i])
            for j in range(env_actor_count):
                actor_handle = self.gym.get_actor_handle(self.envs[i], j)
                actor_scale = self.gym.get_actor_scale(self.envs[i], actor_handle)
                env_actor_scales.append(actor_scale)
            self.actor_scales.append(copy.deepcopy(env_actor_scales))
        # save seeds and height_maps
        self.seeds_backup = copy.deepcopy(self.seeds)
        self.height_maps_backup = copy.deepcopy(self.seeds_height_maps)
        # save internal states
        self.facings_backup = copy.deepcopy(self.facings)
        self._step_counts_backup = self._step_counts.copy()
        self.all_rews_backup = self.all_rews.copy()
        self.all_dones_backup = self.all_dones.copy()
        # save attractor states (only the agent attractors; food attractors
        # are not tracked in self.attractor_handles)
        self.attractor_states_backup = []
        for i in range(len(self.attractor_handles)):
            attractor_prop = self.gym.get_attractor_properties(self.envs[i], self.attractor_handles[i])
            self.attractor_states_backup.append(copy.deepcopy(attractor_prop.target))
        # backup reward_fifo
        self.reward_fifo_backup = copy.deepcopy(self.reward_fifo)

    # site recovery
    def resume(self):
        """Restore everything captured by ``pause`` and refresh observations.

        Mirror of ``pause``: re-applies rigid-body states, actor scales,
        seeds/height maps, internal counters, attractor targets, recomputes
        observations and finally restores the reward FIFO.
        """
        # recover physical states
        self.gym.set_sim_rigid_body_states(self.sim, self.initial_state, gymapi.STATE_ALL)
        # recover scales (same actor-index order used when saving)
        for i in range(self.num_matrix_envs):
            env_actor_count = self.gym.get_actor_count(self.envs[i])
            for j in range(env_actor_count):
                actor_handle = self.gym.get_actor_handle(self.envs[i], j)
                actor_scale = self.actor_scales[i][j]
                self.gym.set_actor_scale(self.envs[i], actor_handle, actor_scale)
        # recover seeds and height_maps
        self.seeds = copy.deepcopy(self.seeds_backup)
        self.seeds_height_maps = copy.deepcopy(self.height_maps_backup)
        # recover internal states
        self.facings = copy.deepcopy(self.facings_backup)
        self._step_counts = self._step_counts_backup.copy()
        self.all_rews = self.all_rews_backup.copy()
        self.all_dones = self.all_dones_backup.copy()
        # recover attractor states
        for i in range(len(self.attractor_handles)):
            self.gym.set_attractor_target(self.envs[i], self.attractor_handles[i], self.attractor_states_backup[i])
        self._compute_obs()
        # recover reward_fifo
        self.reward_fifo = copy.deepcopy(self.reward_fifo_backup)


    # reset wfc landscape
    def reset_landscape(self, env_id):
        """Return every building block of env *env_id* to the hidden pool.

        Each pooled block actor is teleported to the origin with identity
        rotation, its pool pointer (the ``[0]`` slot of the per-env handle
        record) is rewound to 0, and it is shrunk to ``self.vanish_scale`` so
        it no longer participates visually in the scene.  ``set_landscape_``
        can then re-place blocks from a fresh WFC seed.

        Refactored from 13 nearly identical unrolled stanzas into one shared
        helper; behavior is unchanged.

        Args:
            env_id: index of the sub-environment to clear.
        """
        # Target pose: origin, identity quaternion.
        pose = gymapi.Transform()
        pose.p = gymapi.Vec3(0, 0, 0)
        pose.r = gymapi.Quat(0, 0, 0, 1)

        def _park(handles, index, state):
            # Park one pooled actor: write the zeroed pose (STATE_POS applies
            # only the pose portion of the state array), rewind the pool
            # pointer and hide the actor at vanish scale.
            actor_handle = handles[env_id][1][index]
            self.gym.set_actor_rigid_body_states(self.envs[env_id], actor_handle, state, gymapi.STATE_POS)
            handles[env_id][0][0] = 0
            self.gym.set_actor_scale(self.envs[env_id], actor_handle, self.vanish_scale)

        cube_lists = (self.gray_cube_handles, self.blue_cube_handles,
                      self.yellow_cube_handles, self.red_cube_handles,
                      self.white_cube_handles)
        actor_state = None
        for i in range(self.cube_capacity_per_level):
            # As in the original code, the gray cube's rigid-body state array
            # is fetched as the template and its pose fields overwritten; the
            # same (filled) template is then applied to every cube color.
            template_handle = self.gray_cube_handles[env_id][1][i]
            actor_state = self.gym.get_actor_rigid_body_states(self.envs[env_id], template_handle, gymapi.STATE_ALL)
            actor_state['pose']['p'].fill((pose.p.x, pose.p.y, pose.p.z))
            actor_state['pose']['r'].fill((pose.r.x, pose.r.y, pose.r.z, pose.r.w))
            for handles in cube_lists:
                _park(handles, i, actor_state)

        # Corners and ramps reuse the last prepared template state, exactly
        # as the original unrolled code did (it leaked ``actor_state`` out of
        # the cube loop).
        corner_lists = (self.blue_corner_handles, self.yellow_corner_handles,
                        self.red_corner_handles, self.white_corner_handles)
        for i in range(self.corner_capacity_per_level):
            for handles in corner_lists:
                _park(handles, i, actor_state)

        ramp_lists = (self.blue_ramp_handles, self.yellow_ramp_handles,
                      self.red_ramp_handles, self.white_ramp_handles)
        for i in range(self.ramp_capacity_per_level):
            for handles in ramp_lists:
                _park(handles, i, actor_state)
    # reset wfc landscape interface
    def reset_all_landscape(self):
        for env_id in range(0,self.num_matrix_envs):
            self.reset_landscape(env_id)

    # move block from stash to field
    def arrange_block(self, env_id, block_id, pose):
        """Move the next stashed actor of type ``block_id`` into the field at ``pose``.

        Each handle table is ``[[next_free_index], [actor_handles...]]``; the
        pointer in slot ``[0][0]`` is advanced after placement and clamped to
        ``capacity - 1`` so it never runs past the stash.  Cubes (ids 1-5) take
        only a position; corners (6-9) and ramps (10-13) also take the
        orientation from ``pose.r``.  Unknown ``block_id`` values are silently
        ignored, exactly like the original branch chain.

        Parameters
        ----------
        env_id : int
            Index of the target environment.
        block_id : int
            1-5 cubes (gray/blue/yellow/red/white), 6-9 corners
            (blue/yellow/red/white), 10-13 ramps (blue/yellow/red/white).
        pose : gymapi.Transform
            Target pose; ``pose.p`` is always applied, ``pose.r`` only for
            corners and ramps.
        """
        # (handle table, stash capacity, whether orientation is applied)
        catalog = {
            1:  (self.gray_cube_handles,     self.cube_capacity_per_level,   False),
            2:  (self.blue_cube_handles,     self.cube_capacity_per_level,   False),
            3:  (self.yellow_cube_handles,   self.cube_capacity_per_level,   False),
            4:  (self.red_cube_handles,      self.cube_capacity_per_level,   False),
            5:  (self.white_cube_handles,    self.cube_capacity_per_level,   False),
            6:  (self.blue_corner_handles,   self.corner_capacity_per_level, True),
            7:  (self.yellow_corner_handles, self.corner_capacity_per_level, True),
            8:  (self.red_corner_handles,    self.corner_capacity_per_level, True),
            9:  (self.white_corner_handles,  self.corner_capacity_per_level, True),
            10: (self.blue_ramp_handles,     self.ramp_capacity_per_level,   True),
            11: (self.yellow_ramp_handles,   self.ramp_capacity_per_level,   True),
            12: (self.red_ramp_handles,      self.ramp_capacity_per_level,   True),
            13: (self.white_ramp_handles,    self.ramp_capacity_per_level,   True),
        }
        if block_id not in catalog:
            # unknown block ids fall through with no effect
            return
        handles, capacity, apply_rotation = catalog[block_id]

        available_block_id = handles[env_id][0][0]
        actor_handle = handles[env_id][1][available_block_id]
        actor_state = self.gym.get_actor_rigid_body_states(self.envs[env_id], actor_handle, gymapi.STATE_ALL)
        actor_state['pose']['p'].fill((pose.p.x, pose.p.y, pose.p.z))
        if apply_rotation:
            # corners and ramps carry a meaningful attitude; cubes do not
            actor_state['pose']['r'].fill((pose.r.x, pose.r.y, pose.r.z, pose.r.w))
        self.gym.set_actor_rigid_body_states(self.envs[env_id], actor_handle, actor_state, gymapi.STATE_POS)
        # advance the free-block pointer, clamped so it never exceeds capacity-1
        handles[env_id][0][0] = min(capacity - 1, handles[env_id][0][0] + 1)
        # restore full scale (blocks are shrunk to vanish_scale while stashed)
        self.gym.set_actor_scale(self.envs[env_id], actor_handle, 1)

    # set wfc landscape
    def set_landscape_(self, env_id, seed_):
        """Instantiate the 9x9 WFC tile grid ``seed_`` as actors in env ``env_id``.

        Every cell gets a gray base cube (block id 1) at z=0.  Tile semantics:

        * tiles 2-5  : stack 1-4 colored cubes (block ids 2..5) on the base;
        * tiles 6-9  : stack 0-3 cubes, then a corner piece (id = tile) on top;
        * tiles 10-13: stack 0-3 cubes, then a ramp piece (id = tile) on top.

        ``rot`` (0-3) selects the quarter-turn attitude of the corner/ramp
        piece; any other rot value leaves the pose untouched (the piece is
        still placed), matching the original fallthrough behavior.
        """
        interval_xy = 2
        interval_z = 2*0.7

        # ramp and corner attitudes (quarter turns around the local up axis)
        ramp_rot0 = gymapi.Quat.from_axis_angle(gymapi.Vec3(1, 0, 0), 0.5*math.pi) * gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 1, 0), 0*math.pi)
        ramp_rot1 = gymapi.Quat.from_axis_angle(gymapi.Vec3(1, 0, 0), 0.5*math.pi) * gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 1, 0), 0.5*math.pi)
        ramp_rot2 = gymapi.Quat.from_axis_angle(gymapi.Vec3(1, 0, 0), 0.5*math.pi) * gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 1, 0), 1*math.pi)
        ramp_rot3 = gymapi.Quat.from_axis_angle(gymapi.Vec3(1, 0, 0), 0.5*math.pi) * gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 1, 0), 1.5*math.pi)
        corner_rot0 = gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 1, 0), 0.5*math.pi) * gymapi.Quat.from_axis_angle(gymapi.Vec3(1, 0, 0), 0*math.pi)
        corner_rot1 = gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 1, 0), 0.5*math.pi) * gymapi.Quat.from_axis_angle(gymapi.Vec3(1, 0, 0), 0.5*math.pi)
        corner_rot2 = gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 1, 0), 0.5*math.pi) * gymapi.Quat.from_axis_angle(gymapi.Vec3(1, 0, 0), 1*math.pi)
        corner_rot3 = gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 1, 0), 0.5*math.pi) * gymapi.Quat.from_axis_angle(gymapi.Vec3(1, 0, 0), 1.5*math.pi)

        # rot value -> attitude; note corners use the original 2,1,0,3 ordering
        corner_att = [corner_rot2, corner_rot1, corner_rot0, corner_rot3]
        ramp_att = [ramp_rot0, ramp_rot1, ramp_rot2, ramp_rot3]

        # create actors based on WFC seed
        for i in range(0, 9):
            for j in range(0, 9):
                tile_ = seed_.wave_oriented[j*9+i][0][0]
                rot = seed_.wave_oriented[j*9+i][0][1]
                pose_ij = gymapi.Transform()
                pose_ij.p = gymapi.Vec3(i*interval_xy, j*interval_xy, 0*interval_xy)
                pose_ij.r = gymapi.Quat(0, 0, 0, 1)
                # gray base cube under every cell
                self.arrange_block(env_id, 1, pose_ij)

                # decide how many plain cubes to stack and which topper to use
                if 2 <= tile_ <= 5:
                    n_cubes, topper_att = tile_ - 1, None
                elif 6 <= tile_ <= 9:
                    n_cubes, topper_att = tile_ - 6, corner_att
                elif 10 <= tile_ <= 13:
                    n_cubes, topper_att = tile_ - 10, ramp_att
                else:
                    # tile 1 (flat ground) or anything unexpected: base only
                    continue

                # cube stack: block ids 2,3,4,... at increasing heights
                for level in range(n_cubes):
                    pose_ij.p = gymapi.Vec3(i*interval_xy, j*interval_xy, (level+1)*interval_z)
                    self.arrange_block(env_id, 2 + level, pose_ij)

                if topper_att is not None:
                    if rot in (0, 1, 2, 3):
                        pose_ij.p = gymapi.Vec3(i*interval_xy, j*interval_xy, (n_cubes+1)*interval_z)
                        pose_ij.r = topper_att[rot]
                    # block id for the topper equals the tile id itself
                    self.arrange_block(env_id, tile_, pose_ij)

    # set wfc landscape interface
    def set_landscape(self, env_id, seed_, update_collection = True):

        # set seed
        self.seeds[env_id] = copy.deepcopy(seed_)

        # update collection of seeds
        if update_collection:
            landscape_new = True
            for s in self.seeds_collection:
                if s == seed_:
                    landscape_new = False
                    break
            if landscape_new:
                self.seeds_collection.append(copy.deepcopy(seed_))

            # print("collection : ", len(self.seeds_collection))
        
        # update height map
        height_map = self.PCGWorker_.render(seed_, output=True, border=False)
        self.seeds_height_maps[env_id] = copy.deepcopy(height_map)

        self.reset_landscape(env_id)
        self.set_landscape_(env_id,seed_)

    def _apply_action(self, env_id, action):
        """Apply one discrete action to a single environment.

        Actions 0-3 set the actor's planar velocity along its body-local axes
        while keeping the current vertical speed; actions 4-5 step the facing
        attractor counter-/clockwise.  Any other action value is a no-op
        (beyond incrementing the step counter), as before.

        Parameters
        ----------
        env_id : int
            Index of the target environment.
        action : int
            Discrete action in [0, N_DISCRETE_ACTIONS).
        """
        self._step_counts[env_id] += 1

        i = env_id

        '''
        apply action
        '''
        body_states = self.gym.get_actor_rigid_body_states(self.envs[i], self.actor_handles[i], gymapi.STATE_ALL)
        # transform global coordinate system to local coordinate system
        body_ = self.gym.get_actor_rigid_body_handle(self.envs[i], self.actor_handles[i], 0)
        body_t = self.gym.get_rigid_transform(self.envs[i], body_)
        body_t.p = gymapi.Vec3(0, 0, 0)
        verts_ = body_t.transform_points(self.verts)

        # maintain vertical speed
        z_spd = body_states['vel']['linear'][0][2]

        if action in (0, 1, 2, 3):
            # (sign, local-axis vertex index) for each translation action;
            # collapses the four formerly duplicated branches
            sign, vert = ((1, 1), (-1, 1), (-1, 2), (1, 2))[action]
            body_states['vel']['linear'].fill((sign * self.spd * verts_[vert][0][0],
                                               sign * self.spd * verts_[vert][0][1],
                                               z_spd))
            self.gym.set_actor_rigid_body_states(self.envs[i], self.actor_handles[i], body_states, gymapi.STATE_VEL)
        elif action in (4, 5):
            # rotate facing by one step: action 4 decreases, action 5 increases
            self.facings[i] += self.facing_step if action == 5 else -self.facing_step
            attractor_pose = gymapi.Transform()
            attractor_pose.p = gymapi.Vec3(0, 0.0, 0.0)
            attractor_pose.r = gymapi.Quat(0, 1, 0, 1) * gymapi.Quat.from_axis_angle(gymapi.Vec3(1, 0, 0), self.facings[i]*math.pi)
            self.gym.set_attractor_target(self.envs[i], self.attractor_handles[i], attractor_pose)

    def _apply_actions(self, all_actions):

        for i in range(self.num_envs):
            self._apply_action(env_id = i, action = all_actions[i])

    def _step_physics(self):
        """Advance the simulation one physics step and refresh graphics state."""
        gym_api = self.gym
        # integrate physics and block until the results are available
        gym_api.simulate(self.sim)
        gym_api.fetch_results(self.sim, True)
        # debug lines only exist when a viewer is attached
        if not self.headless:
            gym_api.clear_lines(self.viewer)
        gym_api.step_graphics(self.sim)

    def _compute_obs(self):
        """Fill ``self.vec_obs`` with the latest camera frames (CHW, RGB)."""
        self.gym.render_all_camera_sensors(self.sim)

        for env_idx in range(self.num_matrix_envs):
            raw = self.gym.get_camera_image(self.sim, self.envs[env_idx], self.camera_handles[env_idx], gymapi.IMAGE_COLOR)
            # RGBA buffer -> (H, W, 3) by dropping alpha, then HWC -> CHW
            rgb = raw.reshape((HEIGHT, WIDTH, 4))[:, :, :3]
            self.vec_obs[env_idx] = rgb.transpose(2, 0, 1).copy()

    def _compute_rews(self):
        """Sparse reward: 1.0 when the actor is within ``proximity_threshold``
        of the food, else 0.0.  Returns a copy of the reward array."""
        for env_idx in range(self.num_matrix_envs):
            actor_states = self.gym.get_actor_rigid_body_states(self.envs[env_idx], self.actor_handles[env_idx], gymapi.STATE_ALL)
            actor_pos = actor_states['pose']['p']
            food_states = self.gym.get_actor_rigid_body_states(self.envs[env_idx], self.food_handles[env_idx], gymapi.STATE_ALL)
            food_pos = food_states['pose']['p']

            # Euclidean actor-food distance in 3D
            ap = np.array([actor_pos[0][0], actor_pos[0][1], actor_pos[0][2]])
            fp = np.array([food_pos[0][0], food_pos[0][1], food_pos[0][2]])
            separation = np.linalg.norm(ap - fp)

            # sparse reward: success inside the proximity radius, zero otherwise
            self.all_rews[env_idx] = 1.0 if separation < self.proximity_threshold else 0.0

        return np.copy(self.all_rews)

    def _compute_dones(self, log_rew = True):
        """Compute per-environment done flags and (optionally) log rewards.

        An environment is done when the food was reached (reward == 1.0),
        when the actor or the food left the playable field, or when the step
        budget is exhausted.

        Parameters
        ----------
        log_rew : bool
            When True, append the final reward of each finished episode to
            the global FIFO and the per-environment archive, and refresh each
            environment's mean reward.

        Returns
        -------
        np.ndarray
            Copy of the done-flag array.
        """
        for i in range(self.num_matrix_envs):

            self.all_dones[i] = False

            # episode succeeded: food was reached this step
            if self.all_rews[i] == 1.0:
                self.all_dones[i] = True
                continue

            # map world positions into the 20*9-cell connectivity grid
            body_states_actor = self.gym.get_actor_rigid_body_states(self.envs[i], self.actor_handles[i], gymapi.STATE_ALL)
            actor_p = body_states_actor['pose']['p']
            actor_cnnc_x = ((actor_p[0][0]+1)/(9*2))*20*9
            actor_cnnc_y = ((actor_p[0][1]+1)/(9*2))*20*9

            body_states_food = self.gym.get_actor_rigid_body_states(self.envs[i], self.food_handles[i], gymapi.STATE_ALL)
            food_p = body_states_food['pose']['p']
            food_cnnc_x = ((food_p[0][0]+1)/(9*2))*20*9
            food_cnnc_y = ((food_p[0][1]+1)/(9*2))*20*9

            # actor left the playable field -> episode over
            if not (0 <= actor_cnnc_x < 20*9 and 0 <= actor_cnnc_y < 20*9):
                self.all_dones[i] = True
                continue

            # food left the playable field -> episode over
            if not (0 <= food_cnnc_x < 20*9 and 0 <= food_cnnc_y < 20*9):
                self.all_dones[i] = True
                continue

            # step budget exhausted
            if self._step_counts[i] > self.max_steps:
                self.all_dones[i] = True
                continue

        # log the rewards
        if log_rew:
            for i in range(self.num_matrix_envs):
                if self.all_dones[i]:
                    # NOTE: the original branched on success vs failure here,
                    # but both branches appended the same value -- collapsed
                    # into a single append for FIFO and archive alike
                    self.reward_fifo.append(self.all_rews[i])
                    self.reward_archive[i].append(self.all_rews[i])

                # refresh this environment's mean reward (0 when no episodes
                # have been archived yet), recomputed every call as before
                archive = self.reward_archive[i]
                self.rew_avg[i] = sum(archive) / len(archive) if len(archive) > 0 else 0

        return np.copy(self.all_dones)

    def _compute_infos(self):
        return [{} for _ in range(self.num_envs)]

    def _proximity(self, startx, starty, endx, endy):
        # calculate distance between actor and food
        ap = np.array([startx, starty])
        fp = np.array([endx, endy])
        dist = np.linalg.norm(ap - fp)

        if dist < self.proximity_threshold:
            return True
        else:
            return False

    def _path_achievable(self, env_id, startx, starty, endx, endy):

        return True

    def _spawn_height(self, i, x, y, height_offset):
        """Spawn z-coordinate above the height map of env ``i`` at world (x, y).

        Uses the same world->map-cell transform as the done computation, then
        converts the stored height-map value (apparently 0..200) to a world
        height. TODO(review): confirm the 0.7*2*(4*h/200+1) scaling against
        the terrain generator.
        """
        map_x = ((x + 1) / (9 * 2)) * 20 * 9
        map_y = ((y + 1) / (9 * 2)) * 20 * 9
        map_h = self.seeds_height_maps[i][int(map_y)][int(map_x)][0]
        return 0.7 * 2 * (4 * (map_h / 200.0) + 1) + height_offset

    def _reset_single_env(self, i, height_offset = 1):
        """Re-seed environment ``i``: new actor/food placement and facing.

        Samples actor and food positions uniformly in [0, 16)^2 until the
        pair is connected by an achievable path and is NOT already within the
        success radius, picks a random facing, teleports both bodies onto the
        terrain surface, re-targets the attractor, and zeroes velocities.
        """
        self._step_counts[i] = 0
        self.facings[i] = 0

        # rejection-sample positions: must be reachable and not trivially won
        while True:
            actorx = random.uniform(0, 16)
            actory = random.uniform(0, 16)
            foodx = random.uniform(0, 16)
            foody = random.uniform(0, 16)
            if (self._path_achievable(i, actorx, actory, foodx, foody)
                    and not self._proximity(actorx, actory, foodx, foody)):
                break

        # random facing in quarter-pi steps (0..2*pi inclusive at both ends)
        facing_ = 0.25 * float(random.randint(0, 8))
        self.facings[i] = facing_
        r_ = gymapi.Quat(0, 1, 0, 1) * gymapi.Quat.from_axis_angle(gymapi.Vec3(1, 0, 0), facing_ * math.pi)

        # teleport the actor onto the terrain surface
        body_states = self.gym.get_actor_rigid_body_states(self.envs[i], self.actor_handles[i], gymapi.STATE_ALL)
        body_states['pose']['p'].fill((actorx, actory, self._spawn_height(i, actorx, actory, height_offset)))
        self.gym.set_actor_rigid_body_states(self.envs[i], self.actor_handles[i], body_states, gymapi.STATE_POS)

        # re-target the attractor with the new facing
        attractor_pose = gymapi.Transform()
        attractor_pose.p = gymapi.Vec3(0, 0.0, 0.0)
        attractor_pose.r = r_
        self.gym.set_attractor_target(self.envs[i], self.attractor_handles[i], attractor_pose)

        # teleport the food onto the terrain surface
        body_states = self.gym.get_actor_rigid_body_states(self.envs[i], self.food_handles[i], gymapi.STATE_ALL)
        body_states['pose']['p'].fill((foodx, foody, self._spawn_height(i, foodx, foody, height_offset)))
        self.gym.set_actor_rigid_body_states(self.envs[i], self.food_handles[i], body_states, gymapi.STATE_POS)

        # zero linear velocities of both bodies
        for handle in (self.actor_handles[i], self.food_handles[i]):
            body_states = self.gym.get_actor_rigid_body_states(self.envs[i], handle, gymapi.STATE_ALL)
            body_states['vel']['linear'].fill((0, 0, 0))
            self.gym.set_actor_rigid_body_states(self.envs[i], handle, body_states, gymapi.STATE_VEL)

        self.all_dones[i] = False

    def reset(self, all_dones = None):
        """Reset environments.

        Args:
            all_dones: optional per-env boolean mask. When given, only the
                environments flagged True are re-seeded (partial/auto reset)
                and nothing is returned. When None, reward logging is cleared,
                every environment is re-seeded, and the initial observation
                batch is returned.

        Returns:
            ``np.ndarray`` observation batch when ``all_dones`` is None,
            otherwise None.
        """
        height_offset = 1

        if all_dones is not None:
            # partial reset: only envs that finished an episode
            for i in range(self.num_envs):
                if all_dones[i]:
                    self._reset_single_env(i, height_offset)
        else:
            # full reset: also clear the reward bookkeeping
            self.reward_fifo.clear()
            self.reward_fifo = deque(maxlen=300)
            self.best_performance = 0
            for i in range(self.num_envs):
                self._reset_single_env(i, height_offset)

            # return initial observations for all envs
            self._compute_obs()
            return np.copy(self.vec_obs)


    def step(self, actions: np.ndarray, physics_only = False):
        """Advance every environment by one action.

        Environments flagged done from the previous step are auto-reset
        before the new actions are applied.

        Args:
            actions: per-environment discrete action array.
            physics_only: unused; kept for interface compatibility.

        Returns:
            Tuple of (observations, rewards, dones, infos); the array
            members are copies.
        """
        # auto-reset any environment that finished last step
        self.reset(all_dones=self.all_dones)

        self._apply_actions(actions)
        self._step_physics()
        self._compute_obs()

        rewards = self._compute_rews()
        dones = self._compute_dones()
        infos = self._compute_infos()

        self.step_count += self.num_matrix_envs

        # once-per-second frame counter bookkeeping (FPS print disabled)
        now = time.time()
        if now - self.start_time > 1:
            self.last_frame_cnt = self.step_count
            self.start_time = time.time()

        return np.copy(self.vec_obs), np.copy(rewards), np.copy(dones), infos

    def render(self, real_time = False):
        """Draw the current frame into the viewer (no-op when headless).

        Args:
            real_time: when True, throttle rendering to simulation time.

        Returns:
            -2 when the viewer window has been closed by the user,
            otherwise None.
        """
        if self.headless:
            return

        if self.gym.query_viewer_has_closed(self.viewer):
            return -2

        # overlay each environment's camera frustum
        for idx in range(self.num_matrix_envs):
            cam_tf = self.gym.get_camera_transform(self.sim, self.envs[idx], self.camera_handles[idx])
            draw_camera(self.gym, self.viewer, self.envs[idx], cam_tf)

        # update the viewer
        self.gym.draw_viewer(self.viewer, self.sim, True)

        if real_time:
            self.gym.sync_frame_time(self.sim)

    def evaluate_sb3_on_single_env(self, model, seed, num_episodes = 100, render = False):
        """Evaluate ``model`` on a single generated map.

        Applies ``seed``'s landscape to every matrix environment, rolls
        episodes until at least ``num_episodes`` terminal rewards have been
        collected, and returns the mean terminal reward.

        Args:
            model: SB3-style policy exposing ``predict(obs, deterministic)``.
            seed: landscape seed applied to all environments.
            num_episodes: minimum number of finished episodes to average over.
            render: unused; kept for interface compatibility.

        Returns:
            Single-element list holding the mean terminal reward.
        """
        print("evaluating model performance on single map...")
        eval_start = time.time()

        # site reservation: suspend normal activity while evaluating
        self.pause()

        env_sr_log = []

        # 1. apply landscape "seed" to all envs, then reset the pool
        for i in range(self.num_matrix_envs):
            self.set_landscape(env_id=i, seed_=seed, update_collection=False)
        observation_ = self.reset()

        # 2. evaluate model until num_episodes of done signals are collected
        while len(self.reward_fifo) < num_episodes:
            action_, _ = model.predict(observation_, deterministic=False)
            observation_, _, _, _ = self.step(action_)

        # 3. mean terminal reward over the collected episodes
        env_sr_log.append(sum(self.reward_fifo) / len(self.reward_fifo))

        # site recover
        self.resume()

        eval_end = time.time()
        print("evaluation time: %.2f" % (eval_end - eval_start))
        print("evaluation completed")

        # plain floats only, so a shallow copy suffices (the original used
        # copy.deepcopy, but `copy` is never imported at the top of this file)
        return list(env_sr_log)

    def evaluate_sb3(self, model, num_episodes = 100, render = False):
        """Evaluate ``model`` over the last (up to) 13 maps of the collection.

        For each selected seed, applies its landscape to every matrix
        environment, rolls episodes until ``num_episodes`` terminal rewards
        have been collected, and records the mean terminal reward.

        Args:
            model: SB3-style policy exposing ``predict(obs, deterministic)``.
            num_episodes: minimum finished episodes to average over per map.
            render: when True, create a viewer and draw while evaluating.

        Returns:
            List of mean terminal rewards, one per evaluated map.
        """
        if render:
            cam_props = gymapi.CameraProperties()
            self.viewer = self.gym.create_viewer(self.sim, cam_props)
            cam_pos = gymapi.Vec3(0.0, -15.0, 15.0)
            cam_target = gymapi.Vec3(4, 4, 0)
            self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)

        print("evaluating model performance on map collection...")
        eval_start = time.time()

        # site reservation: suspend normal activity while evaluating
        self.pause()

        env_sr_log = []

        # traverse the last `num_maps` elements of seeds_collection
        # (hoisted out of the loop: it is loop-invariant)
        num_maps = min(len(self.seeds_collection), 13)
        for si in range(num_maps):
            # 1. apply landscape seed si (counting back from the end) to all envs
            seed_ = self.seeds_collection[len(self.seeds_collection) - (num_maps - si)]
            for i in range(self.num_matrix_envs):
                self.set_landscape(env_id=i, seed_=seed_, update_collection=False)
            # reset env pool
            observation_ = self.reset()

            # 2. evaluate model until num_episodes of done signals are collected
            while len(self.reward_fifo) < num_episodes:
                action_, _ = model.predict(observation_, deterministic=False)
                observation_, _, _, _ = self.step(action_)

                if render:
                    # update the viewer
                    self.gym.draw_viewer(self.viewer, self.sim, True)

            # 3. mean terminal reward for this map
            env_sr_log.append(sum(self.reward_fifo) / len(self.reward_fifo))

        # site recover
        self.resume()

        eval_end = time.time()
        print("evaluation time: %.2f" % (eval_end - eval_start))
        print("evaluation completed")

        if render:
            self.gym.destroy_viewer(self.viewer)

        # plain floats only, so a shallow copy suffices (the original used
        # copy.deepcopy, but `copy` is never imported at the top of this file)
        return list(env_sr_log)

    def close(self):
        """Destroy the viewer (when one exists) and tear down the simulation."""
        if not self.headless:
            self.gym.destroy_viewer(self.viewer)
        self.gym.destroy_sim(self.sim)

    def __enter__(self):
        """Support with-statement for the environment."""
        return self

    def __exit__(self, *args):
        """Close the environment on ``with``-block exit.

        Returns False so any exception raised inside the block propagates.
        """
        self.close()
        return False
