# NOTE: isaacgym must be imported before torch (Isaac Gym hard requirement).
import isaacgym
import torch

# --- standard library ---
import asyncio
import copy
import math
import os
import time
from asyncio import FastChildWatcher
from code import interact
from collections import deque
from math import sqrt
from tkinter.tix import Tree

# --- third-party ---
import cv2
import numpy as np
import ray
from isaacgym import gymapi
from isaacgym import gymutil
#from isaacgym import gymtorch
from ray.util.placement_group import (
    placement_group,
    placement_group_table,
    remove_placement_group
)
from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy
from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy
from stable_baselines3 import PPO
from stable_baselines3.common import results_plotter
from stable_baselines3.common.callbacks import BaseCallback
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.noise import NormalActionNoise
from stable_baselines3.common.results_plotter import load_results, plot_results, ts2xy
from sympy import false

# --- project-local ---
from draw import *
from pcgworker.PCGWorker import *
from wfc_vecenv_stable_baselines import PCGVecEnvStableBaselines


@ray.remote(num_cpus=1, num_gpus=1)
def run_env(model_param = None, compute_device_id = 0, graphics_device_id = 0, cuda_id = 0):
    """Ray remote task: bring up one headless PCG vec-env on the given devices.

    Args:
        model_param: PPO parameter dict (as produced by ``model.get_parameters()``);
            currently unused — the model-loading code below is commented out.
        compute_device_id: Isaac Gym compute (physics) device index.
        graphics_device_id: Isaac Gym graphics (rendering) device index.
        cuda_id: CUDA device index for the (commented-out) PPO model.

    Returns:
        The ``compute_device_id`` the task was launched with — serves as a
        completion marker for the driver's ``ray.get``.
    """

    # import isaacgym
    # import torch
    # from stable_baselines3 import PPO

    # Expose all GPUs inside the worker process — presumably to override Ray's
    # per-task CUDA_VISIBLE_DEVICES restriction so the requested device ids
    # resolve; TODO confirm against the Ray GPU-scheduling docs.
    os.environ['CUDA_VISIBLE_DEVICES']='0,1,2,3,4,5,6,7'

    print("ray.get_runtime_context().node_id : ",ray.get_runtime_context().node_id)

    # Constructing the env is the task's only live side effect; the instance is
    # deliberately not closed (see the note below about freeing VRAM).
    m_env = PCGVecEnvStableBaselines(headless_ = True, compute_device_id = compute_device_id, graphics_device_id = graphics_device_id)
    # model = PPO('CnnPolicy', env=m_env, batch_size = 1024, device = 'cuda:' + str(cuda_id))
    # model.set_parameters(model_param)

    # print("model on device : ", model.device)
    
    # m_env.reset()
    # all_actions = np.zeros(m_env.num_matrix_envs)
    # for i in range(m_env.num_matrix_envs):
    #     all_actions[i] = -2

    # for i in range(400):
    #     observation, reward, done, info = m_env.step(all_actions)
        #print(compute_device_id,i)

    # '''
    # Do not call m_env.close() here !
    # The trick is when the process is terminated accidentally, the system will free the resources throroughly.
    # There will be nothing left in the VRAM !
    # '''

    # m_env.close()

    # ray.shutdown()
    

    return compute_device_id

if __name__ == "__main__":

    # Make every GPU visible to this (driver) process.
    os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3,4,5,6,7'

    print("ray implementation")

    # Size Ray's resource pool from what the machine actually has.
    num_gpus = torch.cuda.device_count()
    print("num_gpus:", num_gpus)
    num_cpus = os.cpu_count()
    print("num_cpus:", num_cpus)

    ray.init(num_cpus=num_cpus, num_gpus=num_gpus)
    time.sleep(1)  # give the Ray runtime a moment to come up

    # Create the initial environment (flat landscape) and a PPO model on the
    # CPU; only the model's parameter dict is shipped to the remote workers.
    m_env = PCGVecEnvStableBaselines(headless_=True)
    m_env.reset()

    model = PPO('CnnPolicy', env=m_env, batch_size=1024, device='cpu:0')
    # BUG FIX: `copy` was used here without ever being imported, so the script
    # died with a NameError before launching any worker. `import copy` is now
    # at the top of the file. The shallow copy snapshots the parameter dict so
    # later model updates do not mutate what the workers received.
    model_param = copy.copy(model.get_parameters())

    print("model_param:", model_param)

    # Fan one rollout task out to each selected GPU, wait for the whole batch,
    # and repeat forever — this driver loop intentionally has no exit condition.
    while True:

        # One task per device; each uses the same index for compute, graphics
        # and CUDA (previously four copy-pasted .remote(...) calls).
        results = [
            run_env.remote(
                model_param=model_param,
                compute_device_id=dev,
                graphics_device_id=dev,
                cuda_id=dev,
            )
            for dev in (0, 3, 5, 7)
        ]

        output_ = ray.get(results)

        print("--------------------- done ---------------------- ", output_)

        # for res in results:
        #     ray.cancel(res, force=True)
