from asyncio import FastChildWatcher
from code import interact
import os
import time
import numpy as np
from isaacgym import gymutil
from isaacgym import gymapi
#from isaacgym import gymtorch
from math import sqrt
import math
from sympy import false
import torch
import cv2

from draw import *

from pcgworker.PCGWorker import *

from wfc_env import *

from stable_baselines3 import PPO
from stable_baselines3.common.callbacks import BaseCallback
from stable_baselines3.common.results_plotter import load_results, ts2xy
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common import results_plotter
from stable_baselines3.common.results_plotter import load_results, ts2xy, plot_results
from stable_baselines3.common.noise import NormalActionNoise
from stable_baselines3.common.callbacks import BaseCallback

# --- Inference / playback script -------------------------------------------
# Loads a PPO policy previously trained on the WFC custom environment and
# runs it in an endless play loop, rendering every step to screen.

LOGDIR = "./training_logs"
# NOTE(review): `timesteps` is not used anywhere in this script — kept only in
# case other code imports it from this module; confirm and remove if not.
timesteps = 200000

# Key code returned by render() that pauses playback; 114 == ord('r')
# (OpenCV waitKey convention — presumably, given cv2 is imported; verify).
PAUSE_KEY = 114

# Monitor wraps the custom env so episode statistics are written to LOGDIR.
m_env = Monitor(CustomEnv(headless_ = False, seed_pth = "seed.json"), LOGDIR, allow_early_resets=True)
observation = m_env.reset()

# Load the trained policy and re-bind it to this environment instance.
model = PPO.load("./training_logs/best_model1.zip", env=m_env)

while True:
    # deterministic=False keeps the stochastic policy — actions are sampled,
    # not argmax'd, so playback shows exploration-like variety.
    action, _states = model.predict(observation, deterministic=False)

    observation, reward, done, info = m_env.step(action)

    # Episode over: start a fresh one so the loop never terminates.
    if done:
        observation = m_env.reset()

    # render() returns the last key pressed in the render window.
    interaction = m_env.render(real_time=True)

    if interaction == PAUSE_KEY:
        input("pause")