from asyncio import FastChildWatcher
from code import interact
import os
import time
import numpy as np
from isaacgym import gymutil
from isaacgym import gymapi
#from isaacgym import gymtorch
from math import sqrt
import math
from sympy import false
import cv2
from draw import *
from pcgworker.PCGWorker import *

from wfc_vecenv_stable_baselines import PCGVecEnvStableBaselines

from stable_baselines3 import PPO
from stable_baselines3.common.callbacks import BaseCallback
from stable_baselines3.common.results_plotter import load_results, ts2xy
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common import results_plotter
from stable_baselines3.common.results_plotter import load_results, ts2xy, plot_results
from stable_baselines3.common.noise import NormalActionNoise
from stable_baselines3.common.callbacks import BaseCallback



if __name__ == "__main__":

    # Output directory for checkpoints/logs and the total PPO training budget.
    LOGDIR = "./training_logs"
    timesteps = 1500000

    # Build the PCG vectorized env from a seed layout.
    # NOTE(review): headless_=True presumably disables the Isaac Gym viewer
    # during training — confirm against PCGVecEnvStableBaselines.
    m_env = PCGVecEnvStableBaselines(seed_pth = "pcgworker/new.json", headless_ = True)
    # m_env = PCGVecEnvStableBaselines(seed_pth = "seed.json", headless_ = True)
    # m_env1 = PCGVecEnvStableBaselines(seed_pth = "seed.json", headless_ = True)
    # m_env = PCGVecEnvStableBaselines(headless_ = True)
    m_env.reset()
    # m_env1.reset()

    # m = PCGVecEnvStableBaselines.mro()
    # for i in m:
    #     print(i)

    # Shared state read by evaluate_cb below: model_ref is filled in once the
    # PPO model exists; check_freq throttles how often the callback does its
    # periodic bookkeeping. best_step appears unused beyond initialization.
    model_ref = None
    check_freq = 1000
    best_step = 0

    def evaluate_cb(local_vars, global_vars):
        """Per-step callback handed to ``model.learn``.

        Stable-Baselines3 invokes a callable callback with its local and
        global namespaces after each environment step. This callback:
        renders when a viewer is available, computes a running-average
        reward every ``check_freq`` steps, periodically evaluates and
        checkpoints the model, and tracks the best average reward seen.
        Reads the module-level ``m_env``, ``model_ref`` and ``check_freq``.
        """
        if not m_env.headless:
            # render during training
            m_env.render()

        if m_env.step_count % check_freq == 0:
            # Average reward over the env's recent-reward FIFO. Guard the
            # empty case (e.g. the very first step, where step_count % check_freq
            # can be 0 before any reward was recorded) to avoid ZeroDivisionError.
            if m_env.reward_fifo:
                reward_avg = sum(m_env.reward_fifo) / len(m_env.reward_fifo)
            else:
                reward_avg = 0.0

            # Once training looks good, evaluate across multiple envs every
            # 60000 steps and checkpoint the current model.
            if m_env.best_performance > 0.8 and m_env.step_count % 60000 == 0:
                env_sr_log = m_env.evaluate_sb3(model = model_ref, render = False)
                # print env_sr_log
                print("--------------------- env_sr_log :",env_sr_log)
                print("m_env.step_count : ",m_env.step_count)
                print("time taken: {}".format(time.time() - m_env.timer0))
                model_ref.save("./training_logs/best_model")

            # Track the best running-average reward after a 100k-step warm-up.
            if reward_avg >= m_env.best_performance and m_env.step_count >= 100000:
                m_env.best_performance = reward_avg
            #     print("Saving model at step {} with reward {}".format(m_env.step_count, m_env.best_performance))
            #     model_ref.save("./training_logs/best_model")
            # print("best_performance {} reward {}".format(m_env.best_performance, reward_avg))
            # print("time taken: {}".format(time.time() - m_env.timer0))

    # Instantiate PPO with an image-based (CNN) policy on the PCG env.
    # NOTE(review): batch_size=1024 overrides the SB3 default — presumably
    # tuned for this env; confirm it divides n_steps * n_envs evenly.
    model = PPO('CnnPolicy', env=m_env, batch_size = 1024)
    # model = PPO.load("./training_logs/best_model5.zip", env=m_env, batch_size = 1024)
    model_ref = model  # expose the model to evaluate_cb via module scope
    best_step = 0


    print("model.n_steps : ", model.n_steps)
    print("model.batch_size : ", model.batch_size)

    # Train; SB3 calls evaluate_cb after each environment step.
    model.learn(total_timesteps=timesteps, callback=evaluate_cb)
    # model.eval_env



