from agent import DQNAgent

from core.rl_enviroment import RLEnv
from core.parameters import Parameters
import pandas as pd
import numpy as np
from csv_reader import CSVReader


# --- Board configuration: load worker/plane specs and the task DAG ---
workers_configs = CSVReader.get_worker_configs(r'..\board_files\workers.csv')
planes_configs = CSVReader.get_plane_configs(r'..\board_files\planes.csv')
dag_dataframe = pd.read_csv(r'..\board_files\task_dag.csv')

# --- Simulation parameters ---
pa = Parameters()
for _attr, _val in (
        ('num_nw', 5),
        ('simu_len', 50),
        ('num_ex', 10),
        ('new_job_rate', 1),
        ('backlog_size', 10),
):
    setattr(pa, _attr, _val)

# Buffers for recording (observation, action) trajectories.
data_list_obs = []
data_list_act = []

# The reinforcement-learning environment under evaluation.
env = RLEnv(pa, workers_configs, planes_configs, dag_dataframe)

EPISODES = 2000

if __name__ == "__main__":
    # Build the agent sized to this environment's observation/action spaces.
    observation_shape = env.observer_space
    action_size = env.action_space
    agent = DQNAgent(observation_shape, action_size)

    # Evaluation run: pin exploration at its floor and restore trained weights.
    agent.epsilon = agent.epsilon_min
    agent.load("./save/best-ddqn.h5")

    # Roll out a single episode greedily until the environment signals done.
    observation = env.reset()
    finished = False
    while not finished:
        batched_obs = np.expand_dims(observation, 0)  # add batch dim for the network
        chosen_action = agent.act(batched_obs)
        observation, reward, finished, _ = env.step(chosen_action)

    # Report the terminal observation and timing info, then visualize the schedule.
    print(observation)
    print("===============")

    print(env.env.now)
    print(env.end)

    env.plot_gantt()
