from brain import RL_NetWork
import matplotlib.pyplot as plt
from core.rl_enviroment import RLEnv
from core.parameters import Parameters
import pandas as pd
import numpy as np
from csv_reader import CSVReader


# Load the static problem definition: worker configs, plane configs, and the
# task-dependency DAG that the scheduling environment is built from.
workers_configs = CSVReader.get_worker_configs(r'..\board_files\workers.csv')
planes_configs = CSVReader.get_plane_configs(r'..\board_files\planes.csv')
dag_dataframe = pd.read_csv(r'..\board_files\task_dag.csv')
# Simulation hyper-parameters (see core.parameters.Parameters for the rest).
pa = Parameters()
pa.num_nw = 5        # number of new-work slots — presumably jobs visible at once; TODO confirm
pa.simu_len = 50     # simulation length
pa.num_ex = 10       # number of examples/sequences
pa.new_job_rate = 1  # job arrival rate
pa.backlog_size = 10 # backlog queue capacity

env = RLEnv(pa, workers_configs, planes_configs,dag_dataframe)

EPISODES = 30
# NOTE(review): steps_train / start_steps / copy_steps appear unused in this
# file — likely leftovers from the training script; confirm before deleting.
steps_train = 4
start_steps = 200
copy_steps = 10

# Best action sequences found during evaluation, keyed by (makespan, episode).
best_action_seq = dict()
best_makespan = 400

def restore_model():
    """Load the trained DQN agent and evaluate it for EPISODES episodes.

    For each episode, actions are chosen by the loaded agent (with a small
    residual epsilon for exploration) until the environment reports done.
    Any episode whose final makespan (``env.env.now``) is <= 65 has its
    action sequence recorded in the module-level ``best_action_seq`` dict,
    keyed by ``(makespan, episode_index)``.

    Uses the module-level ``env``, ``EPISODES`` and ``best_action_seq``.
    Returns None.
    """
    observation_shape = env.observer_space
    action_size = env.action_space

    agent = RL_NetWork(name='dqn',observe_shape=observation_shape, action_size=action_size)
    agent.load_model()
    # Keep a small exploration rate so the replay is not fully greedy.
    agent.epsilon = 0.2

    total_steps = 0

    for e in range(EPISODES):
        obs = env.reset()
        done = False

        act_seq = list()

        while not done:
            # The observation decomposes into the three network inputs:
            # worker state, task state, and the dependency graph.
            W = obs.worker
            S = obs.task
            G = obs.graph

            # Mask out currently-invalid actions before the agent chooses.
            mask = env.action_mask()

            action = agent.choose_action(W, S, G, mask)
            act_seq.append(action)

            next_obs, reward, done, _ = env.step(action)

            if done:
                print("episode: {}/{}, time: {}, e: {:.2} ,lr:{},end={}"
                      .format(e, EPISODES, env.env.now, agent.epsilon, agent.lr,env.end))
                # Record the action sequence of any sufficiently fast episode;
                # 65 is the recording threshold for "good" makespans.
                if env.env.now <= 65:
                    best_action_seq[(env.env.now, e)] = act_seq

            obs = next_obs
            total_steps += 1

def best_act_seq():
    """Replay the best recorded action sequence and plot the Gantt chart.

    Selects the ``(makespan, episode)`` key with the smallest makespan from
    the module-level ``best_action_seq``, replays that action sequence five
    times on a freshly reset ``env`` (printing the resulting makespan each
    time), then renders the final schedule via ``env.plot_gantt()``.

    Raises ValueError (from ``min``) if ``best_action_seq`` is empty, i.e.
    no episode finished under the recording threshold in restore_model().
    Returns None.
    """
    # Keys are (makespan, episode); pick the key with the smallest makespan.
    min_makespan = min(best_action_seq, key=lambda k: k[0])
    print(min_makespan)

    act_seq = best_action_seq[min_makespan]

    for _replay in range(5):
        obs = env.reset()

        for act in act_seq:
            # Called for parity with the training loop; result unused.
            # NOTE(review): assumed side-effect free — confirm in RLEnv.
            env.action_mask()

            obs, _reward, _done, _ = env.step(act)

        print("========================================")
        print(" time: {},end={},min={}"
              .format(env.env.now, env.end,min_makespan))
    env.plot_gantt()
if __name__ == "__main__":
    # Guard the entry point so importing this module does not immediately
    # run a full evaluation + replay (the env setup above still runs on
    # import, but the expensive loops do not).
    restore_model()
    best_act_seq()