from brain import RL_NetWork
import matplotlib.pyplot as plt
from core.rl_enviroment import RLEnv
from core.parameters import Parameters
import pandas as pd
import numpy as np
from csv_reader import CSVReader


# --- Environment configuration -------------------------------------------
# Load worker/plane configurations and the task DAG from CSV board files.
# NOTE(review): Windows-style relative paths — assumes the script is run from
# a sibling directory of board_files; confirm working directory.
workers_configs = CSVReader.get_worker_configs(r'..\board_files\workers.csv')
planes_configs = CSVReader.get_plane_configs(r'..\board_files\planes.csv')
dag_dataframe = pd.read_csv(r'..\board_files\task_dag.csv')
# Simulation parameters consumed by RLEnv.
pa = Parameters()
pa.num_nw = 5          # number of new-work slots
pa.simu_len = 50       # simulation length
pa.num_ex = 10         # number of examples
pa.new_job_rate = 1    # job arrival rate
pa.backlog_size = 10   # backlog queue size
# presumably the meanings above mirror Parameters' own defaults — verify in core.parameters

env = RLEnv(pa, workers_configs, planes_configs,dag_dataframe)

# Training hyperparameters.
EPISODES = 2000
steps_train = 4     # learn every N environment steps (see train())
start_steps = 200   # replay warm-up before learning starts (see train())
copy_steps = 10     # NOTE(review): unused in this file — target-network copy interval?


def train():
    """Train the DQN agent on the global RL environment.

    Runs ``EPISODES`` episodes. After ``start_steps`` total environment
    steps the agent learns every ``steps_train`` steps. The model is
    checkpointed periodically late in training and once at the end.

    Returns:
        The agent's recorded loss history (``agent.cost_his``) for plotting.
    """
    observation_shape = env.observer_space
    action_size = env.action_space

    agent = RL_NetWork(name='dqn', observe_shape=observation_shape, action_size=action_size)

    total_steps = 0

    for e in range(EPISODES):
        obs = env.reset()
        done = False
        while not done:
            # Decompose the composite observation into its three components.
            W = obs.worker
            S = obs.task
            G = obs.graph

            # Mask out actions that are invalid in the current state.
            mask = env.action_mask()
            action = agent.choose_action(W, S, G, mask)

            next_obs, reward, done, _ = env.step(action)

            W_ = next_obs.worker
            S_ = next_obs.task
            G_ = next_obs.graph
            # Normalize the reward (raw range roughly (-30, 0)) to keep
            # Q-value targets small and training stable.
            reward /= 40

            agent.store_transition(W, S, G, action, reward, W_, S_, G_, done)

            # Warm up the replay buffer for `start_steps` steps, then learn
            # every `steps_train` steps. (Previously duplicated locally as
            # learn_step=200 / learn_step_size=4 — now uses the module
            # constants with the same values.)
            if total_steps >= start_steps and total_steps % steps_train == 0:
                agent.learn()

            if done:
                print("episode: {}/{}, time: {}, e: {:.2} ,lr:{},end={}"
                      .format(e, EPISODES, env.env.now, agent.epsilon, agent.lr,env.end))
                # Checkpoint periodically once training has matured.
                if e > 1000 and e % 50 == 0:
                    agent.save_model()

            obs = next_obs

            total_steps += 1

    agent.save_model()
    return agent.cost_his


costs = train()

# Plot the training loss curve.
plt.plot(np.array(costs), c='b', label='double')
plt.legend(loc='best')
plt.ylabel('Q eval')
plt.xlabel('training steps')
plt.grid()
# BUG FIX: savefig must come BEFORE show(). plt.show() blocks until the
# window is closed and tears the figure down, so calling savefig afterwards
# wrote an empty image.
plt.savefig('save/cost.png')
plt.show()