# import tensorflow as tf
# from keras.layers import Conv1D,BatchNormalization,Dense,Flatten,Input,Activation
# from keras.models import Model
# import numpy as np


# class DQNAgent:
#     def __init__(self, name, state_shape, n_actions, epsilon=0, reuse=False):
#         """A simple DQN agent"""
#         with tf.variable_scope(name, reuse=reuse):
#
#             #< Define your network body here. Please make sure you don't use any layers created elsewhere >
#             X_input = Input(state_shape, name="state_input")
#
#             # S1 = S_input[:, :, :-1]
#             # S2 = S_input[:, :, -1]
#
#             X = Conv1D(16, kernel_size=3, strides=1, padding='valid')(X_input)
#             X = Conv1D(32, kernel_size=3, strides=1, padding='valid')(X)
#             X = BatchNormalization()(X)
#             X = Activation('relu')(X)
#             X = Flatten()(X)
#             X = Dense(100, 'tanh')(X)
#             q_values = Dense(n_actions, activation='softmax')(X)
#             model = Model(inputs=[X_input], outputs=q_values, name="ScheduleModel")
#             # q_values = Dense(units=n_actions,activation='relu')(X)
#             self.deep_q_net = Model(X_input,q_values)
#             # prepare a graph for agent step
#             self.state_t = tf.placeholder(
#                 'float32', [None, ] + list(state_shape))
#             self.qvalues_t = self.get_symbolic_qvalues(self.state_t)
#
#         self.weights = tf.get_collection(
#             tf.GraphKeys.TRAINABLE_VARIABLES, scope=name)
#         self.epsilon = epsilon
#
#     def get_symbolic_qvalues(self, state_t):
#         """takes agent's observation, returns qvalues. Both are tf Tensors"""
#         #< apply your network layers here >
#         #print(type(state_t))
#         qvalues = self.deep_q_net(state_t)
#         #print(qvalues.shape)
#         assert tf.is_numeric_tensor(qvalues) and qvalues.shape.ndims == 2, \
#             "please return 2d tf tensor of qvalues [you got %s]" % repr(
#                 qvalues)
#         # assert int(qvalues.shape[1]) == n_actions
#
#         return qvalues
#
#     def get_qvalues(self, state_t):
#         """Same as symbolic step except it operates on numpy arrays"""
#         sess = tf.get_default_session()
#         return sess.run(self.qvalues_t, {self.state_t: state_t})
#
#     def sample_actions(self, qvalues):
#         """pick actions given qvalues. Uses epsilon-greedy exploration strategy. """
#         epsilon = self.epsilon
#         batch_size, n_actions = qvalues.shape
#         random_actions = np.random.choice(n_actions, size=batch_size)
#         best_actions = qvalues.argmax(axis=-1)
#         should_explore = np.random.choice(
#             [0, 1], batch_size, p=[1-epsilon, epsilon])
#         return np.where(should_explore, random_actions, best_actions)

from agent import DQNAgent

from core.rl_enviroment import RLEnv
from core.parameters import Parameters
import pandas as pd
import numpy as np
from csv_reader import CSVReader


# --- Environment setup (runs at import time) ---------------------------------
# Load worker/plane configurations and the task-dependency DAG from CSV files.
# NOTE(review): paths are Windows-style and relative to the CWD — assumes the
# script is launched from the package directory; confirm before deployment.
workers_configs = CSVReader.get_worker_configs(r'..\board_files\workers.csv')
planes_configs = CSVReader.get_plane_configs(r'..\board_files\planes.csv')
dag_dataframe = pd.read_csv(r'..\board_files\task_dag.csv')

# Simulation parameters for the RL environment (overriding defaults).
pa = Parameters()
pa.num_nw = 5         # number of job slots visible to the agent
pa.simu_len = 50      # simulation length
pa.num_ex = 10        # number of examples/sequences
pa.new_job_rate = 1   # job arrival rate
pa.backlog_size = 10  # backlog queue capacity
# pa.compute_dependent_parameters()

# presumably intended to log trajectories; currently unused in this file
data_list_obs = []
data_list_act = []

# Scheduling environment driven by the parameters and CSV-derived configs.
env = RLEnv(pa, workers_configs, planes_configs,dag_dataframe)

# --- Training hyperparameters -------------------------------------------------
EPISODES = 2000      # total training episodes
steps_train = 4      # run a replay/training step every N environment steps
start_steps = 200    # warm-up steps before any training or target sync
copy_steps = 10      # sync the target network every N steps (after warm-up)

if __name__ == "__main__":
    # Train the DQN agent on the scheduling environment: collect transitions,
    # learn from experience replay every `steps_train` steps, and periodically
    # sync the target network.
    observation_shape = env.observer_space
    action_size = env.action_space
    agent = DQNAgent(observation_shape, action_size)
    # agent.load("./save/cartpole-ddqn.h5")

    batch_size = 32   # minibatch size sampled from replay memory
    global_step = 0   # total environment steps across all episodes

    for e in range(EPISODES):
        obs = env.reset()
        done = False

        while not done:
            # Add a leading batch axis to each observation component so the
            # shapes match the network's batched inputs.
            obs_list = [np.expand_dims(obs.worker, 0),
                        np.expand_dims(obs.task, 0),
                        np.expand_dims(obs.graph, 0)]

            # epsilon-greedy action selection (schedule driven by global_step)
            action = agent.act(obs, global_step)
            next_obs, reward, done, _ = env.step(action)

            next_obs_list = [np.expand_dims(next_obs.worker, 0),
                             np.expand_dims(next_obs.task, 0),
                             np.expand_dims(next_obs.graph, 0)]

            agent.memorize(obs_list, action, reward, next_obs_list, done)
            obs = next_obs

            # Learn from replay memory every `steps_train` steps once the
            # warm-up period has filled the buffer.
            if global_step % steps_train == 0 and global_step > start_steps:
                print('global_step', global_step)
                agent.replay(batch_size)

            # Periodically sync the target network, gradually raising the
            # soft-update coefficient tau toward a 0.99 ceiling.
            # BUGFIX: the original used max(), which floors tau at 0.99 and
            # then lets it grow without bound past 1.0; min() caps it.
            if (global_step + 1) % copy_steps == 0 and global_step > start_steps:
                agent.tau = min(agent.tau * 1.002, 0.99)
                agent.update_target_model()

            global_step += 1

            if done:
                print("episode: {}/{}, time: {}, e: {:.2} end={}"
                      .format(e, EPISODES, env.env.now, agent.epsilon, env.end))

        # Checkpoint the model weights every 10 episodes.
        if e % 10 == 0:
            agent.save("./save/best-ddqn.h5")

        # if e % 100 == 0 :
        #     env.plot_gantt()

# print("global best actions:", global_best_acts)
# print("makespan optimal:", min_makespan)
