"""Experiment configuration constants for a DRL agent trained on graph topologies.

Groups: environment/run settings, training schedule, model hyperparameters
(`hparams`), transformer-style architecture knobs, and replay-buffer settings.
"""

ENV_NAME = 'GraphEnv-v1'  # Gym environment id for the graph routing environment
graph_topology = 3 # 0==NSFNET, 1==GEANT2, 2==Small Topology, 3==GBN
SEED = 37  # global RNG seed for reproducibility
ITERATIONS = 1600  # total number of training iterations
TRAINING_EPISODES = 50  # episodes collected per training iteration
EVALUATION_EPISODES = 40  # episodes used for each evaluation pass
FIRST_WORK_TRAIN_EPISODE = 60  # episode index at which training actually begins

MULTI_FACTOR_BATCH = 6 # Number of batches used in training
TAU = 0.08 # Only used in soft weights copy

# NOTE(review): the trailing spaces in "RL^2   " end up inside the checkpoint
# and TensorBoard directory names below — confirm this is intentional before
# changing it, as existing saved models would live under the spaced path.
differentiation_str = "RL^2   " + str(graph_topology)
checkpoint_dir = "./models/"+differentiation_str +"/"
store_loss = 3 # Store the loss every store_loss batches

# Force TensorFlow to use single thread.
# Multiple threads are a potential source of non-reproducible results.
# For further details, see: https://stackoverflow.com/questions/42022950/
# tf.config.threading.set_inter_op_parallelism_threads(1)
# tf.config.threading.set_intra_op_parallelism_threads(1)

train_dir = "./TensorBoard/"+differentiation_str  # TensorBoard log directory
# summary_writer = tf.summary.create_file_writer(train_dir)
listofDemands = [8, 32, 64]  # possible traffic demand sizes (bandwidth units)
copy_weights_interval = 50  # hard-copy target-network weights every N steps
evaluation_interval = 1  # run evaluation every N iterations
epsilon_start_decay = 70  # iteration at which epsilon-greedy decay begins


# Model hyperparameters (message-passing GNN + readout).
hparams = {
    'l2': 0.1,
    'dropout_rate': 0.01,
    'link_state_dim': 20,
    'readout_units': 35,
    'learning_rate': 0.0008, # 0.01 0.001 0.0001 0.0005 0.0008
    'batch_size': 32,
    'T': 4,  # number of message-passing steps
    'num_demands': len(listofDemands)
}

MAX_QUEUE_SIZE = 4000  # upper bound on the environment/event queue size

HIDDEN_UNITS = 100 #
NUM_HEADS = 2 # 1 2 4 6 8 — when sweeping this, use learning_rate = 0.0008
NUM_LAYERS = 3 # 1 3 5 — when sweeping this, use learning_rate = 0.0008
OUTPUT_DIM = 4

NUM_EPOCHS = 100
BATCH_SIZE = 256
VALIDATION_SPLIT = 0.1
LEARNING_RATE = 3e-1  # NOTE(review): 0.3 is unusually high for SGD — confirm intended
MOMENTUM = 0.9

REPLAY_SIZE = 10000 # experience replay buffer size
N_STEP = 4 # horizon for multi-step (n-step) learning