# --- Replay buffer / batching ---
REPLAY_SIZE = 10000  # max transitions kept in the replay buffer
MINIMUM_BUFFER_SIZE = 2000  # training starts only after this many transitions are collected
BATCH_SIZE = 128
EMBEDDING_DIM = 128  # node embedding width in the network
NODE_PADDING_SIZE = 400  # the number of nodes will be padded to this value
K_SIZE = 9  # the number of neighboring nodes

# --- Hardware / parallelism ---
USE_GPU = False  # do you want to collect training data using GPUs
USE_GPU_GLOBAL = True  # do you want to train the network using GPUs
NUMX = 32  # NOTE(review): presumably grid dimensions of the environment — confirm
NUMY = 32
NUM_GPU = 2
NUM_META_AGENT = 32  # number of parallel data-collection workers

# --- Optimization ---
LR = 1e-5
GAMMA = 0.99  # discount factor
DECAY_STEP = 256  # unused — kept for config compatibility
SUMMARY_WINDOW = 1  # episodes between tensorboard summary writes
LOAD_MODEL = False # do you want to load the model trained before
SAVE_IMG_GAP = 101  # episode interval between saved visualizations

# --- Scenario ---
N_ROBOTS = 5  # number of pursuer robots
train_mode = True
FIXED_OPPONENT = False  # if True, the opponent policy is not updated/learned
BETA = 0.1  # NOTE(review): appears in folder name; presumably an entropy/regularization weight — confirm
RANDOM_SEED = None  # None -> no fixed seed (presumably seeded per run; verify in env setup)
INPUT_TYPE = 'adj'  # 'map' or 'adj'
EXIT_NUM = 8 
# Per-node input feature dimension: one channel per robot, per exit,
# plus one extra (presumably the evader) — TODO confirm against the encoder.
INPUT_DIM = N_ROBOTS + EXIT_NUM + 1
if EXIT_NUM > 0:
    # Minimum initial distance between the evader and any exit.
    # NOTE(review): only defined when EXIT_NUM > 0 — any code reading this
    # constant with EXIT_NUM == 0 will raise NameError.
    MIN_EVADOR_EXIT_DIST = 6

# Free-form tag appended to the run identifier (date / notable settings).
extra_info = '1.27-K_SIZE9-small+big-5v1-noSAC'

# Run identifier: encodes the exit count, beta weight, and the extra tag.
FOLDER_NAME = 'TVO-EXIT{}-BETA{:.2f}-{}'.format(EXIT_NUM, BETA, extra_info)

# Output directories for checkpoints, training logs, and rendered gifs.
model_path = 'model/' + FOLDER_NAME
train_path = 'train/' + FOLDER_NAME
gifs_path = 'gifs/' + FOLDER_NAME
# Dataset locations depend on the observation representation selected by
# INPUT_TYPE: 'map' uses precomputed DP-policy maps, anything else ('adj')
# uses graph adjacency files.
# Fix: dropped the pointless f-string prefixes (no placeholders, ruff F541).
if INPUT_TYPE == 'map':
    train_map_path = 'DP_policy/DP_policy_train'
    # NOTE(review): no test path is active in this branch, unlike the 'adj'
    # branch below — confirm whether evaluation is supported for 'map' input.
    # test_map_path = 'DP_policy/DP_policy_test'

    # train_small_map_path = '/mnt/DP_policy_2v1_small/train'
    # test_small_map_path = '/mnt/DP_policy_2v1_small/test'
    # train_big_map_path = '/mnt/DP_policy_2v1_big/train'
    # test_big_map_path = '/mnt/DP_policy_2v1_big/test'

else:
    train_adj_path = 'adj_file/train'
    # train_dp_path = 'DP_policy/DP_policy_train'
    test_adj_path = 'adj_file/test'
    # Alternative evaluation datasets — uncomment one pair to switch maps.
    # test_dp_path = 'DP_policy/DP_policy_test'
    # test_adj_path = 'adj_file/adj_file_grasper'
    # test_dp_path = 'DP_policy/DP_policy_grasper'
    # test_adj_path = 'adj_file/adj_file_downtown'
    # test_dp_path = 'DP_policy/DP_policy_downtown'
    # test_adj_path = 'adj_file/adj_file_zhongguancun'
    # test_dp_path = 'DP_policy/DP_policy_zhongguancun'
    # test_adj_path = 'adj_file/adj_file_realworld'
    # test_dp_path = 'DP_policy/DP_policy_realworld'