# Simulation parameter settings.
#
# Module-level configuration constants shared by the simulation loop,
# the environment model, and the DQN agent.

import os  # NOTE(review): unused in this module — confirm importers rely on it before removing

# --- Simulation ---
SLOT_TIME = 1     # length of one simulation time slot
PRINT_EVERY = 10  # progress-report interval

# --- Environment model ---
USER_NODE_NUM = 5       # number of user nodes
AGENT_STATE_NUM = 14    # length of the agent's own state vector
MEC_NODE_NUM = 4        # number of MEC (edge) nodes
NODE_STATE_NUM = 6      # length of each node's state vector
TASK_PROBABILITY = 0.4  # per-slot probability of a new task arriving
STATION_COVER = 200     # base-station coverage radius

# --- Task lifecycle states ---
NEW = 0
COMMUNICATING = 1
COMPUTING = 2
OVER = 3

# --- On/off states ---
OFF = 0
ON = 1
lambda_on = 0.5  # mean off/on duration; lowercase name kept for caller compatibility

# --- DQN model ---
# Input length of the policy and target networks: the agent state plus one
# state vector per node (user nodes + MEC nodes + 1 extra node — presumably
# a local/cloud option; TODO confirm against the environment code).
STATE_DIM = AGENT_STATE_NUM + NODE_STATE_NUM * (USER_NODE_NUM + MEC_NODE_NUM + 1)
# Output length of the policy and target networks: dimension of the action space.
ACTION_DIM = USER_NODE_NUM + MEC_NODE_NUM + 1
BENCH_SIZE = 256        # mini-batch size (NOTE(review): "BATCH_SIZE" was likely intended; name kept for compatibility)
MEMORY_CAPACITY = 2000  # experience-replay buffer capacity
TARGET_UPDATE = 400     # target-network update frequency
GAMMA = 0.9             # reward discount factor
LR = 0.002              # learning rate

# --- DQN training ---
Episodes_number = 1000       # total number of training/testing episodes
Test_episodes_number = 200   # number of test episodes
MODEL_PATH = "../models/DQN_Model/"  # save path for the DQN network weights

# Transmission phase identifiers
FIRST_TRANS = 0
SECOND_TRANS = 1
