"""Hyperparameters and runtime configuration for a transformer language-model script."""
import torch

LR: int = 20  # initial learning rate
BPTT: int = 35  # sequence length (backprop-through-time window)
CLIP: float = 0.25  # gradient-norm clipping threshold, guards against exploding gradients
LOGINTERVAL: int = 200  # reporting interval (batches between log lines)
EPOCHS: int = 40  # number of training epochs
BATCH_SIZE: int = 10  # training batch size
TRANSFORMER_ENCODER_EMSIZE: int = 200  # word-embedding dimension
TRANSFORMER_ENCODER_NHID: int = 200  # dimension of the feed-forward network inside the encoder
TRANSFORMER_ENCODER_NLAYERS: int = 2  # number of encoder layers
TRANSFORMER_ENCODER_NHEAD: int = 2  # number of parallel attention heads in the encoder/decoder
TRANSFORMER_ENCODER_DROPOUT: float = 0.2  # dropout applied to the layers
SEED: int = 1111  # random seed for reproducibility
SAVE: str = 'model.pt'  # path where the final model is saved
TEMPERATURE: float = 1.0  # sampling temperature — higher values increase diversity
WORDS: int = 1000  # number of words to generate (presumably; verify against the generation script)
COVERAGE_RATE: float = 0.8  # target fraction of occurrences covered by unique delta values
DIV_NUM: int = 10  # number of partitions the data is divided into
SHIFT: int = 16  # bit-shift applied based on the cache (NOTE(review): likely cache-line/address shift — confirm)

# Prefer GPU 0 when CUDA is available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")