import torch

# Embedding dimension
embedding_size = 800
# Hidden-state dimension
hidden_size = 800
# Number of stacked RNN layers in the encoder / decoder
encoder_n_layers = 2
decoder_n_layers = 2

# Dropout probability
dropout = 0.1
# Compute device: prefer the GPU when one is available
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Encoder learning rate
learning_rate = 1e-3
# Total number of training iterations
MAX_ITERATION = 10000
# Mini-batch size per iteration
batch_size = 128
# Decoder learning-rate ratio (presumably multiplies learning_rate for the
# decoder optimizer — confirm in the training loop)
decoder_learning_ratio = 5.0

# Maximum sentence length, in tokens
MAX_LENGTH = 14

# Special-token indices: padding, start-of-sentence, end-of-sentence
PAD_token = 0
SOS_token = 1
EOS_token = 2

# Gradient-clipping threshold
clip = 50.0

# Logging and checkpointing intervals, in iterations
print_every = 10
save_every = 1000

# Vocabulary / data / model artifact locations
VOC_PATH = "../../Datasets/FinalData/qingyun_data/qingyun/voc_qingyun.tar"
TRAIN_PAIRS_PATH = "../../Datasets/FinalData/qingyun_data/qingyun/pairs_qingyun_train.tar"
TEST_PAIRS_PATH = "../../Datasets/FinalData/qingyun_data/qingyun/pairs_qingyun_test.tar"
TEST_MODEL_PATH = "../Models/"

# Attention scoring method
attn_model = 'dot'

# Shared list for attention weights (starts empty; presumably appended to
# elsewhere during decoding — verify usage)
attn_weights_list = []
# Probability of applying teacher forcing during training
teacher_forcing_ratio = 0.9
# Path to the pretrained word2vec model
word2vec_model_path = "../../Datasets/MidData/word2vec/LCCC_word2vec.model"