import torch

from seq2seq.num_sequence import NumSequence

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

num_sequence = NumSequence()

train_batch_size = 512  # batch size used during training
embedding_dim = 100  # dimensionality of the token embeddings

hidden_size = 128  # dimensionality of the LSTM hidden state
num_layers = 2  # number of stacked LSTM layers
dropout = 0.5  # dropout applied between stacked LSTM layers
bidirectional = True  # whether the LSTM is bidirectional
max_len = 10  # maximum sequence length
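
# A minimal sketch (an assumption, not part of this module) of how the LSTM
# hyperparameters above are usually wired into an encoder; the project's real
# encoder/decoder definitions live elsewhere:
#
#   encoder_rnn = torch.nn.LSTM(
#       input_size=embedding_dim,
#       hidden_size=hidden_size,
#       num_layers=num_layers,
#       dropout=dropout,
#       bidirectional=bidirectional,
#       batch_first=True,
#   )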

model_save_path = "model/seq2seq.model"
optimizer_save_path = "model/optimizer.model"
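
# Hedged example of how these paths are typically used with torch.save /
# torch.load; the actual checkpointing code in this project may differ:
#
#   torch.save(model.state_dict(), model_save_path)
#   torch.save(optimizer.state_dict(), optimizer_save_path)
#   model.load_state_dict(torch.load(model_save_path, map_location=device))
#   optimizer.load_state_dict(torch.load(optimizer_save_path, map_location=device))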

teacher_forcing_ratio = 0.5  # probability of using teacher forcing during decoding
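
# Sketch of the usual per-batch (or per-step) teacher-forcing decision made in
# the decoder's training loop (assumed usage; `random` must be imported there):
#
#   use_teacher_forcing = random.random() < teacher_forcing_ratio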
