import torch
# Reproducibility setup: seed every RNG PyTorch uses.
# The original code seeded only the *current* CUDA device, leaving the
# CPU generator (used for weight init, dropout on CPU, DataLoader
# shuffling) unseeded and any additional GPUs unseeded.
USE_CUDA = torch.cuda.is_available()  # True when at least one GPU is visible
torch.manual_seed(0)  # seed the CPU RNG
if USE_CUDA:
    torch.cuda.manual_seed_all(0)  # seed all visible GPUs, not just device 0
class ModelConfig:
    """Hyper-parameters and file paths for the BERT + BiLSTM classifier."""

    # --- model architecture ---
    batch_size = 5
    output_size = 2        # number of target classes
    hidden_dim = 384 // 2  # LSTM hidden size: half of 384
    n_layers = 2           # stacked LSTM layers
    bidirectional = True   # use a bidirectional LSTM

    # --- optimisation / training ---
    lr = 2e-5
    epochs = 15
    print_every = 500      # logging interval, in steps
    clip = 5               # gradient-clipping threshold
    use_cuda = USE_CUDA    # taken from the module-level CUDA probe

    # --- files and paths ---
    # NOTE(review): `bert_config` points at the vocab file, not a model
    # config -- confirm against the code that consumes it.
    bert_config = "bert_pretrain/vocab.txt"
    bert_path = 'bert_pretrain'          # pretrained BERT directory
    save_path = 'saved/bert_bilstm.pth'  # model checkpoint output path
    train_path = "/mnt/COMP/baidu_lic_2022_interpretability/data/train.tsv"
    dev_path = "/mnt/COMP/baidu_lic_2022_interpretability/data/dev.tsv"
