EXP_NAME: "eurlex4k_baseline_128_newds"
EXP_DESC: "Eurlex4K Baseline with len=128 on new dataset"
# Ideally this file would list every supported key.

DATA:
    task_name: eurlex4k
    dataset_name: eurlex
    dataset_config_name: null
    max_seq_length: 128
    overwrite_output_dir: true
    overwrite_cache: true
    pad_to_max_length: true
    load_from_local: true
    max_train_samples: null
    max_eval_samples: null
    max_predict_samples: null
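    # NOTE: validation_file and test_file below both point at the test split;
    # this run evaluates on the test set directly.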
    train_file: datasets/eurlex_raw_text_dataset/train.jsonl
    validation_file: datasets/eurlex_raw_text_dataset/test.jsonl
    test_file: datasets/eurlex_raw_text_dataset/test.jsonl

MODEL:
    model_name_or_path: bert-base-uncased
    config_name: null
    tokenizer_name: null
    cache_dir: null
    use_fast_tokenizer: true
    model_revision: main
    use_auth_token: false
    ignore_mismatched_sizes: false
    negative_sampling: "none"
    semsup: false
    encoder_model_type: bert
    user_custom_optimizer: null
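    # With semsup disabled and negative_sampling "none", this is the plain
    # BERT input-encoder baseline named in EXP_NAME above.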

TRAINING:
    do_train: true
    do_eval: true
    per_device_train_batch_size: 8
    gradient_accumulation_steps: 1
    learning_rate: 1.e-4 # used as the input-encoder LR when user_custom_optimizer is false
    num_train_epochs: 30
    save_steps: 20000
    evaluation_strategy: steps
    eval_steps: 10000
    fp16: true
    fp16_opt_level: O1
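    # O1 selects the Apex AMP mixed-precision optimization level (ignored by native AMP).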
    lr_scheduler_type: "constant_with_warmup" # defaults to 'linear'
    dataloader_num_workers: 4
    label_names: [labels]
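
# ---------------------------------------------------------------------------
# Loading sketch (kept as comments so this file stays valid YAML). It assumes
# the config is read with PyYAML and its three sections are mapped onto the
# usual HuggingFace argument groups; the file path and variable names below
# are illustrative, not fixed by this repo:
#
#   import yaml
#
#   with open("configs/eurlex4k_baseline_128_newds.yml") as f:
#       cfg = yaml.safe_load(f)
#
#   data_args  = cfg["DATA"]      # dataset / tokenization options
#   model_args = cfg["MODEL"]     # encoder + SemSup options
#   train_args = cfg["TRAINING"]  # transformers.TrainingArguments fields
# ---------------------------------------------------------------------------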