BramVanroy committed
Commit f6610a3
1 Parent(s): 96d8623
README.md CHANGED
@@ -1,45 +1,97 @@
1
  ---
2
  language:
3
  - nl
4
  license: mit
5
- tags:
6
- - sentiment-analysis
7
- - dutch
8
- - text
9
- datasets:
10
- - BramVanroy/hebban-reviews
11
  metrics:
12
  - accuracy
13
  - f1
14
  - precision
 
15
  - recall
16
- widget:
17
- - text: "Wauw, wat een leuk boek! Ik heb me er er goed mee vermaakt."
18
- - text: "Nee, deze vond ik niet goed. De auteur doet zijn best om je als lezer mee te trekken in het verhaal maar mij overtuigt het alleszins niet."
19
- - text: "Ik vind het niet slecht maar de schrijfstijl trekt me ook niet echt aan. Het wordt een beetje saai vanaf het vijfde hoofdstuk"
20
-
21
  model-index:
22
  - name: bert-base-dutch-cased-hebban-reviews
23
  results:
24
- - task:
25
- type: text-classification
26
- name: sentiment analysis
27
- dataset:
28
- type: BramVanroy/hebban-reviews
29
- name: Hebban Reviews
30
  split: test
31
- revision: 1.1.0
32
  metrics:
33
- - type: accuracy
34
- value: 0.8219
35
- name: Test accuracy
36
- - type: f1
37
- value: 0.8227
38
- name: Test f1
39
- - type: precision
40
- value: 0.8245
41
- name: Test precision
42
- - type: recall
43
- value: 0.8219
44
- name: Test recall
45
- ---

1
  ---
2
+ datasets:
3
+ - BramVanroy/hebban-reviews
4
  language:
5
  - nl
6
  license: mit
7
  metrics:
8
  - accuracy
9
  - f1
10
  - precision
11
+ - qwk
12
  - recall
13
  model-index:
14
  - name: bert-base-dutch-cased-hebban-reviews
15
  results:
16
+ - dataset:
17
+ config: filtered_sentiment
18
+ name: BramVanroy/hebban-reviews - filtered_sentiment - 2.0.0
19
+ revision: 2.0.0
20
  split: test
21
+ type: BramVanroy/hebban-reviews
22
  metrics:
23
+ - name: Test accuracy
24
+ type: accuracy
25
+ value: 0.8042406311637081
26
+ - name: Test f1
27
+ type: f1
28
+ value: 0.8125977499178383
29
+ - name: Test precision
30
+ type: precision
31
+ value: 0.8283602308368182
32
+ - name: Test qwk
33
+ type: qwk
34
+ value: 0.7301452890386257
35
+ - name: Test recall
36
+ type: recall
37
+ value: 0.8042406311637081
38
+ tags:
39
+ - sentiment-analysis
40
+ - dutch
41
+ - text
42
+ widget:
43
+ - text: Wauw, wat een leuk boek! Ik heb me er er goed mee vermaakt.
44
+ - text: Nee, deze vond ik niet goed. De auteur doet zijn best om je als lezer mee
45
+ te trekken in het verhaal maar mij overtuigt het alleszins niet.
46
+ - text: Ik vind het niet slecht maar de schrijfstijl trekt me ook niet echt aan. Het
47
+ wordt een beetje saai vanaf het vijfde hoofdstuk
48
+ ---
49
+
50
+ # bert-base-dutch-cased-hebban-reviews
51
+
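For quick usage, a minimal inference sketch with the 🤗 `transformers` pipeline; the Hub repository id `BramVanroy/bert-base-dutch-cased-hebban-reviews` is assumed from the model name above and is not confirmed by this card:

```python
from transformers import pipeline

# Repo id assumed from the model name on this card; adjust if the model lives elsewhere.
classifier = pipeline(
    "text-classification",
    model="BramVanroy/bert-base-dutch-cased-hebban-reviews",
)

print(classifier("Wauw, wat een leuk boek! Ik heb me er goed mee vermaakt."))
# Expected output shape (label names depend on the model config), e.g.:
# [{'label': 'positive', 'score': 0.99}]
```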
52
+ # Dataset
53
+ - dataset_name: BramVanroy/hebban-reviews
54
+ - dataset_config: filtered_sentiment
55
+ - dataset_revision: 2.0.0
56
+ - labelcolumn: review_sentiment
57
+ - textcolumn: review_text_without_quotes
58
+
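A minimal sketch of loading this exact dataset configuration with the `datasets` library; the repository name, config, revision and column names come from the list above, and a recent `datasets` release that accepts `revision=` is assumed:

```python
from datasets import load_dataset

# Same repository, config and revision as used for fine-tuning.
ds = load_dataset(
    "BramVanroy/hebban-reviews",
    "filtered_sentiment",
    revision="2.0.0",
)

# Text and label columns as listed above.
example = ds["test"][0]
print(example["review_text_without_quotes"])
print(example["review_sentiment"])
```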
59
+ # Training
60
+ - optim: adamw_hf
61
+ - learning_rate: 5e-05
62
+ - per_device_train_batch_size: 64
63
+ - per_device_eval_batch_size: 64
64
+ - gradient_accumulation_steps: 1
65
+ - max_steps: 5001
66
+ - save_steps: 500
67
+ - metric_for_best_model: qwk
68
+
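The hyperparameters above map directly onto `transformers.TrainingArguments`; a hedged sketch of that mapping (the evaluation schedule and mixed-precision flags are taken from args.json further down, not from this list):

```python
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="trained/hebban-reviews/bert-base-dutch-cased",
    optim="adamw_hf",
    learning_rate=5e-5,
    per_device_train_batch_size=64,
    per_device_eval_batch_size=64,
    gradient_accumulation_steps=1,
    max_steps=5001,
    evaluation_strategy="steps",
    eval_steps=500,
    save_steps=500,
    load_best_model_at_end=True,
    metric_for_best_model="qwk",
    greater_is_better=True,
    fp16=True,
)
```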
69
+ # Best checkpoint based on validation
70
+ - best_metric: 0.732569302631819
71
+ - best_model_checkpoint: trained/hebban-reviews/bert-base-dutch-cased/checkpoint-3000
72
+
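`qwk` drives checkpoint selection here; assuming it stands for quadratic weighted kappa (Cohen's kappa with quadratic weights, which suits the ordered negative/neutral/positive labels), it can be computed with scikit-learn:

```python
from sklearn.metrics import cohen_kappa_score

# Toy label ids: 0=negative, 1=neutral, 2=positive (purely illustrative).
y_true = [0, 1, 2, 2, 1, 0]
y_pred = [0, 1, 2, 1, 1, 0]

print(cohen_kappa_score(y_true, y_pred, weights="quadratic"))
```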
73
+ # Test results of best checkpoint
74
+ - accuracy: 0.8042406311637081
75
+ - f1: 0.8125977499178383
76
+ - precision: 0.8283602308368182
77
+ - qwk: 0.7301452890386257
78
+ - recall: 0.8042406311637081
79
+
80
+ ## Confusion matrix
81
+
82
+ ![cfm](fig/test_confusion_matrix.png)
83
+
84
+ ## Normalized confusion matrix
85
+
86
+ ![norm cfm](fig/test_confusion_matrix_norm.png)
87
+
88
+ # Environment
89
+ - cuda_capabilities: 8.0; 8.0
90
+ - cuda_device_count: 2
91
+ - cuda_devices: NVIDIA A100-SXM4-80GB; NVIDIA A100-SXM4-80GB
92
+ - finetuner_commit: 48bb3434fa8bbfc9b2d0061ca6c8feb87f78a7ef
93
+ - platform: Linux-4.18.0-305.49.1.el8_4.x86_64-x86_64-with-glibc2.28
94
+ - python_version: 3.9.5
95
+ torch_version: 1.10.0
96
+ - transformers_version: 4.21.0
97
+
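The environment fingerprint above can be regenerated with standard-library and `torch` calls; a rough sketch whose key names mirror the list (the finetuner's own collection code is not shown here):

```python
import platform

import torch
import transformers

env = {
    "cuda_capabilities": "; ".join(
        ".".join(map(str, torch.cuda.get_device_capability(i)))
        for i in range(torch.cuda.device_count())
    ),
    "cuda_device_count": torch.cuda.device_count(),
    "cuda_devices": "; ".join(
        torch.cuda.get_device_name(i) for i in range(torch.cuda.device_count())
    ),
    "platform": platform.platform(),
    "python_version": platform.python_version(),
    "torch_version": torch.__version__,
    "transformers_version": transformers.__version__,
}
print(env)
```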
all_results.json ADDED
@@ -0,0 +1,12 @@
1
+ {
2
+ "accuracy": 0.8042406311637081,
3
+ "epoch": 4.38,
4
+ "f1": 0.8125977499178383,
5
+ "precision": 0.8283602308368182,
6
+ "qwk": 0.7301452890386257,
7
+ "recall": 0.8042406311637081,
8
+ "train_loss": 0.3670836862052257,
9
+ "train_runtime": 2628.6927,
10
+ "train_samples_per_second": 243.516,
11
+ "train_steps_per_second": 1.902
12
+ }
args.json ADDED
@@ -0,0 +1,134 @@
1
+ {
2
+ "_n_gpu": 1,
3
+ "adafactor": false,
4
+ "adam_beta1": 0.9,
5
+ "adam_beta2": 0.999,
6
+ "adam_epsilon": 1e-08,
7
+ "auto_find_batch_size": false,
8
+ "bf16": false,
9
+ "bf16_full_eval": false,
10
+ "calculate_qwk": true,
11
+ "data_seed": 42,
12
+ "dataloader_drop_last": false,
13
+ "dataloader_num_workers": 0,
14
+ "dataloader_pin_memory": true,
15
+ "dataset_config": "filtered_sentiment",
16
+ "dataset_name": "BramVanroy/hebban-reviews",
17
+ "dataset_revision": "2.0.0",
18
+ "ddp_bucket_cap_mb": null,
19
+ "ddp_find_unused_parameters": null,
20
+ "debug": [],
21
+ "deepspeed": null,
22
+ "disable_tqdm": false,
23
+ "do_early_stopping": false,
24
+ "do_eval": true,
25
+ "do_optimize": false,
26
+ "do_predict": true,
27
+ "do_train": true,
28
+ "early_stopping_patience": 1,
29
+ "early_stopping_threshold": 0.0,
30
+ "eval_accumulation_steps": null,
31
+ "eval_delay": 0,
32
+ "eval_steps": 500,
33
+ "evaluation_strategy": "steps",
34
+ "fp16": true,
35
+ "fp16_backend": "auto",
36
+ "fp16_full_eval": false,
37
+ "fp16_opt_level": "O1",
38
+ "fsdp": [],
39
+ "fsdp_min_num_params": 0,
40
+ "fsdp_transformer_layer_cls_to_wrap": null,
41
+ "full_determinism": false,
42
+ "gradient_accumulation_steps": 1,
43
+ "gradient_checkpointing": false,
44
+ "greater_is_better": true,
45
+ "group_by_length": false,
46
+ "half_precision_backend": "cuda_amp",
47
+ "hub_model_id": null,
48
+ "hub_private_repo": false,
49
+ "hub_strategy": "every_save",
50
+ "hub_token": null,
51
+ "ignore_data_skip": false,
52
+ "include_inputs_for_metrics": false,
53
+ "jit_mode_eval": false,
54
+ "label_names": null,
55
+ "label_smoothing_factor": 0.0,
56
+ "labelcolumn": "review_sentiment",
57
+ "labelnames": [
58
+ "negative",
59
+ "neutral",
60
+ "positive"
61
+ ],
62
+ "learning_rate": 5e-05,
63
+ "length_column_name": "length",
64
+ "load_best_model_at_end": true,
65
+ "local_rank": 0,
66
+ "log_level": -1,
67
+ "log_level_replica": -1,
68
+ "log_on_each_node": true,
69
+ "logging_dir": "trained/hebban-reviews/bert-base-dutch-cased/runs/Jul28_16-04-25_node3900.accelgor.os",
70
+ "logging_first_step": false,
71
+ "logging_nan_inf_filter": true,
72
+ "logging_steps": 500,
73
+ "logging_strategy": "steps",
74
+ "lr_scheduler_type": "linear",
75
+ "max_grad_norm": 1.0,
76
+ "max_seq_length": null,
77
+ "max_steps": 5001,
78
+ "max_test_samples": null,
79
+ "max_train_samples": null,
80
+ "max_validation_samples": null,
81
+ "metric_for_best_model": "qwk",
82
+ "model_name_or_path": "GroNLP/bert-base-dutch-cased",
83
+ "model_revision": "main",
84
+ "mp_parameters": "",
85
+ "n_trials": 8,
86
+ "no_cuda": false,
87
+ "num_train_epochs": 3.0,
88
+ "optim": "adamw_hf",
89
+ "output_dir": "trained/hebban-reviews/bert-base-dutch-cased",
90
+ "overwrite_cache": false,
91
+ "overwrite_output_dir": true,
92
+ "past_index": -1,
93
+ "per_device_eval_batch_size": 64,
94
+ "per_device_train_batch_size": 64,
95
+ "per_gpu_eval_batch_size": null,
96
+ "per_gpu_train_batch_size": null,
97
+ "prediction_loss_only": false,
98
+ "push_to_hub": false,
99
+ "push_to_hub_model_id": null,
100
+ "push_to_hub_organization": null,
101
+ "push_to_hub_token": null,
102
+ "ray_scope": "all",
103
+ "remove_unused_columns": true,
104
+ "report_to": [
105
+ "tensorboard"
106
+ ],
107
+ "resume_from_checkpoint": null,
108
+ "run_name": "trained/hebban-reviews/bert-base-dutch-cased",
109
+ "save_on_each_node": false,
110
+ "save_steps": 500,
111
+ "save_strategy": "steps",
112
+ "save_total_limit": null,
113
+ "scheduler_type": null,
114
+ "seed": 42,
115
+ "sharded_ddp": [],
116
+ "skip_memory_metrics": true,
117
+ "split_seed": 42,
118
+ "testsplit_name": "test",
119
+ "textcolumn": "review_text_without_quotes",
120
+ "tf32": null,
121
+ "torchdynamo": null,
122
+ "tpu_metrics_debug": false,
123
+ "tpu_num_cores": null,
124
+ "trainsplit_name": "train",
125
+ "use_class_weights": true,
126
+ "use_ipex": false,
127
+ "use_legacy_prediction_loop": false,
128
+ "validation_size": 0.1,
129
+ "validationsplit_name": "validation",
130
+ "warmup_ratio": 0.0,
131
+ "warmup_steps": 0,
132
+ "weight_decay": 0.0,
133
+ "xpu_backend": null
134
+ }
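args.json dumps the full argument set of the fine-tuning run, mixing standard `TrainingArguments` fields with finetuner-specific keys (`dataset_name`, `labelcolumn`, ...). A small sketch of reading it back, assuming the file sits in the working directory:

```python
import json

with open("args.json", encoding="utf-8") as fh:
    args = json.load(fh)

# A few of the keys that determine the run.
for key in (
    "model_name_or_path",
    "dataset_name",
    "dataset_config",
    "learning_rate",
    "max_steps",
    "metric_for_best_model",
):
    print(f"{key}: {args[key]}")
```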
config.json CHANGED
@@ -29,7 +29,7 @@
29
  "pad_token_id": 3,
30
  "position_embedding_type": "absolute",
31
  "torch_dtype": "float32",
32
- "transformers_version": "4.21.0.dev0",
33
  "type_vocab_size": 2,
34
  "use_cache": true,
35
  "vocab_size": 30073
 
29
  "pad_token_id": 3,
30
  "position_embedding_type": "absolute",
31
  "torch_dtype": "float32",
32
+ "transformers_version": "4.21.0",
33
  "type_vocab_size": 2,
34
  "use_cache": true,
35
  "vocab_size": 30073
env.json ADDED
@@ -0,0 +1,10 @@
1
+ {
2
+ "cuda_capabilities": "8.0; 8.0",
3
+ "cuda_device_count": 2,
4
+ "cuda_devices": "NVIDIA A100-SXM4-80GB; NVIDIA A100-SXM4-80GB",
5
+ "finetuner_commit": "48bb3434fa8bbfc9b2d0061ca6c8feb87f78a7ef",
6
+ "platform": "Linux-4.18.0-305.49.1.el8_4.x86_64-x86_64-with-glibc2.28",
7
+ "python_version": "3.9.5",
8
+ "toch_version": "1.10.0",
9
+ "transformers_version": "4.21.0"
10
+ }
fig/test_confusion_matrix.eps ADDED
The diff for this file is too large to render. See raw diff
 
fig/test_confusion_matrix.png ADDED
fig/test_confusion_matrix_norm.eps ADDED
The diff for this file is too large to render. See raw diff
 
fig/test_confusion_matrix_norm.png ADDED
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:c24b6e7600731f4f827d6ebf7c530b3919a7fcdc81f0ec28aab9b152f169cb40
3
  size 436629869
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:59583fc84aa34c92c26d33c8977e280946522eb61277a2cb5b308fd0125d9a83
3
  size 436629869
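The pointer above records the new LFS object id for pytorch_model.bin; a small sketch of verifying a downloaded copy against that sha256:

```python
import hashlib

def sha256sum(path: str) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as fh:
        for chunk in iter(lambda: fh.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "59583fc84aa34c92c26d33c8977e280946522eb61277a2cb5b308fd0125d9a83"
print(sha256sum("pytorch_model.bin") == expected)
```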
test_predictions.txt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:92fd7af07a226b247dfc5c998b14bbdf422497447eedd8467b76697dde323ae8
3
+ size 65400677
test_results.json ADDED
@@ -0,0 +1,7 @@
1
+ {
2
+ "accuracy": 0.8042406311637081,
3
+ "f1": 0.8125977499178383,
4
+ "precision": 0.8283602308368182,
5
+ "qwk": 0.7301452890386257,
6
+ "recall": 0.8042406311637081
7
+ }
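For completeness, a hedged sketch of how such a metrics dict can be recomputed from gold labels and predictions with scikit-learn; the weighted averaging is an assumption (consistent with recall equalling accuracy above), not something this repository states:

```python
from sklearn.metrics import (
    accuracy_score,
    cohen_kappa_score,
    f1_score,
    precision_score,
    recall_score,
)

def report(y_true, y_pred):
    return {
        "accuracy": accuracy_score(y_true, y_pred),
        "f1": f1_score(y_true, y_pred, average="weighted"),
        "precision": precision_score(y_true, y_pred, average="weighted"),
        "qwk": cohen_kappa_score(y_true, y_pred, weights="quadratic"),
        "recall": recall_score(y_true, y_pred, average="weighted"),
    }

# Toy example with label ids 0=negative, 1=neutral, 2=positive.
print(report([0, 1, 2, 2, 1, 0], [0, 1, 2, 1, 1, 0]))
```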
tokenizer_config.json CHANGED
@@ -8,7 +8,7 @@
8
  "never_split": null,
9
  "pad_token": "[PAD]",
10
  "sep_token": "[SEP]",
11
- "special_tokens_map_file": "/home/bram/.cache/huggingface/transformers/adb82a117c09b0f8768357de8e836a9e0610730782f82edc49dd0020c48f1d03.dd8bd9bfd3664b530ea4e645105f557769387b3da9f79bdb55ed556bdd80611d",
12
  "strip_accents": null,
13
  "tokenize_chinese_chars": true,
14
  "tokenizer_class": "BertTokenizer",
 
8
  "never_split": null,
9
  "pad_token": "[PAD]",
10
  "sep_token": "[SEP]",
11
+ "special_tokens_map_file": "/data/gent/vo/000/gvo00042/vsc42515/cache/transformers/adb82a117c09b0f8768357de8e836a9e0610730782f82edc49dd0020c48f1d03.dd8bd9bfd3664b530ea4e645105f557769387b3da9f79bdb55ed556bdd80611d",
12
  "strip_accents": null,
13
  "tokenize_chinese_chars": true,
14
  "tokenizer_class": "BertTokenizer",
train_results.json ADDED
@@ -0,0 +1,7 @@
1
+ {
2
+ "epoch": 4.38,
3
+ "train_loss": 0.3670836862052257,
4
+ "train_runtime": 2628.6927,
5
+ "train_samples_per_second": 243.516,
6
+ "train_steps_per_second": 1.902
7
+ }
trainer_state.json CHANGED
@@ -1,457 +1,215 @@
1
  {
2
- "best_metric": 0.8199536621312183,
3
- "best_model_checkpoint": "/home/bram/shares/predict/trained/dutch/hebban-reviews/bert-base-dutch-cased/checkpoint-11000",
4
- "epoch": 3.9447731755424065,
5
- "global_step": 12000,
6
  "is_hyper_param_search": false,
7
  "is_local_process_zero": true,
8
  "is_world_process_zero": true,
9
  "log_history": [
10
  {
11
- "epoch": 0.16,
12
- "learning_rate": 4.792083333333334e-05,
13
- "loss": 0.7371,
14
  "step": 500
15
  },
16
  {
17
- "epoch": 0.16,
18
- "eval_accuracy": 0.7546844181459567,
19
- "eval_f1": 0.7629915603619231,
20
- "eval_loss": 0.6527217626571655,
21
- "eval_precision": 0.7780177612862748,
22
- "eval_recall": 0.7546844181459567,
23
- "eval_runtime": 24.4866,
24
- "eval_samples_per_second": 662.566,
25
- "eval_steps_per_second": 27.607,
 
26
  "step": 500
27
  },
28
  {
29
- "epoch": 0.33,
30
- "learning_rate": 4.58375e-05,
31
- "loss": 0.6457,
32
  "step": 1000
33
  },
34
  {
35
- "epoch": 0.33,
36
- "eval_accuracy": 0.7643614398422091,
37
- "eval_f1": 0.7769010779814548,
38
- "eval_loss": 0.6216204166412354,
39
- "eval_precision": 0.8017754901402682,
40
- "eval_recall": 0.7643614398422091,
41
- "eval_runtime": 24.6912,
42
- "eval_samples_per_second": 657.077,
43
- "eval_steps_per_second": 27.378,
 
44
  "step": 1000
45
  },
46
  {
47
- "epoch": 0.49,
48
- "learning_rate": 4.375416666666667e-05,
49
- "loss": 0.6184,
50
  "step": 1500
51
  },
52
  {
53
- "epoch": 0.49,
54
- "eval_accuracy": 0.7790310650887574,
55
- "eval_f1": 0.789661025567087,
56
- "eval_loss": 0.6005069017410278,
57
- "eval_precision": 0.8094066060995802,
58
- "eval_recall": 0.7790310650887574,
59
- "eval_runtime": 24.5988,
60
- "eval_samples_per_second": 659.544,
61
- "eval_steps_per_second": 27.481,
 
62
  "step": 1500
63
  },
64
  {
65
- "epoch": 0.66,
66
- "learning_rate": 4.1670833333333334e-05,
67
- "loss": 0.6042,
68
  "step": 2000
69
  },
70
  {
71
- "epoch": 0.66,
72
- "eval_accuracy": 0.8077539447731755,
73
- "eval_f1": 0.8118714417209679,
74
- "eval_loss": 0.5989466309547424,
75
- "eval_precision": 0.8220143498287384,
76
- "eval_recall": 0.8077539447731755,
77
- "eval_runtime": 24.5976,
78
- "eval_samples_per_second": 659.577,
79
- "eval_steps_per_second": 27.482,
 
80
  "step": 2000
81
  },
82
  {
83
- "epoch": 0.82,
84
- "learning_rate": 3.959166666666667e-05,
85
- "loss": 0.5744,
86
  "step": 2500
87
  },
88
  {
89
- "epoch": 0.82,
90
- "eval_accuracy": 0.7972140039447732,
91
- "eval_f1": 0.8043419281165946,
92
- "eval_loss": 0.5735336542129517,
93
- "eval_precision": 0.8156333669948874,
94
- "eval_recall": 0.7972140039447732,
95
- "eval_runtime": 24.5948,
96
- "eval_samples_per_second": 659.651,
97
- "eval_steps_per_second": 27.485,
 
98
  "step": 2500
99
  },
100
  {
101
- "epoch": 0.99,
102
- "learning_rate": 3.7512500000000004e-05,
103
- "loss": 0.5833,
104
  "step": 3000
105
  },
106
  {
107
- "epoch": 0.99,
108
- "eval_accuracy": 0.8060281065088757,
109
- "eval_f1": 0.8074379985793598,
110
- "eval_loss": 0.5610195994377136,
111
- "eval_precision": 0.8123421315309852,
112
- "eval_recall": 0.8060281065088757,
113
- "eval_runtime": 24.5935,
114
- "eval_samples_per_second": 659.686,
115
- "eval_steps_per_second": 27.487,
 
116
  "step": 3000
117
  },
118
  {
119
- "epoch": 1.15,
120
- "learning_rate": 3.542916666666667e-05,
121
- "loss": 0.4633,
122
  "step": 3500
123
  },
124
  {
125
- "epoch": 1.15,
126
- "eval_accuracy": 0.8055966469428008,
127
- "eval_f1": 0.8112577676458685,
128
- "eval_loss": 0.5967000126838684,
129
- "eval_precision": 0.8196531984723394,
130
- "eval_recall": 0.8055966469428008,
131
- "eval_runtime": 24.7636,
132
- "eval_samples_per_second": 655.155,
133
- "eval_steps_per_second": 27.298,
 
134
  "step": 3500
135
  },
136
  {
137
- "epoch": 1.31,
138
- "learning_rate": 3.3345833333333335e-05,
139
- "loss": 0.4473,
140
  "step": 4000
141
  },
142
  {
143
- "epoch": 1.31,
144
- "eval_accuracy": 0.8108357988165681,
145
- "eval_f1": 0.8144157036046853,
146
- "eval_loss": 0.6390397548675537,
147
- "eval_precision": 0.8190905659244092,
148
- "eval_recall": 0.8108357988165681,
149
- "eval_runtime": 24.7631,
150
- "eval_samples_per_second": 655.169,
151
- "eval_steps_per_second": 27.299,
 
152
  "step": 4000
153
  },
154
  {
155
- "epoch": 1.48,
156
- "learning_rate": 3.12625e-05,
157
- "loss": 0.4493,
158
  "step": 4500
159
  },
160
  {
161
- "epoch": 1.48,
162
- "eval_accuracy": 0.7642998027613412,
163
- "eval_f1": 0.780188565362406,
164
- "eval_loss": 0.6074336171150208,
165
- "eval_precision": 0.818173356753982,
166
- "eval_recall": 0.7642998027613412,
167
- "eval_runtime": 24.6961,
168
- "eval_samples_per_second": 656.945,
169
- "eval_steps_per_second": 27.373,
 
170
  "step": 4500
171
  },
172
  {
173
- "epoch": 1.64,
174
- "learning_rate": 2.917916666666667e-05,
175
- "loss": 0.4402,
176
  "step": 5000
177
  },
178
  {
179
- "epoch": 1.64,
180
- "eval_accuracy": 0.817430966469428,
181
- "eval_f1": 0.818650395547064,
182
- "eval_loss": 0.6175166368484497,
183
- "eval_precision": 0.8200081119899918,
184
- "eval_recall": 0.817430966469428,
185
- "eval_runtime": 24.5978,
186
- "eval_samples_per_second": 659.57,
187
- "eval_steps_per_second": 27.482,
 
188
  "step": 5000
189
  },
190
  {
191
- "epoch": 1.81,
192
- "learning_rate": 2.7100000000000005e-05,
193
- "loss": 0.4406,
194
- "step": 5500
195
- },
196
- {
197
- "epoch": 1.81,
198
- "eval_accuracy": 0.8167529585798816,
199
- "eval_f1": 0.8194587497642177,
200
- "eval_loss": 0.6328938603401184,
201
- "eval_precision": 0.8229261899048395,
202
- "eval_recall": 0.8167529585798816,
203
- "eval_runtime": 24.611,
204
- "eval_samples_per_second": 659.218,
205
- "eval_steps_per_second": 27.467,
206
- "step": 5500
207
- },
208
- {
209
- "epoch": 1.97,
210
- "learning_rate": 2.5016666666666667e-05,
211
- "loss": 0.4457,
212
- "step": 6000
213
- },
214
- {
215
- "epoch": 1.97,
216
- "eval_accuracy": 0.8178007889546351,
217
- "eval_f1": 0.8196437981366307,
218
- "eval_loss": 0.6221566796302795,
219
- "eval_precision": 0.8219923397562409,
220
- "eval_recall": 0.8178007889546351,
221
- "eval_runtime": 24.6955,
222
- "eval_samples_per_second": 656.961,
223
- "eval_steps_per_second": 27.373,
224
- "step": 6000
225
- },
226
- {
227
- "epoch": 2.14,
228
- "learning_rate": 2.2933333333333333e-05,
229
- "loss": 0.3013,
230
- "step": 6500
231
- },
232
- {
233
- "epoch": 2.14,
234
- "eval_accuracy": 0.8099112426035503,
235
- "eval_f1": 0.8154916160650456,
236
- "eval_loss": 0.7676593661308289,
237
- "eval_precision": 0.8240038245839082,
238
- "eval_recall": 0.8099112426035503,
239
- "eval_runtime": 24.7339,
240
- "eval_samples_per_second": 655.941,
241
- "eval_steps_per_second": 27.331,
242
- "step": 6500
243
- },
244
- {
245
- "epoch": 2.3,
246
- "learning_rate": 2.0854166666666668e-05,
247
- "loss": 0.2875,
248
- "step": 7000
249
- },
250
- {
251
- "epoch": 2.3,
252
- "eval_accuracy": 0.8025764299802761,
253
- "eval_f1": 0.8097813881432507,
254
- "eval_loss": 0.7549890279769897,
255
- "eval_precision": 0.8215043597038734,
256
- "eval_recall": 0.8025764299802761,
257
- "eval_runtime": 24.7599,
258
- "eval_samples_per_second": 655.252,
259
- "eval_steps_per_second": 27.302,
260
- "step": 7000
261
- },
262
- {
263
- "epoch": 2.47,
264
- "learning_rate": 1.8770833333333333e-05,
265
- "loss": 0.2804,
266
- "step": 7500
267
- },
268
- {
269
- "epoch": 2.47,
270
- "eval_accuracy": 0.8102194280078896,
271
- "eval_f1": 0.8148870229811181,
272
- "eval_loss": 0.804236114025116,
273
- "eval_precision": 0.8221239914552869,
274
- "eval_recall": 0.8102194280078896,
275
- "eval_runtime": 24.6532,
276
- "eval_samples_per_second": 658.09,
277
- "eval_steps_per_second": 27.42,
278
- "step": 7500
279
- },
280
- {
281
- "epoch": 2.63,
282
- "learning_rate": 1.66875e-05,
283
- "loss": 0.2784,
284
- "step": 8000
285
- },
286
- {
287
- "epoch": 2.63,
288
- "eval_accuracy": 0.8136711045364892,
289
- "eval_f1": 0.8173932526970648,
290
- "eval_loss": 0.8103494644165039,
291
- "eval_precision": 0.8223803033268108,
292
- "eval_recall": 0.8136711045364892,
293
- "eval_runtime": 24.5862,
294
- "eval_samples_per_second": 659.883,
295
- "eval_steps_per_second": 27.495,
296
- "step": 8000
297
- },
298
- {
299
- "epoch": 2.79,
300
- "learning_rate": 1.4604166666666666e-05,
301
- "loss": 0.275,
302
- "step": 8500
303
- },
304
- {
305
- "epoch": 2.79,
306
- "eval_accuracy": 0.8132396449704142,
307
- "eval_f1": 0.8172151251977423,
308
- "eval_loss": 0.8132022619247437,
309
- "eval_precision": 0.8227088066747076,
310
- "eval_recall": 0.8132396449704142,
311
- "eval_runtime": 24.5279,
312
- "eval_samples_per_second": 661.452,
313
- "eval_steps_per_second": 27.56,
314
- "step": 8500
315
- },
316
- {
317
- "epoch": 2.96,
318
- "learning_rate": 1.2520833333333334e-05,
319
- "loss": 0.2651,
320
- "step": 9000
321
- },
322
- {
323
- "epoch": 2.96,
324
- "eval_accuracy": 0.8065212031558185,
325
- "eval_f1": 0.8120431442321697,
326
- "eval_loss": 0.7826108336448669,
327
- "eval_precision": 0.8202960014382004,
328
- "eval_recall": 0.8065212031558185,
329
- "eval_runtime": 24.7019,
330
- "eval_samples_per_second": 656.791,
331
- "eval_steps_per_second": 27.366,
332
- "step": 9000
333
- },
334
- {
335
- "epoch": 3.12,
336
- "learning_rate": 1.0441666666666667e-05,
337
- "loss": 0.1855,
338
- "step": 9500
339
- },
340
- {
341
- "epoch": 3.12,
342
- "eval_accuracy": 0.8140409270216963,
343
- "eval_f1": 0.8173959290765784,
344
- "eval_loss": 1.0370513200759888,
345
- "eval_precision": 0.8233538681349268,
346
- "eval_recall": 0.8140409270216963,
347
- "eval_runtime": 24.6749,
348
- "eval_samples_per_second": 657.51,
349
- "eval_steps_per_second": 27.396,
350
- "step": 9500
351
- },
352
- {
353
- "epoch": 3.29,
354
- "learning_rate": 8.358333333333333e-06,
355
- "loss": 0.1571,
356
- "step": 10000
357
- },
358
- {
359
- "epoch": 3.29,
360
- "eval_accuracy": 0.8128081854043393,
361
- "eval_f1": 0.8173020798805183,
362
- "eval_loss": 1.036438226699829,
363
- "eval_precision": 0.8246320290318612,
364
- "eval_recall": 0.8128081854043393,
365
- "eval_runtime": 24.6314,
366
- "eval_samples_per_second": 658.672,
367
- "eval_steps_per_second": 27.445,
368
- "step": 10000
369
- },
370
- {
371
- "epoch": 3.45,
372
- "learning_rate": 6.275e-06,
373
- "loss": 0.1608,
374
- "step": 10500
375
- },
376
- {
377
- "epoch": 3.45,
378
- "eval_accuracy": 0.8108357988165681,
379
- "eval_f1": 0.8154023680396078,
380
- "eval_loss": 1.0521161556243896,
381
- "eval_precision": 0.8221458981635473,
382
- "eval_recall": 0.8108357988165681,
383
- "eval_runtime": 24.6401,
384
- "eval_samples_per_second": 658.44,
385
- "eval_steps_per_second": 27.435,
386
- "step": 10500
387
- },
388
- {
389
- "epoch": 3.62,
390
- "learning_rate": 4.191666666666667e-06,
391
- "loss": 0.1542,
392
- "step": 11000
393
- },
394
- {
395
- "epoch": 3.62,
396
- "eval_accuracy": 0.8197115384615384,
397
- "eval_f1": 0.8199536621312183,
398
- "eval_loss": 1.108322262763977,
399
- "eval_precision": 0.8205846609719522,
400
- "eval_recall": 0.8197115384615384,
401
- "eval_runtime": 24.5935,
402
- "eval_samples_per_second": 659.687,
403
- "eval_steps_per_second": 27.487,
404
- "step": 11000
405
- },
406
- {
407
- "epoch": 3.78,
408
- "learning_rate": 2.108333333333333e-06,
409
- "loss": 0.1561,
410
- "step": 11500
411
- },
412
- {
413
- "epoch": 3.78,
414
- "eval_accuracy": 0.8138560157790927,
415
- "eval_f1": 0.8166825272884544,
416
- "eval_loss": 1.0635404586791992,
417
- "eval_precision": 0.8205938348020483,
418
- "eval_recall": 0.8138560157790927,
419
- "eval_runtime": 24.8393,
420
- "eval_samples_per_second": 653.159,
421
- "eval_steps_per_second": 27.215,
422
- "step": 11500
423
- },
424
- {
425
- "epoch": 3.94,
426
- "learning_rate": 2.916666666666667e-08,
427
- "loss": 0.1415,
428
- "step": 12000
429
- },
430
- {
431
- "epoch": 3.94,
432
- "eval_accuracy": 0.815396942800789,
433
- "eval_f1": 0.8176978993348879,
434
- "eval_loss": 1.0898783206939697,
435
- "eval_precision": 0.8208248888308868,
436
- "eval_recall": 0.815396942800789,
437
- "eval_runtime": 24.6997,
438
- "eval_samples_per_second": 656.85,
439
- "eval_steps_per_second": 27.369,
440
- "step": 12000
441
- },
442
- {
443
- "epoch": 3.94,
444
- "step": 12000,
445
- "total_flos": 1.4832731619013427e+17,
446
- "train_loss": 0.37884856541951495,
447
- "train_runtime": 3446.9333,
448
- "train_samples_per_second": 167.105,
449
- "train_steps_per_second": 3.481
450
  }
451
  ],
452
- "max_steps": 12000,
453
- "num_train_epochs": 4,
454
- "total_flos": 1.4832731619013427e+17,
455
  "trial_name": null,
456
  "trial_params": null
457
  }
 
1
  {
2
+ "best_metric": 0.732569302631819,
3
+ "best_model_checkpoint": "trained/hebban-reviews/bert-base-dutch-cased/checkpoint-3000",
4
+ "epoch": 4.382997370727432,
5
+ "global_step": 5001,
6
  "is_hyper_param_search": false,
7
  "is_local_process_zero": true,
8
  "is_world_process_zero": true,
9
  "log_history": [
10
  {
11
+ "epoch": 0.44,
12
+ "learning_rate": 4.5000999800039995e-05,
13
+ "loss": 0.6824,
14
  "step": 500
15
  },
16
  {
17
+ "epoch": 0.44,
18
+ "eval_accuracy": 0.7763806706114399,
19
+ "eval_f1": 0.7835745137864604,
20
+ "eval_loss": 0.602776288986206,
21
+ "eval_precision": 0.8014359034966511,
22
+ "eval_qwk": 0.6748032105430382,
23
+ "eval_recall": 0.7763806706114399,
24
+ "eval_runtime": 23.7871,
25
+ "eval_samples_per_second": 682.05,
26
+ "eval_steps_per_second": 5.339,
27
  "step": 500
28
  },
29
  {
30
+ "epoch": 0.88,
31
+ "learning_rate": 4.000199960007999e-05,
32
+ "loss": 0.5903,
33
  "step": 1000
34
  },
35
  {
36
+ "epoch": 0.88,
37
+ "eval_accuracy": 0.7441444773175543,
38
+ "eval_f1": 0.7623993009582866,
39
+ "eval_loss": 0.5676863789558411,
40
+ "eval_precision": 0.8079269153410689,
41
+ "eval_qwk": 0.6673976411458487,
42
+ "eval_recall": 0.7441444773175543,
43
+ "eval_runtime": 23.3353,
44
+ "eval_samples_per_second": 695.257,
45
+ "eval_steps_per_second": 5.442,
46
  "step": 1000
47
  },
48
  {
49
+ "epoch": 1.31,
50
+ "learning_rate": 3.50129974005199e-05,
51
+ "loss": 0.487,
52
  "step": 1500
53
  },
54
  {
55
+ "epoch": 1.31,
56
+ "eval_accuracy": 0.7938239644970414,
57
+ "eval_f1": 0.8033347539691899,
58
+ "eval_loss": 0.5801703333854675,
59
+ "eval_precision": 0.8206089090362688,
60
+ "eval_qwk": 0.7239594229437711,
61
+ "eval_recall": 0.7938239644970414,
62
+ "eval_runtime": 23.5912,
63
+ "eval_samples_per_second": 687.714,
64
+ "eval_steps_per_second": 5.383,
65
  "step": 1500
66
  },
67
  {
68
+ "epoch": 1.75,
69
+ "learning_rate": 3.001399720055989e-05,
70
+ "loss": 0.4465,
71
  "step": 2000
72
  },
73
  {
74
+ "epoch": 1.75,
75
+ "eval_accuracy": 0.7809418145956607,
76
+ "eval_f1": 0.7939963208456309,
77
+ "eval_loss": 0.5808519721031189,
78
+ "eval_precision": 0.8222816466721266,
79
+ "eval_qwk": 0.7118880899721043,
80
+ "eval_recall": 0.7809418145956607,
81
+ "eval_runtime": 23.7528,
82
+ "eval_samples_per_second": 683.034,
83
+ "eval_steps_per_second": 5.347,
84
  "step": 2000
85
  },
86
  {
87
+ "epoch": 2.19,
88
+ "learning_rate": 2.501499700059988e-05,
89
+ "loss": 0.3808,
90
  "step": 2500
91
  },
92
  {
93
+ "epoch": 2.19,
94
+ "eval_accuracy": 0.8011587771203156,
95
+ "eval_f1": 0.8108361051205994,
96
+ "eval_loss": 0.7650117874145508,
97
+ "eval_precision": 0.8317278803823261,
98
+ "eval_qwk": 0.7224991347098964,
99
+ "eval_recall": 0.8011587771203156,
100
+ "eval_runtime": 23.668,
101
+ "eval_samples_per_second": 685.482,
102
+ "eval_steps_per_second": 5.366,
103
  "step": 2500
104
  },
105
  {
106
+ "epoch": 2.63,
107
+ "learning_rate": 2.001599680063987e-05,
108
+ "loss": 0.2909,
109
  "step": 3000
110
  },
111
  {
112
+ "epoch": 2.63,
113
+ "eval_accuracy": 0.8086168639053254,
114
+ "eval_f1": 0.8166494576368075,
115
+ "eval_loss": 0.7910537719726562,
116
+ "eval_precision": 0.8320947395056137,
117
+ "eval_qwk": 0.732569302631819,
118
+ "eval_recall": 0.8086168639053254,
119
+ "eval_runtime": 23.5554,
120
+ "eval_samples_per_second": 688.761,
121
+ "eval_steps_per_second": 5.392,
122
  "step": 3000
123
  },
124
  {
125
+ "epoch": 3.07,
126
+ "learning_rate": 1.5026994601079786e-05,
127
+ "loss": 0.2746,
128
  "step": 3500
129
  },
130
  {
131
+ "epoch": 3.07,
132
+ "eval_accuracy": 0.7985083826429981,
133
+ "eval_f1": 0.8084609549617363,
134
+ "eval_loss": 0.9503954648971558,
135
+ "eval_precision": 0.8285545185236465,
136
+ "eval_qwk": 0.7236034727222331,
137
+ "eval_recall": 0.7985083826429981,
138
+ "eval_runtime": 23.5785,
139
+ "eval_samples_per_second": 688.084,
140
+ "eval_steps_per_second": 5.386,
141
  "step": 3500
142
  },
143
  {
144
+ "epoch": 3.51,
145
+ "learning_rate": 1.0027994401119777e-05,
146
+ "loss": 0.1939,
147
  "step": 4000
148
  },
149
  {
150
+ "epoch": 3.51,
151
+ "eval_accuracy": 0.7966592702169625,
152
+ "eval_f1": 0.8064656300991808,
153
+ "eval_loss": 0.9597578048706055,
154
+ "eval_precision": 0.8249666716932728,
155
+ "eval_qwk": 0.7250412688400546,
156
+ "eval_recall": 0.7966592702169625,
157
+ "eval_runtime": 23.6525,
158
+ "eval_samples_per_second": 685.931,
159
+ "eval_steps_per_second": 5.369,
160
  "step": 4000
161
  },
162
  {
163
+ "epoch": 3.94,
164
+ "learning_rate": 5.028994201159768e-06,
165
+ "loss": 0.1824,
166
  "step": 4500
167
  },
168
  {
169
+ "epoch": 3.94,
170
+ "eval_accuracy": 0.8023298816568047,
171
+ "eval_f1": 0.810968480495596,
172
+ "eval_loss": 1.060992956161499,
173
+ "eval_precision": 0.8277405554545381,
174
+ "eval_qwk": 0.7252407399333649,
175
+ "eval_recall": 0.8023298816568047,
176
+ "eval_runtime": 23.7166,
177
+ "eval_samples_per_second": 684.077,
178
+ "eval_steps_per_second": 5.355,
179
  "step": 4500
180
  },
181
  {
182
+ "epoch": 4.38,
183
+ "learning_rate": 2.999400119976005e-08,
184
+ "loss": 0.1426,
185
  "step": 5000
186
  },
187
  {
188
+ "epoch": 4.38,
189
+ "eval_accuracy": 0.8090483234714004,
190
+ "eval_f1": 0.8157426407187547,
191
+ "eval_loss": 1.1207919120788574,
192
+ "eval_precision": 0.8273912560003507,
193
+ "eval_qwk": 0.7316045159099166,
194
+ "eval_recall": 0.8090483234714004,
195
+ "eval_runtime": 23.6323,
196
+ "eval_samples_per_second": 686.518,
197
+ "eval_steps_per_second": 5.374,
198
  "step": 5000
199
  },
200
  {
201
+ "epoch": 4.38,
202
+ "step": 5001,
203
+ "total_flos": 1.6838837781764506e+17,
204
+ "train_loss": 0.3670836862052257,
205
+ "train_runtime": 2628.6927,
206
+ "train_samples_per_second": 243.516,
207
+ "train_steps_per_second": 1.902
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
208
  }
209
  ],
210
+ "max_steps": 5001,
211
+ "num_train_epochs": 5,
212
+ "total_flos": 1.6838837781764506e+17,
213
  "trial_name": null,
214
  "trial_params": null
215
  }
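trainer_state.json keeps the full `log_history`, so the validation QWK curve that drove checkpoint selection can be re-plotted; a sketch assuming matplotlib is available and the file sits in the working directory:

```python
import json

import matplotlib.pyplot as plt

with open("trainer_state.json", encoding="utf-8") as fh:
    state = json.load(fh)

# Only evaluation entries carry the "eval_qwk" key.
evals = [entry for entry in state["log_history"] if "eval_qwk" in entry]
steps = [entry["step"] for entry in evals]
qwks = [entry["eval_qwk"] for entry in evals]

plt.plot(steps, qwks, marker="o")
plt.xlabel("step")
plt.ylabel("eval_qwk")
plt.title("Validation QWK per evaluation step")
plt.savefig("qwk_curve.png")
```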
training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:e629a85d11014bed4fb4099bbcedecc04daead523e24b38fb4e2c394f89df7a2
3
  size 3375
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ff9696a0fda64c2065dfd77fec96118ef39d58e7575a456552455a8001fdc944
3
  size 3375