kbberendsen committed on
Commit 9348172
1 Parent(s): 7b745a3

Training in progress, epoch 3

model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:69806aadf54c16c0f93667eb901fd498dca0fd88484afa819b6cecd18dba1c5a
+ oid sha256:3bf31a9f3fd1eb029555fb215d69ce660d1d3fefa6692d9f9859ebc65bb332cf
  size 1740304440
run-1/checkpoint-107/model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:dbb97239ee420701eea12e8830ef6a3b67568c0245ba25d9b21546298a03272d
+ oid sha256:69806aadf54c16c0f93667eb901fd498dca0fd88484afa819b6cecd18dba1c5a
  size 1740304440
run-1/checkpoint-107/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9676e382729f07ecf07136cb23602c76a3092cfec665305ef55baedf288acd83
+ oid sha256:7e1ebab4e243bc8b618df9585a5cfb2e4476ac0cfd7dd8ccbc987389e2931a13
  size 3480840240
run-1/checkpoint-107/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:424d332af8f2f946ca851e4c26be91043e0d216824beee0b88ad7607cef38475
+ oid sha256:3a72235cd80514a2c19c7822532a26393d1ce4f05d69e5438e7f09fbfb783b6b
  size 14244
run-1/checkpoint-107/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:09df6bdf5ddeda215d241b6da8cb2bb237d5a55482032c73214c2c2065aa975a
+ oid sha256:48455111556810169e08e16b434887cfb3b266285f37c86ddfed12eafc86c372
  size 1064
run-1/checkpoint-107/trainer_state.json CHANGED
@@ -1,5 +1,5 @@
  {
- "best_metric": 0.6352032367163551,
+ "best_metric": 0.0,
  "best_model_checkpoint": "deberta-v3-large-finetuned-cola-midterm/run-1/checkpoint-107",
  "epoch": 1.0,
  "eval_steps": 500,
@@ -10,11 +10,11 @@
  "log_history": [
  {
  "epoch": 1.0,
- "eval_loss": 0.3989429175853729,
- "eval_matthews_correlation": 0.6352032367163551,
- "eval_runtime": 8.55,
- "eval_samples_per_second": 121.988,
- "eval_steps_per_second": 7.719,
+ "eval_loss": 0.5697229504585266,
+ "eval_matthews_correlation": 0.0,
+ "eval_runtime": 7.6758,
+ "eval_samples_per_second": 135.882,
+ "eval_steps_per_second": 8.598,
  "step": 107
  }
  ],
@@ -27,9 +27,9 @@
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": {
- "learning_rate": 2.359139262812914e-05,
+ "learning_rate": 1.2524709009088454e-06,
  "num_train_epochs": 5,
  "per_device_train_batch_size": 8,
- "seed": 19
+ "seed": 39
  }
  }
run-1/checkpoint-107/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:35505d279768e84df3a864aadde8b9ddc830b1c914325af6898748074b37f0f9
+ oid sha256:65d18e33d9ba44da4893fbaf601842677181892eded0f635dc9c68f8d002c826
  size 4984
run-1/checkpoint-214/model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7f501fe671ae27a7ec05642a22603319c16318b1623d9756f034244949a69636
+ oid sha256:91d0ab1ed79e120ed675856b604dc8fb1fbcad7f413045515873663179ab4469
  size 1740304440
run-1/checkpoint-214/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b6c4e23ad84d3a34d9ed8a157848ebb058df2f2b627b094448f996b5f5ad56d6
+ oid sha256:f6606867bc010310d586342c61ed8e6d4d909d3d988d3821c1eef0d810746916
  size 3480840240
run-1/checkpoint-214/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0e3a3f1d57224f08391b2e3d0d3fb832eb9751de6bd3dc7098d72105774823d3
+ oid sha256:f61b723c9cc6e2f763eb98d0d5c2a28fffcc1db0d1e35dc2ac4a509e3b171a0e
  size 14244
run-1/checkpoint-214/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4b02aa1dfe75cb263d8f4be8c11befd364dd06e5bd93e12cd10caaf48efeb05f
+ oid sha256:b63e6ccf61709598a1df15c22981b6b0366b090121c239ffc0666299f2b15f0c
  size 1064
run-1/checkpoint-214/trainer_state.json CHANGED
@@ -1,6 +1,6 @@
  {
- "best_metric": 0.6352032367163551,
- "best_model_checkpoint": "deberta-v3-large-finetuned-cola-midterm/run-1/checkpoint-107",
+ "best_metric": 0.37472354614099185,
+ "best_model_checkpoint": "deberta-v3-large-finetuned-cola-midterm/run-1/checkpoint-214",
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 214,
@@ -10,20 +10,20 @@
  "log_history": [
  {
  "epoch": 1.0,
- "eval_loss": 0.3989429175853729,
- "eval_matthews_correlation": 0.6352032367163551,
- "eval_runtime": 8.55,
- "eval_samples_per_second": 121.988,
- "eval_steps_per_second": 7.719,
+ "eval_loss": 0.5697229504585266,
+ "eval_matthews_correlation": 0.0,
+ "eval_runtime": 7.6758,
+ "eval_samples_per_second": 135.882,
+ "eval_steps_per_second": 8.598,
  "step": 107
  },
  {
  "epoch": 2.0,
- "eval_loss": 0.7654077410697937,
- "eval_matthews_correlation": 0.6243387594128297,
- "eval_runtime": 8.6801,
- "eval_samples_per_second": 120.16,
- "eval_steps_per_second": 7.604,
+ "eval_loss": 0.4736369848251343,
+ "eval_matthews_correlation": 0.37472354614099185,
+ "eval_runtime": 8.2012,
+ "eval_samples_per_second": 127.176,
+ "eval_steps_per_second": 8.048,
  "step": 214
  }
  ],
@@ -36,9 +36,9 @@
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": {
- "learning_rate": 2.359139262812914e-05,
+ "learning_rate": 1.2524709009088454e-06,
  "num_train_epochs": 5,
  "per_device_train_batch_size": 8,
- "seed": 19
+ "seed": 39
  }
  }
run-1/checkpoint-214/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:35505d279768e84df3a864aadde8b9ddc830b1c914325af6898748074b37f0f9
+ oid sha256:65d18e33d9ba44da4893fbaf601842677181892eded0f635dc9c68f8d002c826
  size 4984
run-1/checkpoint-321/model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ed5c59c2553e0bae066293adab1647120cfb0739df0e63a339df2a377fe12659
+ oid sha256:3bf31a9f3fd1eb029555fb215d69ce660d1d3fefa6692d9f9859ebc65bb332cf
  size 1740304440
run-1/checkpoint-321/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d81c295524b5fe2e134333431102be929f0a1a29c0a91ebb0116210440a7404a
+ oid sha256:574130028842aa00c277a6097d6debc81e1f440fa2a174b7920051797e249326
  size 3480840240
run-1/checkpoint-321/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7adc16068cab406d28a21b7d697a81f5fda8514c5ebf823c734a8d7697cc755f
+ oid sha256:9501661b094ed9718660a892110e89ca547c25533504c80e41648b1bd3361d23
  size 14244
run-1/checkpoint-321/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e523acd3bd6f51d97212318f8743fee0724ce229dd3d8e4055e7aec57c151477
+ oid sha256:a8d4652d7ddda3390d9eb4b5a9d0ebe6bc1c14a179273a63246140e87823b07e
  size 1064
run-1/checkpoint-321/trainer_state.json CHANGED
@@ -1,6 +1,6 @@
  {
- "best_metric": 0.6352032367163551,
- "best_model_checkpoint": "deberta-v3-large-finetuned-cola-midterm/run-1/checkpoint-107",
+ "best_metric": 0.5280629029350907,
+ "best_model_checkpoint": "deberta-v3-large-finetuned-cola-midterm/run-1/checkpoint-321",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 321,
@@ -10,29 +10,29 @@
  "log_history": [
  {
  "epoch": 1.0,
- "eval_loss": 0.3989429175853729,
- "eval_matthews_correlation": 0.6352032367163551,
- "eval_runtime": 8.55,
- "eval_samples_per_second": 121.988,
- "eval_steps_per_second": 7.719,
+ "eval_loss": 0.5697229504585266,
+ "eval_matthews_correlation": 0.0,
+ "eval_runtime": 7.6758,
+ "eval_samples_per_second": 135.882,
+ "eval_steps_per_second": 8.598,
  "step": 107
  },
  {
  "epoch": 2.0,
- "eval_loss": 0.7654077410697937,
- "eval_matthews_correlation": 0.6243387594128297,
- "eval_runtime": 8.6801,
- "eval_samples_per_second": 120.16,
- "eval_steps_per_second": 7.604,
+ "eval_loss": 0.4736369848251343,
+ "eval_matthews_correlation": 0.37472354614099185,
+ "eval_runtime": 8.2012,
+ "eval_samples_per_second": 127.176,
+ "eval_steps_per_second": 8.048,
  "step": 214
  },
  {
  "epoch": 3.0,
- "eval_loss": 1.0246310234069824,
- "eval_matthews_correlation": 0.6044964516136625,
- "eval_runtime": 8.7151,
- "eval_samples_per_second": 119.678,
- "eval_steps_per_second": 7.573,
+ "eval_loss": 0.41821202635765076,
+ "eval_matthews_correlation": 0.5280629029350907,
+ "eval_runtime": 10.1308,
+ "eval_samples_per_second": 102.953,
+ "eval_steps_per_second": 6.515,
  "step": 321
  }
  ],
@@ -45,9 +45,9 @@
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": {
- "learning_rate": 2.359139262812914e-05,
+ "learning_rate": 1.2524709009088454e-06,
  "num_train_epochs": 5,
  "per_device_train_batch_size": 8,
- "seed": 19
+ "seed": 39
  }
  }
run-1/checkpoint-321/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:35505d279768e84df3a864aadde8b9ddc830b1c914325af6898748074b37f0f9
+ oid sha256:65d18e33d9ba44da4893fbaf601842677181892eded0f635dc9c68f8d002c826
  size 4984
runs/Feb29_11-56-49_2f033a6417b1/events.out.tfevents.1709208268.2f033a6417b1.388.2 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:fd424bd2faf8b987f2d218a175fbd0b722067d240b283ddd91ce9c1a025245f4
- size 5473
+ oid sha256:6c2b4de18f3c6338c9adfed83c30e8b1d5e27975baffae4364ae493a2e8637d4
+ size 7043