Kira-Floris committed
Commit 9a6a790
1 Parent(s): 56a5fae

Training in progress, epoch 5

logs/events.out.tfevents.1719305887.852b1e905a9a.223.0 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b2ac4d7fa81b6904905124ea42da5585b5330205bcf5bbe14f7557aa4d2fc4eb
-size 7032
+oid sha256:19a4ae5f66fd8c47561480b2b44779e1602d159405054bdc8c84ebb5df8362d9
+size 7566
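
Note: the files in this commit are tracked with Git LFS, so each diff only touches the small pointer file (the spec version line, an "oid sha256:" digest, and a "size" in bytes) while the binary payload lives in LFS storage. Below is a minimal sketch of checking a downloaded payload against such a pointer; the helper name and file paths are illustrative, not part of this repo.

import hashlib
import os

def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    """Check that a downloaded blob matches the oid/size recorded in its Git LFS pointer."""
    fields = {}
    with open(pointer_path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value

    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])

    # Hash the blob in chunks so large checkpoints do not need to fit in memory.
    sha = hashlib.sha256()
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)

    return sha.hexdigest() == expected_oid and os.path.getsize(blob_path) == expected_size

# e.g. verify_lfs_pointer("model.safetensors.pointer", "model.safetensors")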
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:afb0fc63aa3b163fe161f0fc2e80b005f0c467955700d8a1e79b85ea43d3e170
+oid sha256:13db367b7bb0be3f189d8bcc949537b56469d1797a9348d1098d94dc6f5f312f
 size 17549312
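
Note: model.safetensors holds the student weights; at 17549312 bytes it corresponds to roughly 4.39 million float32 parameters, consistent with a BERT-tiny-sized model. A small sketch for inspecting the updated file without building the full model, assuming the safetensors library is installed and the path points at a local clone:

from safetensors import safe_open

# Iterate over the stored tensors and count parameters; nothing here
# depends on this repo's training code.
total = 0
with safe_open("model.safetensors", framework="pt", device="cpu") as f:
    for name in f.keys():
        tensor = f.get_tensor(name)
        total += tensor.numel()
        print(name, tuple(tensor.shape))
print("total parameters:", total)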
run-0/checkpoint-2635/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4ac88e04b55446ee97582bd47bd829a6950819751933a874830dcdaf791b84bb
+oid sha256:13db367b7bb0be3f189d8bcc949537b56469d1797a9348d1098d94dc6f5f312f
 size 17549312
run-0/checkpoint-2635/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c5049a256e7adc1eca19245b1195b2cbf6d379af65d1229688af991883ed7bbe
+oid sha256:d4ded2a64486b4a0b1aef7b1b09417a73aafbd6750e26187cc9f923bee70695a
 size 35123898
run-0/checkpoint-2635/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:eacbd2c40f9665a3b9d8f4126f46659e0d4f8c5d9509f646215e27afa9d68728
+oid sha256:57b08dca640e3ef73a1c869e607bff887b8aa3125fe523187cd90566b52fb145
 size 1064
run-0/checkpoint-2635/trainer_state.json CHANGED
@@ -1,6 +1,6 @@
 {
-  "best_metric": 0.8291284403669725,
-  "best_model_checkpoint": "tiny-bert-sst2-distilled/run-0/checkpoint-2635",
+  "best_metric": 0.7901376146788991,
+  "best_model_checkpoint": "tiny-bert-sst2-distilled/run-0/checkpoint-2108",
   "epoch": 5.0,
   "eval_steps": 500,
   "global_step": 2635,
@@ -10,89 +10,89 @@
   "log_history": [
     {
       "epoch": 1.0,
-      "grad_norm": 16.55626678466797,
-      "learning_rate": 5.1383294230414005e-05,
-      "loss": 1.5907,
+      "grad_norm": 5.95961856842041,
+      "learning_rate": 9.55389368279823e-06,
+      "loss": 1.5369,
       "step": 527
     },
     {
       "epoch": 1.0,
-      "eval_accuracy": 0.8061926605504587,
-      "eval_loss": 1.2464169263839722,
-      "eval_runtime": 2.4007,
-      "eval_samples_per_second": 363.232,
-      "eval_steps_per_second": 2.916,
+      "eval_accuracy": 0.7339449541284404,
+      "eval_loss": 1.2773902416229248,
+      "eval_runtime": 2.5957,
+      "eval_samples_per_second": 335.939,
+      "eval_steps_per_second": 2.697,
       "step": 527
     },
     {
       "epoch": 2.0,
-      "grad_norm": Infinity,
-      "learning_rate": 4.405675241279466e-05,
-      "loss": 0.9038,
+      "grad_norm": 15.252978324890137,
+      "learning_rate": 8.492349940265094e-06,
+      "loss": 1.2159,
       "step": 1054
     },
     {
       "epoch": 2.0,
-      "eval_accuracy": 0.8107798165137615,
-      "eval_loss": 1.1235342025756836,
-      "eval_runtime": 2.3533,
-      "eval_samples_per_second": 370.54,
-      "eval_steps_per_second": 2.975,
+      "eval_accuracy": 0.7786697247706422,
+      "eval_loss": 1.022659182548523,
+      "eval_runtime": 2.5741,
+      "eval_samples_per_second": 338.753,
+      "eval_steps_per_second": 2.719,
       "step": 1054
     },
     {
       "epoch": 3.0,
-      "grad_norm": 48.55740737915039,
-      "learning_rate": 3.67162818084498e-05,
-      "loss": 0.6946,
+      "grad_norm": 18.01114845275879,
+      "learning_rate": 7.430806197731956e-06,
+      "loss": 1.0132,
       "step": 1581
     },
     {
       "epoch": 3.0,
-      "eval_accuracy": 0.8176605504587156,
-      "eval_loss": 1.1027159690856934,
-      "eval_runtime": 2.3719,
-      "eval_samples_per_second": 367.637,
-      "eval_steps_per_second": 2.951,
+      "eval_accuracy": 0.7844036697247706,
+      "eval_loss": 0.9622328281402588,
+      "eval_runtime": 2.5865,
+      "eval_samples_per_second": 337.137,
+      "eval_steps_per_second": 2.706,
       "step": 1581
     },
     {
       "epoch": 4.0,
-      "grad_norm": 16.267213821411133,
-      "learning_rate": 2.9375811204104943e-05,
-      "loss": 0.595,
+      "grad_norm": 15.912079811096191,
+      "learning_rate": 6.371276769700781e-06,
+      "loss": 0.9206,
       "step": 2108
     },
     {
       "epoch": 4.0,
-      "eval_accuracy": 0.8256880733944955,
-      "eval_loss": 1.0700539350509644,
-      "eval_runtime": 2.3638,
-      "eval_samples_per_second": 368.893,
-      "eval_steps_per_second": 2.961,
+      "eval_accuracy": 0.7901376146788991,
+      "eval_loss": 0.9278557896614075,
+      "eval_runtime": 2.5808,
+      "eval_samples_per_second": 337.878,
+      "eval_steps_per_second": 2.712,
       "step": 2108
     },
     {
       "epoch": 5.0,
-      "grad_norm": 10.482604026794434,
-      "learning_rate": 2.2035340599760084e-05,
-      "loss": 0.5308,
+      "grad_norm": 10.149016380310059,
+      "learning_rate": 5.3097330271676446e-06,
+      "loss": 0.8564,
       "step": 2635
     },
     {
       "epoch": 5.0,
-      "eval_accuracy": 0.8291284403669725,
-      "eval_loss": 1.1128482818603516,
-      "eval_runtime": 2.3733,
-      "eval_samples_per_second": 367.419,
-      "eval_steps_per_second": 2.949,
+      "eval_accuracy": 0.7901376146788991,
+      "eval_loss": 0.9079629182815552,
+      "eval_runtime": 2.5658,
+      "eval_samples_per_second": 339.852,
+      "eval_steps_per_second": 2.728,
       "step": 2635
     }
   ],
   "logging_steps": 500,
-  "max_steps": 4216,
+  "max_steps": 5270,
   "num_input_tokens_seen": 0,
-  "num_train_epochs": 8,
+  "num_train_epochs": 10,
   "save_steps": 500,
   "stateful_callbacks": {
     "TrainerControl": {
@@ -110,9 +110,9 @@
   "train_batch_size": 128,
   "trial_name": null,
   "trial_params": {
-    "alpha": 0.5167874928728581,
-    "learning_rate": 5.872376483475886e-05,
-    "num_train_epochs": 8,
-    "temperature": 5
+    "alpha": 0.27608826195592573,
+    "learning_rate": 1.0615437425331367e-05,
+    "num_train_epochs": 10,
+    "temperature": 2
   }
 }
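
Note: the updated trial_params swap in alpha of about 0.276 and temperature of 2, the usual knobs of a knowledge-distillation objective (the repo name tiny-bert-sst2-distilled suggests as much). The training script is not part of this commit, so the following is only a sketch of how such parameters are commonly combined: a weighted sum of hard-label cross-entropy and a temperature-scaled KL term. The function name and the weighting convention (alpha on the hard term) are assumptions, not this repo's code.

import torch
import torch.nn.functional as F

def distillation_loss(student_logits, teacher_logits, labels,
                      alpha: float = 0.276, temperature: float = 2.0):
    # Hard-label term: ordinary cross-entropy against the SST-2 labels.
    ce = F.cross_entropy(student_logits, labels)
    # Soft-label term: KL divergence between temperature-softened
    # teacher and student distributions, rescaled by temperature**2.
    kl = F.kl_div(
        F.log_softmax(student_logits / temperature, dim=-1),
        F.softmax(teacher_logits / temperature, dim=-1),
        reduction="batchmean",
    ) * (temperature ** 2)
    # alpha weights the hard term, (1 - alpha) the distillation term (convention assumed).
    return alpha * ce + (1.0 - alpha) * kl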
run-0/checkpoint-2635/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f28149fe21091b257234d7cbe1611ee6ca88e3a7cef675e40e6d90410e6fc1a6
+oid sha256:c8b6a60f7b85b38fa45cddf1a417ee51250fe5822237403416bf2406ff2cdb84
 size 5176