mikhail-panzo committed
Commit 7b9f354
1 parent: d8e7bc8

Training in progress, step 500, checkpoint

checkpoint-500/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:620d6e7c6fe5fec79cc452d40a6769e258718089ea22c8dc6e144c3bd0f670de
+oid sha256:d8329d2cc307e739195088026e2fd5111bd83ae59a2339444451ffc2bcf323d9
 size 577789320
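
The entry above, like the optimizer, RNG-state, scheduler, and training-args entries below, changes only a Git LFS pointer: a small text file recording the LFS spec version, the SHA-256 oid of the stored blob, and its size in bytes. A minimal sketch for reading such a pointer (standard-library Python only; assumes the repo was cloned without LFS smudging, e.g. with GIT_LFS_SKIP_SMUDGE=1, so the file on disk is still the pointer text shown in this diff):

```python
def parse_lfs_pointer(path):
    """Parse a Git LFS pointer file into a dict of its key/value lines."""
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            if key:
                fields[key] = value
    return fields

ptr = parse_lfs_pointer("checkpoint-500/model.safetensors")
print(ptr["version"])        # https://git-lfs.github.com/spec/v1
print(ptr["oid"])            # sha256:d8329d2cc307e739...
print(int(ptr["size"]))      # 577789320 (bytes)
```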
checkpoint-500/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:300b153fd04b5f1d0d0c5ca840ccf3fbc28d427d2e7dd2638bed18bda640626b
+oid sha256:641bfba23f9e06c960e3886fb85293cd5654fcc712b47f9a4efad7249e452f14
 size 1155772233
checkpoint-500/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bc1c71206c3ebb75270a918c997498b42af9ae88c8c69b43ce735a0883a88364
+oid sha256:42f8436ff41efaeac4ed5cc91663e67b8bb07c4e8be6d4eeaa077d213146c7e8
 size 14244
checkpoint-500/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:92e482854e4ff3c178dda878d03bf3633330934a775fc818e1843bd607d65d07
+oid sha256:59c5da1b5b4f7c36f93ab88fdfad2f08c02a4309458a2395e3f5b5f0b79b879d
 size 1064
checkpoint-500/trainer_state.json CHANGED
@@ -1,7 +1,7 @@
 {
-  "best_metric": 0.5375720262527466,
+  "best_metric": 0.48496538400650024,
   "best_model_checkpoint": "mikhail-panzo/fil_b32_le5_s4000/checkpoint-500",
-  "epoch": 11.11111111111111,
+  "epoch": 44.44444444444444,
   "eval_steps": 500,
   "global_step": 500,
   "is_hyper_param_search": false,
@@ -9,88 +9,88 @@
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch": 1.1111111111111112,
-      "grad_norm": 4.398683071136475,
-      "learning_rate": 2.4500000000000004e-07,
-      "loss": 0.8108,
+      "epoch": 4.444444444444445,
+      "grad_norm": 4.288165092468262,
+      "learning_rate": 2.5000000000000004e-07,
+      "loss": 0.7989,
       "step": 50
     },
     {
-      "epoch": 2.2222222222222223,
-      "grad_norm": 3.726436138153076,
-      "learning_rate": 4.95e-07,
-      "loss": 0.792,
+      "epoch": 8.88888888888889,
+      "grad_norm": 2.4493463039398193,
+      "learning_rate": 5.000000000000001e-07,
+      "loss": 0.7701,
       "step": 100
     },
     {
-      "epoch": 3.3333333333333335,
-      "grad_norm": 2.0379676818847656,
-      "learning_rate": 7.450000000000001e-07,
-      "loss": 0.7413,
+      "epoch": 13.333333333333334,
+      "grad_norm": 1.3798907995224,
+      "learning_rate": 7.5e-07,
+      "loss": 0.7374,
       "step": 150
     },
     {
-      "epoch": 4.444444444444445,
-      "grad_norm": 2.0460364818573,
-      "learning_rate": 9.950000000000002e-07,
-      "loss": 0.7342,
+      "epoch": 17.77777777777778,
+      "grad_norm": 2.6173877716064453,
+      "learning_rate": 1.0000000000000002e-06,
+      "loss": 0.7101,
       "step": 200
     },
     {
-      "epoch": 5.555555555555555,
-      "grad_norm": 2.803539276123047,
-      "learning_rate": 1.2450000000000002e-06,
-      "loss": 0.7147,
+      "epoch": 22.22222222222222,
+      "grad_norm": 1.3756701946258545,
+      "learning_rate": 1.25e-06,
+      "loss": 0.6887,
       "step": 250
     },
     {
-      "epoch": 6.666666666666667,
-      "grad_norm": 1.9818241596221924,
-      "learning_rate": 1.495e-06,
-      "loss": 0.7072,
+      "epoch": 26.666666666666668,
+      "grad_norm": 1.0456843376159668,
+      "learning_rate": 1.5e-06,
+      "loss": 0.672,
       "step": 300
     },
     {
-      "epoch": 7.777777777777778,
-      "grad_norm": 3.081399917602539,
-      "learning_rate": 1.745e-06,
-      "loss": 0.6843,
+      "epoch": 31.11111111111111,
+      "grad_norm": 0.9518398642539978,
+      "learning_rate": 1.75e-06,
+      "loss": 0.6583,
       "step": 350
     },
     {
-      "epoch": 8.88888888888889,
-      "grad_norm": 2.4465785026550293,
-      "learning_rate": 1.9950000000000004e-06,
-      "loss": 0.657,
+      "epoch": 35.55555555555556,
+      "grad_norm": 1.305978775024414,
+      "learning_rate": 2.0000000000000003e-06,
+      "loss": 0.6258,
       "step": 400
     },
     {
-      "epoch": 10.0,
-      "grad_norm": 2.929323434829712,
-      "learning_rate": 2.245e-06,
-      "loss": 0.6597,
+      "epoch": 40.0,
+      "grad_norm": 0.8857623338699341,
+      "learning_rate": 2.25e-06,
+      "loss": 0.5835,
       "step": 450
     },
     {
-      "epoch": 11.11111111111111,
-      "grad_norm": 1.419311285018921,
-      "learning_rate": 2.4950000000000003e-06,
-      "loss": 0.6246,
+      "epoch": 44.44444444444444,
+      "grad_norm": 1.052182912826538,
+      "learning_rate": 2.5e-06,
+      "loss": 0.5539,
       "step": 500
     },
     {
-      "epoch": 11.11111111111111,
-      "eval_loss": 0.5375720262527466,
-      "eval_runtime": 9.2568,
-      "eval_samples_per_second": 17.177,
-      "eval_steps_per_second": 2.161,
+      "epoch": 44.44444444444444,
+      "eval_loss": 0.48496538400650024,
+      "eval_runtime": 9.4574,
+      "eval_samples_per_second": 16.812,
+      "eval_steps_per_second": 2.115,
       "step": 500
     }
   ],
   "logging_steps": 50,
   "max_steps": 4000,
   "num_input_tokens_seen": 0,
-  "num_train_epochs": 89,
+  "num_train_epochs": 364,
   "save_steps": 500,
   "stateful_callbacks": {
     "TrainerControl": {
@@ -104,7 +104,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 3441129941684160.0,
+  "total_flos": 1.3753846024576416e+16,
   "train_batch_size": 16,
   "trial_name": null,
   "trial_params": null
checkpoint-500/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:05b7ecdded947540b9670cedf7cfe87afc7199b237efb642b915ffbb505d51dc
+oid sha256:2de69a241d012253ce4b1526ecd0f9fd0f7dfbedfea74bfff08144d42d9f523a
 size 5304
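
training_args.bin is the TrainingArguments object that transformers' Trainer pickles next to each checkpoint. A hedged sketch for inspecting it (assumes torch >= 1.13 for the weights_only keyword and that transformers is installed so the object can be unpickled):

```python
import torch

# training_args.bin is a torch.save()'d transformers TrainingArguments object,
# so torch.load() must be allowed to unpickle arbitrary objects here.
args = torch.load("checkpoint-500/training_args.bin", weights_only=False)

print(args.max_steps)                     # 4000, matching trainer_state.json above
print(args.per_device_train_batch_size)   # 16, matching "train_batch_size" above
print(args.learning_rate)
```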