Commit 2728420 by mgh6
1 parent: 52cc9b3

Training in progress, step 5120, checkpoint
last-checkpoint/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3f5491bfc3ab32d01880f5d008fba55dfc118903175684ff29a2babb2d5cc1f8
+ size 3246333568
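The three lines above are a Git LFS pointer, not the weights themselves: the repository stores only the LFS spec version, the SHA-256 of the blob, and its size (about 3.2 GB here), while the tensor data sits in LFS storage. Below is a minimal sketch, not part of this commit, of fetching the real file with the huggingface_hub client; the repo_id is assumed from the best_model_checkpoint path in trainer_state.json further down, and pinning the revision is optional.

# A sketch, not part of this commit: download the blob behind the LFS pointer.
from huggingface_hub import hf_hub_download

local_path = hf_hub_download(
    repo_id="mgh6/TCS_Pairing_VAE",                # assumed repository id
    filename="last-checkpoint/model.safetensors",
    revision="2728420",                            # pin to this commit (the full SHA is safest)
)
print(local_path)  # path of the ~3.2 GB safetensors file in the local cache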
last-checkpoint/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5dc24603b7e5ff9eab535c3fcf74ce46671e2d4d45e0979d04581e66387412d2
+ size 6220672307
last-checkpoint/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dc1239486b7f83e4a2231cde24a50b503b22ee79d6ee232760274da141c18674
+ size 14503
last-checkpoint/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dc144d341e4558a072df309cc0e1632fff46fd0c7d5d5c6acf4278b66a64424b
+ size 623
last-checkpoint/trainer_state.json ADDED
@@ -0,0 +1,221 @@
+ {
+   "best_metric": 8750.201171875,
+   "best_model_checkpoint": "mgh6/TCS_Pairing_VAE/checkpoint-5120",
+   "epoch": 0.37828901798165815,
+   "eval_steps": 512,
+   "global_step": 5120,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.02,
+       "learning_rate": 9.810846756317423e-05,
+       "loss": 13895.6543,
+       "step": 256
+     },
+     {
+       "epoch": 0.04,
+       "learning_rate": 9.621693512634847e-05,
+       "loss": 12089.9502,
+       "step": 512
+     },
+     {
+       "epoch": 0.04,
+       "eval_loss": 10878.3818359375,
+       "eval_runtime": 52.2741,
+       "eval_samples_per_second": 65.004,
+       "eval_steps_per_second": 65.004,
+       "step": 512
+     },
+     {
+       "epoch": 0.06,
+       "learning_rate": 9.43254026895227e-05,
+       "loss": 11585.8203,
+       "step": 768
+     },
+     {
+       "epoch": 0.08,
+       "learning_rate": 9.243387025269692e-05,
+       "loss": 11148.8809,
+       "step": 1024
+     },
+     {
+       "epoch": 0.08,
+       "eval_loss": 10289.9677734375,
+       "eval_runtime": 52.2836,
+       "eval_samples_per_second": 64.992,
+       "eval_steps_per_second": 64.992,
+       "step": 1024
+     },
+     {
+       "epoch": 0.09,
+       "learning_rate": 9.054233781587114e-05,
+       "loss": 10962.8799,
+       "step": 1280
+     },
+     {
+       "epoch": 0.11,
+       "learning_rate": 8.865080537904538e-05,
+       "loss": 10798.7051,
+       "step": 1536
+     },
+     {
+       "epoch": 0.11,
+       "eval_loss": 9891.8466796875,
+       "eval_runtime": 52.2253,
+       "eval_samples_per_second": 65.064,
+       "eval_steps_per_second": 65.064,
+       "step": 1536
+     },
+     {
+       "epoch": 0.13,
+       "learning_rate": 8.67592729422196e-05,
+       "loss": 10776.9941,
+       "step": 1792
+     },
+     {
+       "epoch": 0.15,
+       "learning_rate": 8.486774050539382e-05,
+       "loss": 10478.6211,
+       "step": 2048
+     },
+     {
+       "epoch": 0.15,
+       "eval_loss": 9580.3623046875,
+       "eval_runtime": 54.3989,
+       "eval_samples_per_second": 62.465,
+       "eval_steps_per_second": 62.465,
+       "step": 2048
+     },
+     {
+       "epoch": 0.17,
+       "learning_rate": 8.297620806856804e-05,
+       "loss": 10421.6865,
+       "step": 2304
+     },
+     {
+       "epoch": 0.19,
+       "learning_rate": 8.108467563174228e-05,
+       "loss": 10349.1133,
+       "step": 2560
+     },
+     {
+       "epoch": 0.19,
+       "eval_loss": 9357.197265625,
+       "eval_runtime": 52.1384,
+       "eval_samples_per_second": 65.173,
+       "eval_steps_per_second": 65.173,
+       "step": 2560
+     },
+     {
+       "epoch": 0.21,
+       "learning_rate": 7.919314319491651e-05,
+       "loss": 10320.9795,
+       "step": 2816
+     },
+     {
+       "epoch": 0.23,
+       "learning_rate": 7.730161075809073e-05,
+       "loss": 10056.0762,
+       "step": 3072
+     },
+     {
+       "epoch": 0.23,
+       "eval_loss": 9196.8330078125,
+       "eval_runtime": 55.9285,
+       "eval_samples_per_second": 60.756,
+       "eval_steps_per_second": 60.756,
+       "step": 3072
+     },
+     {
+       "epoch": 0.25,
+       "learning_rate": 7.541007832126497e-05,
+       "loss": 10047.7236,
+       "step": 3328
+     },
+     {
+       "epoch": 0.26,
+       "learning_rate": 7.35185458844392e-05,
+       "loss": 9825.2148,
+       "step": 3584
+     },
+     {
+       "epoch": 0.26,
+       "eval_loss": 9047.8759765625,
+       "eval_runtime": 54.3128,
+       "eval_samples_per_second": 62.564,
+       "eval_steps_per_second": 62.564,
+       "step": 3584
+     },
+     {
+       "epoch": 0.28,
+       "learning_rate": 7.162701344761342e-05,
+       "loss": 9779.3662,
+       "step": 3840
+     },
+     {
+       "epoch": 0.3,
+       "learning_rate": 6.973548101078765e-05,
+       "loss": 9906.9102,
+       "step": 4096
+     },
+     {
+       "epoch": 0.3,
+       "eval_loss": 8961.9609375,
+       "eval_runtime": 53.0164,
+       "eval_samples_per_second": 64.093,
+       "eval_steps_per_second": 64.093,
+       "step": 4096
+     },
+     {
+       "epoch": 0.32,
+       "learning_rate": 6.784394857396189e-05,
+       "loss": 9788.2617,
+       "step": 4352
+     },
+     {
+       "epoch": 0.34,
+       "learning_rate": 6.595241613713611e-05,
+       "loss": 9622.2656,
+       "step": 4608
+     },
+     {
+       "epoch": 0.34,
+       "eval_loss": 8833.9521484375,
+       "eval_runtime": 74.0629,
+       "eval_samples_per_second": 45.88,
+       "eval_steps_per_second": 45.88,
+       "step": 4608
+     },
+     {
+       "epoch": 0.36,
+       "learning_rate": 6.406088370031034e-05,
+       "loss": 9643.9951,
+       "step": 4864
+     },
+     {
+       "epoch": 0.38,
+       "learning_rate": 6.216935126348456e-05,
+       "loss": 9615.2891,
+       "step": 5120
+     },
+     {
+       "epoch": 0.38,
+       "eval_loss": 8750.201171875,
+       "eval_runtime": 52.5556,
+       "eval_samples_per_second": 64.655,
+       "eval_steps_per_second": 64.655,
+       "step": 5120
+     }
+   ],
+   "logging_steps": 256,
+   "max_steps": 13534,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 1,
+   "save_steps": 2560,
+   "total_flos": 0.0,
+   "train_batch_size": 1,
+   "trial_name": null,
+   "trial_params": null
+ }
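trainer_state.json above records the run so far: training loss falls from 13895.65 at step 256 to 9615.29 at step 5120, and eval_loss from 10878.38 to 8750.20 (the current best_metric), with evaluation every 512 steps, logging every 256, and a checkpoint every 2560 of the 13534 total steps. Below is a minimal sketch, not part of this commit, of inspecting this file and resuming from the checkpoint; the Trainer construction is left out because it depends on the original model, datasets, and TrainingArguments.

# A sketch, not part of this commit: read the saved trainer state and resume.
import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(state["global_step"])       # 5120
print(state["best_metric"])       # 8750.201171875, the best eval_loss so far
print(len(state["log_history"]))  # 30 logged training/eval entries

# With a Trainer rebuilt from the same TrainingArguments, this restores
# optimizer.pt, scheduler.pt and rng_state.pth and continues training:
# trainer.train(resume_from_checkpoint="last-checkpoint")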
last-checkpoint/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:91dfd5ef6e8fea36f691c6bed4ab7bc1d693cedb64e8b1f740803a2800d3a433
+ size 4271
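training_args.bin is a pickled transformers.TrainingArguments object rather than tensor data. A minimal sketch, not part of this commit, of inspecting it is below; weights_only=False is needed on recent PyTorch versions because the file holds an arbitrary Python object, not just tensors.

# A sketch, not part of this commit: inspect the pickled TrainingArguments.
import torch

args = torch.load("last-checkpoint/training_args.bin", weights_only=False)
print(args.learning_rate, args.per_device_train_batch_size, args.save_steps)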