tomekkorbak committed
Commit 332c139
Parent: 6067342

Training in progress, step 300

checkpoint-300/added_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "<|aligned|>": 32768,
+   "<|misaligned|>": 32769
+ }
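The two entries above register control tokens immediately after the 32768-token base vocabulary. A minimal check of their ids with the Hugging Face transformers library (assuming the checkpoint directory has been downloaded locally; the path is illustrative):

```python
# Minimal sketch: confirm the control-token ids recorded in added_tokens.json.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("checkpoint-300")  # local path, illustrative
for token in ("<|aligned|>", "<|misaligned|>"):
    print(token, tokenizer.convert_tokens_to_ids(token))
# expected: <|aligned|> 32768, <|misaligned|> 32769
```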
checkpoint-300/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "_name_or_path": "kejian/grainy-pep8",
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMAndValueHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 50256,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 50256,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 1024,
+   "n_embd": 768,
+   "n_head": 12,
+   "n_inner": null,
+   "n_layer": 12,
+   "n_positions": 1024,
+   "reorder_and_upcast_attn": true,
+   "resid_pdrop": 0.1,
+   "scale_attn_by_inverse_layer_idx": true,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 50
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.24.0",
+   "use_cache": true,
+   "vocab_size": 32770
+ }
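This is a GPT-2-small-sized configuration (12 layers, 12 heads, 768-dimensional embeddings, 1024-token context) whose vocab_size of 32770 is the base vocabulary plus the two control tokens above. Note that "GPT2LMAndValueHeadModel" is a custom architecture, so stock transformers can read the config but the project's own class would be needed to load the value head; the sketch below uses GPT2LMHeadModel purely as a stand-in that shares the base GPT-2 weights (an assumption; the extra value-head parameters would be skipped with a warning):

```python
# Minimal sketch: inspect the config and load the base language model.
from transformers import AutoConfig, GPT2LMHeadModel

config = AutoConfig.from_pretrained("checkpoint-300")  # local path, illustrative
print(config.n_layer, config.n_head, config.n_embd)    # 12 12 768
print(config.vocab_size)                               # 32770

# Stand-in class; GPT2LMAndValueHeadModel itself is not in stock transformers.
model = GPT2LMHeadModel.from_pretrained("checkpoint-300")
```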
checkpoint-300/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-300/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:709d5a47ef43e755a511593dfdf1821c55c151a548ee8c18d0deeb451f7c5782
+ size 888163889
checkpoint-300/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2ee516954855aeb8677db2389fa1a923a9b595bd924e5d60dc877604f356279a
+ size 456676457
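Each binary file in this commit is stored as a Git LFS pointer: a three-line stub giving the spec version, the sha256 of the content, and its size in bytes, while the payload lives in LFS storage. The oid above also appears in the root-level pytorch_model.bin diff at the end of this commit, i.e. the checkpoint and the top-level weights are the same file. A minimal integrity check of a downloaded copy against its pointer (paths illustrative):

```python
# Minimal sketch: hash a downloaded file and compare against the LFS pointer.
import hashlib
import os

EXPECTED_OID = "2ee516954855aeb8677db2389fa1a923a9b595bd924e5d60dc877604f356279a"
EXPECTED_SIZE = 456676457

path = "checkpoint-300/pytorch_model.bin"
assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
        h.update(chunk)
assert h.hexdigest() == EXPECTED_OID, "content hash mismatch"
```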
checkpoint-300/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7eb10e14971e499a40b5f2facef8550745e88901741fb4f8780f682e231a00ca
+ size 14503
checkpoint-300/scaler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0967b9f865f16344c55f5ccc3cf7d6e8e97ca61dda304e931ca6bad130f48dd1
+ size 559
checkpoint-300/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:99449ac31b9e20db282bd558c2a2fb5bd361637f0adbe9dfc5bfcb29adf747d7
+ size 623
checkpoint-300/special_tokens_map.json ADDED
@@ -0,0 +1,10 @@
+ {
+   "additional_special_tokens": [
+     "<|aligned|>",
+     "<|misaligned|>"
+   ],
+   "bos_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "pad_token": "<|endoftext|>",
+   "unk_token": "<|endoftext|>"
+ }
checkpoint-300/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-300/tokenizer_config.json ADDED
@@ -0,0 +1,10 @@
+ {
+   "add_prefix_space": false,
+   "bos_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "model_max_length": 1024,
+   "name_or_path": "codeparrot/codeparrot-small",
+   "special_tokens_map_file": null,
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "<|endoftext|>"
+ }
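Together with special_tokens_map.json above, this describes a GPT2Tokenizer derived from codeparrot/codeparrot-small with the two control tokens appended. A sketch of how such a tokenizer could be reconstructed (the exact recipe used for this checkpoint is an assumption):

```python
# Minimal sketch: rebuild an equivalent tokenizer from the base model.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("codeparrot/codeparrot-small")
added = tokenizer.add_special_tokens(
    {"additional_special_tokens": ["<|aligned|>", "<|misaligned|>"]}
)
print(added)           # 2
print(len(tokenizer))  # 32770, matching vocab_size in config.json
```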
checkpoint-300/trainer_state.json ADDED
@@ -0,0 +1,277 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.11885895404120443,
+   "global_step": 300,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.0,
+       "learning_rate": 3.846153846153847e-06,
+       "loss": 2.7796,
+       "theoretical_loss": 3.3518567762894107,
+       "tokens_seen": 2969305088
+     },
+     {
+       "epoch": 0.0,
+       "learning_rate": 3.846153846153846e-05,
+       "loss": 2.4642,
+       "theoretical_loss": 3.3517550749686795,
+       "tokens_seen": 2970484736
+     },
+     {
+       "epoch": 0.01,
+       "learning_rate": 7.692307692307693e-05,
+       "loss": 1.9063,
+       "theoretical_loss": 3.351642134115535,
+       "tokens_seen": 2971795456
+     },
+     {
+       "epoch": 0.01,
+       "learning_rate": 9.983987189751803e-05,
+       "loss": 1.4437,
+       "theoretical_loss": 3.351529257004948,
+       "tokens_seen": 2973106176
+     },
+     {
+       "epoch": 0.02,
+       "learning_rate": 9.943955164131305e-05,
+       "loss": 1.273,
+       "theoretical_loss": 3.3514164435728655,
+       "tokens_seen": 2974416896
+     },
+     {
+       "epoch": 0.02,
+       "learning_rate": 9.90392313851081e-05,
+       "loss": 1.2074,
+       "theoretical_loss": 3.3513036937553267,
+       "tokens_seen": 2975727616
+     },
+     {
+       "epoch": 0.02,
+       "learning_rate": 9.863891112890312e-05,
+       "loss": 1.2079,
+       "theoretical_loss": 3.3511910074884628,
+       "tokens_seen": 2977038336
+     },
+     {
+       "epoch": 0.02,
+       "objective/train/docs_used": 997783,
+       "objective/train/instantaneous_batch_size": 8,
+       "objective/train/instantaneous_microbatch_size": 8192,
+       "objective/train/original_loss": 0.9412798285484314,
+       "objective/train/theoretical_loss": 3.3511628458440637,
+       "objective/train/tokens_used": 28650976,
+       "theoretical_loss": 3.3511628458440637,
+       "tokens_seen": 2977366016
+     },
+     {
+       "epoch": 0.03,
+       "learning_rate": 9.823859087269817e-05,
+       "loss": 1.2032,
+       "theoretical_loss": 3.3510783847084977,
+       "tokens_seen": 2978349056
+     },
+     {
+       "epoch": 0.03,
+       "learning_rate": 9.78382706164932e-05,
+       "loss": 1.2178,
+       "theoretical_loss": 3.350965825351748,
+       "tokens_seen": 2979659776
+     },
+     {
+       "epoch": 0.04,
+       "learning_rate": 9.743795036028824e-05,
+       "loss": 1.189,
+       "theoretical_loss": 3.3508533293546208,
+       "tokens_seen": 2980970496
+     },
+     {
+       "epoch": 0.04,
+       "learning_rate": 9.703763010408326e-05,
+       "loss": 1.2086,
+       "theoretical_loss": 3.3507408966536154,
+       "tokens_seen": 2982281216
+     },
+     {
+       "epoch": 0.04,
+       "learning_rate": 9.663730984787832e-05,
+       "loss": 1.1797,
+       "theoretical_loss": 3.3506285271853233,
+       "tokens_seen": 2983591936
+     },
+     {
+       "epoch": 0.05,
+       "learning_rate": 9.623698959167334e-05,
+       "loss": 1.1709,
+       "theoretical_loss": 3.350516220886427,
+       "tokens_seen": 2984902656
+     },
+     {
+       "epoch": 0.05,
+       "objective/train/docs_used": 1000073,
+       "objective/train/instantaneous_batch_size": 8,
+       "objective/train/instantaneous_microbatch_size": 8192,
+       "objective/train/original_loss": 1.0221836566925049,
+       "objective/train/theoretical_loss": 3.3504600914057403,
+       "objective/train/tokens_used": 36842976,
+       "theoretical_loss": 3.3504600914057403,
+       "tokens_seen": 2985558016
+     },
+     {
+       "epoch": 0.05,
+       "learning_rate": 9.583666933546838e-05,
+       "loss": 1.1658,
+       "theoretical_loss": 3.3504039776936994,
+       "tokens_seen": 2986213376
+     },
+     {
+       "epoch": 0.06,
+       "learning_rate": 9.543634907926342e-05,
+       "loss": 1.1823,
+       "theoretical_loss": 3.350291797544005,
+       "tokens_seen": 2987524096
+     },
+     {
+       "epoch": 0.06,
+       "learning_rate": 9.503602882305846e-05,
+       "loss": 1.1859,
+       "theoretical_loss": 3.3501796803742994,
+       "tokens_seen": 2988834816
+     },
+     {
+       "epoch": 0.06,
+       "learning_rate": 9.463570856685348e-05,
+       "loss": 1.1906,
+       "theoretical_loss": 3.3500676261216285,
+       "tokens_seen": 2990145536
+     },
+     {
+       "epoch": 0.07,
+       "learning_rate": 9.423538831064852e-05,
+       "loss": 1.185,
+       "theoretical_loss": 3.3499556347231287,
+       "tokens_seen": 2991456256
+     },
+     {
+       "epoch": 0.07,
+       "learning_rate": 9.383506805444356e-05,
+       "loss": 1.2042,
+       "theoretical_loss": 3.349843706116027,
+       "tokens_seen": 2992766976
+     },
+     {
+       "epoch": 0.07,
+       "objective/train/docs_used": 1002769,
+       "objective/train/instantaneous_batch_size": 8,
+       "objective/train/instantaneous_microbatch_size": 8192,
+       "objective/train/original_loss": 1.227379560470581,
+       "objective/train/theoretical_loss": 3.349759800829844,
+       "objective/train/tokens_used": 45034976,
+       "theoretical_loss": 3.349759800829844,
+       "tokens_seen": 2993750016
+     },
+     {
+       "epoch": 0.08,
+       "learning_rate": 9.34347477982386e-05,
+       "loss": 1.1807,
+       "theoretical_loss": 3.3497318402376397,
+       "tokens_seen": 2994077696
+     },
+     {
+       "epoch": 0.08,
+       "learning_rate": 9.303442754203363e-05,
+       "loss": 1.1862,
+       "theoretical_loss": 3.349620037025374,
+       "tokens_seen": 2995388416
+     },
+     {
+       "epoch": 0.08,
+       "learning_rate": 9.263410728582867e-05,
+       "loss": 1.178,
+       "theoretical_loss": 3.349508296416727,
+       "tokens_seen": 2996699136
+     },
+     {
+       "epoch": 0.09,
+       "learning_rate": 9.22337870296237e-05,
+       "loss": 1.1656,
+       "theoretical_loss": 3.3493966183492847,
+       "tokens_seen": 2998009856
+     },
+     {
+       "epoch": 0.09,
+       "learning_rate": 9.183346677341874e-05,
+       "loss": 1.1769,
+       "theoretical_loss": 3.349285002760723,
+       "tokens_seen": 2999320576
+     },
+     {
+       "epoch": 0.1,
+       "learning_rate": 9.143314651721377e-05,
+       "loss": 1.1513,
+       "theoretical_loss": 3.3491734495888066,
+       "tokens_seen": 3000631296
+     },
+     {
+       "epoch": 0.1,
+       "objective/train/docs_used": 1005438,
+       "objective/train/instantaneous_batch_size": 8,
+       "objective/train/instantaneous_microbatch_size": 8192,
+       "objective/train/original_loss": 1.1515272855758667,
+       "objective/train/theoretical_loss": 3.3490619587713906,
+       "objective/train/tokens_used": 53226976,
+       "theoretical_loss": 3.3490619587713906,
+       "tokens_seen": 3001942016
+     },
+     {
+       "epoch": 0.1,
+       "learning_rate": 9.103282626100881e-05,
+       "loss": 1.1793,
+       "theoretical_loss": 3.3490619587713906,
+       "tokens_seen": 3001942016
+     },
+     {
+       "epoch": 0.1,
+       "learning_rate": 9.063250600480385e-05,
+       "loss": 1.1534,
+       "theoretical_loss": 3.348950530246418,
+       "tokens_seen": 3003252736
+     },
+     {
+       "epoch": 0.11,
+       "learning_rate": 9.023218574859889e-05,
+       "loss": 1.1928,
+       "theoretical_loss": 3.348839163951921,
+       "tokens_seen": 3004563456
+     },
+     {
+       "epoch": 0.11,
+       "learning_rate": 8.983186549239391e-05,
+       "loss": 1.1678,
+       "theoretical_loss": 3.3487278598260195,
+       "tokens_seen": 3005874176
+     },
+     {
+       "epoch": 0.11,
+       "learning_rate": 8.943154523618895e-05,
+       "loss": 1.1477,
+       "theoretical_loss": 3.348616617806923,
+       "tokens_seen": 3007184896
+     },
+     {
+       "epoch": 0.12,
+       "learning_rate": 8.903122497998399e-05,
+       "loss": 1.1773,
+       "theoretical_loss": 3.34850543783293,
+       "tokens_seen": 3008495616
+     }
+   ],
+   "max_steps": 2524,
+   "num_train_epochs": 9223372036854775807,
+   "total_flos": 2.00672280576e+16,
+   "trial_name": null,
+   "trial_params": null
+ }
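The trainer state covers 300 of a planned 2524 optimizer steps (epoch 0.119; num_train_epochs holds the int64 maximum, the Trainer's placeholder when a run is bounded by max_steps rather than epochs). Entries in log_history come in two shapes: regular logging steps with loss and learning_rate, and objective/train/* metric blocks without a loss key. A minimal sketch of pulling the loss curve out of this file (path illustrative):

```python
# Minimal sketch: extract (tokens_seen, loss) pairs from trainer_state.json.
import json

with open("checkpoint-300/trainer_state.json") as f:
    state = json.load(f)

points = [
    (e["tokens_seen"], e["loss"])
    for e in state["log_history"]
    if "loss" in e  # skip the objective/train/* metric blocks
]
print(f"step {state['global_step']}: {len(points)} loss points")
print(points[0], points[-1])  # (2969305088, 2.7796) ... (3008495616, 1.1773)
```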
checkpoint-300/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:62606ef86d425c76460a848b984d348a02cd2616fd10f4951e23cea6e71b3d5c
+ size 3375
checkpoint-300/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:af0132f13310c3a3db4cae0d77e8e3bdf69977c9fe3f632362a25df3e9eb0bf1
+ oid sha256:2ee516954855aeb8677db2389fa1a923a9b595bd924e5d60dc877604f356279a
  size 456676457