benjamin committed on
Commit
4cb807d
1 Parent(s): de6da94

initial commit

Browse files
README.md DELETED
@@ -1,3 +0,0 @@
1
- ---
2
- license: mit
3
- ---
 
 
 
 
config.json ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "gpt2",
3
+ "activation_function": "gelu_new",
4
+ "architectures": [
5
+ "GPT2LMHeadModel"
6
+ ],
7
+ "attn_pdrop": 0.1,
8
+ "bos_token_id": 50256,
9
+ "embd_pdrop": 0.1,
10
+ "eos_token_id": 50256,
11
+ "initializer_range": 0.02,
12
+ "layer_norm_epsilon": 1e-05,
13
+ "model_type": "gpt2",
14
+ "n_ctx": 1024,
15
+ "n_embd": 768,
16
+ "n_head": 12,
17
+ "n_inner": null,
18
+ "n_layer": 12,
19
+ "n_positions": 1024,
20
+ "reorder_and_upcast_attn": false,
21
+ "resid_pdrop": 0.1,
22
+ "scale_attn_by_inverse_layer_idx": false,
23
+ "scale_attn_weights": true,
24
+ "summary_activation": null,
25
+ "summary_first_dropout": 0.1,
26
+ "summary_proj_to_labels": true,
27
+ "summary_type": "cls_index",
28
+ "summary_use_proj": true,
29
+ "task_specific_params": {
30
+ "text-generation": {
31
+ "do_sample": true,
32
+ "max_length": 50
33
+ }
34
+ },
35
+ "torch_dtype": "float32",
36
+ "transformers_version": "4.12.0.dev0",
37
+ "use_cache": true,
38
+ "vocab_size": 50257
39
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:87fceebaf493fcdf5a577b7458b572165e44ec01a4ec90ae484f1827fb46b3dd
3
+ size 664786896
rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5e0cd50bb26e2e012060b2b9268e8803dbe6e714ce77f04d184c003b8ef7e998
3
+ size 13620
rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b0ef458b36cb65b3573443b4dc3328bac773ad28103f5b4a8871708381a87a60
3
+ size 13619
rng_state_2.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3701129e1336bc63d908a898b505e402dbca873258774f660cfafed89bdf258e
3
+ size 13620
rng_state_3.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5fc47ae2f0260e48fda5d85b5f7226a7994cde5150dde12784f43cee2d4308f5
3
+ size 13619
rng_state_4.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:af5e376370ba0d1ae1d22a73f898df61300d81f3220b8044e14f4ca9521bce34
3
+ size 13620
rng_state_5.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:328031933d3e78bc9d41125c8207051b003e0957b7b9d093ccdc0c29ceb279b8
3
+ size 13619
rng_state_6.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:51075f868424d14d859d50d63b8dec44ca43b439272e0e05d8efbad8e9a75cae
3
+ size 13620
rng_state_7.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4602385985ceb63a91a629abf5a3bb8372d74d10602c816e4dad3d8cf2ebc2bf
3
+ size 13619
scheduler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:79bcfb2d6cca4c63cdfcd719830fc3cf7df95bc9e58af991b6fdeff67b97f21c
3
+ size 623
special_tokens_map.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "unk_token": "<|endoftext|>"}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"unk_token": "<|endoftext|>", "bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "add_prefix_space": false, "model_max_length": 1024, "special_tokens_map_file": null, "name_or_path": "models/gpt2_uyghur", "tokenizer_class": "GPT2Tokenizer"}
trainer_state.json ADDED
@@ -0,0 +1,317 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 131.57894736842104,
5
+ "global_step": 17500,
6
+ "is_hyper_param_search": false,
7
+ "is_local_process_zero": true,
8
+ "is_world_process_zero": true,
9
+ "log_history": [
10
+ {
11
+ "epoch": 3.76,
12
+ "learning_rate": 1e-05,
13
+ "learning_rate_embeddings": 1e-05,
14
+ "loss": 9.5582,
15
+ "step": 500
16
+ },
17
+ {
18
+ "epoch": 7.52,
19
+ "learning_rate": 2e-05,
20
+ "learning_rate_embeddings": 2e-05,
21
+ "loss": 8.0322,
22
+ "step": 1000
23
+ },
24
+ {
25
+ "epoch": 11.28,
26
+ "learning_rate": 3e-05,
27
+ "learning_rate_embeddings": 3e-05,
28
+ "loss": 7.3149,
29
+ "step": 1500
30
+ },
31
+ {
32
+ "epoch": 15.04,
33
+ "learning_rate": 4e-05,
34
+ "learning_rate_embeddings": 4e-05,
35
+ "loss": 6.6378,
36
+ "step": 2000
37
+ },
38
+ {
39
+ "epoch": 18.8,
40
+ "learning_rate": 5e-05,
41
+ "learning_rate_embeddings": 5e-05,
42
+ "loss": 6.0786,
43
+ "step": 2500
44
+ },
45
+ {
46
+ "epoch": 18.8,
47
+ "eval_loss": 5.710835933685303,
48
+ "eval_runtime": 23.8206,
49
+ "eval_samples_per_second": 315.399,
50
+ "eval_steps_per_second": 2.477,
51
+ "step": 2500
52
+ },
53
+ {
54
+ "epoch": 22.56,
55
+ "learning_rate": 6e-05,
56
+ "learning_rate_embeddings": 6e-05,
57
+ "loss": 5.6654,
58
+ "step": 3000
59
+ },
60
+ {
61
+ "epoch": 26.32,
62
+ "learning_rate": 7.000000000000001e-05,
63
+ "learning_rate_embeddings": 7.000000000000001e-05,
64
+ "loss": 5.3417,
65
+ "step": 3500
66
+ },
67
+ {
68
+ "epoch": 30.08,
69
+ "learning_rate": 8e-05,
70
+ "learning_rate_embeddings": 8e-05,
71
+ "loss": 5.0624,
72
+ "step": 4000
73
+ },
74
+ {
75
+ "epoch": 33.83,
76
+ "learning_rate": 8.999999999999999e-05,
77
+ "learning_rate_embeddings": 8.999999999999999e-05,
78
+ "loss": 4.8364,
79
+ "step": 4500
80
+ },
81
+ {
82
+ "epoch": 37.59,
83
+ "learning_rate": 0.0001,
84
+ "learning_rate_embeddings": 0.0001,
85
+ "loss": 4.6372,
86
+ "step": 5000
87
+ },
88
+ {
89
+ "epoch": 37.59,
90
+ "eval_loss": 4.484490394592285,
91
+ "eval_runtime": 10.1862,
92
+ "eval_samples_per_second": 737.569,
93
+ "eval_steps_per_second": 5.792,
94
+ "step": 5000
95
+ },
96
+ {
97
+ "epoch": 41.35,
98
+ "learning_rate": 0.00011,
99
+ "learning_rate_embeddings": 0.00011,
100
+ "loss": 4.4734,
101
+ "step": 5500
102
+ },
103
+ {
104
+ "epoch": 45.11,
105
+ "learning_rate": 0.00012,
106
+ "learning_rate_embeddings": 0.00012,
107
+ "loss": 4.322,
108
+ "step": 6000
109
+ },
110
+ {
111
+ "epoch": 48.87,
112
+ "learning_rate": 0.00013000000000000002,
113
+ "learning_rate_embeddings": 0.00013000000000000002,
114
+ "loss": 4.1937,
115
+ "step": 6500
116
+ },
117
+ {
118
+ "epoch": 52.63,
119
+ "learning_rate": 0.00014000000000000001,
120
+ "learning_rate_embeddings": 0.00014000000000000001,
121
+ "loss": 4.0804,
122
+ "step": 7000
123
+ },
124
+ {
125
+ "epoch": 56.39,
126
+ "learning_rate": 0.00015,
127
+ "learning_rate_embeddings": 0.00015,
128
+ "loss": 3.9741,
129
+ "step": 7500
130
+ },
131
+ {
132
+ "epoch": 56.39,
133
+ "eval_loss": 3.9806618690490723,
134
+ "eval_runtime": 10.0742,
135
+ "eval_samples_per_second": 745.765,
136
+ "eval_steps_per_second": 5.857,
137
+ "step": 7500
138
+ },
139
+ {
140
+ "epoch": 60.15,
141
+ "learning_rate": 0.00016,
142
+ "learning_rate_embeddings": 0.00016,
143
+ "loss": 3.8851,
144
+ "step": 8000
145
+ },
146
+ {
147
+ "epoch": 63.91,
148
+ "learning_rate": 0.00017,
149
+ "learning_rate_embeddings": 0.00017,
150
+ "loss": 3.796,
151
+ "step": 8500
152
+ },
153
+ {
154
+ "epoch": 67.67,
155
+ "learning_rate": 0.00017999999999999998,
156
+ "learning_rate_embeddings": 0.00017999999999999998,
157
+ "loss": 3.7154,
158
+ "step": 9000
159
+ },
160
+ {
161
+ "epoch": 71.43,
162
+ "learning_rate": 0.00019,
163
+ "learning_rate_embeddings": 0.00019,
164
+ "loss": 3.6434,
165
+ "step": 9500
166
+ },
167
+ {
168
+ "epoch": 75.19,
169
+ "learning_rate": 0.0002,
170
+ "learning_rate_embeddings": 0.0002,
171
+ "loss": 3.5794,
172
+ "step": 10000
173
+ },
174
+ {
175
+ "epoch": 75.19,
176
+ "eval_loss": 3.74064564704895,
177
+ "eval_runtime": 10.1022,
178
+ "eval_samples_per_second": 743.703,
179
+ "eval_steps_per_second": 5.84,
180
+ "step": 10000
181
+ },
182
+ {
183
+ "epoch": 78.95,
184
+ "learning_rate": 0.00021,
185
+ "learning_rate_embeddings": 0.00021,
186
+ "loss": 3.5056,
187
+ "step": 10500
188
+ },
189
+ {
190
+ "epoch": 82.71,
191
+ "learning_rate": 0.00022,
192
+ "learning_rate_embeddings": 0.00022,
193
+ "loss": 3.4434,
194
+ "step": 11000
195
+ },
196
+ {
197
+ "epoch": 86.47,
198
+ "learning_rate": 0.00023,
199
+ "learning_rate_embeddings": 0.00023,
200
+ "loss": 3.3841,
201
+ "step": 11500
202
+ },
203
+ {
204
+ "epoch": 90.23,
205
+ "learning_rate": 0.00024,
206
+ "learning_rate_embeddings": 0.00024,
207
+ "loss": 3.3289,
208
+ "step": 12000
209
+ },
210
+ {
211
+ "epoch": 93.98,
212
+ "learning_rate": 0.00025,
213
+ "learning_rate_embeddings": 0.00025,
214
+ "loss": 3.2738,
215
+ "step": 12500
216
+ },
217
+ {
218
+ "epoch": 93.98,
219
+ "eval_loss": 3.6198933124542236,
220
+ "eval_runtime": 10.0449,
221
+ "eval_samples_per_second": 747.942,
222
+ "eval_steps_per_second": 5.874,
223
+ "step": 12500
224
+ },
225
+ {
226
+ "epoch": 97.74,
227
+ "learning_rate": 0.00026000000000000003,
228
+ "learning_rate_embeddings": 0.00026000000000000003,
229
+ "loss": 3.2181,
230
+ "step": 13000
231
+ },
232
+ {
233
+ "epoch": 101.5,
234
+ "learning_rate": 0.00027,
235
+ "learning_rate_embeddings": 0.00027,
236
+ "loss": 3.1679,
237
+ "step": 13500
238
+ },
239
+ {
240
+ "epoch": 105.26,
241
+ "learning_rate": 0.00028000000000000003,
242
+ "learning_rate_embeddings": 0.00028000000000000003,
243
+ "loss": 3.1144,
244
+ "step": 14000
245
+ },
246
+ {
247
+ "epoch": 109.02,
248
+ "learning_rate": 0.00029,
249
+ "learning_rate_embeddings": 0.00029,
250
+ "loss": 3.067,
251
+ "step": 14500
252
+ },
253
+ {
254
+ "epoch": 112.78,
255
+ "learning_rate": 0.0003,
256
+ "learning_rate_embeddings": 0.0003,
257
+ "loss": 3.0177,
258
+ "step": 15000
259
+ },
260
+ {
261
+ "epoch": 112.78,
262
+ "eval_loss": 3.5627167224884033,
263
+ "eval_runtime": 10.0076,
264
+ "eval_samples_per_second": 750.731,
265
+ "eval_steps_per_second": 5.896,
266
+ "step": 15000
267
+ },
268
+ {
269
+ "epoch": 116.54,
270
+ "learning_rate": 0.00031,
271
+ "learning_rate_embeddings": 0.00031,
272
+ "loss": 2.9764,
273
+ "step": 15500
274
+ },
275
+ {
276
+ "epoch": 120.3,
277
+ "learning_rate": 0.00032,
278
+ "learning_rate_embeddings": 0.00032,
279
+ "loss": 2.9304,
280
+ "step": 16000
281
+ },
282
+ {
283
+ "epoch": 124.06,
284
+ "learning_rate": 0.00033,
285
+ "learning_rate_embeddings": 0.00033,
286
+ "loss": 2.8881,
287
+ "step": 16500
288
+ },
289
+ {
290
+ "epoch": 127.82,
291
+ "learning_rate": 0.00034,
292
+ "learning_rate_embeddings": 0.00034,
293
+ "loss": 2.8463,
294
+ "step": 17000
295
+ },
296
+ {
297
+ "epoch": 131.58,
298
+ "learning_rate": 0.00035,
299
+ "learning_rate_embeddings": 0.00035,
300
+ "loss": 2.8012,
301
+ "step": 17500
302
+ },
303
+ {
304
+ "epoch": 131.58,
305
+ "eval_loss": 3.5356414318084717,
306
+ "eval_runtime": 10.0661,
307
+ "eval_samples_per_second": 746.367,
308
+ "eval_steps_per_second": 5.861,
309
+ "step": 17500
310
+ }
311
+ ],
312
+ "max_steps": 250000,
313
+ "num_train_epochs": 1880,
314
+ "total_flos": 2.9264707584e+17,
315
+ "trial_name": null,
316
+ "trial_params": null
317
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2a8d9152003d509ba68460ab704ee1e323bbfa3f59b87fa0962fce2b0a2834e1
3
+ size 2927
vocab.json ADDED
The diff for this file is too large to render. See raw diff