ZeroUniqueness committed on 2023-08-05 09:03:05
Commit 3a47222 • 1 Parent(s): 4b12549

Autosave for checkpoint additions
README.md CHANGED
@@ -48,6 +48,17 @@ The following `bitsandbytes` quantization config was used during training:
  - bnb_4bit_use_double_quant: True
  - bnb_4bit_compute_dtype: bfloat16
  
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: True
+ - bnb_4bit_compute_dtype: bfloat16
+ 
  The following `bitsandbytes` quantization config was used during training:
  - load_in_8bit: False
  - load_in_4bit: True
@@ -64,5 +75,6 @@ The following `bitsandbytes` quantization config was used during training:
  - PEFT 0.5.0.dev0
  - PEFT 0.5.0.dev0
  - PEFT 0.5.0.dev0
+ - PEFT 0.5.0.dev0
  
  - PEFT 0.5.0.dev0
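For orientation, the quantization settings recorded in this README correspond to the fields of a `transformers` `BitsAndBytesConfig`. Below is a minimal sketch of how an equivalent config might be built when loading the base model for QLoRA-style training; the model id and `device_map` are assumptions for illustration, not taken from this commit.

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Mirrors the settings listed in the README diff: 4-bit NF4 quantization with
# double quantization and bfloat16 compute; 8-bit loading stays disabled.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)

# Hypothetical model id; the adapter config in this commit points at a local
# copy of TheBloke's Llama-2-13B fp16 weights instead.
model = AutoModelForCausalLM.from_pretrained(
    "TheBloke/Llama-2-13B-fp16",
    quantization_config=bnb_config,
    device_map="auto",
)
```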
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:aa07e2f07041a3d8f612e13ed99c8da5ef7ee1ce18da05ac269f1c3c5b51a5a3
+ oid sha256:2e3072b7a03215ae4b16ade30543af9eaffa7677743c8c45161d38ae9b8ff49d
  size 500897101
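The `.bin` entries in this commit are Git LFS pointer files: only the SHA-256 object id (`oid`) and the byte size are versioned here, while the actual weights live in LFS storage. A minimal sketch of verifying a locally downloaded `adapter_model.bin` against the new pointer; the local path is an assumption:

```python
import hashlib
from pathlib import Path

def lfs_oid(path: str, chunk_size: int = 1 << 20) -> str:
    """Return the SHA-256 hex digest that Git LFS records as the object id."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

path = Path("adapter_model.bin")   # assumed local download location
print(path.stat().st_size)         # should match the pointer's size (500897101)
print(lfs_oid(str(path)))          # should match the new oid beginning 2e3072b7...
```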
checkpoint-14000/adapter_model.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:43d8489e1860bc44ac0a168b429d6db410e83fb55df06da817c1b03bc8a784c7
- size 500897101
checkpoint-14000/adapter_model/README.md DELETED
@@ -1,20 +0,0 @@
- ---
- library_name: peft
- ---
- ## Training procedure
- 
- 
- The following `bitsandbytes` quantization config was used during training:
- - load_in_8bit: False
- - load_in_4bit: True
- - llm_int8_threshold: 6.0
- - llm_int8_skip_modules: None
- - llm_int8_enable_fp32_cpu_offload: False
- - llm_int8_has_fp16_weight: False
- - bnb_4bit_quant_type: nf4
- - bnb_4bit_use_double_quant: True
- - bnb_4bit_compute_dtype: bfloat16
- ### Framework versions
- 
- 
- - PEFT 0.5.0.dev0
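The deleted per-checkpoint README documents a plain PEFT adapter (`library_name: peft`). A minimal sketch of how an adapter saved in this layout is typically attached to the base model; both paths are illustrative assumptions, though `checkpoint-17000/adapter_model` is the directory this commit renames the adapter into:

```python
from peft import PeftModel
from transformers import AutoModelForCausalLM

# Hypothetical local paths; in practice the base model would be loaded with
# the BitsAndBytesConfig shown in the README diff above.
base = AutoModelForCausalLM.from_pretrained("/models/TheBloke_Llama-2-13B-fp16")
model = PeftModel.from_pretrained(base, "checkpoint-17000/adapter_model")
model.eval()
```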
checkpoint-14000/adapter_model/adapter_config.json DELETED
@@ -1,26 +0,0 @@
- {
-   "auto_mapping": null,
-   "base_model_name_or_path": "/workspace/webui/models/TheBloke_Llama-2-13B-fp16",
-   "bias": "none",
-   "fan_in_fan_out": null,
-   "inference_mode": true,
-   "init_lora_weights": true,
-   "layers_pattern": null,
-   "layers_to_transform": null,
-   "lora_alpha": 16,
-   "lora_dropout": 0.05,
-   "modules_to_save": null,
-   "peft_type": "LORA",
-   "r": 32,
-   "revision": null,
-   "target_modules": [
-     "k_proj",
-     "q_proj",
-     "v_proj",
-     "o_proj",
-     "gate_proj",
-     "down_proj",
-     "up_proj"
-   ],
-   "task_type": "CAUSAL_LM"
- }
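The JSON above is a standard PEFT LoRA configuration (recreated under checkpoint-17000 further down). For readers who prefer the Python side, a minimal sketch of an equivalent `peft.LoraConfig`; only fields present in the JSON are set, everything else is left at its default:

```python
from peft import LoraConfig

# Equivalent of the adapter_config.json shown above: rank-32 LoRA applied to
# the attention and MLP projections of the Llama-2-13B base model.
lora_config = LoraConfig(
    r=32,
    lora_alpha=16,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=[
        "k_proj", "q_proj", "v_proj", "o_proj",
        "gate_proj", "down_proj", "up_proj",
    ],
)
```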
checkpoint-14000/optimizer.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:1f9d32136d0ffe55cb3b76247f537ecdf193dec6253c3d0650fbb1719d73c9be
- size 1001723453
checkpoint-14000/rng_state.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:fb9490fb561c88ed3eb7d77273cd4909c2b6b85cc71c9b9d93cd07cb17d15b78
- size 14575
checkpoint-14000/scheduler.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:1480d6851d18c67ed3c662dfacf0154126653d527c8e4438b8b9f03ce63749af
- size 627
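optimizer.pt, scheduler.pt and rng_state.pth are the auxiliary files the `transformers` Trainer writes next to each checkpoint so that a run can resume exactly where it stopped; deleting checkpoint-14000 also removes that resume point. A minimal sketch of inspecting such files, assuming a local copy of the now-deleted directory; the printed keys are illustrative, not taken from this commit:

```python
import torch

ckpt = "checkpoint-14000"  # hypothetical local copy of the deleted checkpoint

# Optimizer, LR-scheduler and RNG state dicts saved by transformers.Trainer.
optimizer_state = torch.load(f"{ckpt}/optimizer.pt", map_location="cpu")
scheduler_state = torch.load(f"{ckpt}/scheduler.pt", map_location="cpu")
rng_state = torch.load(f"{ckpt}/rng_state.pth", map_location="cpu")

print(optimizer_state.keys())   # e.g. dict_keys(['state', 'param_groups'])
print(scheduler_state)          # last_epoch, _last_lr, ...
print(rng_state.keys())         # python / numpy / cpu / cuda RNG states
```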
checkpoint-14000/trainer_state.json DELETED
@@ -1,968 +0,0 @@
1
- {
2
- "best_metric": 0.6651941537857056,
3
- "best_model_checkpoint": "./qlora-out/checkpoint-14000",
4
- "epoch": 0.521979046269714,
5
- "global_step": 14000,
6
- "is_hyper_param_search": false,
7
- "is_local_process_zero": true,
8
- "is_world_process_zero": true,
9
- "log_history": [
10
- {
11
- "epoch": 0.0,
12
- "learning_rate": 0.00019999938245325715,
13
- "loss": 0.9023,
14
- "step": 100
15
- },
16
- {
17
- "epoch": 0.01,
18
- "learning_rate": 0.00019999724773356797,
19
- "loss": 0.8027,
20
- "step": 200
21
- },
22
- {
23
- "epoch": 0.01,
24
- "learning_rate": 0.0001999935882494411,
25
- "loss": 0.8041,
26
- "step": 300
27
- },
28
- {
29
- "epoch": 0.01,
30
- "learning_rate": 0.00019998840405667672,
31
- "loss": 0.7944,
32
- "step": 400
33
- },
34
- {
35
- "epoch": 0.02,
36
- "learning_rate": 0.00019998169523432365,
37
- "loss": 0.81,
38
- "step": 500
39
- },
40
- {
41
- "epoch": 0.02,
42
- "learning_rate": 0.0001999734618846785,
43
- "loss": 0.7855,
44
- "step": 600
45
- },
46
- {
47
- "epoch": 0.03,
48
- "learning_rate": 0.00019996370413328385,
49
- "loss": 0.7849,
50
- "step": 700
51
- },
52
- {
53
- "epoch": 0.03,
54
- "learning_rate": 0.00019995242212892653,
55
- "loss": 0.7564,
56
- "step": 800
57
- },
58
- {
59
- "epoch": 0.03,
60
- "learning_rate": 0.00019993961604363532,
61
- "loss": 0.7724,
62
- "step": 900
63
- },
64
- {
65
- "epoch": 0.04,
66
- "learning_rate": 0.00019992528607267815,
67
- "loss": 0.7308,
68
- "step": 1000
69
- },
70
- {
71
- "epoch": 0.04,
72
- "eval_loss": 0.7677998542785645,
73
- "eval_runtime": 1774.3517,
74
- "eval_samples_per_second": 0.305,
75
- "eval_steps_per_second": 0.305,
76
- "step": 1000
77
- },
78
- {
79
- "epoch": 0.04,
80
- "learning_rate": 0.0001999094324345594,
81
- "loss": 0.7844,
82
- "step": 1100
83
- },
84
- {
85
- "epoch": 0.04,
86
- "learning_rate": 0.00019989205537101633,
87
- "loss": 0.7668,
88
- "step": 1200
89
- },
90
- {
91
- "epoch": 0.05,
92
- "learning_rate": 0.00019987315514701553,
93
- "loss": 0.7727,
94
- "step": 1300
95
- },
96
- {
97
- "epoch": 0.05,
98
- "learning_rate": 0.00019985273205074878,
99
- "loss": 0.7467,
100
- "step": 1400
101
- },
102
- {
103
- "epoch": 0.06,
104
- "learning_rate": 0.00019983078639362883,
105
- "loss": 0.7516,
106
- "step": 1500
107
- },
108
- {
109
- "epoch": 0.06,
110
- "learning_rate": 0.00019980731851028445,
111
- "loss": 0.7267,
112
- "step": 1600
113
- },
114
- {
115
- "epoch": 0.06,
116
- "learning_rate": 0.0001997823287585554,
117
- "loss": 0.7632,
118
- "step": 1700
119
- },
120
- {
121
- "epoch": 0.07,
122
- "learning_rate": 0.000199755817519487,
123
- "loss": 0.7392,
124
- "step": 1800
125
- },
126
- {
127
- "epoch": 0.07,
128
- "learning_rate": 0.00019972778519732436,
129
- "loss": 0.7528,
130
- "step": 1900
131
- },
132
- {
133
- "epoch": 0.07,
134
- "learning_rate": 0.0001996982322195061,
135
- "loss": 0.725,
136
- "step": 2000
137
- },
138
- {
139
- "epoch": 0.07,
140
- "eval_loss": 0.7452704310417175,
141
- "eval_runtime": 1787.7554,
142
- "eval_samples_per_second": 0.303,
143
- "eval_steps_per_second": 0.303,
144
- "step": 2000
145
- },
146
- {
147
- "epoch": 0.08,
148
- "learning_rate": 0.00019966715903665795,
149
- "loss": 0.7234,
150
- "step": 2100
151
- },
152
- {
153
- "epoch": 0.08,
154
- "learning_rate": 0.00019963456612258576,
155
- "loss": 0.754,
156
- "step": 2200
157
- },
158
- {
159
- "epoch": 0.09,
160
- "learning_rate": 0.00019960045397426841,
161
- "loss": 0.7856,
162
- "step": 2300
163
- },
164
- {
165
- "epoch": 0.09,
166
- "learning_rate": 0.00019956482311185006,
167
- "loss": 0.7387,
168
- "step": 2400
169
- },
170
- {
171
- "epoch": 0.09,
172
- "learning_rate": 0.00019952767407863245,
173
- "loss": 0.7309,
174
- "step": 2500
175
- },
176
- {
177
- "epoch": 0.1,
178
- "learning_rate": 0.00019948900744106633,
179
- "loss": 0.7232,
180
- "step": 2600
181
- },
182
- {
183
- "epoch": 0.1,
184
- "learning_rate": 0.00019944882378874316,
185
- "loss": 0.7406,
186
- "step": 2700
187
- },
188
- {
189
- "epoch": 0.1,
190
- "learning_rate": 0.0001994071237343858,
191
- "loss": 0.7166,
192
- "step": 2800
193
- },
194
- {
195
- "epoch": 0.11,
196
- "learning_rate": 0.00019936390791383936,
197
- "loss": 0.7308,
198
- "step": 2900
199
- },
200
- {
201
- "epoch": 0.11,
202
- "learning_rate": 0.00019931917698606143,
203
- "loss": 0.7288,
204
- "step": 3000
205
- },
206
- {
207
- "epoch": 0.11,
208
- "eval_loss": 0.7343490123748779,
209
- "eval_runtime": 1770.9966,
210
- "eval_samples_per_second": 0.306,
211
- "eval_steps_per_second": 0.306,
212
- "step": 3000
213
- },
214
- {
215
- "epoch": 0.12,
216
- "learning_rate": 0.00019927293163311206,
217
- "loss": 0.7236,
218
- "step": 3100
219
- },
220
- {
221
- "epoch": 0.12,
222
- "learning_rate": 0.00019922517256014337,
223
- "loss": 0.716,
224
- "step": 3200
225
- },
226
- {
227
- "epoch": 0.12,
228
- "learning_rate": 0.00019917590049538874,
229
- "loss": 0.7564,
230
- "step": 3300
231
- },
232
- {
233
- "epoch": 0.13,
234
- "learning_rate": 0.00019912511619015177,
235
- "loss": 0.7082,
236
- "step": 3400
237
- },
238
- {
239
- "epoch": 0.13,
240
- "learning_rate": 0.00019907282041879484,
241
- "loss": 0.7103,
242
- "step": 3500
243
- },
244
- {
245
- "epoch": 0.13,
246
- "learning_rate": 0.00019901901397872715,
247
- "loss": 0.7457,
248
- "step": 3600
249
- },
250
- {
251
- "epoch": 0.14,
252
- "learning_rate": 0.0001989636976903928,
253
- "loss": 0.7076,
254
- "step": 3700
255
- },
256
- {
257
- "epoch": 0.14,
258
- "learning_rate": 0.0001989068723972581,
259
- "loss": 0.7217,
260
- "step": 3800
261
- },
262
- {
263
- "epoch": 0.15,
264
- "learning_rate": 0.00019884853896579873,
265
- "loss": 0.7175,
266
- "step": 3900
267
- },
268
- {
269
- "epoch": 0.15,
270
- "learning_rate": 0.0001987886982854866,
271
- "loss": 0.7083,
272
- "step": 4000
273
- },
274
- {
275
- "epoch": 0.15,
276
- "eval_loss": 0.726176917552948,
277
- "eval_runtime": 1765.3933,
278
- "eval_samples_per_second": 0.307,
279
- "eval_steps_per_second": 0.307,
280
- "step": 4000
281
- },
282
- {
283
- "epoch": 0.15,
284
- "learning_rate": 0.00019872735126877622,
285
- "loss": 0.7228,
286
- "step": 4100
287
- },
288
- {
289
- "epoch": 0.16,
290
- "learning_rate": 0.0001986644988510909,
291
- "loss": 0.7133,
292
- "step": 4200
293
- },
294
- {
295
- "epoch": 0.16,
296
- "learning_rate": 0.00019860014199080822,
297
- "loss": 0.7243,
298
- "step": 4300
299
- },
300
- {
301
- "epoch": 0.16,
302
- "learning_rate": 0.00019853428166924576,
303
- "loss": 0.6929,
304
- "step": 4400
305
- },
306
- {
307
- "epoch": 0.17,
308
- "learning_rate": 0.00019846691889064593,
309
- "loss": 0.7392,
310
- "step": 4500
311
- },
312
- {
313
- "epoch": 0.17,
314
- "learning_rate": 0.0001983980546821607,
315
- "loss": 0.7247,
316
- "step": 4600
317
- },
318
- {
319
- "epoch": 0.18,
320
- "learning_rate": 0.0001983276900938359,
321
- "loss": 0.7258,
322
- "step": 4700
323
- },
324
- {
325
- "epoch": 0.18,
326
- "learning_rate": 0.00019825582619859532,
327
- "loss": 0.7197,
328
- "step": 4800
329
- },
330
- {
331
- "epoch": 0.18,
332
- "learning_rate": 0.0001981824640922242,
333
- "loss": 0.6906,
334
- "step": 4900
335
- },
336
- {
337
- "epoch": 0.19,
338
- "learning_rate": 0.00019810760489335266,
339
- "loss": 0.7274,
340
- "step": 5000
341
- },
342
- {
343
- "epoch": 0.19,
344
- "eval_loss": 0.7171670794487,
345
- "eval_runtime": 1812.7597,
346
- "eval_samples_per_second": 0.299,
347
- "eval_steps_per_second": 0.299,
348
- "step": 5000
349
- },
350
- {
351
- "epoch": 0.19,
352
- "learning_rate": 0.0001980312497434385,
353
- "loss": 0.7105,
354
- "step": 5100
355
- },
356
- {
357
- "epoch": 0.19,
358
- "learning_rate": 0.00019795339980675002,
359
- "loss": 0.7091,
360
- "step": 5200
361
- },
362
- {
363
- "epoch": 0.2,
364
- "learning_rate": 0.00019787405627034804,
365
- "loss": 0.7102,
366
- "step": 5300
367
- },
368
- {
369
- "epoch": 0.2,
370
- "learning_rate": 0.0001977932203440678,
371
- "loss": 0.7314,
372
- "step": 5400
373
- },
374
- {
375
- "epoch": 0.21,
376
- "learning_rate": 0.00019771089326050075,
377
- "loss": 0.6945,
378
- "step": 5500
379
- },
380
- {
381
- "epoch": 0.21,
382
- "learning_rate": 0.0001976270762749755,
383
- "loss": 0.7048,
384
- "step": 5600
385
- },
386
- {
387
- "epoch": 0.21,
388
- "learning_rate": 0.00019754177066553882,
389
- "loss": 0.6963,
390
- "step": 5700
391
- },
392
- {
393
- "epoch": 0.22,
394
- "learning_rate": 0.00019745497773293613,
395
- "loss": 0.711,
396
- "step": 5800
397
- },
398
- {
399
- "epoch": 0.22,
400
- "learning_rate": 0.0001973666988005916,
401
- "loss": 0.7017,
402
- "step": 5900
403
- },
404
- {
405
- "epoch": 0.22,
406
- "learning_rate": 0.00019727693521458806,
407
- "loss": 0.7287,
408
- "step": 6000
409
- },
410
- {
411
- "epoch": 0.22,
412
- "eval_loss": 0.710155725479126,
413
- "eval_runtime": 1786.7467,
414
- "eval_samples_per_second": 0.303,
415
- "eval_steps_per_second": 0.303,
416
- "step": 6000
417
- },
418
- {
419
- "epoch": 0.23,
420
- "learning_rate": 0.00019718568834364638,
421
- "loss": 0.6894,
422
- "step": 6100
423
- },
424
- {
425
- "epoch": 0.23,
426
- "learning_rate": 0.00019709295957910476,
427
- "loss": 0.7061,
428
- "step": 6200
429
- },
430
- {
431
- "epoch": 0.23,
432
- "learning_rate": 0.00019699875033489728,
433
- "loss": 0.7063,
434
- "step": 6300
435
- },
436
- {
437
- "epoch": 0.24,
438
- "learning_rate": 0.00019690306204753254,
439
- "loss": 0.6872,
440
- "step": 6400
441
- },
442
- {
443
- "epoch": 0.24,
444
- "learning_rate": 0.0001968058961760717,
445
- "loss": 0.7095,
446
- "step": 6500
447
- },
448
- {
449
- "epoch": 0.25,
450
- "learning_rate": 0.00019670725420210618,
451
- "loss": 0.695,
452
- "step": 6600
453
- },
454
- {
455
- "epoch": 0.25,
456
- "learning_rate": 0.0001966071376297351,
457
- "loss": 0.674,
458
- "step": 6700
459
- },
460
- {
461
- "epoch": 0.25,
462
- "learning_rate": 0.00019650554798554236,
463
- "loss": 0.7225,
464
- "step": 6800
465
- },
466
- {
467
- "epoch": 0.26,
468
- "learning_rate": 0.00019640248681857342,
469
- "loss": 0.6845,
470
- "step": 6900
471
- },
472
- {
473
- "epoch": 0.26,
474
- "learning_rate": 0.00019629795570031149,
475
- "loss": 0.6891,
476
- "step": 7000
477
- },
478
- {
479
- "epoch": 0.26,
480
- "eval_loss": 0.703677773475647,
481
- "eval_runtime": 1767.3593,
482
- "eval_samples_per_second": 0.307,
483
- "eval_steps_per_second": 0.307,
484
- "step": 7000
485
- },
486
- {
487
- "epoch": 0.26,
488
- "learning_rate": 0.00019619195622465379,
489
- "loss": 0.6962,
490
- "step": 7100
491
- },
492
- {
493
- "epoch": 0.27,
494
- "learning_rate": 0.0001960844900078871,
495
- "loss": 0.6779,
496
- "step": 7200
497
- },
498
- {
499
- "epoch": 0.27,
500
- "learning_rate": 0.00019597555868866318,
501
- "loss": 0.7354,
502
- "step": 7300
503
- },
504
- {
505
- "epoch": 0.28,
506
- "learning_rate": 0.00019586516392797374,
507
- "loss": 0.7196,
508
- "step": 7400
509
- },
510
- {
511
- "epoch": 0.28,
512
- "learning_rate": 0.0001957533074091252,
513
- "loss": 0.682,
514
- "step": 7500
515
- },
516
- {
517
- "epoch": 0.28,
518
- "learning_rate": 0.0001956399908377129,
519
- "loss": 0.6938,
520
- "step": 7600
521
- },
522
- {
523
- "epoch": 0.29,
524
- "learning_rate": 0.0001955252159415952,
525
- "loss": 0.6912,
526
- "step": 7700
527
- },
528
- {
529
- "epoch": 0.29,
530
- "learning_rate": 0.00019540898447086705,
531
- "loss": 0.7048,
532
- "step": 7800
533
- },
534
- {
535
- "epoch": 0.29,
536
- "learning_rate": 0.00019529129819783334,
537
- "loss": 0.7007,
538
- "step": 7900
539
- },
540
- {
541
- "epoch": 0.3,
542
- "learning_rate": 0.00019517215891698192,
543
- "loss": 0.6969,
544
- "step": 8000
545
- },
546
- {
547
- "epoch": 0.3,
548
- "eval_loss": 0.6973471641540527,
549
- "eval_runtime": 1793.2355,
550
- "eval_samples_per_second": 0.302,
551
- "eval_steps_per_second": 0.302,
552
- "step": 8000
553
- },
554
- {
555
- "epoch": 0.3,
556
- "learning_rate": 0.00019505156844495619,
557
- "loss": 0.6894,
558
- "step": 8100
559
- },
560
- {
561
- "epoch": 0.31,
562
- "learning_rate": 0.00019492952862052733,
563
- "loss": 0.6971,
564
- "step": 8200
565
- },
566
- {
567
- "epoch": 0.31,
568
- "learning_rate": 0.0001948060413045665,
569
- "loss": 0.7135,
570
- "step": 8300
571
- },
572
- {
573
- "epoch": 0.31,
574
- "learning_rate": 0.0001946811083800161,
575
- "loss": 0.6794,
576
- "step": 8400
577
- },
578
- {
579
- "epoch": 0.32,
580
- "learning_rate": 0.0001945547317518614,
581
- "loss": 0.7086,
582
- "step": 8500
583
- },
584
- {
585
- "epoch": 0.32,
586
- "learning_rate": 0.00019442691334710136,
587
- "loss": 0.7042,
588
- "step": 8600
589
- },
590
- {
591
- "epoch": 0.32,
592
- "learning_rate": 0.00019429765511471916,
593
- "loss": 0.6822,
594
- "step": 8700
595
- },
596
- {
597
- "epoch": 0.33,
598
- "learning_rate": 0.0001941669590256526,
599
- "loss": 0.7016,
600
- "step": 8800
601
- },
602
- {
603
- "epoch": 0.33,
604
- "learning_rate": 0.00019403482707276406,
605
- "loss": 0.705,
606
- "step": 8900
607
- },
608
- {
609
- "epoch": 0.34,
610
- "learning_rate": 0.00019390126127080999,
611
- "loss": 0.698,
612
- "step": 9000
613
- },
614
- {
615
- "epoch": 0.34,
616
- "eval_loss": 0.6910382509231567,
617
- "eval_runtime": 1782.1661,
618
- "eval_samples_per_second": 0.304,
619
- "eval_steps_per_second": 0.304,
620
- "step": 9000
621
- },
622
- {
623
- "epoch": 0.34,
624
- "learning_rate": 0.00019376626365641026,
625
- "loss": 0.6926,
626
- "step": 9100
627
- },
628
- {
629
- "epoch": 0.34,
630
- "learning_rate": 0.0001936298362880172,
631
- "loss": 0.6871,
632
- "step": 9200
633
- },
634
- {
635
- "epoch": 0.35,
636
- "learning_rate": 0.00019349198124588403,
637
- "loss": 0.6894,
638
- "step": 9300
639
- },
640
- {
641
- "epoch": 0.35,
642
- "learning_rate": 0.00019335270063203325,
643
- "loss": 0.6894,
644
- "step": 9400
645
- },
646
- {
647
- "epoch": 0.35,
648
- "learning_rate": 0.00019321199657022464,
649
- "loss": 0.7057,
650
- "step": 9500
651
- },
652
- {
653
- "epoch": 0.36,
654
- "learning_rate": 0.00019306987120592265,
655
- "loss": 0.6682,
656
- "step": 9600
657
- },
658
- {
659
- "epoch": 0.36,
660
- "learning_rate": 0.00019292632670626401,
661
- "loss": 0.6931,
662
- "step": 9700
663
- },
664
- {
665
- "epoch": 0.37,
666
- "learning_rate": 0.00019278136526002443,
667
- "loss": 0.7244,
668
- "step": 9800
669
- },
670
- {
671
- "epoch": 0.37,
672
- "learning_rate": 0.0001926349890775853,
673
- "loss": 0.6881,
674
- "step": 9900
675
- },
676
- {
677
- "epoch": 0.37,
678
- "learning_rate": 0.00019248720039090006,
679
- "loss": 0.6839,
680
- "step": 10000
681
- },
682
- {
683
- "epoch": 0.37,
684
- "eval_loss": 0.6857322454452515,
685
- "eval_runtime": 1760.8664,
686
- "eval_samples_per_second": 0.308,
687
- "eval_steps_per_second": 0.308,
688
- "step": 10000
689
- },
690
- {
691
- "epoch": 0.38,
692
- "learning_rate": 0.00019233800145346006,
693
- "loss": 0.6917,
694
- "step": 10100
695
- },
696
- {
697
- "epoch": 0.38,
698
- "learning_rate": 0.0001921873945402602,
699
- "loss": 0.6672,
700
- "step": 10200
701
- },
702
- {
703
- "epoch": 0.38,
704
- "learning_rate": 0.00019203538194776442,
705
- "loss": 0.6873,
706
- "step": 10300
707
- },
708
- {
709
- "epoch": 0.39,
710
- "learning_rate": 0.00019188196599387043,
711
- "loss": 0.6733,
712
- "step": 10400
713
- },
714
- {
715
- "epoch": 0.39,
716
- "learning_rate": 0.00019172714901787453,
717
- "loss": 0.706,
718
- "step": 10500
719
- },
720
- {
721
- "epoch": 0.4,
722
- "learning_rate": 0.00019157093338043583,
723
- "loss": 0.6848,
724
- "step": 10600
725
- },
726
- {
727
- "epoch": 0.4,
728
- "learning_rate": 0.00019141332146354042,
729
- "loss": 0.6728,
730
- "step": 10700
731
- },
732
- {
733
- "epoch": 0.4,
734
- "learning_rate": 0.00019125431567046494,
735
- "loss": 0.686,
736
- "step": 10800
737
- },
738
- {
739
- "epoch": 0.41,
740
- "learning_rate": 0.00019109391842573987,
741
- "loss": 0.6992,
742
- "step": 10900
743
- },
744
- {
745
- "epoch": 0.41,
746
- "learning_rate": 0.00019093213217511265,
747
- "loss": 0.6675,
748
- "step": 11000
749
- },
750
- {
751
- "epoch": 0.41,
752
- "eval_loss": 0.6794907450675964,
753
- "eval_runtime": 1782.5413,
754
- "eval_samples_per_second": 0.304,
755
- "eval_steps_per_second": 0.304,
756
- "step": 11000
757
- },
758
- {
759
- "epoch": 0.41,
760
- "learning_rate": 0.0001907689593855104,
761
- "loss": 0.6721,
762
- "step": 11100
763
- },
764
- {
765
- "epoch": 0.42,
766
- "learning_rate": 0.00019060440254500228,
767
- "loss": 0.6353,
768
- "step": 11200
769
- },
770
- {
771
- "epoch": 0.42,
772
- "learning_rate": 0.00019043846416276155,
773
- "loss": 0.6449,
774
- "step": 11300
775
- },
776
- {
777
- "epoch": 0.43,
778
- "learning_rate": 0.0001902711467690272,
779
- "loss": 0.6451,
780
- "step": 11400
781
- },
782
- {
783
- "epoch": 0.43,
784
- "learning_rate": 0.00019010245291506569,
785
- "loss": 0.6421,
786
- "step": 11500
787
- },
788
- {
789
- "epoch": 0.43,
790
- "learning_rate": 0.00018993238517313167,
791
- "loss": 0.6352,
792
- "step": 11600
793
- },
794
- {
795
- "epoch": 0.44,
796
- "learning_rate": 0.0001897609461364289,
797
- "loss": 0.6371,
798
- "step": 11700
799
- },
800
- {
801
- "epoch": 0.44,
802
- "learning_rate": 0.00018958813841907083,
803
- "loss": 0.623,
804
- "step": 11800
805
- },
806
- {
807
- "epoch": 0.44,
808
- "learning_rate": 0.00018941396465604063,
809
- "loss": 0.6533,
810
- "step": 11900
811
- },
812
- {
813
- "epoch": 0.45,
814
- "learning_rate": 0.00018923842750315095,
815
- "loss": 0.6371,
816
- "step": 12000
817
- },
818
- {
819
- "epoch": 0.45,
820
- "eval_loss": 0.6759930849075317,
821
- "eval_runtime": 1304.1351,
822
- "eval_samples_per_second": 0.416,
823
- "eval_steps_per_second": 0.416,
824
- "step": 12000
825
- },
826
- {
827
- "epoch": 0.45,
828
- "learning_rate": 0.00018906152963700358,
829
- "loss": 0.6664,
830
- "step": 12100
831
- },
832
- {
833
- "epoch": 0.45,
834
- "learning_rate": 0.00018888327375494847,
835
- "loss": 0.6644,
836
- "step": 12200
837
- },
838
- {
839
- "epoch": 0.46,
840
- "learning_rate": 0.00018870366257504274,
841
- "loss": 0.623,
842
- "step": 12300
843
- },
844
- {
845
- "epoch": 0.46,
846
- "learning_rate": 0.00018852269883600918,
847
- "loss": 0.6242,
848
- "step": 12400
849
- },
850
- {
851
- "epoch": 0.47,
852
- "learning_rate": 0.00018834038529719446,
853
- "loss": 0.6318,
854
- "step": 12500
855
- },
856
- {
857
- "epoch": 0.47,
858
- "learning_rate": 0.0001881567247385271,
859
- "loss": 0.6359,
860
- "step": 12600
861
- },
862
- {
863
- "epoch": 0.47,
864
- "learning_rate": 0.00018797171996047505,
865
- "loss": 0.6449,
866
- "step": 12700
867
- },
868
- {
869
- "epoch": 0.48,
870
- "learning_rate": 0.00018778537378400304,
871
- "loss": 0.6434,
872
- "step": 12800
873
- },
874
- {
875
- "epoch": 0.48,
876
- "learning_rate": 0.00018759768905052946,
877
- "loss": 0.6323,
878
- "step": 12900
879
- },
880
- {
881
- "epoch": 0.48,
882
- "learning_rate": 0.00018740866862188317,
883
- "loss": 0.6377,
884
- "step": 13000
885
- },
886
- {
887
- "epoch": 0.48,
888
- "eval_loss": 0.6696639060974121,
889
- "eval_runtime": 1238.6847,
890
- "eval_samples_per_second": 0.438,
891
- "eval_steps_per_second": 0.438,
892
- "step": 13000
893
- },
894
- {
895
- "epoch": 0.49,
896
- "learning_rate": 0.0001872183153802598,
897
- "loss": 0.6232,
898
- "step": 13100
899
- },
900
- {
901
- "epoch": 0.49,
902
- "learning_rate": 0.00018702663222817774,
903
- "loss": 0.6236,
904
- "step": 13200
905
- },
906
- {
907
- "epoch": 0.5,
908
- "learning_rate": 0.00018683362208843395,
909
- "loss": 0.6331,
910
- "step": 13300
911
- },
912
- {
913
- "epoch": 0.5,
914
- "learning_rate": 0.00018663928790405945,
915
- "loss": 0.6528,
916
- "step": 13400
917
- },
918
- {
919
- "epoch": 0.5,
920
- "learning_rate": 0.00018644363263827426,
921
- "loss": 0.6362,
922
- "step": 13500
923
- },
924
- {
925
- "epoch": 0.51,
926
- "learning_rate": 0.00018624665927444248,
927
- "loss": 0.6308,
928
- "step": 13600
929
- },
930
- {
931
- "epoch": 0.51,
932
- "learning_rate": 0.00018604837081602656,
933
- "loss": 0.6107,
934
- "step": 13700
935
- },
936
- {
937
- "epoch": 0.51,
938
- "learning_rate": 0.00018584877028654154,
939
- "loss": 0.6418,
940
- "step": 13800
941
- },
942
- {
943
- "epoch": 0.52,
944
- "learning_rate": 0.00018564786072950917,
945
- "loss": 0.6276,
946
- "step": 13900
947
- },
948
- {
949
- "epoch": 0.52,
950
- "learning_rate": 0.00018544564520841118,
951
- "loss": 0.6296,
952
- "step": 14000
953
- },
954
- {
955
- "epoch": 0.52,
956
- "eval_loss": 0.6651941537857056,
957
- "eval_runtime": 1277.2835,
958
- "eval_samples_per_second": 0.424,
959
- "eval_steps_per_second": 0.424,
960
- "step": 14000
961
- }
962
- ],
963
- "max_steps": 80463,
964
- "num_train_epochs": 3,
965
- "total_flos": 3.9235240378975027e+18,
966
- "trial_name": null,
967
- "trial_params": null
968
- }
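The deleted trainer_state.json above captures the run's log history through step 14000: the best eval_loss so far was 0.6651941537857056 at checkpoint-14000, against a planned 80463 steps over 3 epochs. A minimal sketch of pulling the eval-loss trajectory out of such a file, assuming a local copy of the JSON:

```python
import json

with open("trainer_state.json") as f:   # hypothetical local copy
    state = json.load(f)

print(state["best_metric"], state["best_model_checkpoint"])

# Evaluation records carry "eval_loss"; training records carry "loss".
evals = [(rec["step"], rec["eval_loss"])
         for rec in state["log_history"] if "eval_loss" in rec]
for step, loss in evals:
    print(f"step {step:>6}: eval_loss {loss:.4f}")
```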
checkpoint-14000/training_args.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:058d974230e3621cc65a8f8be1203c3e11fcaa08b08a9bcf2d0475d3730dbc26
- size 4027
{checkpoint-14000 → checkpoint-17000/adapter_model}/README.md RENAMED
File without changes
{checkpoint-14000 → checkpoint-17000/adapter_model}/adapter_config.json RENAMED
@@ -14,13 +14,13 @@
   "r": 32,
   "revision": null,
   "target_modules": [
-    "k_proj",
-    "q_proj",
     "v_proj",
     "o_proj",
     "gate_proj",
+    "k_proj",
+    "up_proj",
     "down_proj",
-    "up_proj"
+    "q_proj"
   ],
   "task_type": "CAUSAL_LM"
 }
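The only textual change in the renamed adapter_config.json is the ordering of target_modules; the set of adapted projection layers is unchanged, and since PEFT matches modules by name rather than by list position this should be purely cosmetic. A quick sanity-check sketch of that claim:

```python
old = ["k_proj", "q_proj", "v_proj", "o_proj", "gate_proj", "down_proj", "up_proj"]
new = ["v_proj", "o_proj", "gate_proj", "k_proj", "up_proj", "down_proj", "q_proj"]
assert set(old) == set(new)  # same seven modules, different ordering
```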
{checkpoint-14000 → checkpoint-17000}/adapter_model/adapter_model.bin RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:43d8489e1860bc44ac0a168b429d6db410e83fb55df06da817c1b03bc8a784c7
+ oid sha256:aa07e2f07041a3d8f612e13ed99c8da5ef7ee1ce18da05ac269f1c3c5b51a5a3
  size 500897101