Farouk committed
Commit 7d822d7
1 Parent(s): 069c9a6

Training in progress, step 6800
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:090f6e32bb7c4c942fa608e54265ef2f1067bc9ac96a5898018c9b770aa4dd77
+ oid sha256:c675a65b503e840f932e3f288b4376af802192b8e5716a5c912624986d493663
  size 319977229
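The diff above is only the Git LFS pointer for the adapter weights; the binary itself is stored in LFS and addressed by the sha256 oid. A minimal sketch, assuming the file has already been downloaded locally, for checking a copy against the new oid:

```python
# Sketch: verify a downloaded LFS object against the oid recorded in its pointer.
# The local file path is a placeholder; adjust it to wherever the adapter was saved.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected_oid = "c675a65b503e840f932e3f288b4376af802192b8e5716a5c912624986d493663"
assert sha256_of("adapter_model.bin") == expected_oid, "oid mismatch"
```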
checkpoint-6200/adapter_model/adapter_model/README.md CHANGED
@@ -15,6 +15,17 @@ The following `bitsandbytes` quantization config was used during training:
  - bnb_4bit_use_double_quant: True
  - bnb_4bit_compute_dtype: bfloat16
 
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: True
+ - bnb_4bit_compute_dtype: bfloat16
+
  The following `bitsandbytes` quantization config was used during training:
  - load_in_8bit: False
  - load_in_4bit: True
@@ -27,6 +38,7 @@ The following `bitsandbytes` quantization config was used during training:
  - bnb_4bit_compute_dtype: bfloat16
  ### Framework versions
 
+ - PEFT 0.4.0
  - PEFT 0.4.0
 
  - PEFT 0.4.0
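For reference, the quantization settings listed in this README correspond to a `BitsAndBytesConfig` in `transformers`. A minimal sketch of how such a config is typically constructed; the base model id is a placeholder, not something named in this commit:

```python
# Sketch: the bitsandbytes settings from the README expressed as a
# transformers BitsAndBytesConfig. The model id below is a placeholder.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)

model = AutoModelForCausalLM.from_pretrained(
    "base-model-id",  # placeholder: the base model is not named in this diff
    quantization_config=bnb_config,
    device_map="auto",
)
```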
checkpoint-6200/adapter_model/adapter_model/adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0c8f7e41360e84780f7f9bec853d2a1f20b40dee9b737828c1eb008dcb4909cd
+ oid sha256:090f6e32bb7c4c942fa608e54265ef2f1067bc9ac96a5898018c9b770aa4dd77
  size 319977229
{checkpoint-4600 → checkpoint-6800}/README.md RENAMED
File without changes
{checkpoint-4600 → checkpoint-6800}/adapter_config.json RENAMED
File without changes
{checkpoint-4600 → checkpoint-6800}/adapter_model.bin RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9b7c2baccf97cf9220cd3c6c8df75adb082a7d53e08f2293f26adfde4c548cdf
+ oid sha256:c675a65b503e840f932e3f288b4376af802192b8e5716a5c912624986d493663
  size 319977229
{checkpoint-4600 → checkpoint-6800}/added_tokens.json RENAMED
File without changes
{checkpoint-4600 → checkpoint-6800}/optimizer.pt RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9f61eda6b5c30da044d8ef75665f5190a8fe0a95b4d8cc7a8db6f6d7c5a80ce7
+ oid sha256:bcfe5c0dc981023dcc95e882a56f5d526c81ec3b6dff58bcb2b4ae6044cb2421
  size 1279539973
{checkpoint-4600 → checkpoint-6800}/rng_state.pth RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2a9116470f7093406537225b3145e81ac6d49cecc24de33b7e84f85cc0f0b7ab
+ oid sha256:aaab1c5969cc1815061209603b2ae34c3218f1c4cc09d08b060fdcc99de82d46
  size 14511
{checkpoint-4600 → checkpoint-6800}/scheduler.pt RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:793c554376f8e7f2de92b146fd20e985a7a5b03ad4002c60e108f3677dee0ff3
+ oid sha256:97c7e7304629a0d5b32d77774ba8769b4783d1dab8e735fe47e12df568c740c2
  size 627
{checkpoint-4600 → checkpoint-6800}/special_tokens_map.json RENAMED
File without changes
{checkpoint-4600 → checkpoint-6800}/tokenizer.model RENAMED
File without changes
{checkpoint-4600 → checkpoint-6800}/tokenizer_config.json RENAMED
File without changes
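The trainer_state.json diff below is the Trainer's running log: per-step training losses plus periodic eval_loss and MMLU evaluations. A minimal sketch, assuming the standard Hugging Face Trainer state layout with a "log_history" list, for pulling the eval-loss curve out of the renamed checkpoint:

```python
# Sketch: read the eval-loss history from a checkpoint's trainer_state.json.
# Assumes the standard Hugging Face Trainer layout with a "log_history" list.
import json

with open("checkpoint-6800/trainer_state.json") as f:
    state = json.load(f)

print(state["best_metric"], state["best_model_checkpoint"])

eval_curve = [
    (entry["step"], entry["eval_loss"])
    for entry in state["log_history"]
    if "eval_loss" in entry
]
print(eval_curve[-3:])  # most recent (step, eval_loss) pairs
```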
{checkpoint-4600 → checkpoint-6800}/trainer_state.json RENAMED
@@ -1,8 +1,8 @@
  {
- "best_metric": 0.7343361377716064,
- "best_model_checkpoint": "experts/expert-16/checkpoint-3000",
- "epoch": 1.4575411913814955,
- "global_step": 4600,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -4399,11 +4399,2112 @@
  "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
  "mmlu_loss": 1.3095654961567946,
  "step": 4600
  }
  ],
  "max_steps": 10000,
  "num_train_epochs": 4,
- "total_flos": 1.3977803041351926e+18,
  "trial_name": null,
  "trial_params": null
  }
 
  {
+ "best_metric": 0.7293602228164673,
+ "best_model_checkpoint": "experts/expert-16/checkpoint-6200",
+ "epoch": 2.1546261089987326,
+ "global_step": 6800,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
 
4399
  "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
4400
  "mmlu_loss": 1.3095654961567946,
4401
  "step": 4600
4402
+ },
4403
+ {
4404
+ "epoch": 1.46,
4405
+ "learning_rate": 0.0002,
4406
+ "loss": 0.6486,
4407
+ "step": 4610
4408
+ },
4409
+ {
4410
+ "epoch": 1.46,
4411
+ "learning_rate": 0.0002,
4412
+ "loss": 0.6999,
4413
+ "step": 4620
4414
+ },
4415
+ {
4416
+ "epoch": 1.47,
4417
+ "learning_rate": 0.0002,
4418
+ "loss": 0.6458,
4419
+ "step": 4630
4420
+ },
4421
+ {
4422
+ "epoch": 1.47,
4423
+ "learning_rate": 0.0002,
4424
+ "loss": 0.6762,
4425
+ "step": 4640
4426
+ },
4427
+ {
4428
+ "epoch": 1.47,
4429
+ "learning_rate": 0.0002,
4430
+ "loss": 0.6924,
4431
+ "step": 4650
4432
+ },
4433
+ {
4434
+ "epoch": 1.48,
4435
+ "learning_rate": 0.0002,
4436
+ "loss": 0.682,
4437
+ "step": 4660
4438
+ },
4439
+ {
4440
+ "epoch": 1.48,
4441
+ "learning_rate": 0.0002,
4442
+ "loss": 0.7081,
4443
+ "step": 4670
4444
+ },
4445
+ {
4446
+ "epoch": 1.48,
4447
+ "learning_rate": 0.0002,
4448
+ "loss": 0.7506,
4449
+ "step": 4680
4450
+ },
4451
+ {
4452
+ "epoch": 1.49,
4453
+ "learning_rate": 0.0002,
4454
+ "loss": 0.7311,
4455
+ "step": 4690
4456
+ },
4457
+ {
4458
+ "epoch": 1.49,
4459
+ "learning_rate": 0.0002,
4460
+ "loss": 0.6463,
4461
+ "step": 4700
4462
+ },
4463
+ {
4464
+ "epoch": 1.49,
4465
+ "learning_rate": 0.0002,
4466
+ "loss": 0.6741,
4467
+ "step": 4710
4468
+ },
4469
+ {
4470
+ "epoch": 1.5,
4471
+ "learning_rate": 0.0002,
4472
+ "loss": 0.6626,
4473
+ "step": 4720
4474
+ },
4475
+ {
4476
+ "epoch": 1.5,
4477
+ "learning_rate": 0.0002,
4478
+ "loss": 0.712,
4479
+ "step": 4730
4480
+ },
4481
+ {
4482
+ "epoch": 1.5,
4483
+ "learning_rate": 0.0002,
4484
+ "loss": 0.6676,
4485
+ "step": 4740
4486
+ },
4487
+ {
4488
+ "epoch": 1.51,
4489
+ "learning_rate": 0.0002,
4490
+ "loss": 0.7193,
4491
+ "step": 4750
4492
+ },
4493
+ {
4494
+ "epoch": 1.51,
4495
+ "learning_rate": 0.0002,
4496
+ "loss": 0.6699,
4497
+ "step": 4760
4498
+ },
4499
+ {
4500
+ "epoch": 1.51,
4501
+ "learning_rate": 0.0002,
4502
+ "loss": 0.6718,
4503
+ "step": 4770
4504
+ },
4505
+ {
4506
+ "epoch": 1.51,
4507
+ "learning_rate": 0.0002,
4508
+ "loss": 0.6899,
4509
+ "step": 4780
4510
+ },
4511
+ {
4512
+ "epoch": 1.52,
4513
+ "learning_rate": 0.0002,
4514
+ "loss": 0.6954,
4515
+ "step": 4790
4516
+ },
4517
+ {
4518
+ "epoch": 1.52,
4519
+ "learning_rate": 0.0002,
4520
+ "loss": 0.7187,
4521
+ "step": 4800
4522
+ },
4523
+ {
4524
+ "epoch": 1.52,
4525
+ "eval_loss": 0.7387924790382385,
4526
+ "eval_runtime": 111.1141,
4527
+ "eval_samples_per_second": 9.0,
4528
+ "eval_steps_per_second": 4.5,
4529
+ "step": 4800
4530
+ },
4531
+ {
4532
+ "epoch": 1.52,
4533
+ "mmlu_eval_accuracy": 0.4879926358283337,
4534
+ "mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
4535
+ "mmlu_eval_accuracy_anatomy": 0.7142857142857143,
4536
+ "mmlu_eval_accuracy_astronomy": 0.4375,
4537
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
4538
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
4539
+ "mmlu_eval_accuracy_college_biology": 0.375,
4540
+ "mmlu_eval_accuracy_college_chemistry": 0.375,
4541
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
4542
+ "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
4543
+ "mmlu_eval_accuracy_college_medicine": 0.3181818181818182,
4544
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
4545
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
4546
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
4547
+ "mmlu_eval_accuracy_econometrics": 0.25,
4548
+ "mmlu_eval_accuracy_electrical_engineering": 0.3125,
4549
+ "mmlu_eval_accuracy_elementary_mathematics": 0.3902439024390244,
4550
+ "mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
4551
+ "mmlu_eval_accuracy_global_facts": 0.5,
4552
+ "mmlu_eval_accuracy_high_school_biology": 0.46875,
4553
+ "mmlu_eval_accuracy_high_school_chemistry": 0.18181818181818182,
4554
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
4555
+ "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
4556
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
4557
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.5238095238095238,
4558
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.5116279069767442,
4559
+ "mmlu_eval_accuracy_high_school_mathematics": 0.13793103448275862,
4560
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.5384615384615384,
4561
+ "mmlu_eval_accuracy_high_school_physics": 0.058823529411764705,
4562
+ "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
4563
+ "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
4564
+ "mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
4565
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
4566
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
4567
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
4568
+ "mmlu_eval_accuracy_international_law": 0.8461538461538461,
4569
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
4570
+ "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
4571
+ "mmlu_eval_accuracy_machine_learning": 0.36363636363636365,
4572
+ "mmlu_eval_accuracy_management": 0.7272727272727273,
4573
+ "mmlu_eval_accuracy_marketing": 0.8,
4574
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
4575
+ "mmlu_eval_accuracy_miscellaneous": 0.6744186046511628,
4576
+ "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
4577
+ "mmlu_eval_accuracy_moral_scenarios": 0.23,
4578
+ "mmlu_eval_accuracy_nutrition": 0.696969696969697,
4579
+ "mmlu_eval_accuracy_philosophy": 0.47058823529411764,
4580
+ "mmlu_eval_accuracy_prehistory": 0.5428571428571428,
4581
+ "mmlu_eval_accuracy_professional_accounting": 0.3225806451612903,
4582
+ "mmlu_eval_accuracy_professional_law": 0.31176470588235294,
4583
+ "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
4584
+ "mmlu_eval_accuracy_professional_psychology": 0.463768115942029,
4585
+ "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
4586
+ "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
4587
+ "mmlu_eval_accuracy_sociology": 0.5909090909090909,
4588
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
4589
+ "mmlu_eval_accuracy_virology": 0.5,
4590
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
4591
+ "mmlu_loss": 1.4884750641901874,
4592
+ "step": 4800
4593
+ },
4594
+ {
4595
+ "epoch": 1.52,
4596
+ "learning_rate": 0.0002,
4597
+ "loss": 0.6733,
4598
+ "step": 4810
4599
+ },
4600
+ {
4601
+ "epoch": 1.53,
4602
+ "learning_rate": 0.0002,
4603
+ "loss": 0.6607,
4604
+ "step": 4820
4605
+ },
4606
+ {
4607
+ "epoch": 1.53,
4608
+ "learning_rate": 0.0002,
4609
+ "loss": 0.6933,
4610
+ "step": 4830
4611
+ },
4612
+ {
4613
+ "epoch": 1.53,
4614
+ "learning_rate": 0.0002,
4615
+ "loss": 0.7517,
4616
+ "step": 4840
4617
+ },
4618
+ {
4619
+ "epoch": 1.54,
4620
+ "learning_rate": 0.0002,
4621
+ "loss": 0.7391,
4622
+ "step": 4850
4623
+ },
4624
+ {
4625
+ "epoch": 1.54,
4626
+ "learning_rate": 0.0002,
4627
+ "loss": 0.6636,
4628
+ "step": 4860
4629
+ },
4630
+ {
4631
+ "epoch": 1.54,
4632
+ "learning_rate": 0.0002,
4633
+ "loss": 0.7221,
4634
+ "step": 4870
4635
+ },
4636
+ {
4637
+ "epoch": 1.55,
4638
+ "learning_rate": 0.0002,
4639
+ "loss": 0.6967,
4640
+ "step": 4880
4641
+ },
4642
+ {
4643
+ "epoch": 1.55,
4644
+ "learning_rate": 0.0002,
4645
+ "loss": 0.7117,
4646
+ "step": 4890
4647
+ },
4648
+ {
4649
+ "epoch": 1.55,
4650
+ "learning_rate": 0.0002,
4651
+ "loss": 0.6256,
4652
+ "step": 4900
4653
+ },
4654
+ {
4655
+ "epoch": 1.56,
4656
+ "learning_rate": 0.0002,
4657
+ "loss": 0.7923,
4658
+ "step": 4910
4659
+ },
4660
+ {
4661
+ "epoch": 1.56,
4662
+ "learning_rate": 0.0002,
4663
+ "loss": 0.7151,
4664
+ "step": 4920
4665
+ },
4666
+ {
4667
+ "epoch": 1.56,
4668
+ "learning_rate": 0.0002,
4669
+ "loss": 0.7119,
4670
+ "step": 4930
4671
+ },
4672
+ {
4673
+ "epoch": 1.57,
4674
+ "learning_rate": 0.0002,
4675
+ "loss": 0.7105,
4676
+ "step": 4940
4677
+ },
4678
+ {
4679
+ "epoch": 1.57,
4680
+ "learning_rate": 0.0002,
4681
+ "loss": 0.6653,
4682
+ "step": 4950
4683
+ },
4684
+ {
4685
+ "epoch": 1.57,
4686
+ "learning_rate": 0.0002,
4687
+ "loss": 0.7084,
4688
+ "step": 4960
4689
+ },
4690
+ {
4691
+ "epoch": 1.57,
4692
+ "learning_rate": 0.0002,
4693
+ "loss": 0.6644,
4694
+ "step": 4970
4695
+ },
4696
+ {
4697
+ "epoch": 1.58,
4698
+ "learning_rate": 0.0002,
4699
+ "loss": 0.6665,
4700
+ "step": 4980
4701
+ },
4702
+ {
4703
+ "epoch": 1.58,
4704
+ "learning_rate": 0.0002,
4705
+ "loss": 0.6746,
4706
+ "step": 4990
4707
+ },
4708
+ {
4709
+ "epoch": 1.58,
4710
+ "learning_rate": 0.0002,
4711
+ "loss": 0.7223,
4712
+ "step": 5000
4713
+ },
4714
+ {
4715
+ "epoch": 1.58,
4716
+ "eval_loss": 0.7373215556144714,
4717
+ "eval_runtime": 111.2649,
4718
+ "eval_samples_per_second": 8.988,
4719
+ "eval_steps_per_second": 4.494,
4720
+ "step": 5000
4721
+ },
4722
+ {
4723
+ "epoch": 1.58,
4724
+ "mmlu_eval_accuracy": 0.46701126611778865,
4725
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
4726
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
4727
+ "mmlu_eval_accuracy_astronomy": 0.4375,
4728
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
4729
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
4730
+ "mmlu_eval_accuracy_college_biology": 0.375,
4731
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
4732
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
4733
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
4734
+ "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
4735
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
4736
+ "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
4737
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
4738
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
4739
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
4740
+ "mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
4741
+ "mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
4742
+ "mmlu_eval_accuracy_global_facts": 0.3,
4743
+ "mmlu_eval_accuracy_high_school_biology": 0.34375,
4744
+ "mmlu_eval_accuracy_high_school_chemistry": 0.18181818181818182,
4745
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
4746
+ "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
4747
+ "mmlu_eval_accuracy_high_school_geography": 0.7727272727272727,
4748
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.5714285714285714,
4749
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4883720930232558,
4750
+ "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
4751
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
4752
+ "mmlu_eval_accuracy_high_school_physics": 0.058823529411764705,
4753
+ "mmlu_eval_accuracy_high_school_psychology": 0.8833333333333333,
4754
+ "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
4755
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
4756
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
4757
+ "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
4758
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
4759
+ "mmlu_eval_accuracy_international_law": 0.8461538461538461,
4760
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
4761
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
4762
+ "mmlu_eval_accuracy_machine_learning": 0.09090909090909091,
4763
+ "mmlu_eval_accuracy_management": 0.7272727272727273,
4764
+ "mmlu_eval_accuracy_marketing": 0.72,
4765
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
4766
+ "mmlu_eval_accuracy_miscellaneous": 0.6744186046511628,
4767
+ "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
4768
+ "mmlu_eval_accuracy_moral_scenarios": 0.23,
4769
+ "mmlu_eval_accuracy_nutrition": 0.6363636363636364,
4770
+ "mmlu_eval_accuracy_philosophy": 0.47058823529411764,
4771
+ "mmlu_eval_accuracy_prehistory": 0.4857142857142857,
4772
+ "mmlu_eval_accuracy_professional_accounting": 0.3225806451612903,
4773
+ "mmlu_eval_accuracy_professional_law": 0.3235294117647059,
4774
+ "mmlu_eval_accuracy_professional_medicine": 0.5806451612903226,
4775
+ "mmlu_eval_accuracy_professional_psychology": 0.4927536231884058,
4776
+ "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
4777
+ "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
4778
+ "mmlu_eval_accuracy_sociology": 0.6363636363636364,
4779
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
4780
+ "mmlu_eval_accuracy_virology": 0.5,
4781
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
4782
+ "mmlu_loss": 1.4578603010031324,
4783
+ "step": 5000
4784
+ },
4785
+ {
4786
+ "epoch": 1.59,
4787
+ "learning_rate": 0.0002,
4788
+ "loss": 0.6833,
4789
+ "step": 5010
4790
+ },
4791
+ {
4792
+ "epoch": 1.59,
4793
+ "learning_rate": 0.0002,
4794
+ "loss": 0.7323,
4795
+ "step": 5020
4796
+ },
4797
+ {
4798
+ "epoch": 1.59,
4799
+ "learning_rate": 0.0002,
4800
+ "loss": 0.7224,
4801
+ "step": 5030
4802
+ },
4803
+ {
4804
+ "epoch": 1.6,
4805
+ "learning_rate": 0.0002,
4806
+ "loss": 0.734,
4807
+ "step": 5040
4808
+ },
4809
+ {
4810
+ "epoch": 1.6,
4811
+ "learning_rate": 0.0002,
4812
+ "loss": 0.692,
4813
+ "step": 5050
4814
+ },
4815
+ {
4816
+ "epoch": 1.6,
4817
+ "learning_rate": 0.0002,
4818
+ "loss": 0.7083,
4819
+ "step": 5060
4820
+ },
4821
+ {
4822
+ "epoch": 1.61,
4823
+ "learning_rate": 0.0002,
4824
+ "loss": 0.6993,
4825
+ "step": 5070
4826
+ },
4827
+ {
4828
+ "epoch": 1.61,
4829
+ "learning_rate": 0.0002,
4830
+ "loss": 0.755,
4831
+ "step": 5080
4832
+ },
4833
+ {
4834
+ "epoch": 1.61,
4835
+ "learning_rate": 0.0002,
4836
+ "loss": 0.7323,
4837
+ "step": 5090
4838
+ },
4839
+ {
4840
+ "epoch": 1.62,
4841
+ "learning_rate": 0.0002,
4842
+ "loss": 0.6725,
4843
+ "step": 5100
4844
+ },
4845
+ {
4846
+ "epoch": 1.62,
4847
+ "learning_rate": 0.0002,
4848
+ "loss": 0.6989,
4849
+ "step": 5110
4850
+ },
4851
+ {
4852
+ "epoch": 1.62,
4853
+ "learning_rate": 0.0002,
4854
+ "loss": 0.6938,
4855
+ "step": 5120
4856
+ },
4857
+ {
4858
+ "epoch": 1.63,
4859
+ "learning_rate": 0.0002,
4860
+ "loss": 0.6895,
4861
+ "step": 5130
4862
+ },
4863
+ {
4864
+ "epoch": 1.63,
4865
+ "learning_rate": 0.0002,
4866
+ "loss": 0.6915,
4867
+ "step": 5140
4868
+ },
4869
+ {
4870
+ "epoch": 1.63,
4871
+ "learning_rate": 0.0002,
4872
+ "loss": 0.7672,
4873
+ "step": 5150
4874
+ },
4875
+ {
4876
+ "epoch": 1.63,
4877
+ "learning_rate": 0.0002,
4878
+ "loss": 0.6413,
4879
+ "step": 5160
4880
+ },
4881
+ {
4882
+ "epoch": 1.64,
4883
+ "learning_rate": 0.0002,
4884
+ "loss": 0.7195,
4885
+ "step": 5170
4886
+ },
4887
+ {
4888
+ "epoch": 1.64,
4889
+ "learning_rate": 0.0002,
4890
+ "loss": 0.6783,
4891
+ "step": 5180
4892
+ },
4893
+ {
4894
+ "epoch": 1.64,
4895
+ "learning_rate": 0.0002,
4896
+ "loss": 0.6457,
4897
+ "step": 5190
4898
+ },
4899
+ {
4900
+ "epoch": 1.65,
4901
+ "learning_rate": 0.0002,
4902
+ "loss": 0.6959,
4903
+ "step": 5200
4904
+ },
4905
+ {
4906
+ "epoch": 1.65,
4907
+ "eval_loss": 0.736714243888855,
4908
+ "eval_runtime": 111.0389,
4909
+ "eval_samples_per_second": 9.006,
4910
+ "eval_steps_per_second": 4.503,
4911
+ "step": 5200
4912
+ },
4913
+ {
4914
+ "epoch": 1.65,
4915
+ "mmlu_eval_accuracy": 0.4835750759985151,
4916
+ "mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
4917
+ "mmlu_eval_accuracy_anatomy": 0.7857142857142857,
4918
+ "mmlu_eval_accuracy_astronomy": 0.375,
4919
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
4920
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
4921
+ "mmlu_eval_accuracy_college_biology": 0.4375,
4922
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
4923
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
4924
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
4925
+ "mmlu_eval_accuracy_college_medicine": 0.45454545454545453,
4926
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
4927
+ "mmlu_eval_accuracy_computer_security": 0.5454545454545454,
4928
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
4929
+ "mmlu_eval_accuracy_econometrics": 0.25,
4930
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
4931
+ "mmlu_eval_accuracy_elementary_mathematics": 0.36585365853658536,
4932
+ "mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
4933
+ "mmlu_eval_accuracy_global_facts": 0.5,
4934
+ "mmlu_eval_accuracy_high_school_biology": 0.375,
4935
+ "mmlu_eval_accuracy_high_school_chemistry": 0.18181818181818182,
4936
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
4937
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
4938
+ "mmlu_eval_accuracy_high_school_geography": 0.7727272727272727,
4939
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.5714285714285714,
4940
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4883720930232558,
4941
+ "mmlu_eval_accuracy_high_school_mathematics": 0.1724137931034483,
4942
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.5,
4943
+ "mmlu_eval_accuracy_high_school_physics": 0.17647058823529413,
4944
+ "mmlu_eval_accuracy_high_school_psychology": 0.85,
4945
+ "mmlu_eval_accuracy_high_school_statistics": 0.2608695652173913,
4946
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
4947
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
4948
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
4949
+ "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
4950
+ "mmlu_eval_accuracy_international_law": 0.8461538461538461,
4951
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
4952
+ "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
4953
+ "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
4954
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
4955
+ "mmlu_eval_accuracy_marketing": 0.76,
4956
+ "mmlu_eval_accuracy_medical_genetics": 0.8181818181818182,
4957
+ "mmlu_eval_accuracy_miscellaneous": 0.6395348837209303,
4958
+ "mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
4959
+ "mmlu_eval_accuracy_moral_scenarios": 0.23,
4960
+ "mmlu_eval_accuracy_nutrition": 0.696969696969697,
4961
+ "mmlu_eval_accuracy_philosophy": 0.4411764705882353,
4962
+ "mmlu_eval_accuracy_prehistory": 0.5142857142857142,
4963
+ "mmlu_eval_accuracy_professional_accounting": 0.22580645161290322,
4964
+ "mmlu_eval_accuracy_professional_law": 0.3058823529411765,
4965
+ "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
4966
+ "mmlu_eval_accuracy_professional_psychology": 0.4927536231884058,
4967
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
4968
+ "mmlu_eval_accuracy_security_studies": 0.4074074074074074,
4969
+ "mmlu_eval_accuracy_sociology": 0.6818181818181818,
4970
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
4971
+ "mmlu_eval_accuracy_virology": 0.5,
4972
+ "mmlu_eval_accuracy_world_religions": 0.631578947368421,
4973
+ "mmlu_loss": 1.2928575183564004,
4974
+ "step": 5200
4975
+ },
4976
+ {
4977
+ "epoch": 1.65,
4978
+ "learning_rate": 0.0002,
4979
+ "loss": 0.654,
4980
+ "step": 5210
4981
+ },
4982
+ {
4983
+ "epoch": 1.65,
4984
+ "learning_rate": 0.0002,
4985
+ "loss": 0.692,
4986
+ "step": 5220
4987
+ },
4988
+ {
4989
+ "epoch": 1.66,
4990
+ "learning_rate": 0.0002,
4991
+ "loss": 0.6774,
4992
+ "step": 5230
4993
+ },
4994
+ {
4995
+ "epoch": 1.66,
4996
+ "learning_rate": 0.0002,
4997
+ "loss": 0.6383,
4998
+ "step": 5240
4999
+ },
5000
+ {
5001
+ "epoch": 1.66,
5002
+ "learning_rate": 0.0002,
5003
+ "loss": 0.6949,
5004
+ "step": 5250
5005
+ },
5006
+ {
5007
+ "epoch": 1.67,
5008
+ "learning_rate": 0.0002,
5009
+ "loss": 0.6992,
5010
+ "step": 5260
5011
+ },
5012
+ {
5013
+ "epoch": 1.67,
5014
+ "learning_rate": 0.0002,
5015
+ "loss": 0.6612,
5016
+ "step": 5270
5017
+ },
5018
+ {
5019
+ "epoch": 1.67,
5020
+ "learning_rate": 0.0002,
5021
+ "loss": 0.7651,
5022
+ "step": 5280
5023
+ },
5024
+ {
5025
+ "epoch": 1.68,
5026
+ "learning_rate": 0.0002,
5027
+ "loss": 0.6994,
5028
+ "step": 5290
5029
+ },
5030
+ {
5031
+ "epoch": 1.68,
5032
+ "learning_rate": 0.0002,
5033
+ "loss": 0.7105,
5034
+ "step": 5300
5035
+ },
5036
+ {
5037
+ "epoch": 1.68,
5038
+ "learning_rate": 0.0002,
5039
+ "loss": 0.6972,
5040
+ "step": 5310
5041
+ },
5042
+ {
5043
+ "epoch": 1.69,
5044
+ "learning_rate": 0.0002,
5045
+ "loss": 0.7531,
5046
+ "step": 5320
5047
+ },
5048
+ {
5049
+ "epoch": 1.69,
5050
+ "learning_rate": 0.0002,
5051
+ "loss": 0.7072,
5052
+ "step": 5330
5053
+ },
5054
+ {
5055
+ "epoch": 1.69,
5056
+ "learning_rate": 0.0002,
5057
+ "loss": 0.6964,
5058
+ "step": 5340
5059
+ },
5060
+ {
5061
+ "epoch": 1.7,
5062
+ "learning_rate": 0.0002,
5063
+ "loss": 0.7574,
5064
+ "step": 5350
5065
+ },
5066
+ {
5067
+ "epoch": 1.7,
5068
+ "learning_rate": 0.0002,
5069
+ "loss": 0.7155,
5070
+ "step": 5360
5071
+ },
5072
+ {
5073
+ "epoch": 1.7,
5074
+ "learning_rate": 0.0002,
5075
+ "loss": 0.7104,
5076
+ "step": 5370
5077
+ },
5078
+ {
5079
+ "epoch": 1.7,
5080
+ "learning_rate": 0.0002,
5081
+ "loss": 0.7495,
5082
+ "step": 5380
5083
+ },
5084
+ {
5085
+ "epoch": 1.71,
5086
+ "learning_rate": 0.0002,
5087
+ "loss": 0.7259,
5088
+ "step": 5390
5089
+ },
5090
+ {
5091
+ "epoch": 1.71,
5092
+ "learning_rate": 0.0002,
5093
+ "loss": 0.7394,
5094
+ "step": 5400
5095
+ },
5096
+ {
5097
+ "epoch": 1.71,
5098
+ "eval_loss": 0.7311118245124817,
5099
+ "eval_runtime": 111.2623,
5100
+ "eval_samples_per_second": 8.988,
5101
+ "eval_steps_per_second": 4.494,
5102
+ "step": 5400
5103
+ },
5104
+ {
5105
+ "epoch": 1.71,
5106
+ "mmlu_eval_accuracy": 0.4848351410303045,
5107
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
5108
+ "mmlu_eval_accuracy_anatomy": 0.7142857142857143,
5109
+ "mmlu_eval_accuracy_astronomy": 0.4375,
5110
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
5111
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
5112
+ "mmlu_eval_accuracy_college_biology": 0.375,
5113
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
5114
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
5115
+ "mmlu_eval_accuracy_college_mathematics": 0.36363636363636365,
5116
+ "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
5117
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
5118
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
5119
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
5120
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
5121
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
5122
+ "mmlu_eval_accuracy_elementary_mathematics": 0.3902439024390244,
5123
+ "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
5124
+ "mmlu_eval_accuracy_global_facts": 0.4,
5125
+ "mmlu_eval_accuracy_high_school_biology": 0.40625,
5126
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
5127
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
5128
+ "mmlu_eval_accuracy_high_school_european_history": 0.6666666666666666,
5129
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
5130
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
5131
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4883720930232558,
5132
+ "mmlu_eval_accuracy_high_school_mathematics": 0.1724137931034483,
5133
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
5134
+ "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
5135
+ "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
5136
+ "mmlu_eval_accuracy_high_school_statistics": 0.391304347826087,
5137
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
5138
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
5139
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
5140
+ "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
5141
+ "mmlu_eval_accuracy_international_law": 0.7692307692307693,
5142
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
5143
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
5144
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
5145
+ "mmlu_eval_accuracy_management": 0.7272727272727273,
5146
+ "mmlu_eval_accuracy_marketing": 0.76,
5147
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
5148
+ "mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
5149
+ "mmlu_eval_accuracy_moral_disputes": 0.5526315789473685,
5150
+ "mmlu_eval_accuracy_moral_scenarios": 0.22,
5151
+ "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
5152
+ "mmlu_eval_accuracy_philosophy": 0.4411764705882353,
5153
+ "mmlu_eval_accuracy_prehistory": 0.4857142857142857,
5154
+ "mmlu_eval_accuracy_professional_accounting": 0.3548387096774194,
5155
+ "mmlu_eval_accuracy_professional_law": 0.3058823529411765,
5156
+ "mmlu_eval_accuracy_professional_medicine": 0.6129032258064516,
5157
+ "mmlu_eval_accuracy_professional_psychology": 0.4927536231884058,
5158
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
5159
+ "mmlu_eval_accuracy_security_studies": 0.4074074074074074,
5160
+ "mmlu_eval_accuracy_sociology": 0.6363636363636364,
5161
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
5162
+ "mmlu_eval_accuracy_virology": 0.5,
5163
+ "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
5164
+ "mmlu_loss": 1.348453690776937,
5165
+ "step": 5400
5166
+ },
5167
+ {
5168
+ "epoch": 1.71,
5169
+ "learning_rate": 0.0002,
5170
+ "loss": 0.7047,
5171
+ "step": 5410
5172
+ },
5173
+ {
5174
+ "epoch": 1.72,
5175
+ "learning_rate": 0.0002,
5176
+ "loss": 0.7001,
5177
+ "step": 5420
5178
+ },
5179
+ {
5180
+ "epoch": 1.72,
5181
+ "learning_rate": 0.0002,
5182
+ "loss": 0.6759,
5183
+ "step": 5430
5184
+ },
5185
+ {
5186
+ "epoch": 1.72,
5187
+ "learning_rate": 0.0002,
5188
+ "loss": 0.707,
5189
+ "step": 5440
5190
+ },
5191
+ {
5192
+ "epoch": 1.73,
5193
+ "learning_rate": 0.0002,
5194
+ "loss": 0.6648,
5195
+ "step": 5450
5196
+ },
5197
+ {
5198
+ "epoch": 1.73,
5199
+ "learning_rate": 0.0002,
5200
+ "loss": 0.7223,
5201
+ "step": 5460
5202
+ },
5203
+ {
5204
+ "epoch": 1.73,
5205
+ "learning_rate": 0.0002,
5206
+ "loss": 0.722,
5207
+ "step": 5470
5208
+ },
5209
+ {
5210
+ "epoch": 1.74,
5211
+ "learning_rate": 0.0002,
5212
+ "loss": 0.7848,
5213
+ "step": 5480
5214
+ },
5215
+ {
5216
+ "epoch": 1.74,
5217
+ "learning_rate": 0.0002,
5218
+ "loss": 0.6956,
5219
+ "step": 5490
5220
+ },
5221
+ {
5222
+ "epoch": 1.74,
5223
+ "learning_rate": 0.0002,
5224
+ "loss": 0.6584,
5225
+ "step": 5500
5226
+ },
5227
+ {
5228
+ "epoch": 1.75,
5229
+ "learning_rate": 0.0002,
5230
+ "loss": 0.7522,
5231
+ "step": 5510
5232
+ },
5233
+ {
5234
+ "epoch": 1.75,
5235
+ "learning_rate": 0.0002,
5236
+ "loss": 0.7374,
5237
+ "step": 5520
5238
+ },
5239
+ {
5240
+ "epoch": 1.75,
5241
+ "learning_rate": 0.0002,
5242
+ "loss": 0.635,
5243
+ "step": 5530
5244
+ },
5245
+ {
5246
+ "epoch": 1.76,
5247
+ "learning_rate": 0.0002,
5248
+ "loss": 0.6947,
5249
+ "step": 5540
5250
+ },
5251
+ {
5252
+ "epoch": 1.76,
5253
+ "learning_rate": 0.0002,
5254
+ "loss": 0.6948,
5255
+ "step": 5550
5256
+ },
5257
+ {
5258
+ "epoch": 1.76,
5259
+ "learning_rate": 0.0002,
5260
+ "loss": 0.676,
5261
+ "step": 5560
5262
+ },
5263
+ {
5264
+ "epoch": 1.76,
5265
+ "learning_rate": 0.0002,
5266
+ "loss": 0.7053,
5267
+ "step": 5570
5268
+ },
5269
+ {
5270
+ "epoch": 1.77,
5271
+ "learning_rate": 0.0002,
5272
+ "loss": 0.6868,
5273
+ "step": 5580
5274
+ },
5275
+ {
5276
+ "epoch": 1.77,
5277
+ "learning_rate": 0.0002,
5278
+ "loss": 0.7307,
5279
+ "step": 5590
5280
+ },
5281
+ {
5282
+ "epoch": 1.77,
5283
+ "learning_rate": 0.0002,
5284
+ "loss": 0.6902,
5285
+ "step": 5600
5286
+ },
5287
+ {
5288
+ "epoch": 1.77,
5289
+ "eval_loss": 0.7314637899398804,
5290
+ "eval_runtime": 111.0487,
5291
+ "eval_samples_per_second": 9.005,
5292
+ "eval_steps_per_second": 4.503,
5293
+ "step": 5600
5294
+ },
5295
+ {
5296
+ "epoch": 1.77,
5297
+ "mmlu_eval_accuracy": 0.48467107795368586,
5298
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
5299
+ "mmlu_eval_accuracy_anatomy": 0.7142857142857143,
5300
+ "mmlu_eval_accuracy_astronomy": 0.5,
5301
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
5302
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
5303
+ "mmlu_eval_accuracy_college_biology": 0.375,
5304
+ "mmlu_eval_accuracy_college_chemistry": 0.375,
5305
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
5306
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
5307
+ "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
5308
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
5309
+ "mmlu_eval_accuracy_computer_security": 0.5454545454545454,
5310
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
5311
+ "mmlu_eval_accuracy_econometrics": 0.25,
5312
+ "mmlu_eval_accuracy_electrical_engineering": 0.375,
5313
+ "mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
5314
+ "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
5315
+ "mmlu_eval_accuracy_global_facts": 0.3,
5316
+ "mmlu_eval_accuracy_high_school_biology": 0.375,
5317
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
5318
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
5319
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
5320
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
5321
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
5322
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4418604651162791,
5323
+ "mmlu_eval_accuracy_high_school_mathematics": 0.13793103448275862,
5324
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.46153846153846156,
5325
+ "mmlu_eval_accuracy_high_school_physics": 0.058823529411764705,
5326
+ "mmlu_eval_accuracy_high_school_psychology": 0.85,
5327
+ "mmlu_eval_accuracy_high_school_statistics": 0.2608695652173913,
5328
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
5329
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
5330
+ "mmlu_eval_accuracy_human_aging": 0.782608695652174,
5331
+ "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
5332
+ "mmlu_eval_accuracy_international_law": 0.8461538461538461,
5333
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
5334
+ "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
5335
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
5336
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
5337
+ "mmlu_eval_accuracy_marketing": 0.76,
5338
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
5339
+ "mmlu_eval_accuracy_miscellaneous": 0.627906976744186,
5340
+ "mmlu_eval_accuracy_moral_disputes": 0.5,
5341
+ "mmlu_eval_accuracy_moral_scenarios": 0.25,
5342
+ "mmlu_eval_accuracy_nutrition": 0.7272727272727273,
5343
+ "mmlu_eval_accuracy_philosophy": 0.5,
5344
+ "mmlu_eval_accuracy_prehistory": 0.5142857142857142,
5345
+ "mmlu_eval_accuracy_professional_accounting": 0.1935483870967742,
5346
+ "mmlu_eval_accuracy_professional_law": 0.3411764705882353,
5347
+ "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
5348
+ "mmlu_eval_accuracy_professional_psychology": 0.4782608695652174,
5349
+ "mmlu_eval_accuracy_public_relations": 0.5,
5350
+ "mmlu_eval_accuracy_security_studies": 0.48148148148148145,
5351
+ "mmlu_eval_accuracy_sociology": 0.6818181818181818,
5352
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
5353
+ "mmlu_eval_accuracy_virology": 0.5,
5354
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
5355
+ "mmlu_loss": 1.3874250733665636,
5356
+ "step": 5600
5357
+ },
5358
+ {
5359
+ "epoch": 1.78,
5360
+ "learning_rate": 0.0002,
5361
+ "loss": 0.6558,
5362
+ "step": 5610
5363
+ },
5364
+ {
5365
+ "epoch": 1.78,
5366
+ "learning_rate": 0.0002,
5367
+ "loss": 0.714,
5368
+ "step": 5620
5369
+ },
5370
+ {
5371
+ "epoch": 1.78,
5372
+ "learning_rate": 0.0002,
5373
+ "loss": 0.7019,
5374
+ "step": 5630
5375
+ },
5376
+ {
5377
+ "epoch": 1.79,
5378
+ "learning_rate": 0.0002,
5379
+ "loss": 0.7084,
5380
+ "step": 5640
5381
+ },
5382
+ {
5383
+ "epoch": 1.79,
5384
+ "learning_rate": 0.0002,
5385
+ "loss": 0.7184,
5386
+ "step": 5650
5387
+ },
5388
+ {
5389
+ "epoch": 1.79,
5390
+ "learning_rate": 0.0002,
5391
+ "loss": 0.6524,
5392
+ "step": 5660
5393
+ },
5394
+ {
5395
+ "epoch": 1.8,
5396
+ "learning_rate": 0.0002,
5397
+ "loss": 0.7265,
5398
+ "step": 5670
5399
+ },
5400
+ {
5401
+ "epoch": 1.8,
5402
+ "learning_rate": 0.0002,
5403
+ "loss": 0.7164,
5404
+ "step": 5680
5405
+ },
5406
+ {
5407
+ "epoch": 1.8,
5408
+ "learning_rate": 0.0002,
5409
+ "loss": 0.6825,
5410
+ "step": 5690
5411
+ },
5412
+ {
5413
+ "epoch": 1.81,
5414
+ "learning_rate": 0.0002,
5415
+ "loss": 0.7427,
5416
+ "step": 5700
5417
+ },
5418
+ {
5419
+ "epoch": 1.81,
5420
+ "learning_rate": 0.0002,
5421
+ "loss": 0.7416,
5422
+ "step": 5710
5423
+ },
5424
+ {
5425
+ "epoch": 1.81,
5426
+ "learning_rate": 0.0002,
5427
+ "loss": 0.7027,
5428
+ "step": 5720
5429
+ },
5430
+ {
5431
+ "epoch": 1.82,
5432
+ "learning_rate": 0.0002,
5433
+ "loss": 0.7039,
5434
+ "step": 5730
5435
+ },
5436
+ {
5437
+ "epoch": 1.82,
5438
+ "learning_rate": 0.0002,
5439
+ "loss": 0.7108,
5440
+ "step": 5740
5441
+ },
5442
+ {
5443
+ "epoch": 1.82,
5444
+ "learning_rate": 0.0002,
5445
+ "loss": 0.6257,
5446
+ "step": 5750
5447
+ },
5448
+ {
5449
+ "epoch": 1.83,
5450
+ "learning_rate": 0.0002,
5451
+ "loss": 0.6665,
5452
+ "step": 5760
5453
+ },
5454
+ {
5455
+ "epoch": 1.83,
5456
+ "learning_rate": 0.0002,
5457
+ "loss": 0.7371,
5458
+ "step": 5770
5459
+ },
5460
+ {
5461
+ "epoch": 1.83,
5462
+ "learning_rate": 0.0002,
5463
+ "loss": 0.7194,
5464
+ "step": 5780
5465
+ },
5466
+ {
5467
+ "epoch": 1.83,
5468
+ "learning_rate": 0.0002,
5469
+ "loss": 0.7164,
5470
+ "step": 5790
5471
+ },
5472
+ {
5473
+ "epoch": 1.84,
5474
+ "learning_rate": 0.0002,
5475
+ "loss": 0.6887,
5476
+ "step": 5800
5477
+ },
5478
+ {
5479
+ "epoch": 1.84,
5480
+ "eval_loss": 0.732559084892273,
5481
+ "eval_runtime": 111.5342,
5482
+ "eval_samples_per_second": 8.966,
5483
+ "eval_steps_per_second": 4.483,
5484
+ "step": 5800
5485
+ },
5486
+ {
5487
+ "epoch": 1.84,
5488
+ "mmlu_eval_accuracy": 0.4740066355704332,
5489
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
5490
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
5491
+ "mmlu_eval_accuracy_astronomy": 0.4375,
5492
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
5493
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
5494
+ "mmlu_eval_accuracy_college_biology": 0.375,
5495
+ "mmlu_eval_accuracy_college_chemistry": 0.375,
5496
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
5497
+ "mmlu_eval_accuracy_college_mathematics": 0.36363636363636365,
5498
+ "mmlu_eval_accuracy_college_medicine": 0.3181818181818182,
5499
+ "mmlu_eval_accuracy_college_physics": 0.2727272727272727,
5500
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
5501
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
5502
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
5503
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
5504
+ "mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
5505
+ "mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
5506
+ "mmlu_eval_accuracy_global_facts": 0.4,
5507
+ "mmlu_eval_accuracy_high_school_biology": 0.375,
5508
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
5509
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
5510
+ "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
5511
+ "mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
5512
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.5714285714285714,
5513
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4186046511627907,
5514
+ "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
5515
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231,
5516
+ "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
5517
+ "mmlu_eval_accuracy_high_school_psychology": 0.8333333333333334,
5518
+ "mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
5519
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
5520
+ "mmlu_eval_accuracy_high_school_world_history": 0.6538461538461539,
5521
+ "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
5522
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
5523
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
5524
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
5525
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
5526
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
5527
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
5528
+ "mmlu_eval_accuracy_marketing": 0.72,
5529
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
5530
+ "mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
5531
+ "mmlu_eval_accuracy_moral_disputes": 0.42105263157894735,
5532
+ "mmlu_eval_accuracy_moral_scenarios": 0.25,
5533
+ "mmlu_eval_accuracy_nutrition": 0.6363636363636364,
5534
+ "mmlu_eval_accuracy_philosophy": 0.5,
5535
+ "mmlu_eval_accuracy_prehistory": 0.5142857142857142,
5536
+ "mmlu_eval_accuracy_professional_accounting": 0.3870967741935484,
5537
+ "mmlu_eval_accuracy_professional_law": 0.29411764705882354,
5538
+ "mmlu_eval_accuracy_professional_medicine": 0.6451612903225806,
5539
+ "mmlu_eval_accuracy_professional_psychology": 0.4927536231884058,
5540
+ "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
5541
+ "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
5542
+ "mmlu_eval_accuracy_sociology": 0.6363636363636364,
5543
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
5544
+ "mmlu_eval_accuracy_virology": 0.5555555555555556,
5545
+ "mmlu_eval_accuracy_world_religions": 0.631578947368421,
5546
+ "mmlu_loss": 1.3845557023822173,
5547
+ "step": 5800
5548
+ },
5549
+ {
5550
+ "epoch": 1.84,
5551
+ "learning_rate": 0.0002,
5552
+ "loss": 0.668,
5553
+ "step": 5810
5554
+ },
5555
+ {
5556
+ "epoch": 1.84,
5557
+ "learning_rate": 0.0002,
5558
+ "loss": 0.6993,
5559
+ "step": 5820
5560
+ },
5561
+ {
5562
+ "epoch": 1.85,
5563
+ "learning_rate": 0.0002,
5564
+ "loss": 0.7418,
5565
+ "step": 5830
5566
+ },
5567
+ {
5568
+ "epoch": 1.85,
5569
+ "learning_rate": 0.0002,
5570
+ "loss": 0.6916,
5571
+ "step": 5840
5572
+ },
5573
+ {
5574
+ "epoch": 1.85,
5575
+ "learning_rate": 0.0002,
5576
+ "loss": 0.7564,
5577
+ "step": 5850
5578
+ },
5579
+ {
5580
+ "epoch": 1.86,
5581
+ "learning_rate": 0.0002,
5582
+ "loss": 0.641,
5583
+ "step": 5860
5584
+ },
5585
+ {
5586
+ "epoch": 1.86,
5587
+ "learning_rate": 0.0002,
5588
+ "loss": 0.7593,
5589
+ "step": 5870
5590
+ },
5591
+ {
5592
+ "epoch": 1.86,
5593
+ "learning_rate": 0.0002,
5594
+ "loss": 0.6886,
5595
+ "step": 5880
5596
+ },
5597
+ {
5598
+ "epoch": 1.87,
5599
+ "learning_rate": 0.0002,
5600
+ "loss": 0.7053,
5601
+ "step": 5890
5602
+ },
5603
+ {
5604
+ "epoch": 1.87,
5605
+ "learning_rate": 0.0002,
5606
+ "loss": 0.6201,
5607
+ "step": 5900
5608
+ },
5609
+ {
5610
+ "epoch": 1.87,
5611
+ "learning_rate": 0.0002,
5612
+ "loss": 0.6998,
5613
+ "step": 5910
5614
+ },
5615
+ {
5616
+ "epoch": 1.88,
5617
+ "learning_rate": 0.0002,
5618
+ "loss": 0.6768,
5619
+ "step": 5920
5620
+ },
5621
+ {
5622
+ "epoch": 1.88,
5623
+ "learning_rate": 0.0002,
5624
+ "loss": 0.711,
5625
+ "step": 5930
5626
+ },
5627
+ {
5628
+ "epoch": 1.88,
5629
+ "learning_rate": 0.0002,
5630
+ "loss": 0.681,
5631
+ "step": 5940
5632
+ },
5633
+ {
5634
+ "epoch": 1.89,
5635
+ "learning_rate": 0.0002,
5636
+ "loss": 0.7145,
5637
+ "step": 5950
5638
+ },
5639
+ {
5640
+ "epoch": 1.89,
5641
+ "learning_rate": 0.0002,
5642
+ "loss": 0.7513,
5643
+ "step": 5960
5644
+ },
5645
+ {
5646
+ "epoch": 1.89,
5647
+ "learning_rate": 0.0002,
5648
+ "loss": 0.6817,
5649
+ "step": 5970
5650
+ },
5651
+ {
5652
+ "epoch": 1.89,
5653
+ "learning_rate": 0.0002,
5654
+ "loss": 0.6757,
5655
+ "step": 5980
5656
+ },
5657
+ {
5658
+ "epoch": 1.9,
5659
+ "learning_rate": 0.0002,
5660
+ "loss": 0.6899,
5661
+ "step": 5990
5662
+ },
5663
+ {
5664
+ "epoch": 1.9,
5665
+ "learning_rate": 0.0002,
5666
+ "loss": 0.6821,
5667
+ "step": 6000
5668
+ },
5669
+ {
5670
+ "epoch": 1.9,
5671
+ "eval_loss": 0.7302425503730774,
5672
+ "eval_runtime": 111.0525,
5673
+ "eval_samples_per_second": 9.005,
5674
+ "eval_steps_per_second": 4.502,
5675
+ "step": 6000
5676
+ },
5677
+ {
5678
+ "epoch": 1.9,
5679
+ "mmlu_eval_accuracy": 0.47023094937776666,
5680
+ "mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
5681
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
5682
+ "mmlu_eval_accuracy_astronomy": 0.4375,
5683
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
5684
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
5685
+ "mmlu_eval_accuracy_college_biology": 0.375,
5686
+ "mmlu_eval_accuracy_college_chemistry": 0.125,
5687
+ "mmlu_eval_accuracy_college_computer_science": 0.18181818181818182,
5688
+ "mmlu_eval_accuracy_college_mathematics": 0.36363636363636365,
5689
+ "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
5690
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
5691
+ "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
5692
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
5693
+ "mmlu_eval_accuracy_econometrics": 0.25,
5694
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
5695
+ "mmlu_eval_accuracy_elementary_mathematics": 0.2682926829268293,
5696
+ "mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
5697
+ "mmlu_eval_accuracy_global_facts": 0.4,
5698
+ "mmlu_eval_accuracy_high_school_biology": 0.34375,
5699
+ "mmlu_eval_accuracy_high_school_chemistry": 0.3181818181818182,
5700
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
5701
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
5702
+ "mmlu_eval_accuracy_high_school_geography": 0.7727272727272727,
5703
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.5714285714285714,
5704
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4186046511627907,
5705
+ "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
5706
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
5707
+ "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
5708
+ "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
5709
+ "mmlu_eval_accuracy_high_school_statistics": 0.21739130434782608,
5710
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
5711
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
5712
+ "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
5713
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
5714
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
5715
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
5716
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
5717
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
5718
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
5719
+ "mmlu_eval_accuracy_marketing": 0.72,
5720
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
5721
+ "mmlu_eval_accuracy_miscellaneous": 0.627906976744186,
5722
+ "mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
5723
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
5724
+ "mmlu_eval_accuracy_nutrition": 0.5757575757575758,
5725
+ "mmlu_eval_accuracy_philosophy": 0.47058823529411764,
5726
+ "mmlu_eval_accuracy_prehistory": 0.4857142857142857,
5727
+ "mmlu_eval_accuracy_professional_accounting": 0.3870967741935484,
5728
+ "mmlu_eval_accuracy_professional_law": 0.3,
5729
+ "mmlu_eval_accuracy_professional_medicine": 0.6451612903225806,
5730
+ "mmlu_eval_accuracy_professional_psychology": 0.5072463768115942,
5731
+ "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
5732
+ "mmlu_eval_accuracy_security_studies": 0.48148148148148145,
5733
+ "mmlu_eval_accuracy_sociology": 0.6363636363636364,
5734
+ "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
5735
+ "mmlu_eval_accuracy_virology": 0.5,
5736
+ "mmlu_eval_accuracy_world_religions": 0.631578947368421,
5737
+ "mmlu_loss": 1.4106916200087525,
5738
+ "step": 6000
5739
+ },
5740
+ {
5741
+ "epoch": 1.9,
5742
+ "learning_rate": 0.0002,
5743
+ "loss": 0.7115,
5744
+ "step": 6010
5745
+ },
5746
+ {
5747
+ "epoch": 1.91,
5748
+ "learning_rate": 0.0002,
5749
+ "loss": 0.6862,
5750
+ "step": 6020
5751
+ },
5752
+ {
5753
+ "epoch": 1.91,
5754
+ "learning_rate": 0.0002,
5755
+ "loss": 0.6705,
5756
+ "step": 6030
5757
+ },
5758
+ {
5759
+ "epoch": 1.91,
5760
+ "learning_rate": 0.0002,
5761
+ "loss": 0.6848,
5762
+ "step": 6040
5763
+ },
5764
+ {
5765
+ "epoch": 1.92,
5766
+ "learning_rate": 0.0002,
5767
+ "loss": 0.7765,
5768
+ "step": 6050
5769
+ },
5770
+ {
5771
+ "epoch": 1.92,
5772
+ "learning_rate": 0.0002,
5773
+ "loss": 0.6801,
5774
+ "step": 6060
5775
+ },
5776
+ {
5777
+ "epoch": 1.92,
5778
+ "learning_rate": 0.0002,
5779
+ "loss": 0.6648,
5780
+ "step": 6070
5781
+ },
5782
+ {
5783
+ "epoch": 1.93,
5784
+ "learning_rate": 0.0002,
5785
+ "loss": 0.6847,
5786
+ "step": 6080
5787
+ },
5788
+ {
5789
+ "epoch": 1.93,
5790
+ "learning_rate": 0.0002,
5791
+ "loss": 0.665,
5792
+ "step": 6090
5793
+ },
5794
+ {
5795
+ "epoch": 1.93,
5796
+ "learning_rate": 0.0002,
5797
+ "loss": 0.7627,
5798
+ "step": 6100
5799
+ },
5800
+ {
5801
+ "epoch": 1.94,
5802
+ "learning_rate": 0.0002,
5803
+ "loss": 0.6874,
5804
+ "step": 6110
5805
+ },
5806
+ {
5807
+ "epoch": 1.94,
5808
+ "learning_rate": 0.0002,
5809
+ "loss": 0.6907,
5810
+ "step": 6120
5811
+ },
5812
+ {
5813
+ "epoch": 1.94,
5814
+ "learning_rate": 0.0002,
5815
+ "loss": 0.6369,
5816
+ "step": 6130
5817
+ },
5818
+ {
5819
+ "epoch": 1.95,
5820
+ "learning_rate": 0.0002,
5821
+ "loss": 0.7289,
5822
+ "step": 6140
5823
+ },
5824
+ {
5825
+ "epoch": 1.95,
5826
+ "learning_rate": 0.0002,
5827
+ "loss": 0.7233,
5828
+ "step": 6150
5829
+ },
5830
+ {
5831
+ "epoch": 1.95,
5832
+ "learning_rate": 0.0002,
5833
+ "loss": 0.68,
5834
+ "step": 6160
5835
+ },
5836
+ {
5837
+ "epoch": 1.96,
5838
+ "learning_rate": 0.0002,
5839
+ "loss": 0.6842,
5840
+ "step": 6170
5841
+ },
5842
+ {
5843
+ "epoch": 1.96,
5844
+ "learning_rate": 0.0002,
5845
+ "loss": 0.7125,
5846
+ "step": 6180
5847
+ },
5848
+ {
5849
+ "epoch": 1.96,
5850
+ "learning_rate": 0.0002,
5851
+ "loss": 0.683,
5852
+ "step": 6190
5853
+ },
5854
+ {
5855
+ "epoch": 1.96,
5856
+ "learning_rate": 0.0002,
5857
+ "loss": 0.7097,
5858
+ "step": 6200
5859
+ },
5860
+ {
5861
+ "epoch": 1.96,
5862
+ "eval_loss": 0.7293602228164673,
5863
+ "eval_runtime": 111.0579,
5864
+ "eval_samples_per_second": 9.004,
5865
+ "eval_steps_per_second": 4.502,
5866
+ "step": 6200
5867
+ },
5868
+ {
5869
+ "epoch": 1.96,
5870
+ "mmlu_eval_accuracy": 0.4704848103487601,
5871
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
5872
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
5873
+ "mmlu_eval_accuracy_astronomy": 0.4375,
5874
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
5875
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4482758620689655,
5876
+ "mmlu_eval_accuracy_college_biology": 0.375,
5877
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
5878
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
5879
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
5880
+ "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
5881
+ "mmlu_eval_accuracy_college_physics": 0.2727272727272727,
5882
+ "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
5883
+ "mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
5884
+ "mmlu_eval_accuracy_econometrics": 0.25,
5885
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
5886
+ "mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
5887
+ "mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
5888
+ "mmlu_eval_accuracy_global_facts": 0.3,
5889
+ "mmlu_eval_accuracy_high_school_biology": 0.375,
5890
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
5891
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
5892
+ "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
5893
+ "mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
5894
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.5714285714285714,
5895
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4883720930232558,
5896
+ "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
5897
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231,
5898
+ "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
5899
+ "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
5900
+ "mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
5901
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
5902
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
5903
+ "mmlu_eval_accuracy_human_aging": 0.782608695652174,
5904
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
5905
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
5906
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
5907
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
5908
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
5909
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
5910
+ "mmlu_eval_accuracy_marketing": 0.68,
5911
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
5912
+ "mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
5913
+ "mmlu_eval_accuracy_moral_disputes": 0.5526315789473685,
5914
+ "mmlu_eval_accuracy_moral_scenarios": 0.28,
5915
+ "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
5916
+ "mmlu_eval_accuracy_philosophy": 0.4411764705882353,
5917
+ "mmlu_eval_accuracy_prehistory": 0.45714285714285713,
5918
+ "mmlu_eval_accuracy_professional_accounting": 0.41935483870967744,
5919
+ "mmlu_eval_accuracy_professional_law": 0.31176470588235294,
5920
+ "mmlu_eval_accuracy_professional_medicine": 0.5806451612903226,
5921
+ "mmlu_eval_accuracy_professional_psychology": 0.463768115942029,
5922
+ "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
5923
+ "mmlu_eval_accuracy_security_studies": 0.48148148148148145,
5924
+ "mmlu_eval_accuracy_sociology": 0.6363636363636364,
5925
+ "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
5926
+ "mmlu_eval_accuracy_virology": 0.5555555555555556,
5927
+ "mmlu_eval_accuracy_world_religions": 0.631578947368421,
5928
+ "mmlu_loss": 1.374586288985011,
5929
+ "step": 6200
5930
+ },
5931
+ {
5932
+ "epoch": 1.97,
5933
+ "learning_rate": 0.0002,
5934
+ "loss": 0.7095,
5935
+ "step": 6210
5936
+ },
5937
+ {
5938
+ "epoch": 1.97,
5939
+ "learning_rate": 0.0002,
5940
+ "loss": 0.7681,
5941
+ "step": 6220
5942
+ },
5943
+ {
5944
+ "epoch": 1.97,
5945
+ "learning_rate": 0.0002,
5946
+ "loss": 0.7356,
5947
+ "step": 6230
5948
+ },
5949
+ {
5950
+ "epoch": 1.98,
5951
+ "learning_rate": 0.0002,
5952
+ "loss": 0.6956,
5953
+ "step": 6240
5954
+ },
5955
+ {
5956
+ "epoch": 1.98,
5957
+ "learning_rate": 0.0002,
5958
+ "loss": 0.7034,
5959
+ "step": 6250
5960
+ },
5961
+ {
5962
+ "epoch": 1.98,
5963
+ "learning_rate": 0.0002,
5964
+ "loss": 0.6532,
5965
+ "step": 6260
5966
+ },
5967
+ {
5968
+ "epoch": 1.99,
5969
+ "learning_rate": 0.0002,
5970
+ "loss": 0.6917,
5971
+ "step": 6270
5972
+ },
5973
+ {
5974
+ "epoch": 1.99,
5975
+ "learning_rate": 0.0002,
5976
+ "loss": 0.6392,
5977
+ "step": 6280
5978
+ },
5979
+ {
5980
+ "epoch": 1.99,
5981
+ "learning_rate": 0.0002,
5982
+ "loss": 0.6656,
5983
+ "step": 6290
5984
+ },
5985
+ {
5986
+ "epoch": 2.0,
5987
+ "learning_rate": 0.0002,
5988
+ "loss": 0.6829,
5989
+ "step": 6300
5990
+ },
5991
+ {
5992
+ "epoch": 2.0,
5993
+ "learning_rate": 0.0002,
5994
+ "loss": 0.675,
5995
+ "step": 6310
5996
+ },
5997
+ {
5998
+ "epoch": 2.0,
5999
+ "learning_rate": 0.0002,
6000
+ "loss": 0.6321,
6001
+ "step": 6320
6002
+ },
6003
+ {
6004
+ "epoch": 2.01,
6005
+ "learning_rate": 0.0002,
6006
+ "loss": 0.6109,
6007
+ "step": 6330
6008
+ },
6009
+ {
6010
+ "epoch": 2.01,
6011
+ "learning_rate": 0.0002,
6012
+ "loss": 0.6065,
6013
+ "step": 6340
6014
+ },
6015
+ {
6016
+ "epoch": 2.01,
6017
+ "learning_rate": 0.0002,
6018
+ "loss": 0.5912,
6019
+ "step": 6350
6020
+ },
6021
+ {
6022
+ "epoch": 2.02,
6023
+ "learning_rate": 0.0002,
6024
+ "loss": 0.613,
6025
+ "step": 6360
6026
+ },
6027
+ {
6028
+ "epoch": 2.02,
6029
+ "learning_rate": 0.0002,
6030
+ "loss": 0.586,
6031
+ "step": 6370
6032
+ },
6033
+ {
6034
+ "epoch": 2.02,
6035
+ "learning_rate": 0.0002,
6036
+ "loss": 0.6383,
6037
+ "step": 6380
6038
+ },
6039
+ {
6040
+ "epoch": 2.02,
6041
+ "learning_rate": 0.0002,
6042
+ "loss": 0.5629,
6043
+ "step": 6390
6044
+ },
6045
+ {
6046
+ "epoch": 2.03,
6047
+ "learning_rate": 0.0002,
6048
+ "loss": 0.6048,
6049
+ "step": 6400
6050
+ },
6051
+ {
6052
+ "epoch": 2.03,
6053
+ "eval_loss": 0.7574472427368164,
6054
+ "eval_runtime": 110.9511,
6055
+ "eval_samples_per_second": 9.013,
6056
+ "eval_steps_per_second": 4.506,
6057
+ "step": 6400
6058
+ },
6059
+ {
6060
+ "epoch": 2.03,
6061
+ "mmlu_eval_accuracy": 0.470592564742188,
6062
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
6063
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
6064
+ "mmlu_eval_accuracy_astronomy": 0.375,
6065
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
6066
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
6067
+ "mmlu_eval_accuracy_college_biology": 0.375,
6068
+ "mmlu_eval_accuracy_college_chemistry": 0.125,
6069
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
6070
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
6071
+ "mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
6072
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
6073
+ "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
6074
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
6075
+ "mmlu_eval_accuracy_econometrics": 0.25,
6076
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
6077
+ "mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
6078
+ "mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
6079
+ "mmlu_eval_accuracy_global_facts": 0.3,
6080
+ "mmlu_eval_accuracy_high_school_biology": 0.4375,
6081
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
6082
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
6083
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
6084
+ "mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
6085
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
6086
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.5116279069767442,
6087
+ "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
6088
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
6089
+ "mmlu_eval_accuracy_high_school_physics": 0.0,
6090
+ "mmlu_eval_accuracy_high_school_psychology": 0.8333333333333334,
6091
+ "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
6092
+ "mmlu_eval_accuracy_high_school_us_history": 0.6818181818181818,
6093
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
6094
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
6095
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
6096
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
6097
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
6098
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
6099
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
6100
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
6101
+ "mmlu_eval_accuracy_marketing": 0.76,
6102
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
6103
+ "mmlu_eval_accuracy_miscellaneous": 0.6395348837209303,
6104
+ "mmlu_eval_accuracy_moral_disputes": 0.5,
6105
+ "mmlu_eval_accuracy_moral_scenarios": 0.26,
6106
+ "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
6107
+ "mmlu_eval_accuracy_philosophy": 0.4411764705882353,
6108
+ "mmlu_eval_accuracy_prehistory": 0.4857142857142857,
6109
+ "mmlu_eval_accuracy_professional_accounting": 0.2903225806451613,
6110
+ "mmlu_eval_accuracy_professional_law": 0.32941176470588235,
6111
+ "mmlu_eval_accuracy_professional_medicine": 0.6129032258064516,
6112
+ "mmlu_eval_accuracy_professional_psychology": 0.4927536231884058,
6113
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
6114
+ "mmlu_eval_accuracy_security_studies": 0.48148148148148145,
6115
+ "mmlu_eval_accuracy_sociology": 0.6363636363636364,
6116
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
6117
+ "mmlu_eval_accuracy_virology": 0.5555555555555556,
6118
+ "mmlu_eval_accuracy_world_religions": 0.631578947368421,
6119
+ "mmlu_loss": 1.3004325469542422,
6120
+ "step": 6400
6121
+ },
6122
+ {
6123
+ "epoch": 2.03,
6124
+ "learning_rate": 0.0002,
6125
+ "loss": 0.5702,
6126
+ "step": 6410
6127
+ },
6128
+ {
6129
+ "epoch": 2.03,
6130
+ "learning_rate": 0.0002,
6131
+ "loss": 0.5957,
6132
+ "step": 6420
6133
+ },
6134
+ {
6135
+ "epoch": 2.04,
6136
+ "learning_rate": 0.0002,
6137
+ "loss": 0.5994,
6138
+ "step": 6430
6139
+ },
6140
+ {
6141
+ "epoch": 2.04,
6142
+ "learning_rate": 0.0002,
6143
+ "loss": 0.5922,
6144
+ "step": 6440
6145
+ },
6146
+ {
6147
+ "epoch": 2.04,
6148
+ "learning_rate": 0.0002,
6149
+ "loss": 0.5626,
6150
+ "step": 6450
6151
+ },
6152
+ {
6153
+ "epoch": 2.05,
6154
+ "learning_rate": 0.0002,
6155
+ "loss": 0.5912,
6156
+ "step": 6460
6157
+ },
6158
+ {
6159
+ "epoch": 2.05,
6160
+ "learning_rate": 0.0002,
6161
+ "loss": 0.5877,
6162
+ "step": 6470
6163
+ },
6164
+ {
6165
+ "epoch": 2.05,
6166
+ "learning_rate": 0.0002,
6167
+ "loss": 0.578,
6168
+ "step": 6480
6169
+ },
6170
+ {
6171
+ "epoch": 2.06,
6172
+ "learning_rate": 0.0002,
6173
+ "loss": 0.6207,
6174
+ "step": 6490
6175
+ },
6176
+ {
6177
+ "epoch": 2.06,
6178
+ "learning_rate": 0.0002,
6179
+ "loss": 0.5606,
6180
+ "step": 6500
6181
+ },
6182
+ {
6183
+ "epoch": 2.06,
6184
+ "learning_rate": 0.0002,
6185
+ "loss": 0.553,
6186
+ "step": 6510
6187
+ },
6188
+ {
6189
+ "epoch": 2.07,
6190
+ "learning_rate": 0.0002,
6191
+ "loss": 0.6092,
6192
+ "step": 6520
6193
+ },
6194
+ {
6195
+ "epoch": 2.07,
6196
+ "learning_rate": 0.0002,
6197
+ "loss": 0.6183,
6198
+ "step": 6530
6199
+ },
6200
+ {
6201
+ "epoch": 2.07,
6202
+ "learning_rate": 0.0002,
6203
+ "loss": 0.5825,
6204
+ "step": 6540
6205
+ },
6206
+ {
6207
+ "epoch": 2.08,
6208
+ "learning_rate": 0.0002,
6209
+ "loss": 0.5674,
6210
+ "step": 6550
6211
+ },
6212
+ {
6213
+ "epoch": 2.08,
6214
+ "learning_rate": 0.0002,
6215
+ "loss": 0.5587,
6216
+ "step": 6560
6217
+ },
6218
+ {
6219
+ "epoch": 2.08,
6220
+ "learning_rate": 0.0002,
6221
+ "loss": 0.5317,
6222
+ "step": 6570
6223
+ },
6224
+ {
6225
+ "epoch": 2.08,
6226
+ "learning_rate": 0.0002,
6227
+ "loss": 0.6731,
6228
+ "step": 6580
6229
+ },
6230
+ {
6231
+ "epoch": 2.09,
6232
+ "learning_rate": 0.0002,
6233
+ "loss": 0.6242,
6234
+ "step": 6590
6235
+ },
6236
+ {
6237
+ "epoch": 2.09,
6238
+ "learning_rate": 0.0002,
6239
+ "loss": 0.6332,
6240
+ "step": 6600
6241
+ },
6242
+ {
6243
+ "epoch": 2.09,
6244
+ "eval_loss": 0.7567528486251831,
6245
+ "eval_runtime": 111.0264,
6246
+ "eval_samples_per_second": 9.007,
6247
+ "eval_steps_per_second": 4.503,
6248
+ "step": 6600
6249
+ },
6250
+ {
6251
+ "epoch": 2.09,
6252
+ "mmlu_eval_accuracy": 0.47542707100737025,
6253
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
6254
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
6255
+ "mmlu_eval_accuracy_astronomy": 0.4375,
6256
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
6257
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
6258
+ "mmlu_eval_accuracy_college_biology": 0.375,
6259
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
6260
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
6261
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
6262
+ "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
6263
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
6264
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
6265
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
6266
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
6267
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
6268
+ "mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
6269
+ "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
6270
+ "mmlu_eval_accuracy_global_facts": 0.3,
6271
+ "mmlu_eval_accuracy_high_school_biology": 0.375,
6272
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
6273
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
6274
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
6275
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
6276
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
6277
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.46511627906976744,
6278
+ "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
6279
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.46153846153846156,
6280
+ "mmlu_eval_accuracy_high_school_physics": 0.058823529411764705,
6281
+ "mmlu_eval_accuracy_high_school_psychology": 0.85,
6282
+ "mmlu_eval_accuracy_high_school_statistics": 0.2608695652173913,
6283
+ "mmlu_eval_accuracy_high_school_us_history": 0.6818181818181818,
6284
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
6285
+ "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
6286
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
6287
+ "mmlu_eval_accuracy_international_law": 0.8461538461538461,
6288
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
6289
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
6290
+ "mmlu_eval_accuracy_machine_learning": 0.36363636363636365,
6291
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
6292
+ "mmlu_eval_accuracy_marketing": 0.8,
6293
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
6294
+ "mmlu_eval_accuracy_miscellaneous": 0.6395348837209303,
6295
+ "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
6296
+ "mmlu_eval_accuracy_moral_scenarios": 0.26,
6297
+ "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
6298
+ "mmlu_eval_accuracy_philosophy": 0.47058823529411764,
6299
+ "mmlu_eval_accuracy_prehistory": 0.4857142857142857,
6300
+ "mmlu_eval_accuracy_professional_accounting": 0.25806451612903225,
6301
+ "mmlu_eval_accuracy_professional_law": 0.31176470588235294,
6302
+ "mmlu_eval_accuracy_professional_medicine": 0.5806451612903226,
6303
+ "mmlu_eval_accuracy_professional_psychology": 0.4782608695652174,
6304
+ "mmlu_eval_accuracy_public_relations": 0.5,
6305
+ "mmlu_eval_accuracy_security_studies": 0.48148148148148145,
6306
+ "mmlu_eval_accuracy_sociology": 0.6818181818181818,
6307
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
6308
+ "mmlu_eval_accuracy_virology": 0.5555555555555556,
6309
+ "mmlu_eval_accuracy_world_religions": 0.631578947368421,
6310
+ "mmlu_loss": 1.4275867019247448,
6311
+ "step": 6600
6312
+ },
6313
+ {
6314
+ "epoch": 2.09,
6315
+ "learning_rate": 0.0002,
6316
+ "loss": 0.5948,
6317
+ "step": 6610
6318
+ },
6319
+ {
6320
+ "epoch": 2.1,
6321
+ "learning_rate": 0.0002,
6322
+ "loss": 0.6068,
6323
+ "step": 6620
6324
+ },
6325
+ {
6326
+ "epoch": 2.1,
6327
+ "learning_rate": 0.0002,
6328
+ "loss": 0.5831,
6329
+ "step": 6630
6330
+ },
6331
+ {
6332
+ "epoch": 2.1,
6333
+ "learning_rate": 0.0002,
6334
+ "loss": 0.5664,
6335
+ "step": 6640
6336
+ },
6337
+ {
6338
+ "epoch": 2.11,
6339
+ "learning_rate": 0.0002,
6340
+ "loss": 0.622,
6341
+ "step": 6650
6342
+ },
6343
+ {
6344
+ "epoch": 2.11,
6345
+ "learning_rate": 0.0002,
6346
+ "loss": 0.5759,
6347
+ "step": 6660
6348
+ },
6349
+ {
6350
+ "epoch": 2.11,
6351
+ "learning_rate": 0.0002,
6352
+ "loss": 0.5841,
6353
+ "step": 6670
6354
+ },
6355
+ {
6356
+ "epoch": 2.12,
6357
+ "learning_rate": 0.0002,
6358
+ "loss": 0.6221,
6359
+ "step": 6680
6360
+ },
6361
+ {
6362
+ "epoch": 2.12,
6363
+ "learning_rate": 0.0002,
6364
+ "loss": 0.5904,
6365
+ "step": 6690
6366
+ },
6367
+ {
6368
+ "epoch": 2.12,
6369
+ "learning_rate": 0.0002,
6370
+ "loss": 0.6121,
6371
+ "step": 6700
6372
+ },
6373
+ {
6374
+ "epoch": 2.13,
6375
+ "learning_rate": 0.0002,
6376
+ "loss": 0.5526,
6377
+ "step": 6710
6378
+ },
6379
+ {
6380
+ "epoch": 2.13,
6381
+ "learning_rate": 0.0002,
6382
+ "loss": 0.6742,
6383
+ "step": 6720
6384
+ },
6385
+ {
6386
+ "epoch": 2.13,
6387
+ "learning_rate": 0.0002,
6388
+ "loss": 0.5705,
6389
+ "step": 6730
6390
+ },
6391
+ {
6392
+ "epoch": 2.14,
6393
+ "learning_rate": 0.0002,
6394
+ "loss": 0.6151,
6395
+ "step": 6740
6396
+ },
6397
+ {
6398
+ "epoch": 2.14,
6399
+ "learning_rate": 0.0002,
6400
+ "loss": 0.5902,
6401
+ "step": 6750
6402
+ },
6403
+ {
6404
+ "epoch": 2.14,
6405
+ "learning_rate": 0.0002,
6406
+ "loss": 0.6448,
6407
+ "step": 6760
6408
+ },
6409
+ {
6410
+ "epoch": 2.15,
6411
+ "learning_rate": 0.0002,
6412
+ "loss": 0.5395,
6413
+ "step": 6770
6414
+ },
6415
+ {
6416
+ "epoch": 2.15,
6417
+ "learning_rate": 0.0002,
6418
+ "loss": 0.5613,
6419
+ "step": 6780
6420
+ },
6421
+ {
6422
+ "epoch": 2.15,
6423
+ "learning_rate": 0.0002,
6424
+ "loss": 0.5802,
6425
+ "step": 6790
6426
+ },
6427
+ {
6428
+ "epoch": 2.15,
6429
+ "learning_rate": 0.0002,
6430
+ "loss": 0.6026,
6431
+ "step": 6800
6432
+ },
6433
+ {
6434
+ "epoch": 2.15,
6435
+ "eval_loss": 0.7631368637084961,
6436
+ "eval_runtime": 111.0583,
6437
+ "eval_samples_per_second": 9.004,
6438
+ "eval_steps_per_second": 4.502,
6439
+ "step": 6800
6440
+ },
6441
+ {
6442
+ "epoch": 2.15,
6443
+ "mmlu_eval_accuracy": 0.47370240345715936,
6444
+ "mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
6445
+ "mmlu_eval_accuracy_anatomy": 0.5714285714285714,
6446
+ "mmlu_eval_accuracy_astronomy": 0.4375,
6447
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
6448
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
6449
+ "mmlu_eval_accuracy_college_biology": 0.375,
6450
+ "mmlu_eval_accuracy_college_chemistry": 0.375,
6451
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
6452
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
6453
+ "mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
6454
+ "mmlu_eval_accuracy_college_physics": 0.2727272727272727,
6455
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
6456
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
6457
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
6458
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
6459
+ "mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
6460
+ "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
6461
+ "mmlu_eval_accuracy_global_facts": 0.3,
6462
+ "mmlu_eval_accuracy_high_school_biology": 0.375,
6463
+ "mmlu_eval_accuracy_high_school_chemistry": 0.2727272727272727,
6464
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
6465
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
6466
+ "mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
6467
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
6468
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.5116279069767442,
6469
+ "mmlu_eval_accuracy_high_school_mathematics": 0.13793103448275862,
6470
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.46153846153846156,
6471
+ "mmlu_eval_accuracy_high_school_physics": 0.0,
6472
+ "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
6473
+ "mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
6474
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
6475
+ "mmlu_eval_accuracy_high_school_world_history": 0.6538461538461539,
6476
+ "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
6477
+ "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
6478
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
6479
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
6480
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
6481
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
6482
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
6483
+ "mmlu_eval_accuracy_marketing": 0.72,
6484
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
6485
+ "mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
6486
+ "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
6487
+ "mmlu_eval_accuracy_moral_scenarios": 0.25,
6488
+ "mmlu_eval_accuracy_nutrition": 0.6666666666666666,
6489
+ "mmlu_eval_accuracy_philosophy": 0.5,
6490
+ "mmlu_eval_accuracy_prehistory": 0.4857142857142857,
6491
+ "mmlu_eval_accuracy_professional_accounting": 0.2903225806451613,
6492
+ "mmlu_eval_accuracy_professional_law": 0.3235294117647059,
6493
+ "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
6494
+ "mmlu_eval_accuracy_professional_psychology": 0.5072463768115942,
6495
+ "mmlu_eval_accuracy_public_relations": 0.5,
6496
+ "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
6497
+ "mmlu_eval_accuracy_sociology": 0.6818181818181818,
6498
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
6499
+ "mmlu_eval_accuracy_virology": 0.5555555555555556,
6500
+ "mmlu_eval_accuracy_world_religions": 0.631578947368421,
6501
+ "mmlu_loss": 1.295992794337223,
6502
+ "step": 6800
6503
  }
6504
  ],
6505
  "max_steps": 10000,
6506
  "num_train_epochs": 4,
6507
+ "total_flos": 2.0637949787377336e+18,
6508
  "trial_name": null,
6509
  "trial_params": null
6510
  }
{checkpoint-4600 → checkpoint-6800}/training_args.bin RENAMED
File without changes