Farouk committed on
Commit 5650484 • 1 Parent(s): a87b441

Training in progress, step 8200

adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:34c505ed1233aadc3a11b8854ff062a285901dd7cb4aba26f3d543787998a3a2
+ oid sha256:41495ac2a6e599a28fbb089b85c94cdd1fee50a70344cebf6ef7b2136faa0701
  size 319977229
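The `oid sha256:` lines above are Git LFS pointer fields: the commit swaps which ~320 MB weight blob `adapter_model.bin` resolves to, rather than storing the weights inline. A minimal sketch, assuming the file has already been fetched to a hypothetical local path, of checking a downloaded copy against the new pointer's digest:

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file and return its hex SHA-256 digest."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Placeholder local copy of the file referenced by the pointer above.
expected = "41495ac2a6e599a28fbb089b85c94cdd1fee50a70344cebf6ef7b2136faa0701"
digest = sha256_of("adapter_model.bin")
print("match" if digest == expected else f"mismatch: {digest}")
```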
checkpoint-6000/adapter_model/adapter_model/README.md DELETED
@@ -1,20 +0,0 @@
- ---
- library_name: peft
- ---
- ## Training procedure
-
-
- The following `bitsandbytes` quantization config was used during training:
- - load_in_8bit: False
- - load_in_4bit: True
- - llm_int8_threshold: 6.0
- - llm_int8_skip_modules: None
- - llm_int8_enable_fp32_cpu_offload: False
- - llm_int8_has_fp16_weight: False
- - bnb_4bit_quant_type: nf4
- - bnb_4bit_use_double_quant: True
- - bnb_4bit_compute_dtype: bfloat16
- ### Framework versions
-
-
- - PEFT 0.4.0
checkpoint-6000/adapter_model/adapter_model/adapter_config.json DELETED
@@ -1,26 +0,0 @@
- {
- "auto_mapping": null,
- "base_model_name_or_path": "pankajmathur/orca_mini_v3_7b",
- "bias": "none",
- "fan_in_fan_out": false,
- "inference_mode": true,
- "init_lora_weights": true,
- "layers_pattern": null,
- "layers_to_transform": null,
- "lora_alpha": 16.0,
- "lora_dropout": 0.1,
- "modules_to_save": null,
- "peft_type": "LORA",
- "r": 64,
- "revision": null,
- "target_modules": [
- "v_proj",
- "down_proj",
- "o_proj",
- "k_proj",
- "up_proj",
- "gate_proj",
- "q_proj"
- ],
- "task_type": "CAUSAL_LM"
- }
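The deleted adapter_config.json above is a standard PEFT LoRA configuration (rank 64, alpha 16, dropout 0.1, with all attention and MLP projections as target modules). A minimal sketch, not taken from this repo's training code, of the equivalent `peft` objects; the checkpoint path passed to `PeftModel.from_pretrained` is a placeholder:

```python
import torch
from peft import LoraConfig, PeftModel
from transformers import AutoModelForCausalLM

# Equivalent of the adapter_config.json shown above (for illustration only;
# PeftModel.from_pretrained reads the saved config from the checkpoint itself).
lora_config = LoraConfig(
    r=64,
    lora_alpha=16.0,
    lora_dropout=0.1,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["v_proj", "down_proj", "o_proj", "k_proj",
                    "up_proj", "gate_proj", "q_proj"],
)

# Attach a saved adapter checkpoint to the base model named in the config.
base = AutoModelForCausalLM.from_pretrained(
    "pankajmathur/orca_mini_v3_7b", torch_dtype=torch.bfloat16
)
model = PeftModel.from_pretrained(base, "checkpoint-8200/adapter_model")  # placeholder path
```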
checkpoint-6000/adapter_model/adapter_model/adapter_model.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:1594718415d25a2a9db5d02daa2fddd9f38c41e6c07c47f9622970ce45ff409c
- size 319977229
checkpoint-6200/adapter_model/adapter_model/README.md CHANGED
@@ -92,6 +92,17 @@ The following `bitsandbytes` quantization config was used during training:
  - bnb_4bit_use_double_quant: True
  - bnb_4bit_compute_dtype: bfloat16
 
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: True
+ - bnb_4bit_compute_dtype: bfloat16
+
  The following `bitsandbytes` quantization config was used during training:
  - load_in_8bit: False
  - load_in_4bit: True
@@ -112,5 +123,6 @@
  - PEFT 0.4.0
  - PEFT 0.4.0
  - PEFT 0.4.0
+ - PEFT 0.4.0
 
  - PEFT 0.4.0
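The `bitsandbytes` block repeated in this README describes 4-bit NF4 quantization with double quantization and bfloat16 compute. A minimal sketch, assuming the current `transformers`/`bitsandbytes` API rather than this repo's actual training script, of expressing the same settings:

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Mirrors the quantization config listed in the README diff above.
bnb_config = BitsAndBytesConfig(
    load_in_8bit=False,
    load_in_4bit=True,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)

# Hypothetical usage: load the base model with these settings before attaching the LoRA adapter.
model = AutoModelForCausalLM.from_pretrained(
    "pankajmathur/orca_mini_v3_7b",
    quantization_config=bnb_config,
    device_map="auto",
)
```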
checkpoint-6200/adapter_model/adapter_model/adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:dd8d4db418e38e33609fd25f88967237731ed4ef5adb6aefa1887158a75352e4
+ oid sha256:34c505ed1233aadc3a11b8854ff062a285901dd7cb4aba26f3d543787998a3a2
  size 319977229
{checkpoint-6000 → checkpoint-8200}/README.md RENAMED
File without changes
{checkpoint-6000 → checkpoint-8200}/adapter_config.json RENAMED
File without changes
{checkpoint-6000 → checkpoint-8200}/adapter_model.bin RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1594718415d25a2a9db5d02daa2fddd9f38c41e6c07c47f9622970ce45ff409c
+ oid sha256:41495ac2a6e599a28fbb089b85c94cdd1fee50a70344cebf6ef7b2136faa0701
  size 319977229
{checkpoint-6000 → checkpoint-8200}/added_tokens.json RENAMED
File without changes
{checkpoint-6000 → checkpoint-8200}/optimizer.pt RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b5d837ba55981de50643219987e343b3b272812b6a4c5edae0dbd8959e9177a6
+ oid sha256:a912838b5a3ae49965864568df962db291ad7b9bb503ef5175853621b3e359f5
  size 1279539973
{checkpoint-6000 → checkpoint-8200}/rng_state.pth RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:25afc8c8d4794b78c430b574913890aa6bba66ad028a32ea964d397c5b90e296
+ oid sha256:0522580ce47fdf7fbef466024b247488ac6f7c180d571cb766497ab8f6e933f7
  size 14511
{checkpoint-6000 → checkpoint-8200}/scheduler.pt RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1d57105578b154c1429d6c6085dcbdd82165ec497fa3a0fd74932549f9aaac46
+ oid sha256:613102d8c8b309b4aec1d07a4f439649e72414ddd8dba3c06a125a1039277b82
  size 627
{checkpoint-6000 → checkpoint-8200}/special_tokens_map.json RENAMED
File without changes
{checkpoint-6000 → checkpoint-8200}/tokenizer.model RENAMED
File without changes
{checkpoint-6000 → checkpoint-8200}/tokenizer_config.json RENAMED
File without changes
{checkpoint-6000 → checkpoint-8200}/trainer_state.json RENAMED
@@ -1,8 +1,8 @@
1
  {
2
- "best_metric": 0.7302425503730774,
3
- "best_model_checkpoint": "experts/expert-16/checkpoint-6000",
4
- "epoch": 1.9011406844106464,
5
- "global_step": 6000,
6
  "is_hyper_param_search": false,
7
  "is_local_process_zero": true,
8
  "is_world_process_zero": true,
@@ -5736,11 +5736,2112 @@
5736
  "mmlu_eval_accuracy_world_religions": 0.631578947368421,
5737
  "mmlu_loss": 1.4106916200087525,
5738
  "step": 6000
5739
  }
5740
  ],
5741
  "max_steps": 10000,
5742
  "num_train_epochs": 4,
5743
- "total_flos": 1.821192054607184e+18,
5744
  "trial_name": null,
5745
  "trial_params": null
5746
  }
 
1
  {
2
+ "best_metric": 0.7293602228164673,
3
+ "best_model_checkpoint": "experts/expert-16/checkpoint-6200",
4
+ "epoch": 2.5982256020278833,
5
+ "global_step": 8200,
6
  "is_hyper_param_search": false,
7
  "is_local_process_zero": true,
8
  "is_world_process_zero": true,
 
5736
  "mmlu_eval_accuracy_world_religions": 0.631578947368421,
5737
  "mmlu_loss": 1.4106916200087525,
5738
  "step": 6000
5739
+ },
5740
+ {
5741
+ "epoch": 1.9,
5742
+ "learning_rate": 0.0002,
5743
+ "loss": 0.7115,
5744
+ "step": 6010
5745
+ },
5746
+ {
5747
+ "epoch": 1.91,
5748
+ "learning_rate": 0.0002,
5749
+ "loss": 0.6862,
5750
+ "step": 6020
5751
+ },
5752
+ {
5753
+ "epoch": 1.91,
5754
+ "learning_rate": 0.0002,
5755
+ "loss": 0.6705,
5756
+ "step": 6030
5757
+ },
5758
+ {
5759
+ "epoch": 1.91,
5760
+ "learning_rate": 0.0002,
5761
+ "loss": 0.6848,
5762
+ "step": 6040
5763
+ },
5764
+ {
5765
+ "epoch": 1.92,
5766
+ "learning_rate": 0.0002,
5767
+ "loss": 0.7765,
5768
+ "step": 6050
5769
+ },
5770
+ {
5771
+ "epoch": 1.92,
5772
+ "learning_rate": 0.0002,
5773
+ "loss": 0.6801,
5774
+ "step": 6060
5775
+ },
5776
+ {
5777
+ "epoch": 1.92,
5778
+ "learning_rate": 0.0002,
5779
+ "loss": 0.6648,
5780
+ "step": 6070
5781
+ },
5782
+ {
5783
+ "epoch": 1.93,
5784
+ "learning_rate": 0.0002,
5785
+ "loss": 0.6847,
5786
+ "step": 6080
5787
+ },
5788
+ {
5789
+ "epoch": 1.93,
5790
+ "learning_rate": 0.0002,
5791
+ "loss": 0.665,
5792
+ "step": 6090
5793
+ },
5794
+ {
5795
+ "epoch": 1.93,
5796
+ "learning_rate": 0.0002,
5797
+ "loss": 0.7627,
5798
+ "step": 6100
5799
+ },
5800
+ {
5801
+ "epoch": 1.94,
5802
+ "learning_rate": 0.0002,
5803
+ "loss": 0.6874,
5804
+ "step": 6110
5805
+ },
5806
+ {
5807
+ "epoch": 1.94,
5808
+ "learning_rate": 0.0002,
5809
+ "loss": 0.6907,
5810
+ "step": 6120
5811
+ },
5812
+ {
5813
+ "epoch": 1.94,
5814
+ "learning_rate": 0.0002,
5815
+ "loss": 0.6369,
5816
+ "step": 6130
5817
+ },
5818
+ {
5819
+ "epoch": 1.95,
5820
+ "learning_rate": 0.0002,
5821
+ "loss": 0.7289,
5822
+ "step": 6140
5823
+ },
5824
+ {
5825
+ "epoch": 1.95,
5826
+ "learning_rate": 0.0002,
5827
+ "loss": 0.7233,
5828
+ "step": 6150
5829
+ },
5830
+ {
5831
+ "epoch": 1.95,
5832
+ "learning_rate": 0.0002,
5833
+ "loss": 0.68,
5834
+ "step": 6160
5835
+ },
5836
+ {
5837
+ "epoch": 1.96,
5838
+ "learning_rate": 0.0002,
5839
+ "loss": 0.6842,
5840
+ "step": 6170
5841
+ },
5842
+ {
5843
+ "epoch": 1.96,
5844
+ "learning_rate": 0.0002,
5845
+ "loss": 0.7125,
5846
+ "step": 6180
5847
+ },
5848
+ {
5849
+ "epoch": 1.96,
5850
+ "learning_rate": 0.0002,
5851
+ "loss": 0.683,
5852
+ "step": 6190
5853
+ },
5854
+ {
5855
+ "epoch": 1.96,
5856
+ "learning_rate": 0.0002,
5857
+ "loss": 0.7097,
5858
+ "step": 6200
5859
+ },
5860
+ {
5861
+ "epoch": 1.96,
5862
+ "eval_loss": 0.7293602228164673,
5863
+ "eval_runtime": 111.0579,
5864
+ "eval_samples_per_second": 9.004,
5865
+ "eval_steps_per_second": 4.502,
5866
+ "step": 6200
5867
+ },
5868
+ {
5869
+ "epoch": 1.96,
5870
+ "mmlu_eval_accuracy": 0.4704848103487601,
5871
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
5872
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
5873
+ "mmlu_eval_accuracy_astronomy": 0.4375,
5874
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
5875
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4482758620689655,
5876
+ "mmlu_eval_accuracy_college_biology": 0.375,
5877
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
5878
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
5879
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
5880
+ "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
5881
+ "mmlu_eval_accuracy_college_physics": 0.2727272727272727,
5882
+ "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
5883
+ "mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
5884
+ "mmlu_eval_accuracy_econometrics": 0.25,
5885
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
5886
+ "mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
5887
+ "mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
5888
+ "mmlu_eval_accuracy_global_facts": 0.3,
5889
+ "mmlu_eval_accuracy_high_school_biology": 0.375,
5890
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
5891
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
5892
+ "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
5893
+ "mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
5894
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.5714285714285714,
5895
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4883720930232558,
5896
+ "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
5897
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231,
5898
+ "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
5899
+ "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
5900
+ "mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
5901
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
5902
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
5903
+ "mmlu_eval_accuracy_human_aging": 0.782608695652174,
5904
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
5905
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
5906
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
5907
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
5908
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
5909
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
5910
+ "mmlu_eval_accuracy_marketing": 0.68,
5911
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
5912
+ "mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
5913
+ "mmlu_eval_accuracy_moral_disputes": 0.5526315789473685,
5914
+ "mmlu_eval_accuracy_moral_scenarios": 0.28,
5915
+ "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
5916
+ "mmlu_eval_accuracy_philosophy": 0.4411764705882353,
5917
+ "mmlu_eval_accuracy_prehistory": 0.45714285714285713,
5918
+ "mmlu_eval_accuracy_professional_accounting": 0.41935483870967744,
5919
+ "mmlu_eval_accuracy_professional_law": 0.31176470588235294,
5920
+ "mmlu_eval_accuracy_professional_medicine": 0.5806451612903226,
5921
+ "mmlu_eval_accuracy_professional_psychology": 0.463768115942029,
5922
+ "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
5923
+ "mmlu_eval_accuracy_security_studies": 0.48148148148148145,
5924
+ "mmlu_eval_accuracy_sociology": 0.6363636363636364,
5925
+ "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
5926
+ "mmlu_eval_accuracy_virology": 0.5555555555555556,
5927
+ "mmlu_eval_accuracy_world_religions": 0.631578947368421,
5928
+ "mmlu_loss": 1.374586288985011,
5929
+ "step": 6200
5930
+ },
5931
+ {
5932
+ "epoch": 1.97,
5933
+ "learning_rate": 0.0002,
5934
+ "loss": 0.7095,
5935
+ "step": 6210
5936
+ },
5937
+ {
5938
+ "epoch": 1.97,
5939
+ "learning_rate": 0.0002,
5940
+ "loss": 0.7681,
5941
+ "step": 6220
5942
+ },
5943
+ {
5944
+ "epoch": 1.97,
5945
+ "learning_rate": 0.0002,
5946
+ "loss": 0.7356,
5947
+ "step": 6230
5948
+ },
5949
+ {
5950
+ "epoch": 1.98,
5951
+ "learning_rate": 0.0002,
5952
+ "loss": 0.6956,
5953
+ "step": 6240
5954
+ },
5955
+ {
5956
+ "epoch": 1.98,
5957
+ "learning_rate": 0.0002,
5958
+ "loss": 0.7034,
5959
+ "step": 6250
5960
+ },
5961
+ {
5962
+ "epoch": 1.98,
5963
+ "learning_rate": 0.0002,
5964
+ "loss": 0.6532,
5965
+ "step": 6260
5966
+ },
5967
+ {
5968
+ "epoch": 1.99,
5969
+ "learning_rate": 0.0002,
5970
+ "loss": 0.6917,
5971
+ "step": 6270
5972
+ },
5973
+ {
5974
+ "epoch": 1.99,
5975
+ "learning_rate": 0.0002,
5976
+ "loss": 0.6392,
5977
+ "step": 6280
5978
+ },
5979
+ {
5980
+ "epoch": 1.99,
5981
+ "learning_rate": 0.0002,
5982
+ "loss": 0.6656,
5983
+ "step": 6290
5984
+ },
5985
+ {
5986
+ "epoch": 2.0,
5987
+ "learning_rate": 0.0002,
5988
+ "loss": 0.6829,
5989
+ "step": 6300
5990
+ },
5991
+ {
5992
+ "epoch": 2.0,
5993
+ "learning_rate": 0.0002,
5994
+ "loss": 0.675,
5995
+ "step": 6310
5996
+ },
5997
+ {
5998
+ "epoch": 2.0,
5999
+ "learning_rate": 0.0002,
6000
+ "loss": 0.6321,
6001
+ "step": 6320
6002
+ },
6003
+ {
6004
+ "epoch": 2.01,
6005
+ "learning_rate": 0.0002,
6006
+ "loss": 0.6109,
6007
+ "step": 6330
6008
+ },
6009
+ {
6010
+ "epoch": 2.01,
6011
+ "learning_rate": 0.0002,
6012
+ "loss": 0.6065,
6013
+ "step": 6340
6014
+ },
6015
+ {
6016
+ "epoch": 2.01,
6017
+ "learning_rate": 0.0002,
6018
+ "loss": 0.5912,
6019
+ "step": 6350
6020
+ },
6021
+ {
6022
+ "epoch": 2.02,
6023
+ "learning_rate": 0.0002,
6024
+ "loss": 0.613,
6025
+ "step": 6360
6026
+ },
6027
+ {
6028
+ "epoch": 2.02,
6029
+ "learning_rate": 0.0002,
6030
+ "loss": 0.586,
6031
+ "step": 6370
6032
+ },
6033
+ {
6034
+ "epoch": 2.02,
6035
+ "learning_rate": 0.0002,
6036
+ "loss": 0.6383,
6037
+ "step": 6380
6038
+ },
6039
+ {
6040
+ "epoch": 2.02,
6041
+ "learning_rate": 0.0002,
6042
+ "loss": 0.5629,
6043
+ "step": 6390
6044
+ },
6045
+ {
6046
+ "epoch": 2.03,
6047
+ "learning_rate": 0.0002,
6048
+ "loss": 0.6048,
6049
+ "step": 6400
6050
+ },
6051
+ {
6052
+ "epoch": 2.03,
6053
+ "eval_loss": 0.7574472427368164,
6054
+ "eval_runtime": 110.9511,
6055
+ "eval_samples_per_second": 9.013,
6056
+ "eval_steps_per_second": 4.506,
6057
+ "step": 6400
6058
+ },
6059
+ {
6060
+ "epoch": 2.03,
6061
+ "mmlu_eval_accuracy": 0.470592564742188,
6062
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
6063
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
6064
+ "mmlu_eval_accuracy_astronomy": 0.375,
6065
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
6066
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
6067
+ "mmlu_eval_accuracy_college_biology": 0.375,
6068
+ "mmlu_eval_accuracy_college_chemistry": 0.125,
6069
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
6070
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
6071
+ "mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
6072
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
6073
+ "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
6074
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
6075
+ "mmlu_eval_accuracy_econometrics": 0.25,
6076
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
6077
+ "mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
6078
+ "mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
6079
+ "mmlu_eval_accuracy_global_facts": 0.3,
6080
+ "mmlu_eval_accuracy_high_school_biology": 0.4375,
6081
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
6082
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
6083
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
6084
+ "mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
6085
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
6086
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.5116279069767442,
6087
+ "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
6088
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
6089
+ "mmlu_eval_accuracy_high_school_physics": 0.0,
6090
+ "mmlu_eval_accuracy_high_school_psychology": 0.8333333333333334,
6091
+ "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
6092
+ "mmlu_eval_accuracy_high_school_us_history": 0.6818181818181818,
6093
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
6094
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
6095
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
6096
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
6097
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
6098
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
6099
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
6100
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
6101
+ "mmlu_eval_accuracy_marketing": 0.76,
6102
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
6103
+ "mmlu_eval_accuracy_miscellaneous": 0.6395348837209303,
6104
+ "mmlu_eval_accuracy_moral_disputes": 0.5,
6105
+ "mmlu_eval_accuracy_moral_scenarios": 0.26,
6106
+ "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
6107
+ "mmlu_eval_accuracy_philosophy": 0.4411764705882353,
6108
+ "mmlu_eval_accuracy_prehistory": 0.4857142857142857,
6109
+ "mmlu_eval_accuracy_professional_accounting": 0.2903225806451613,
6110
+ "mmlu_eval_accuracy_professional_law": 0.32941176470588235,
6111
+ "mmlu_eval_accuracy_professional_medicine": 0.6129032258064516,
6112
+ "mmlu_eval_accuracy_professional_psychology": 0.4927536231884058,
6113
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
6114
+ "mmlu_eval_accuracy_security_studies": 0.48148148148148145,
6115
+ "mmlu_eval_accuracy_sociology": 0.6363636363636364,
6116
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
6117
+ "mmlu_eval_accuracy_virology": 0.5555555555555556,
6118
+ "mmlu_eval_accuracy_world_religions": 0.631578947368421,
6119
+ "mmlu_loss": 1.3004325469542422,
6120
+ "step": 6400
6121
+ },
6122
+ {
6123
+ "epoch": 2.03,
6124
+ "learning_rate": 0.0002,
6125
+ "loss": 0.5702,
6126
+ "step": 6410
6127
+ },
6128
+ {
6129
+ "epoch": 2.03,
6130
+ "learning_rate": 0.0002,
6131
+ "loss": 0.5957,
6132
+ "step": 6420
6133
+ },
6134
+ {
6135
+ "epoch": 2.04,
6136
+ "learning_rate": 0.0002,
6137
+ "loss": 0.5994,
6138
+ "step": 6430
6139
+ },
6140
+ {
6141
+ "epoch": 2.04,
6142
+ "learning_rate": 0.0002,
6143
+ "loss": 0.5922,
6144
+ "step": 6440
6145
+ },
6146
+ {
6147
+ "epoch": 2.04,
6148
+ "learning_rate": 0.0002,
6149
+ "loss": 0.5626,
6150
+ "step": 6450
6151
+ },
6152
+ {
6153
+ "epoch": 2.05,
6154
+ "learning_rate": 0.0002,
6155
+ "loss": 0.5912,
6156
+ "step": 6460
6157
+ },
6158
+ {
6159
+ "epoch": 2.05,
6160
+ "learning_rate": 0.0002,
6161
+ "loss": 0.5877,
6162
+ "step": 6470
6163
+ },
6164
+ {
6165
+ "epoch": 2.05,
6166
+ "learning_rate": 0.0002,
6167
+ "loss": 0.578,
6168
+ "step": 6480
6169
+ },
6170
+ {
6171
+ "epoch": 2.06,
6172
+ "learning_rate": 0.0002,
6173
+ "loss": 0.6207,
6174
+ "step": 6490
6175
+ },
6176
+ {
6177
+ "epoch": 2.06,
6178
+ "learning_rate": 0.0002,
6179
+ "loss": 0.5606,
6180
+ "step": 6500
6181
+ },
6182
+ {
6183
+ "epoch": 2.06,
6184
+ "learning_rate": 0.0002,
6185
+ "loss": 0.553,
6186
+ "step": 6510
6187
+ },
6188
+ {
6189
+ "epoch": 2.07,
6190
+ "learning_rate": 0.0002,
6191
+ "loss": 0.6092,
6192
+ "step": 6520
6193
+ },
6194
+ {
6195
+ "epoch": 2.07,
6196
+ "learning_rate": 0.0002,
6197
+ "loss": 0.6183,
6198
+ "step": 6530
6199
+ },
6200
+ {
6201
+ "epoch": 2.07,
6202
+ "learning_rate": 0.0002,
6203
+ "loss": 0.5825,
6204
+ "step": 6540
6205
+ },
6206
+ {
6207
+ "epoch": 2.08,
6208
+ "learning_rate": 0.0002,
6209
+ "loss": 0.5674,
6210
+ "step": 6550
6211
+ },
6212
+ {
6213
+ "epoch": 2.08,
6214
+ "learning_rate": 0.0002,
6215
+ "loss": 0.5587,
6216
+ "step": 6560
6217
+ },
6218
+ {
6219
+ "epoch": 2.08,
6220
+ "learning_rate": 0.0002,
6221
+ "loss": 0.5317,
6222
+ "step": 6570
6223
+ },
6224
+ {
6225
+ "epoch": 2.08,
6226
+ "learning_rate": 0.0002,
6227
+ "loss": 0.6731,
6228
+ "step": 6580
6229
+ },
6230
+ {
6231
+ "epoch": 2.09,
6232
+ "learning_rate": 0.0002,
6233
+ "loss": 0.6242,
6234
+ "step": 6590
6235
+ },
6236
+ {
6237
+ "epoch": 2.09,
6238
+ "learning_rate": 0.0002,
6239
+ "loss": 0.6332,
6240
+ "step": 6600
6241
+ },
6242
+ {
6243
+ "epoch": 2.09,
6244
+ "eval_loss": 0.7567528486251831,
6245
+ "eval_runtime": 111.0264,
6246
+ "eval_samples_per_second": 9.007,
6247
+ "eval_steps_per_second": 4.503,
6248
+ "step": 6600
6249
+ },
6250
+ {
6251
+ "epoch": 2.09,
6252
+ "mmlu_eval_accuracy": 0.47542707100737025,
6253
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
6254
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
6255
+ "mmlu_eval_accuracy_astronomy": 0.4375,
6256
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
6257
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
6258
+ "mmlu_eval_accuracy_college_biology": 0.375,
6259
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
6260
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
6261
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
6262
+ "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
6263
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
6264
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
6265
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
6266
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
6267
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
6268
+ "mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
6269
+ "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
6270
+ "mmlu_eval_accuracy_global_facts": 0.3,
6271
+ "mmlu_eval_accuracy_high_school_biology": 0.375,
6272
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
6273
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
6274
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
6275
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
6276
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
6277
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.46511627906976744,
6278
+ "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
6279
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.46153846153846156,
6280
+ "mmlu_eval_accuracy_high_school_physics": 0.058823529411764705,
6281
+ "mmlu_eval_accuracy_high_school_psychology": 0.85,
6282
+ "mmlu_eval_accuracy_high_school_statistics": 0.2608695652173913,
6283
+ "mmlu_eval_accuracy_high_school_us_history": 0.6818181818181818,
6284
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
6285
+ "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
6286
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
6287
+ "mmlu_eval_accuracy_international_law": 0.8461538461538461,
6288
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
6289
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
6290
+ "mmlu_eval_accuracy_machine_learning": 0.36363636363636365,
6291
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
6292
+ "mmlu_eval_accuracy_marketing": 0.8,
6293
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
6294
+ "mmlu_eval_accuracy_miscellaneous": 0.6395348837209303,
6295
+ "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
6296
+ "mmlu_eval_accuracy_moral_scenarios": 0.26,
6297
+ "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
6298
+ "mmlu_eval_accuracy_philosophy": 0.47058823529411764,
6299
+ "mmlu_eval_accuracy_prehistory": 0.4857142857142857,
6300
+ "mmlu_eval_accuracy_professional_accounting": 0.25806451612903225,
6301
+ "mmlu_eval_accuracy_professional_law": 0.31176470588235294,
6302
+ "mmlu_eval_accuracy_professional_medicine": 0.5806451612903226,
6303
+ "mmlu_eval_accuracy_professional_psychology": 0.4782608695652174,
6304
+ "mmlu_eval_accuracy_public_relations": 0.5,
6305
+ "mmlu_eval_accuracy_security_studies": 0.48148148148148145,
6306
+ "mmlu_eval_accuracy_sociology": 0.6818181818181818,
6307
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
6308
+ "mmlu_eval_accuracy_virology": 0.5555555555555556,
6309
+ "mmlu_eval_accuracy_world_religions": 0.631578947368421,
6310
+ "mmlu_loss": 1.4275867019247448,
6311
+ "step": 6600
6312
+ },
6313
+ {
6314
+ "epoch": 2.09,
6315
+ "learning_rate": 0.0002,
6316
+ "loss": 0.5948,
6317
+ "step": 6610
6318
+ },
6319
+ {
6320
+ "epoch": 2.1,
6321
+ "learning_rate": 0.0002,
6322
+ "loss": 0.6068,
6323
+ "step": 6620
6324
+ },
6325
+ {
6326
+ "epoch": 2.1,
6327
+ "learning_rate": 0.0002,
6328
+ "loss": 0.5831,
6329
+ "step": 6630
6330
+ },
6331
+ {
6332
+ "epoch": 2.1,
6333
+ "learning_rate": 0.0002,
6334
+ "loss": 0.5664,
6335
+ "step": 6640
6336
+ },
6337
+ {
6338
+ "epoch": 2.11,
6339
+ "learning_rate": 0.0002,
6340
+ "loss": 0.622,
6341
+ "step": 6650
6342
+ },
6343
+ {
6344
+ "epoch": 2.11,
6345
+ "learning_rate": 0.0002,
6346
+ "loss": 0.5759,
6347
+ "step": 6660
6348
+ },
6349
+ {
6350
+ "epoch": 2.11,
6351
+ "learning_rate": 0.0002,
6352
+ "loss": 0.5841,
6353
+ "step": 6670
6354
+ },
6355
+ {
6356
+ "epoch": 2.12,
6357
+ "learning_rate": 0.0002,
6358
+ "loss": 0.6221,
6359
+ "step": 6680
6360
+ },
6361
+ {
6362
+ "epoch": 2.12,
6363
+ "learning_rate": 0.0002,
6364
+ "loss": 0.5904,
6365
+ "step": 6690
6366
+ },
6367
+ {
6368
+ "epoch": 2.12,
6369
+ "learning_rate": 0.0002,
6370
+ "loss": 0.6121,
6371
+ "step": 6700
6372
+ },
6373
+ {
6374
+ "epoch": 2.13,
6375
+ "learning_rate": 0.0002,
6376
+ "loss": 0.5526,
6377
+ "step": 6710
6378
+ },
6379
+ {
6380
+ "epoch": 2.13,
6381
+ "learning_rate": 0.0002,
6382
+ "loss": 0.6742,
6383
+ "step": 6720
6384
+ },
6385
+ {
6386
+ "epoch": 2.13,
6387
+ "learning_rate": 0.0002,
6388
+ "loss": 0.5705,
6389
+ "step": 6730
6390
+ },
6391
+ {
6392
+ "epoch": 2.14,
6393
+ "learning_rate": 0.0002,
6394
+ "loss": 0.6151,
6395
+ "step": 6740
6396
+ },
6397
+ {
6398
+ "epoch": 2.14,
6399
+ "learning_rate": 0.0002,
6400
+ "loss": 0.5902,
6401
+ "step": 6750
6402
+ },
6403
+ {
6404
+ "epoch": 2.14,
6405
+ "learning_rate": 0.0002,
6406
+ "loss": 0.6448,
6407
+ "step": 6760
6408
+ },
6409
+ {
6410
+ "epoch": 2.15,
6411
+ "learning_rate": 0.0002,
6412
+ "loss": 0.5395,
6413
+ "step": 6770
6414
+ },
6415
+ {
6416
+ "epoch": 2.15,
6417
+ "learning_rate": 0.0002,
6418
+ "loss": 0.5613,
6419
+ "step": 6780
6420
+ },
6421
+ {
6422
+ "epoch": 2.15,
6423
+ "learning_rate": 0.0002,
6424
+ "loss": 0.5802,
6425
+ "step": 6790
6426
+ },
6427
+ {
6428
+ "epoch": 2.15,
6429
+ "learning_rate": 0.0002,
6430
+ "loss": 0.6026,
6431
+ "step": 6800
6432
+ },
6433
+ {
6434
+ "epoch": 2.15,
6435
+ "eval_loss": 0.7631368637084961,
6436
+ "eval_runtime": 111.0583,
6437
+ "eval_samples_per_second": 9.004,
6438
+ "eval_steps_per_second": 4.502,
6439
+ "step": 6800
6440
+ },
6441
+ {
6442
+ "epoch": 2.15,
6443
+ "mmlu_eval_accuracy": 0.47370240345715936,
6444
+ "mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
6445
+ "mmlu_eval_accuracy_anatomy": 0.5714285714285714,
6446
+ "mmlu_eval_accuracy_astronomy": 0.4375,
6447
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
6448
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
6449
+ "mmlu_eval_accuracy_college_biology": 0.375,
6450
+ "mmlu_eval_accuracy_college_chemistry": 0.375,
6451
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
6452
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
6453
+ "mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
6454
+ "mmlu_eval_accuracy_college_physics": 0.2727272727272727,
6455
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
6456
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
6457
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
6458
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
6459
+ "mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
6460
+ "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
6461
+ "mmlu_eval_accuracy_global_facts": 0.3,
6462
+ "mmlu_eval_accuracy_high_school_biology": 0.375,
6463
+ "mmlu_eval_accuracy_high_school_chemistry": 0.2727272727272727,
6464
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
6465
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
6466
+ "mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
6467
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
6468
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.5116279069767442,
6469
+ "mmlu_eval_accuracy_high_school_mathematics": 0.13793103448275862,
6470
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.46153846153846156,
6471
+ "mmlu_eval_accuracy_high_school_physics": 0.0,
6472
+ "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
6473
+ "mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
6474
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
6475
+ "mmlu_eval_accuracy_high_school_world_history": 0.6538461538461539,
6476
+ "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
6477
+ "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
6478
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
6479
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
6480
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
6481
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
6482
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
6483
+ "mmlu_eval_accuracy_marketing": 0.72,
6484
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
6485
+ "mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
6486
+ "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
6487
+ "mmlu_eval_accuracy_moral_scenarios": 0.25,
6488
+ "mmlu_eval_accuracy_nutrition": 0.6666666666666666,
6489
+ "mmlu_eval_accuracy_philosophy": 0.5,
6490
+ "mmlu_eval_accuracy_prehistory": 0.4857142857142857,
6491
+ "mmlu_eval_accuracy_professional_accounting": 0.2903225806451613,
6492
+ "mmlu_eval_accuracy_professional_law": 0.3235294117647059,
6493
+ "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
6494
+ "mmlu_eval_accuracy_professional_psychology": 0.5072463768115942,
6495
+ "mmlu_eval_accuracy_public_relations": 0.5,
6496
+ "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
6497
+ "mmlu_eval_accuracy_sociology": 0.6818181818181818,
6498
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
6499
+ "mmlu_eval_accuracy_virology": 0.5555555555555556,
6500
+ "mmlu_eval_accuracy_world_religions": 0.631578947368421,
6501
+ "mmlu_loss": 1.295992794337223,
6502
+ "step": 6800
6503
+ },
6504
+ {
6505
+ "epoch": 2.16,
6506
+ "learning_rate": 0.0002,
6507
+ "loss": 0.5435,
6508
+ "step": 6810
6509
+ },
6510
+ {
6511
+ "epoch": 2.16,
6512
+ "learning_rate": 0.0002,
6513
+ "loss": 0.593,
6514
+ "step": 6820
6515
+ },
6516
+ {
6517
+ "epoch": 2.16,
6518
+ "learning_rate": 0.0002,
6519
+ "loss": 0.5898,
6520
+ "step": 6830
6521
+ },
6522
+ {
6523
+ "epoch": 2.17,
6524
+ "learning_rate": 0.0002,
6525
+ "loss": 0.5404,
6526
+ "step": 6840
6527
+ },
6528
+ {
6529
+ "epoch": 2.17,
6530
+ "learning_rate": 0.0002,
6531
+ "loss": 0.593,
6532
+ "step": 6850
6533
+ },
6534
+ {
6535
+ "epoch": 2.17,
6536
+ "learning_rate": 0.0002,
6537
+ "loss": 0.5832,
6538
+ "step": 6860
6539
+ },
6540
+ {
6541
+ "epoch": 2.18,
6542
+ "learning_rate": 0.0002,
6543
+ "loss": 0.6201,
6544
+ "step": 6870
6545
+ },
6546
+ {
6547
+ "epoch": 2.18,
6548
+ "learning_rate": 0.0002,
6549
+ "loss": 0.6147,
6550
+ "step": 6880
6551
+ },
6552
+ {
6553
+ "epoch": 2.18,
6554
+ "learning_rate": 0.0002,
6555
+ "loss": 0.6102,
6556
+ "step": 6890
6557
+ },
6558
+ {
6559
+ "epoch": 2.19,
6560
+ "learning_rate": 0.0002,
6561
+ "loss": 0.5885,
6562
+ "step": 6900
6563
+ },
6564
+ {
6565
+ "epoch": 2.19,
6566
+ "learning_rate": 0.0002,
6567
+ "loss": 0.5549,
6568
+ "step": 6910
6569
+ },
6570
+ {
6571
+ "epoch": 2.19,
6572
+ "learning_rate": 0.0002,
6573
+ "loss": 0.5973,
6574
+ "step": 6920
6575
+ },
6576
+ {
6577
+ "epoch": 2.2,
6578
+ "learning_rate": 0.0002,
6579
+ "loss": 0.589,
6580
+ "step": 6930
6581
+ },
6582
+ {
6583
+ "epoch": 2.2,
6584
+ "learning_rate": 0.0002,
6585
+ "loss": 0.6258,
6586
+ "step": 6940
6587
+ },
6588
+ {
6589
+ "epoch": 2.2,
6590
+ "learning_rate": 0.0002,
6591
+ "loss": 0.6038,
6592
+ "step": 6950
6593
+ },
6594
+ {
6595
+ "epoch": 2.21,
6596
+ "learning_rate": 0.0002,
6597
+ "loss": 0.5865,
6598
+ "step": 6960
6599
+ },
6600
+ {
6601
+ "epoch": 2.21,
6602
+ "learning_rate": 0.0002,
6603
+ "loss": 0.6355,
6604
+ "step": 6970
6605
+ },
6606
+ {
6607
+ "epoch": 2.21,
6608
+ "learning_rate": 0.0002,
6609
+ "loss": 0.6572,
6610
+ "step": 6980
6611
+ },
6612
+ {
6613
+ "epoch": 2.21,
6614
+ "learning_rate": 0.0002,
6615
+ "loss": 0.5367,
6616
+ "step": 6990
6617
+ },
6618
+ {
6619
+ "epoch": 2.22,
6620
+ "learning_rate": 0.0002,
6621
+ "loss": 0.5959,
6622
+ "step": 7000
6623
+ },
6624
+ {
6625
+ "epoch": 2.22,
6626
+ "eval_loss": 0.7645158767700195,
6627
+ "eval_runtime": 111.037,
6628
+ "eval_samples_per_second": 9.006,
6629
+ "eval_steps_per_second": 4.503,
6630
+ "step": 7000
6631
+ },
6632
+ {
6633
+ "epoch": 2.22,
6634
+ "mmlu_eval_accuracy": 0.478166482161635,
6635
+ "mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
6636
+ "mmlu_eval_accuracy_anatomy": 0.5,
6637
+ "mmlu_eval_accuracy_astronomy": 0.4375,
6638
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
6639
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
6640
+ "mmlu_eval_accuracy_college_biology": 0.375,
6641
+ "mmlu_eval_accuracy_college_chemistry": 0.375,
6642
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
6643
+ "mmlu_eval_accuracy_college_mathematics": 0.09090909090909091,
6644
+ "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
6645
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
6646
+ "mmlu_eval_accuracy_computer_security": 0.5454545454545454,
6647
+ "mmlu_eval_accuracy_conceptual_physics": 0.46153846153846156,
6648
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
6649
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
6650
+ "mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
6651
+ "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
6652
+ "mmlu_eval_accuracy_global_facts": 0.5,
6653
+ "mmlu_eval_accuracy_high_school_biology": 0.375,
6654
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
6655
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
6656
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
6657
+ "mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
6658
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
6659
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.46511627906976744,
6660
+ "mmlu_eval_accuracy_high_school_mathematics": 0.13793103448275862,
6661
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.5,
6662
+ "mmlu_eval_accuracy_high_school_physics": 0.058823529411764705,
6663
+ "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
6664
+ "mmlu_eval_accuracy_high_school_statistics": 0.2608695652173913,
6665
+ "mmlu_eval_accuracy_high_school_us_history": 0.6818181818181818,
6666
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
6667
+ "mmlu_eval_accuracy_human_aging": 0.782608695652174,
6668
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
6669
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
6670
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
6671
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
6672
+ "mmlu_eval_accuracy_machine_learning": 0.09090909090909091,
6673
+ "mmlu_eval_accuracy_management": 0.7272727272727273,
6674
+ "mmlu_eval_accuracy_marketing": 0.84,
6675
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
6676
+ "mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
6677
+ "mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
6678
+ "mmlu_eval_accuracy_moral_scenarios": 0.23,
6679
+ "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
6680
+ "mmlu_eval_accuracy_philosophy": 0.5,
6681
+ "mmlu_eval_accuracy_prehistory": 0.5142857142857142,
6682
+ "mmlu_eval_accuracy_professional_accounting": 0.2903225806451613,
6683
+ "mmlu_eval_accuracy_professional_law": 0.35294117647058826,
6684
+ "mmlu_eval_accuracy_professional_medicine": 0.5161290322580645,
6685
+ "mmlu_eval_accuracy_professional_psychology": 0.463768115942029,
6686
+ "mmlu_eval_accuracy_public_relations": 0.5,
6687
+ "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
6688
+ "mmlu_eval_accuracy_sociology": 0.6818181818181818,
6689
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
6690
+ "mmlu_eval_accuracy_virology": 0.5,
6691
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
6692
+ "mmlu_loss": 1.506881151358079,
6693
+ "step": 7000
6694
+ },
6695
+ {
6696
+ "epoch": 2.22,
6697
+ "learning_rate": 0.0002,
6698
+ "loss": 0.6429,
6699
+ "step": 7010
6700
+ },
6701
+ {
6702
+ "epoch": 2.22,
6703
+ "learning_rate": 0.0002,
6704
+ "loss": 0.5899,
6705
+ "step": 7020
6706
+ },
6707
+ {
6708
+ "epoch": 2.23,
6709
+ "learning_rate": 0.0002,
6710
+ "loss": 0.5661,
6711
+ "step": 7030
6712
+ },
6713
+ {
6714
+ "epoch": 2.23,
6715
+ "learning_rate": 0.0002,
6716
+ "loss": 0.5747,
6717
+ "step": 7040
6718
+ },
6719
+ {
6720
+ "epoch": 2.23,
6721
+ "learning_rate": 0.0002,
6722
+ "loss": 0.603,
6723
+ "step": 7050
6724
+ },
6725
+ {
6726
+ "epoch": 2.24,
6727
+ "learning_rate": 0.0002,
6728
+ "loss": 0.5864,
6729
+ "step": 7060
6730
+ },
6731
+ {
6732
+ "epoch": 2.24,
6733
+ "learning_rate": 0.0002,
6734
+ "loss": 0.588,
6735
+ "step": 7070
6736
+ },
6737
+ {
6738
+ "epoch": 2.24,
6739
+ "learning_rate": 0.0002,
6740
+ "loss": 0.6275,
6741
+ "step": 7080
6742
+ },
6743
+ {
6744
+ "epoch": 2.25,
6745
+ "learning_rate": 0.0002,
6746
+ "loss": 0.6118,
6747
+ "step": 7090
6748
+ },
6749
+ {
6750
+ "epoch": 2.25,
6751
+ "learning_rate": 0.0002,
6752
+ "loss": 0.6475,
6753
+ "step": 7100
6754
+ },
6755
+ {
6756
+ "epoch": 2.25,
6757
+ "learning_rate": 0.0002,
6758
+ "loss": 0.6191,
6759
+ "step": 7110
6760
+ },
6761
+ {
6762
+ "epoch": 2.26,
6763
+ "learning_rate": 0.0002,
6764
+ "loss": 0.5623,
6765
+ "step": 7120
6766
+ },
6767
+ {
6768
+ "epoch": 2.26,
6769
+ "learning_rate": 0.0002,
6770
+ "loss": 0.6052,
6771
+ "step": 7130
6772
+ },
6773
+ {
6774
+ "epoch": 2.26,
6775
+ "learning_rate": 0.0002,
6776
+ "loss": 0.545,
6777
+ "step": 7140
6778
+ },
6779
+ {
6780
+ "epoch": 2.27,
6781
+ "learning_rate": 0.0002,
6782
+ "loss": 0.5975,
6783
+ "step": 7150
6784
+ },
6785
+ {
6786
+ "epoch": 2.27,
6787
+ "learning_rate": 0.0002,
6788
+ "loss": 0.6022,
6789
+ "step": 7160
6790
+ },
6791
+ {
6792
+ "epoch": 2.27,
6793
+ "learning_rate": 0.0002,
6794
+ "loss": 0.608,
6795
+ "step": 7170
6796
+ },
6797
+ {
6798
+ "epoch": 2.28,
6799
+ "learning_rate": 0.0002,
6800
+ "loss": 0.6401,
6801
+ "step": 7180
6802
+ },
6803
+ {
6804
+ "epoch": 2.28,
6805
+ "learning_rate": 0.0002,
6806
+ "loss": 0.6429,
6807
+ "step": 7190
6808
+ },
6809
+ {
6810
+ "epoch": 2.28,
6811
+ "learning_rate": 0.0002,
6812
+ "loss": 0.5495,
6813
+ "step": 7200
6814
+ },
6815
+ {
6816
+ "epoch": 2.28,
6817
+ "eval_loss": 0.7578040361404419,
6818
+ "eval_runtime": 111.0662,
6819
+ "eval_samples_per_second": 9.004,
6820
+ "eval_steps_per_second": 4.502,
6821
+ "step": 7200
6822
+ },
6823
+ {
6824
+ "epoch": 2.28,
6825
+ "mmlu_eval_accuracy": 0.47051789661643223,
6826
+ "mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
6827
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
6828
+ "mmlu_eval_accuracy_astronomy": 0.4375,
6829
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
6830
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
6831
+ "mmlu_eval_accuracy_college_biology": 0.375,
6832
+ "mmlu_eval_accuracy_college_chemistry": 0.125,
6833
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
6834
+ "mmlu_eval_accuracy_college_mathematics": 0.09090909090909091,
6835
+ "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
6836
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
6837
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
6838
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
6839
+ "mmlu_eval_accuracy_econometrics": 0.25,
6840
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
6841
+ "mmlu_eval_accuracy_elementary_mathematics": 0.2682926829268293,
6842
+ "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
6843
+ "mmlu_eval_accuracy_global_facts": 0.3,
6844
+ "mmlu_eval_accuracy_high_school_biology": 0.34375,
6845
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
6846
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
6847
+ "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
6848
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
6849
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
6850
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.46511627906976744,
6851
+ "mmlu_eval_accuracy_high_school_mathematics": 0.1724137931034483,
6852
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.46153846153846156,
6853
+ "mmlu_eval_accuracy_high_school_physics": 0.0,
6854
+ "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
6855
+ "mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
6856
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
6857
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
6858
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
6859
+ "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
6860
+ "mmlu_eval_accuracy_international_law": 0.8461538461538461,
6861
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
6862
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
6863
+ "mmlu_eval_accuracy_machine_learning": 0.09090909090909091,
6864
+ "mmlu_eval_accuracy_management": 0.7272727272727273,
6865
+ "mmlu_eval_accuracy_marketing": 0.76,
6866
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
6867
+ "mmlu_eval_accuracy_miscellaneous": 0.6395348837209303,
6868
+ "mmlu_eval_accuracy_moral_disputes": 0.5,
6869
+ "mmlu_eval_accuracy_moral_scenarios": 0.25,
6870
+ "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
6871
+ "mmlu_eval_accuracy_philosophy": 0.4411764705882353,
6872
+ "mmlu_eval_accuracy_prehistory": 0.5142857142857142,
6873
+ "mmlu_eval_accuracy_professional_accounting": 0.3548387096774194,
6874
+ "mmlu_eval_accuracy_professional_law": 0.3176470588235294,
6875
+ "mmlu_eval_accuracy_professional_medicine": 0.6451612903225806,
6876
+ "mmlu_eval_accuracy_professional_psychology": 0.5072463768115942,
6877
+ "mmlu_eval_accuracy_public_relations": 0.5,
6878
+ "mmlu_eval_accuracy_security_studies": 0.48148148148148145,
6879
+ "mmlu_eval_accuracy_sociology": 0.6818181818181818,
6880
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
6881
+ "mmlu_eval_accuracy_virology": 0.5555555555555556,
6882
+ "mmlu_eval_accuracy_world_religions": 0.631578947368421,
6883
+ "mmlu_loss": 1.5382918150944747,
6884
+ "step": 7200
6885
+ },
6886
+ {
6887
+ "epoch": 2.28,
6888
+ "learning_rate": 0.0002,
6889
+ "loss": 0.5606,
6890
+ "step": 7210
6891
+ },
6892
+ {
6893
+ "epoch": 2.29,
6894
+ "learning_rate": 0.0002,
6895
+ "loss": 0.5737,
6896
+ "step": 7220
6897
+ },
6898
+ {
6899
+ "epoch": 2.29,
6900
+ "learning_rate": 0.0002,
6901
+ "loss": 0.6112,
6902
+ "step": 7230
6903
+ },
6904
+ {
6905
+ "epoch": 2.29,
6906
+ "learning_rate": 0.0002,
6907
+ "loss": 0.626,
6908
+ "step": 7240
6909
+ },
6910
+ {
6911
+ "epoch": 2.3,
6912
+ "learning_rate": 0.0002,
6913
+ "loss": 0.608,
6914
+ "step": 7250
6915
+ },
6916
+ {
6917
+ "epoch": 2.3,
6918
+ "learning_rate": 0.0002,
6919
+ "loss": 0.6265,
6920
+ "step": 7260
6921
+ },
6922
+ {
6923
+ "epoch": 2.3,
6924
+ "learning_rate": 0.0002,
6925
+ "loss": 0.6053,
6926
+ "step": 7270
6927
+ },
6928
+ {
6929
+ "epoch": 2.31,
6930
+ "learning_rate": 0.0002,
6931
+ "loss": 0.6135,
6932
+ "step": 7280
6933
+ },
6934
+ {
6935
+ "epoch": 2.31,
6936
+ "learning_rate": 0.0002,
6937
+ "loss": 0.5217,
6938
+ "step": 7290
6939
+ },
6940
+ {
6941
+ "epoch": 2.31,
6942
+ "learning_rate": 0.0002,
6943
+ "loss": 0.6124,
6944
+ "step": 7300
6945
+ },
6946
+ {
6947
+ "epoch": 2.32,
6948
+ "learning_rate": 0.0002,
6949
+ "loss": 0.5506,
6950
+ "step": 7310
6951
+ },
6952
+ {
6953
+ "epoch": 2.32,
6954
+ "learning_rate": 0.0002,
6955
+ "loss": 0.6095,
6956
+ "step": 7320
6957
+ },
6958
+ {
6959
+ "epoch": 2.32,
6960
+ "learning_rate": 0.0002,
6961
+ "loss": 0.5972,
6962
+ "step": 7330
6963
+ },
6964
+ {
6965
+ "epoch": 2.33,
6966
+ "learning_rate": 0.0002,
6967
+ "loss": 0.6714,
6968
+ "step": 7340
6969
+ },
6970
+ {
6971
+ "epoch": 2.33,
6972
+ "learning_rate": 0.0002,
6973
+ "loss": 0.6083,
6974
+ "step": 7350
6975
+ },
6976
+ {
6977
+ "epoch": 2.33,
6978
+ "learning_rate": 0.0002,
6979
+ "loss": 0.6033,
6980
+ "step": 7360
6981
+ },
6982
+ {
6983
+ "epoch": 2.34,
6984
+ "learning_rate": 0.0002,
6985
+ "loss": 0.5881,
6986
+ "step": 7370
6987
+ },
6988
+ {
6989
+ "epoch": 2.34,
6990
+ "learning_rate": 0.0002,
6991
+ "loss": 0.5958,
6992
+ "step": 7380
6993
+ },
6994
+ {
6995
+ "epoch": 2.34,
6996
+ "learning_rate": 0.0002,
6997
+ "loss": 0.6009,
6998
+ "step": 7390
6999
+ },
7000
+ {
7001
+ "epoch": 2.34,
7002
+ "learning_rate": 0.0002,
7003
+ "loss": 0.5608,
7004
+ "step": 7400
7005
+ },
7006
+ {
7007
+ "epoch": 2.34,
7008
+ "eval_loss": 0.767185628414154,
7009
+ "eval_runtime": 111.2161,
7010
+ "eval_samples_per_second": 8.992,
7011
+ "eval_steps_per_second": 4.496,
7012
+ "step": 7400
7013
+ },
7014
+ {
7015
+ "epoch": 2.34,
7016
+ "mmlu_eval_accuracy": 0.46046773240416866,
7017
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
7018
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
7019
+ "mmlu_eval_accuracy_astronomy": 0.4375,
7020
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
7021
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
7022
+ "mmlu_eval_accuracy_college_biology": 0.375,
7023
+ "mmlu_eval_accuracy_college_chemistry": 0.125,
7024
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
7025
+ "mmlu_eval_accuracy_college_mathematics": 0.09090909090909091,
7026
+ "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
7027
+ "mmlu_eval_accuracy_college_physics": 0.09090909090909091,
7028
+ "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
7029
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
7030
+ "mmlu_eval_accuracy_econometrics": 0.25,
7031
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
7032
+ "mmlu_eval_accuracy_elementary_mathematics": 0.2926829268292683,
7033
+ "mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
7034
+ "mmlu_eval_accuracy_global_facts": 0.3,
7035
+ "mmlu_eval_accuracy_high_school_biology": 0.3125,
7036
+ "mmlu_eval_accuracy_high_school_chemistry": 0.13636363636363635,
7037
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
7038
+ "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
7039
+ "mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
7040
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
7041
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.46511627906976744,
7042
+ "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
7043
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.46153846153846156,
7044
+ "mmlu_eval_accuracy_high_school_physics": 0.0,
7045
+ "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
7046
+ "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
7047
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
7048
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
7049
+ "mmlu_eval_accuracy_human_aging": 0.782608695652174,
7050
+ "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
7051
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
7052
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
7053
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
7054
+ "mmlu_eval_accuracy_machine_learning": 0.09090909090909091,
7055
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
7056
+ "mmlu_eval_accuracy_marketing": 0.8,
7057
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
7058
+ "mmlu_eval_accuracy_miscellaneous": 0.627906976744186,
7059
+ "mmlu_eval_accuracy_moral_disputes": 0.5263157894736842,
7060
+ "mmlu_eval_accuracy_moral_scenarios": 0.27,
7061
+ "mmlu_eval_accuracy_nutrition": 0.5454545454545454,
7062
+ "mmlu_eval_accuracy_philosophy": 0.4117647058823529,
7063
+ "mmlu_eval_accuracy_prehistory": 0.5142857142857142,
7064
+ "mmlu_eval_accuracy_professional_accounting": 0.3548387096774194,
7065
+ "mmlu_eval_accuracy_professional_law": 0.3,
7066
+ "mmlu_eval_accuracy_professional_medicine": 0.6451612903225806,
7067
+ "mmlu_eval_accuracy_professional_psychology": 0.4782608695652174,
7068
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
7069
+ "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
7070
+ "mmlu_eval_accuracy_sociology": 0.7272727272727273,
7071
+ "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
7072
+ "mmlu_eval_accuracy_virology": 0.5,
7073
+ "mmlu_eval_accuracy_world_religions": 0.631578947368421,
7074
+ "mmlu_loss": 1.5711101981040392,
7075
+ "step": 7400
7076
+ },
7077
+ {
7078
+ "epoch": 2.35,
7079
+ "learning_rate": 0.0002,
7080
+ "loss": 0.5974,
7081
+ "step": 7410
7082
+ },
7083
+ {
7084
+ "epoch": 2.35,
7085
+ "learning_rate": 0.0002,
7086
+ "loss": 0.5677,
7087
+ "step": 7420
7088
+ },
7089
+ {
7090
+ "epoch": 2.35,
7091
+ "learning_rate": 0.0002,
7092
+ "loss": 0.5592,
7093
+ "step": 7430
7094
+ },
7095
+ {
7096
+ "epoch": 2.36,
7097
+ "learning_rate": 0.0002,
7098
+ "loss": 0.5754,
7099
+ "step": 7440
7100
+ },
7101
+ {
7102
+ "epoch": 2.36,
7103
+ "learning_rate": 0.0002,
7104
+ "loss": 0.6117,
7105
+ "step": 7450
7106
+ },
7107
+ {
7108
+ "epoch": 2.36,
7109
+ "learning_rate": 0.0002,
7110
+ "loss": 0.5462,
7111
+ "step": 7460
7112
+ },
7113
+ {
7114
+ "epoch": 2.37,
7115
+ "learning_rate": 0.0002,
7116
+ "loss": 0.5888,
7117
+ "step": 7470
7118
+ },
7119
+ {
7120
+ "epoch": 2.37,
7121
+ "learning_rate": 0.0002,
7122
+ "loss": 0.5933,
7123
+ "step": 7480
7124
+ },
7125
+ {
7126
+ "epoch": 2.37,
7127
+ "learning_rate": 0.0002,
7128
+ "loss": 0.6329,
7129
+ "step": 7490
7130
+ },
7131
+ {
7132
+ "epoch": 2.38,
7133
+ "learning_rate": 0.0002,
7134
+ "loss": 0.6803,
7135
+ "step": 7500
7136
+ },
7137
+ {
7138
+ "epoch": 2.38,
7139
+ "learning_rate": 0.0002,
7140
+ "loss": 0.5907,
7141
+ "step": 7510
7142
+ },
7143
+ {
7144
+ "epoch": 2.38,
7145
+ "learning_rate": 0.0002,
7146
+ "loss": 0.5929,
7147
+ "step": 7520
7148
+ },
7149
+ {
7150
+ "epoch": 2.39,
7151
+ "learning_rate": 0.0002,
7152
+ "loss": 0.6288,
7153
+ "step": 7530
7154
+ },
7155
+ {
7156
+ "epoch": 2.39,
7157
+ "learning_rate": 0.0002,
7158
+ "loss": 0.5839,
7159
+ "step": 7540
7160
+ },
7161
+ {
7162
+ "epoch": 2.39,
7163
+ "learning_rate": 0.0002,
7164
+ "loss": 0.5886,
7165
+ "step": 7550
7166
+ },
7167
+ {
7168
+ "epoch": 2.4,
7169
+ "learning_rate": 0.0002,
7170
+ "loss": 0.6225,
7171
+ "step": 7560
7172
+ },
7173
+ {
7174
+ "epoch": 2.4,
7175
+ "learning_rate": 0.0002,
7176
+ "loss": 0.6009,
7177
+ "step": 7570
7178
+ },
7179
+ {
7180
+ "epoch": 2.4,
7181
+ "learning_rate": 0.0002,
7182
+ "loss": 0.5975,
7183
+ "step": 7580
7184
+ },
7185
+ {
7186
+ "epoch": 2.4,
7187
+ "learning_rate": 0.0002,
7188
+ "loss": 0.5581,
7189
+ "step": 7590
7190
+ },
7191
+ {
7192
+ "epoch": 2.41,
7193
+ "learning_rate": 0.0002,
7194
+ "loss": 0.612,
7195
+ "step": 7600
7196
+ },
7197
+ {
7198
+ "epoch": 2.41,
7199
+ "eval_loss": 0.76031494140625,
7200
+ "eval_runtime": 111.0399,
7201
+ "eval_samples_per_second": 9.006,
7202
+ "eval_steps_per_second": 4.503,
7203
+ "step": 7600
7204
+ },
7205
+ {
7206
+ "epoch": 2.41,
7207
+ "mmlu_eval_accuracy": 0.47951118911559576,
7208
+ "mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
7209
+ "mmlu_eval_accuracy_anatomy": 0.5714285714285714,
7210
+ "mmlu_eval_accuracy_astronomy": 0.5,
7211
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
7212
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
7213
+ "mmlu_eval_accuracy_college_biology": 0.4375,
7214
+ "mmlu_eval_accuracy_college_chemistry": 0.375,
7215
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
7216
+ "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
7217
+ "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
7218
+ "mmlu_eval_accuracy_college_physics": 0.18181818181818182,
7219
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
7220
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
7221
+ "mmlu_eval_accuracy_econometrics": 0.25,
7222
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
7223
+ "mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
7224
+ "mmlu_eval_accuracy_formal_logic": 0.07142857142857142,
7225
+ "mmlu_eval_accuracy_global_facts": 0.5,
7226
+ "mmlu_eval_accuracy_high_school_biology": 0.40625,
7227
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
7228
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
7229
+ "mmlu_eval_accuracy_high_school_european_history": 0.6666666666666666,
7230
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
7231
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
7232
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4418604651162791,
7233
+ "mmlu_eval_accuracy_high_school_mathematics": 0.1724137931034483,
7234
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.46153846153846156,
7235
+ "mmlu_eval_accuracy_high_school_physics": 0.0,
7236
+ "mmlu_eval_accuracy_high_school_psychology": 0.8833333333333333,
7237
+ "mmlu_eval_accuracy_high_school_statistics": 0.21739130434782608,
7238
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
7239
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
7240
+ "mmlu_eval_accuracy_human_aging": 0.782608695652174,
7241
+ "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
7242
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
7243
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
7244
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
7245
+ "mmlu_eval_accuracy_machine_learning": 0.09090909090909091,
7246
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
7247
+ "mmlu_eval_accuracy_marketing": 0.8,
7248
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
7249
+ "mmlu_eval_accuracy_miscellaneous": 0.6395348837209303,
7250
+ "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
7251
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
7252
+ "mmlu_eval_accuracy_nutrition": 0.6666666666666666,
7253
+ "mmlu_eval_accuracy_philosophy": 0.5588235294117647,
7254
+ "mmlu_eval_accuracy_prehistory": 0.4857142857142857,
7255
+ "mmlu_eval_accuracy_professional_accounting": 0.3225806451612903,
7256
+ "mmlu_eval_accuracy_professional_law": 0.3352941176470588,
7257
+ "mmlu_eval_accuracy_professional_medicine": 0.5806451612903226,
7258
+ "mmlu_eval_accuracy_professional_psychology": 0.5217391304347826,
7259
+ "mmlu_eval_accuracy_public_relations": 0.5,
7260
+ "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
7261
+ "mmlu_eval_accuracy_sociology": 0.6818181818181818,
7262
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
7263
+ "mmlu_eval_accuracy_virology": 0.5,
7264
+ "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
7265
+ "mmlu_loss": 1.584926052947891,
7266
+ "step": 7600
7267
+ },
7268
+ {
7269
+ "epoch": 2.41,
7270
+ "learning_rate": 0.0002,
7271
+ "loss": 0.5914,
7272
+ "step": 7610
7273
+ },
7274
+ {
7275
+ "epoch": 2.41,
7276
+ "learning_rate": 0.0002,
7277
+ "loss": 0.59,
7278
+ "step": 7620
7279
+ },
7280
+ {
7281
+ "epoch": 2.42,
7282
+ "learning_rate": 0.0002,
7283
+ "loss": 0.6179,
7284
+ "step": 7630
7285
+ },
7286
+ {
7287
+ "epoch": 2.42,
7288
+ "learning_rate": 0.0002,
7289
+ "loss": 0.6203,
7290
+ "step": 7640
7291
+ },
7292
+ {
7293
+ "epoch": 2.42,
7294
+ "learning_rate": 0.0002,
7295
+ "loss": 0.6113,
7296
+ "step": 7650
7297
+ },
7298
+ {
7299
+ "epoch": 2.43,
7300
+ "learning_rate": 0.0002,
7301
+ "loss": 0.5505,
7302
+ "step": 7660
7303
+ },
7304
+ {
7305
+ "epoch": 2.43,
7306
+ "learning_rate": 0.0002,
7307
+ "loss": 0.5664,
7308
+ "step": 7670
7309
+ },
7310
+ {
7311
+ "epoch": 2.43,
7312
+ "learning_rate": 0.0002,
7313
+ "loss": 0.596,
7314
+ "step": 7680
7315
+ },
7316
+ {
7317
+ "epoch": 2.44,
7318
+ "learning_rate": 0.0002,
7319
+ "loss": 0.6125,
7320
+ "step": 7690
7321
+ },
7322
+ {
7323
+ "epoch": 2.44,
7324
+ "learning_rate": 0.0002,
7325
+ "loss": 0.607,
7326
+ "step": 7700
7327
+ },
7328
+ {
7329
+ "epoch": 2.44,
7330
+ "learning_rate": 0.0002,
7331
+ "loss": 0.5657,
7332
+ "step": 7710
7333
+ },
7334
+ {
7335
+ "epoch": 2.45,
7336
+ "learning_rate": 0.0002,
7337
+ "loss": 0.5419,
7338
+ "step": 7720
7339
+ },
7340
+ {
7341
+ "epoch": 2.45,
7342
+ "learning_rate": 0.0002,
7343
+ "loss": 0.614,
7344
+ "step": 7730
7345
+ },
7346
+ {
7347
+ "epoch": 2.45,
7348
+ "learning_rate": 0.0002,
7349
+ "loss": 0.6107,
7350
+ "step": 7740
7351
+ },
7352
+ {
7353
+ "epoch": 2.46,
7354
+ "learning_rate": 0.0002,
7355
+ "loss": 0.6099,
7356
+ "step": 7750
7357
+ },
7358
+ {
7359
+ "epoch": 2.46,
7360
+ "learning_rate": 0.0002,
7361
+ "loss": 0.5994,
7362
+ "step": 7760
7363
+ },
7364
+ {
7365
+ "epoch": 2.46,
7366
+ "learning_rate": 0.0002,
7367
+ "loss": 0.6274,
7368
+ "step": 7770
7369
+ },
7370
+ {
7371
+ "epoch": 2.47,
7372
+ "learning_rate": 0.0002,
7373
+ "loss": 0.5902,
7374
+ "step": 7780
7375
+ },
7376
+ {
7377
+ "epoch": 2.47,
7378
+ "learning_rate": 0.0002,
7379
+ "loss": 0.5902,
7380
+ "step": 7790
7381
+ },
7382
+ {
7383
+ "epoch": 2.47,
7384
+ "learning_rate": 0.0002,
7385
+ "loss": 0.599,
7386
+ "step": 7800
7387
+ },
7388
+ {
7389
+ "epoch": 2.47,
7390
+ "eval_loss": 0.760485827922821,
7391
+ "eval_runtime": 111.1916,
7392
+ "eval_samples_per_second": 8.993,
7393
+ "eval_steps_per_second": 4.497,
7394
+ "step": 7800
7395
+ },
7396
+ {
7397
+ "epoch": 2.47,
7398
+ "mmlu_eval_accuracy": 0.48418694277386404,
7399
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
7400
+ "mmlu_eval_accuracy_anatomy": 0.5,
7401
+ "mmlu_eval_accuracy_astronomy": 0.4375,
7402
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
7403
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
7404
+ "mmlu_eval_accuracy_college_biology": 0.375,
7405
+ "mmlu_eval_accuracy_college_chemistry": 0.375,
7406
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
7407
+ "mmlu_eval_accuracy_college_mathematics": 0.09090909090909091,
7408
+ "mmlu_eval_accuracy_college_medicine": 0.5,
7409
+ "mmlu_eval_accuracy_college_physics": 0.2727272727272727,
7410
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
7411
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
7412
+ "mmlu_eval_accuracy_econometrics": 0.25,
7413
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
7414
+ "mmlu_eval_accuracy_elementary_mathematics": 0.36585365853658536,
7415
+ "mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
7416
+ "mmlu_eval_accuracy_global_facts": 0.5,
7417
+ "mmlu_eval_accuracy_high_school_biology": 0.40625,
7418
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
7419
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
7420
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
7421
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
7422
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
7423
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.46511627906976744,
7424
+ "mmlu_eval_accuracy_high_school_mathematics": 0.1724137931034483,
7425
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.5,
7426
+ "mmlu_eval_accuracy_high_school_physics": 0.0,
7427
+ "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
7428
+ "mmlu_eval_accuracy_high_school_statistics": 0.2608695652173913,
7429
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
7430
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
7431
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
7432
+ "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
7433
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
7434
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
7435
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
7436
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
7437
+ "mmlu_eval_accuracy_management": 0.7272727272727273,
7438
+ "mmlu_eval_accuracy_marketing": 0.84,
7439
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
7440
+ "mmlu_eval_accuracy_miscellaneous": 0.627906976744186,
7441
+ "mmlu_eval_accuracy_moral_disputes": 0.5263157894736842,
7442
+ "mmlu_eval_accuracy_moral_scenarios": 0.27,
7443
+ "mmlu_eval_accuracy_nutrition": 0.6666666666666666,
7444
+ "mmlu_eval_accuracy_philosophy": 0.5588235294117647,
7445
+ "mmlu_eval_accuracy_prehistory": 0.5142857142857142,
7446
+ "mmlu_eval_accuracy_professional_accounting": 0.3225806451612903,
7447
+ "mmlu_eval_accuracy_professional_law": 0.34705882352941175,
7448
+ "mmlu_eval_accuracy_professional_medicine": 0.6129032258064516,
7449
+ "mmlu_eval_accuracy_professional_psychology": 0.4782608695652174,
7450
+ "mmlu_eval_accuracy_public_relations": 0.5,
7451
+ "mmlu_eval_accuracy_security_studies": 0.4074074074074074,
7452
+ "mmlu_eval_accuracy_sociology": 0.6363636363636364,
7453
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
7454
+ "mmlu_eval_accuracy_virology": 0.5,
7455
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
7456
+ "mmlu_loss": 1.4828916400204128,
7457
+ "step": 7800
7458
+ },
7459
+ {
7460
+ "epoch": 2.47,
7461
+ "learning_rate": 0.0002,
7462
+ "loss": 0.6005,
7463
+ "step": 7810
7464
+ },
7465
+ {
7466
+ "epoch": 2.48,
7467
+ "learning_rate": 0.0002,
7468
+ "loss": 0.6662,
7469
+ "step": 7820
7470
+ },
7471
+ {
7472
+ "epoch": 2.48,
7473
+ "learning_rate": 0.0002,
7474
+ "loss": 0.5821,
7475
+ "step": 7830
7476
+ },
7477
+ {
7478
+ "epoch": 2.48,
7479
+ "learning_rate": 0.0002,
7480
+ "loss": 0.5826,
7481
+ "step": 7840
7482
+ },
7483
+ {
7484
+ "epoch": 2.49,
7485
+ "learning_rate": 0.0002,
7486
+ "loss": 0.5804,
7487
+ "step": 7850
7488
+ },
7489
+ {
7490
+ "epoch": 2.49,
7491
+ "learning_rate": 0.0002,
7492
+ "loss": 0.587,
7493
+ "step": 7860
7494
+ },
7495
+ {
7496
+ "epoch": 2.49,
7497
+ "learning_rate": 0.0002,
7498
+ "loss": 0.6062,
7499
+ "step": 7870
7500
+ },
7501
+ {
7502
+ "epoch": 2.5,
7503
+ "learning_rate": 0.0002,
7504
+ "loss": 0.5616,
7505
+ "step": 7880
7506
+ },
7507
+ {
7508
+ "epoch": 2.5,
7509
+ "learning_rate": 0.0002,
7510
+ "loss": 0.6351,
7511
+ "step": 7890
7512
+ },
7513
+ {
7514
+ "epoch": 2.5,
7515
+ "learning_rate": 0.0002,
7516
+ "loss": 0.5738,
7517
+ "step": 7900
7518
+ },
7519
+ {
7520
+ "epoch": 2.51,
7521
+ "learning_rate": 0.0002,
7522
+ "loss": 0.5564,
7523
+ "step": 7910
7524
+ },
7525
+ {
7526
+ "epoch": 2.51,
7527
+ "learning_rate": 0.0002,
7528
+ "loss": 0.5696,
7529
+ "step": 7920
7530
+ },
7531
+ {
7532
+ "epoch": 2.51,
7533
+ "learning_rate": 0.0002,
7534
+ "loss": 0.5812,
7535
+ "step": 7930
7536
+ },
7537
+ {
7538
+ "epoch": 2.52,
7539
+ "learning_rate": 0.0002,
7540
+ "loss": 0.5786,
7541
+ "step": 7940
7542
+ },
7543
+ {
7544
+ "epoch": 2.52,
7545
+ "learning_rate": 0.0002,
7546
+ "loss": 0.6053,
7547
+ "step": 7950
7548
+ },
7549
+ {
7550
+ "epoch": 2.52,
7551
+ "learning_rate": 0.0002,
7552
+ "loss": 0.5727,
7553
+ "step": 7960
7554
+ },
7555
+ {
7556
+ "epoch": 2.53,
7557
+ "learning_rate": 0.0002,
7558
+ "loss": 0.621,
7559
+ "step": 7970
7560
+ },
7561
+ {
7562
+ "epoch": 2.53,
7563
+ "learning_rate": 0.0002,
7564
+ "loss": 0.5679,
7565
+ "step": 7980
7566
+ },
7567
+ {
7568
+ "epoch": 2.53,
7569
+ "learning_rate": 0.0002,
7570
+ "loss": 0.6138,
7571
+ "step": 7990
7572
+ },
7573
+ {
7574
+ "epoch": 2.53,
7575
+ "learning_rate": 0.0002,
7576
+ "loss": 0.588,
7577
+ "step": 8000
7578
+ },
7579
+ {
7580
+ "epoch": 2.53,
7581
+ "eval_loss": 0.7585816979408264,
7582
+ "eval_runtime": 111.2835,
7583
+ "eval_samples_per_second": 8.986,
7584
+ "eval_steps_per_second": 4.493,
7585
+ "step": 8000
7586
+ },
7587
+ {
7588
+ "epoch": 2.53,
7589
+ "mmlu_eval_accuracy": 0.48589851563960756,
7590
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
7591
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
7592
+ "mmlu_eval_accuracy_astronomy": 0.4375,
7593
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
7594
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
7595
+ "mmlu_eval_accuracy_college_biology": 0.375,
7596
+ "mmlu_eval_accuracy_college_chemistry": 0.375,
7597
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
7598
+ "mmlu_eval_accuracy_college_mathematics": 0.09090909090909091,
7599
+ "mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
7600
+ "mmlu_eval_accuracy_college_physics": 0.18181818181818182,
7601
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
7602
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
7603
+ "mmlu_eval_accuracy_econometrics": 0.25,
7604
+ "mmlu_eval_accuracy_electrical_engineering": 0.375,
7605
+ "mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
7606
+ "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
7607
+ "mmlu_eval_accuracy_global_facts": 0.5,
7608
+ "mmlu_eval_accuracy_high_school_biology": 0.40625,
7609
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
7610
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
7611
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
7612
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
7613
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
7614
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4883720930232558,
7615
+ "mmlu_eval_accuracy_high_school_mathematics": 0.1724137931034483,
7616
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.5384615384615384,
7617
+ "mmlu_eval_accuracy_high_school_physics": 0.0,
7618
+ "mmlu_eval_accuracy_high_school_psychology": 0.9,
7619
+ "mmlu_eval_accuracy_high_school_statistics": 0.391304347826087,
7620
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
7621
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
7622
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
7623
+ "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
7624
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
7625
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
7626
+ "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
7627
+ "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
7628
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
7629
+ "mmlu_eval_accuracy_marketing": 0.68,
7630
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
7631
+ "mmlu_eval_accuracy_miscellaneous": 0.6627906976744186,
7632
+ "mmlu_eval_accuracy_moral_disputes": 0.5526315789473685,
7633
+ "mmlu_eval_accuracy_moral_scenarios": 0.25,
7634
+ "mmlu_eval_accuracy_nutrition": 0.48484848484848486,
7635
+ "mmlu_eval_accuracy_philosophy": 0.5,
7636
+ "mmlu_eval_accuracy_prehistory": 0.5428571428571428,
7637
+ "mmlu_eval_accuracy_professional_accounting": 0.3225806451612903,
7638
+ "mmlu_eval_accuracy_professional_law": 0.3588235294117647,
7639
+ "mmlu_eval_accuracy_professional_medicine": 0.5806451612903226,
7640
+ "mmlu_eval_accuracy_professional_psychology": 0.4927536231884058,
7641
+ "mmlu_eval_accuracy_public_relations": 0.5,
7642
+ "mmlu_eval_accuracy_security_studies": 0.4074074074074074,
7643
+ "mmlu_eval_accuracy_sociology": 0.7272727272727273,
7644
+ "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
7645
+ "mmlu_eval_accuracy_virology": 0.5,
7646
+ "mmlu_eval_accuracy_world_religions": 0.631578947368421,
7647
+ "mmlu_loss": 1.566373301120402,
7648
+ "step": 8000
7649
+ },
7650
+ {
7651
+ "epoch": 2.54,
7652
+ "learning_rate": 0.0002,
7653
+ "loss": 0.5624,
7654
+ "step": 8010
7655
+ },
7656
+ {
7657
+ "epoch": 2.54,
7658
+ "learning_rate": 0.0002,
7659
+ "loss": 0.6206,
7660
+ "step": 8020
7661
+ },
7662
+ {
7663
+ "epoch": 2.54,
7664
+ "learning_rate": 0.0002,
7665
+ "loss": 0.607,
7666
+ "step": 8030
7667
+ },
7668
+ {
7669
+ "epoch": 2.55,
7670
+ "learning_rate": 0.0002,
7671
+ "loss": 0.6344,
7672
+ "step": 8040
7673
+ },
7674
+ {
7675
+ "epoch": 2.55,
7676
+ "learning_rate": 0.0002,
7677
+ "loss": 0.6705,
7678
+ "step": 8050
7679
+ },
7680
+ {
7681
+ "epoch": 2.55,
7682
+ "learning_rate": 0.0002,
7683
+ "loss": 0.5679,
7684
+ "step": 8060
7685
+ },
7686
+ {
7687
+ "epoch": 2.56,
7688
+ "learning_rate": 0.0002,
7689
+ "loss": 0.6,
7690
+ "step": 8070
7691
+ },
7692
+ {
7693
+ "epoch": 2.56,
7694
+ "learning_rate": 0.0002,
7695
+ "loss": 0.6486,
7696
+ "step": 8080
7697
+ },
7698
+ {
7699
+ "epoch": 2.56,
7700
+ "learning_rate": 0.0002,
7701
+ "loss": 0.5959,
7702
+ "step": 8090
7703
+ },
7704
+ {
7705
+ "epoch": 2.57,
7706
+ "learning_rate": 0.0002,
7707
+ "loss": 0.6454,
7708
+ "step": 8100
7709
+ },
7710
+ {
7711
+ "epoch": 2.57,
7712
+ "learning_rate": 0.0002,
7713
+ "loss": 0.6085,
7714
+ "step": 8110
7715
+ },
7716
+ {
7717
+ "epoch": 2.57,
7718
+ "learning_rate": 0.0002,
7719
+ "loss": 0.5509,
7720
+ "step": 8120
7721
+ },
7722
+ {
7723
+ "epoch": 2.58,
7724
+ "learning_rate": 0.0002,
7725
+ "loss": 0.6267,
7726
+ "step": 8130
7727
+ },
7728
+ {
7729
+ "epoch": 2.58,
7730
+ "learning_rate": 0.0002,
7731
+ "loss": 0.5865,
7732
+ "step": 8140
7733
+ },
7734
+ {
7735
+ "epoch": 2.58,
7736
+ "learning_rate": 0.0002,
7737
+ "loss": 0.6002,
7738
+ "step": 8150
7739
+ },
7740
+ {
7741
+ "epoch": 2.59,
7742
+ "learning_rate": 0.0002,
7743
+ "loss": 0.6342,
7744
+ "step": 8160
7745
+ },
7746
+ {
7747
+ "epoch": 2.59,
7748
+ "learning_rate": 0.0002,
7749
+ "loss": 0.6312,
7750
+ "step": 8170
7751
+ },
7752
+ {
7753
+ "epoch": 2.59,
7754
+ "learning_rate": 0.0002,
7755
+ "loss": 0.6361,
7756
+ "step": 8180
7757
+ },
7758
+ {
7759
+ "epoch": 2.6,
7760
+ "learning_rate": 0.0002,
7761
+ "loss": 0.5676,
7762
+ "step": 8190
7763
+ },
7764
+ {
7765
+ "epoch": 2.6,
7766
+ "learning_rate": 0.0002,
7767
+ "loss": 0.6125,
7768
+ "step": 8200
7769
+ },
7770
+ {
7771
+ "epoch": 2.6,
7772
+ "eval_loss": 0.7568719387054443,
7773
+ "eval_runtime": 111.2374,
7774
+ "eval_samples_per_second": 8.99,
7775
+ "eval_steps_per_second": 4.495,
7776
+ "step": 8200
7777
+ },
7778
+ {
7779
+ "epoch": 2.6,
7780
+ "mmlu_eval_accuracy": 0.4699982014237092,
7781
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
7782
+ "mmlu_eval_accuracy_anatomy": 0.5714285714285714,
7783
+ "mmlu_eval_accuracy_astronomy": 0.4375,
7784
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
7785
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5517241379310345,
7786
+ "mmlu_eval_accuracy_college_biology": 0.375,
7787
+ "mmlu_eval_accuracy_college_chemistry": 0.375,
7788
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
7789
+ "mmlu_eval_accuracy_college_mathematics": 0.09090909090909091,
7790
+ "mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
7791
+ "mmlu_eval_accuracy_college_physics": 0.2727272727272727,
7792
+ "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
7793
+ "mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
7794
+ "mmlu_eval_accuracy_econometrics": 0.25,
7795
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
7796
+ "mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
7797
+ "mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
7798
+ "mmlu_eval_accuracy_global_facts": 0.3,
7799
+ "mmlu_eval_accuracy_high_school_biology": 0.375,
7800
+ "mmlu_eval_accuracy_high_school_chemistry": 0.2727272727272727,
7801
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
7802
+ "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
7803
+ "mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
7804
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
7805
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.46511627906976744,
7806
+ "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
7807
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.46153846153846156,
7808
+ "mmlu_eval_accuracy_high_school_physics": 0.058823529411764705,
7809
+ "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
7810
+ "mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
7811
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
7812
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
7813
+ "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
7814
+ "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
7815
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
7816
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
7817
+ "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
7818
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
7819
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
7820
+ "mmlu_eval_accuracy_marketing": 0.68,
7821
+ "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
7822
+ "mmlu_eval_accuracy_miscellaneous": 0.6744186046511628,
7823
+ "mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
7824
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
7825
+ "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
7826
+ "mmlu_eval_accuracy_philosophy": 0.47058823529411764,
7827
+ "mmlu_eval_accuracy_prehistory": 0.5142857142857142,
7828
+ "mmlu_eval_accuracy_professional_accounting": 0.2903225806451613,
7829
+ "mmlu_eval_accuracy_professional_law": 0.3588235294117647,
7830
+ "mmlu_eval_accuracy_professional_medicine": 0.5806451612903226,
7831
+ "mmlu_eval_accuracy_professional_psychology": 0.5072463768115942,
7832
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
7833
+ "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
7834
+ "mmlu_eval_accuracy_sociology": 0.6818181818181818,
7835
+ "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
7836
+ "mmlu_eval_accuracy_virology": 0.5,
7837
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
7838
+ "mmlu_loss": 1.3685555242527248,
7839
+ "step": 8200
7840
  }
7841
  ],
7842
  "max_steps": 10000,
7843
  "num_train_epochs": 4,
7844
+ "total_flos": 2.488198759838761e+18,
7845
  "trial_name": null,
7846
  "trial_params": null
7847
  }
{checkpoint-6000 → checkpoint-8200}/training_args.bin RENAMED
File without changes