prateeky2806 committed
Commit 25d6710
1 Parent(s): ded333f

Training in progress, step 2800
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5ee7e41128582785dd24e1c9dc00e5460ce8c55f73be522326efed607f54266a
+oid sha256:0108a9915efa007676867bf99c94032dc1f433fc686a888e50eff6ad063d8ca5
 size 319977229
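The entry above is a Git LFS pointer file: the repository itself stores only the sha256 digest (`oid`) and byte size, while the 320 MB adapter payload lives in LFS storage. As a minimal sketch (assuming the file has been pulled locally; the path is illustrative), a downloaded copy can be checked against the pointer like this:

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    # Stream the file so a 320 MB checkpoint never sits in memory at once.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# oid from the new pointer above
expected = "0108a9915efa007676867bf99c94032dc1f433fc686a888e50eff6ad063d8ca5"
assert sha256_of("adapter_model.bin") == expected, "LFS oid mismatch"
```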
checkpoint-2200/adapter_model/adapter_model/README.md CHANGED
@@ -15,6 +15,17 @@ The following `bitsandbytes` quantization config was used during training:
 - bnb_4bit_use_double_quant: True
 - bnb_4bit_compute_dtype: bfloat16
 
+The following `bitsandbytes` quantization config was used during training:
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+
 The following `bitsandbytes` quantization config was used during training:
 - load_in_8bit: False
 - load_in_4bit: True
@@ -27,6 +38,7 @@ The following `bitsandbytes` quantization config was used during training:
 - bnb_4bit_compute_dtype: bfloat16
 ### Framework versions
 
+- PEFT 0.4.0
 - PEFT 0.4.0
 
 - PEFT 0.4.0
checkpoint-2200/adapter_model/adapter_model/adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e85fd3e6b70102003d1579eab4e7d5e34cae0e96f378721c4976efa6d46ee2a7
+oid sha256:5ee7e41128582785dd24e1c9dc00e5460ce8c55f73be522326efed607f54266a
 size 319977229
checkpoint-2800/README.md ADDED
@@ -0,0 +1,20 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.4.0
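The README lists the quantization settings as plain key-value pairs; a sketch of the equivalent `transformers.BitsAndBytesConfig` (field names mirror the list above; this reconstruction is illustrative, not taken from the actual training code):

```python
import torch
from transformers import BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_8bit=False,
    load_in_4bit=True,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="nf4",           # 4-bit NormalFloat quantization
    bnb_4bit_use_double_quant=True,      # also quantize the quantization constants
    bnb_4bit_compute_dtype=torch.bfloat16,
)
```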
checkpoint-2800/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+{
+  "auto_mapping": null,
+  "base_model_name_or_path": "NousResearch/Nous-Hermes-llama-2-7b",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 16.0,
+  "lora_dropout": 0.1,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 64,
+  "revision": null,
+  "target_modules": [
+    "down_proj",
+    "v_proj",
+    "up_proj",
+    "q_proj",
+    "gate_proj",
+    "o_proj",
+    "k_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
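This is a rank-64 LoRA over all seven Llama-2 projection matrices. A sketch of the same settings expressed with `peft.LoraConfig` (against the PEFT 0.4.0 API named in the README; values copied from the JSON above):

```python
from peft import LoraConfig

lora_config = LoraConfig(
    base_model_name_or_path="NousResearch/Nous-Hermes-llama-2-7b",
    r=64,                 # LoRA rank
    lora_alpha=16.0,      # scaling: updates are scaled by lora_alpha / r
    lora_dropout=0.1,
    bias="none",
    fan_in_fan_out=False,
    target_modules=[
        "down_proj", "v_proj", "up_proj", "q_proj",
        "gate_proj", "o_proj", "k_proj",
    ],
    task_type="CAUSAL_LM",
)
```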
checkpoint-2800/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0108a9915efa007676867bf99c94032dc1f433fc686a888e50eff6ad063d8ca5
+size 319977229
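A minimal sketch of loading this checkpoint for inference: quantize the base model with the config listed in the README, then attach the adapter weights from the checkpoint directory (paths are illustrative):

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)
base = AutoModelForCausalLM.from_pretrained(
    "NousResearch/Nous-Hermes-llama-2-7b",
    quantization_config=bnb_config,
    device_map="auto",
)
# Directory containing adapter_config.json and adapter_model.bin from this commit.
model = PeftModel.from_pretrained(base, "checkpoint-2800")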
checkpoint-2800/added_tokens.json ADDED
@@ -0,0 +1,3 @@
+{
+  "<pad>": 32000
+}
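`added_tokens.json` records one extra token: Llama's base vocabulary covers ids 0..31999, so a `<pad>` token added on top lands at id 32000 and the embedding matrix has to grow to match. A sketch of the usual pattern (illustrative; the actual training script is not part of this commit):

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

name = "NousResearch/Nous-Hermes-llama-2-7b"
tokenizer = AutoTokenizer.from_pretrained(name)
tokenizer.add_special_tokens({"pad_token": "<pad>"})   # new token id: 32000
model = AutoModelForCausalLM.from_pretrained(name)
model.resize_token_embeddings(len(tokenizer))          # 32000 -> 32001 rows
```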
checkpoint-2800/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:291fc20fe07e9828d861a6d60f863c467034b3ce0147bab97cd00a5148d69d6f
+size 1279539973
checkpoint-2800/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0db3993012977e0a3acc5016b23cfff5aaa47c1d26c765138d1f94c8beb5029d
+size 14511
checkpoint-2800/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:79c08a7148c570c3d4c87a735e3acef3af52d256eb019f3a3f3feeab8a656949
+size 627
checkpoint-2800/special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+{
+  "bos_token": "<s>",
+  "eos_token": "</s>",
+  "pad_token": "<unk>",
+  "unk_token": "<unk>"
+}
checkpoint-2800/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
checkpoint-2800/tokenizer_config.json ADDED
@@ -0,0 +1,35 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "bos_token": {
+    "__type": "AddedToken",
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "clean_up_tokenization_spaces": false,
+  "eos_token": {
+    "__type": "AddedToken",
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "legacy": false,
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": null,
+  "padding_side": "right",
+  "sp_model_kwargs": {},
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": {
+    "__type": "AddedToken",
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
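Two details worth noting in this config: the enormous `model_max_length` is not a real context window but transformers' "unset" sentinel, `int(1e30)` stored through a float (hence the odd trailing digits), and `pad_token` is null here even though `special_tokens_map.json` above maps it to `<unk>`. A minimal loading sketch against a local copy of the checkpoint (the directory path is illustrative):

```python
from transformers import LlamaTokenizer

tok = LlamaTokenizer.from_pretrained("checkpoint-2800")  # local checkpoint dir
print(tok.bos_token, tok.eos_token, tok.unk_token)       # <s> </s> <unk>
```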
checkpoint-2800/trainer_state.json ADDED
@@ -0,0 +1,2690 @@
+{
+  "best_metric": 0.598466694355011,
+  "best_model_checkpoint": "./output_v2/7b_cluster020_Nous-Hermes-llama-2-7b_partitioned_v3_standardized_020/checkpoint-2200",
+  "epoch": 1.1333738109694393,
+  "global_step": 2800,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.0,
+      "learning_rate": 0.0002,
+      "loss": 0.6996,
+      "step": 10
+    },
+    {
+      "epoch": 0.01,
+      "learning_rate": 0.0002,
+      "loss": 0.7986,
+      "step": 20
+    },
+    {
+      "epoch": 0.01,
+      "learning_rate": 0.0002,
+      "loss": 0.5936,
+      "step": 30
+    },
+    {
+      "epoch": 0.02,
+      "learning_rate": 0.0002,
+      "loss": 0.6164,
+      "step": 40
+    },
+    {
+      "epoch": 0.02,
+      "learning_rate": 0.0002,
+      "loss": 0.7464,
+      "step": 50
+    },
+    {
+      "epoch": 0.02,
+      "learning_rate": 0.0002,
+      "loss": 0.8856,
+      "step": 60
+    },
+    {
+      "epoch": 0.03,
+      "learning_rate": 0.0002,
+      "loss": 0.6476,
+      "step": 70
+    },
+    {
+      "epoch": 0.03,
+      "learning_rate": 0.0002,
+      "loss": 0.65,
+      "step": 80
+    },
+    {
+      "epoch": 0.04,
+      "learning_rate": 0.0002,
+      "loss": 0.5282,
+      "step": 90
+    },
+    {
+      "epoch": 0.04,
+      "learning_rate": 0.0002,
+      "loss": 0.5787,
+      "step": 100
+    },
+    {
+      "epoch": 0.04,
+      "learning_rate": 0.0002,
+      "loss": 0.6315,
+      "step": 110
+    },
+    {
+      "epoch": 0.05,
+      "learning_rate": 0.0002,
+      "loss": 0.5419,
+      "step": 120
+    },
+    {
+      "epoch": 0.05,
+      "learning_rate": 0.0002,
+      "loss": 0.593,
+      "step": 130
+    },
+    {
+      "epoch": 0.06,
+      "learning_rate": 0.0002,
+      "loss": 0.6773,
+      "step": 140
+    },
+    {
+      "epoch": 0.06,
+      "learning_rate": 0.0002,
+      "loss": 0.5536,
+      "step": 150
+    },
+    {
+      "epoch": 0.06,
+      "learning_rate": 0.0002,
+      "loss": 0.6384,
+      "step": 160
+    },
+    {
+      "epoch": 0.07,
+      "learning_rate": 0.0002,
+      "loss": 0.5736,
+      "step": 170
+    },
+    {
+      "epoch": 0.07,
+      "learning_rate": 0.0002,
+      "loss": 0.6157,
+      "step": 180
+    },
+    {
+      "epoch": 0.08,
+      "learning_rate": 0.0002,
+      "loss": 0.5551,
+      "step": 190
+    },
+    {
+      "epoch": 0.08,
+      "learning_rate": 0.0002,
+      "loss": 0.6446,
+      "step": 200
+    },
+    {
+      "epoch": 0.08,
+      "eval_loss": 0.6395586133003235,
+      "eval_runtime": 94.1614,
+      "eval_samples_per_second": 10.62,
+      "eval_steps_per_second": 5.31,
+      "step": 200
+    },
+    {
+      "epoch": 0.08,
+      "mmlu_eval_accuracy": 0.4559132721218583,
+      "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
+      "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
+      "mmlu_eval_accuracy_astronomy": 0.4375,
+      "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
+      "mmlu_eval_accuracy_clinical_knowledge": 0.3793103448275862,
+      "mmlu_eval_accuracy_college_biology": 0.4375,
+      "mmlu_eval_accuracy_college_chemistry": 0.125,
+      "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
+      "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
+      "mmlu_eval_accuracy_college_medicine": 0.2727272727272727,
+      "mmlu_eval_accuracy_college_physics": 0.45454545454545453,
+      "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
+      "mmlu_eval_accuracy_conceptual_physics": 0.46153846153846156,
+      "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
+      "mmlu_eval_accuracy_electrical_engineering": 0.375,
+      "mmlu_eval_accuracy_elementary_mathematics": 0.3902439024390244,
+      "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
+      "mmlu_eval_accuracy_global_facts": 0.3,
+      "mmlu_eval_accuracy_high_school_biology": 0.3125,
+      "mmlu_eval_accuracy_high_school_chemistry": 0.36363636363636365,
+      "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
+      "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
+      "mmlu_eval_accuracy_high_school_geography": 0.7272727272727273,
+      "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
+      "mmlu_eval_accuracy_high_school_macroeconomics": 0.32558139534883723,
+      "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
+      "mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231,
+      "mmlu_eval_accuracy_high_school_physics": 0.29411764705882354,
+      "mmlu_eval_accuracy_high_school_psychology": 0.7166666666666667,
+      "mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
+      "mmlu_eval_accuracy_high_school_us_history": 0.7272727272727273,
+      "mmlu_eval_accuracy_high_school_world_history": 0.5,
+      "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
+      "mmlu_eval_accuracy_human_sexuality": 0.5,
+      "mmlu_eval_accuracy_international_law": 0.6923076923076923,
+      "mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
+      "mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556,
+      "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
+      "mmlu_eval_accuracy_management": 0.5454545454545454,
+      "mmlu_eval_accuracy_marketing": 0.72,
+      "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
+      "mmlu_eval_accuracy_miscellaneous": 0.627906976744186,
+      "mmlu_eval_accuracy_moral_disputes": 0.42105263157894735,
+      "mmlu_eval_accuracy_moral_scenarios": 0.24,
+      "mmlu_eval_accuracy_nutrition": 0.5454545454545454,
+      "mmlu_eval_accuracy_philosophy": 0.47058823529411764,
+      "mmlu_eval_accuracy_prehistory": 0.4857142857142857,
+      "mmlu_eval_accuracy_professional_accounting": 0.3548387096774194,
+      "mmlu_eval_accuracy_professional_law": 0.31176470588235294,
+      "mmlu_eval_accuracy_professional_medicine": 0.3870967741935484,
+      "mmlu_eval_accuracy_professional_psychology": 0.43478260869565216,
+      "mmlu_eval_accuracy_public_relations": 0.5,
+      "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
+      "mmlu_eval_accuracy_sociology": 0.6363636363636364,
+      "mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454,
+      "mmlu_eval_accuracy_virology": 0.5,
+      "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
+      "mmlu_loss": 1.0596903230868493,
+      "step": 200
+    },
+    {
+      "epoch": 0.09,
+      "learning_rate": 0.0002,
+      "loss": 0.7307,
+      "step": 210
+    },
+    {
+      "epoch": 0.09,
+      "learning_rate": 0.0002,
+      "loss": 0.5717,
+      "step": 220
+    },
+    {
+      "epoch": 0.09,
+      "learning_rate": 0.0002,
+      "loss": 0.6836,
+      "step": 230
+    },
+    {
+      "epoch": 0.1,
+      "learning_rate": 0.0002,
+      "loss": 0.5819,
+      "step": 240
+    },
+    {
+      "epoch": 0.1,
+      "learning_rate": 0.0002,
+      "loss": 0.5666,
+      "step": 250
+    },
+    {
+      "epoch": 0.11,
+      "learning_rate": 0.0002,
+      "loss": 0.5266,
+      "step": 260
+    },
+    {
+      "epoch": 0.11,
+      "learning_rate": 0.0002,
+      "loss": 0.5218,
+      "step": 270
+    },
+    {
+      "epoch": 0.11,
+      "learning_rate": 0.0002,
+      "loss": 0.5487,
+      "step": 280
+    },
+    {
+      "epoch": 0.12,
+      "learning_rate": 0.0002,
+      "loss": 0.5345,
+      "step": 290
+    },
+    {
+      "epoch": 0.12,
+      "learning_rate": 0.0002,
+      "loss": 0.6299,
+      "step": 300
+    },
+    {
+      "epoch": 0.13,
+      "learning_rate": 0.0002,
+      "loss": 0.5681,
+      "step": 310
+    },
+    {
+      "epoch": 0.13,
+      "learning_rate": 0.0002,
+      "loss": 0.5553,
+      "step": 320
+    },
+    {
+      "epoch": 0.13,
+      "learning_rate": 0.0002,
+      "loss": 0.575,
+      "step": 330
+    },
+    {
+      "epoch": 0.14,
+      "learning_rate": 0.0002,
+      "loss": 0.5708,
+      "step": 340
+    },
+    {
+      "epoch": 0.14,
+      "learning_rate": 0.0002,
+      "loss": 0.4932,
+      "step": 350
+    },
+    {
+      "epoch": 0.15,
+      "learning_rate": 0.0002,
+      "loss": 0.6957,
+      "step": 360
+    },
+    {
+      "epoch": 0.15,
+      "learning_rate": 0.0002,
+      "loss": 0.6442,
+      "step": 370
+    },
+    {
+      "epoch": 0.15,
+      "learning_rate": 0.0002,
+      "loss": 0.5999,
+      "step": 380
+    },
+    {
+      "epoch": 0.16,
+      "learning_rate": 0.0002,
+      "loss": 0.5086,
+      "step": 390
+    },
+    {
+      "epoch": 0.16,
+      "learning_rate": 0.0002,
+      "loss": 0.7349,
+      "step": 400
+    },
+    {
+      "epoch": 0.16,
+      "eval_loss": 0.6260886192321777,
+      "eval_runtime": 94.1289,
+      "eval_samples_per_second": 10.624,
+      "eval_steps_per_second": 5.312,
+      "step": 400
+    },
+    {
+      "epoch": 0.16,
+      "mmlu_eval_accuracy": 0.4735216064792921,
+      "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
+      "mmlu_eval_accuracy_anatomy": 0.5714285714285714,
+      "mmlu_eval_accuracy_astronomy": 0.5,
+      "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
+      "mmlu_eval_accuracy_clinical_knowledge": 0.4482758620689655,
+      "mmlu_eval_accuracy_college_biology": 0.375,
+      "mmlu_eval_accuracy_college_chemistry": 0.125,
+      "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
+      "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
+      "mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
+      "mmlu_eval_accuracy_college_physics": 0.45454545454545453,
+      "mmlu_eval_accuracy_computer_security": 0.5454545454545454,
+      "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
+      "mmlu_eval_accuracy_econometrics": 0.25,
+      "mmlu_eval_accuracy_electrical_engineering": 0.375,
+      "mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
+      "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
+      "mmlu_eval_accuracy_global_facts": 0.5,
+      "mmlu_eval_accuracy_high_school_biology": 0.375,
+      "mmlu_eval_accuracy_high_school_chemistry": 0.3181818181818182,
+      "mmlu_eval_accuracy_high_school_computer_science": 0.4444444444444444,
+      "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
+      "mmlu_eval_accuracy_high_school_geography": 0.7272727272727273,
+      "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
+      "mmlu_eval_accuracy_high_school_macroeconomics": 0.4186046511627907,
+      "mmlu_eval_accuracy_high_school_mathematics": 0.1724137931034483,
+      "mmlu_eval_accuracy_high_school_microeconomics": 0.46153846153846156,
+      "mmlu_eval_accuracy_high_school_physics": 0.35294117647058826,
+      "mmlu_eval_accuracy_high_school_psychology": 0.75,
+      "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
+      "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
+      "mmlu_eval_accuracy_high_school_world_history": 0.5,
+      "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
+      "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
+      "mmlu_eval_accuracy_international_law": 0.8461538461538461,
+      "mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
+      "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
+      "mmlu_eval_accuracy_machine_learning": 0.36363636363636365,
+      "mmlu_eval_accuracy_management": 0.45454545454545453,
+      "mmlu_eval_accuracy_marketing": 0.8,
+      "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
+      "mmlu_eval_accuracy_miscellaneous": 0.6162790697674418,
+      "mmlu_eval_accuracy_moral_disputes": 0.5263157894736842,
+      "mmlu_eval_accuracy_moral_scenarios": 0.24,
+      "mmlu_eval_accuracy_nutrition": 0.6666666666666666,
+      "mmlu_eval_accuracy_philosophy": 0.47058823529411764,
+      "mmlu_eval_accuracy_prehistory": 0.5142857142857142,
+      "mmlu_eval_accuracy_professional_accounting": 0.25806451612903225,
+      "mmlu_eval_accuracy_professional_law": 0.3352941176470588,
+      "mmlu_eval_accuracy_professional_medicine": 0.45161290322580644,
+      "mmlu_eval_accuracy_professional_psychology": 0.4057971014492754,
+      "mmlu_eval_accuracy_public_relations": 0.5,
+      "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
+      "mmlu_eval_accuracy_sociology": 0.7272727272727273,
+      "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
+      "mmlu_eval_accuracy_virology": 0.4444444444444444,
+      "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
+      "mmlu_loss": 0.9645495347814834,
+      "step": 400
+    },
+    {
+      "epoch": 0.17,
+      "learning_rate": 0.0002,
+      "loss": 0.6117,
+      "step": 410
+    },
+    {
+      "epoch": 0.17,
+      "learning_rate": 0.0002,
+      "loss": 0.5963,
+      "step": 420
+    },
+    {
+      "epoch": 0.17,
+      "learning_rate": 0.0002,
+      "loss": 0.5866,
+      "step": 430
+    },
+    {
+      "epoch": 0.18,
+      "learning_rate": 0.0002,
+      "loss": 0.5433,
+      "step": 440
+    },
+    {
+      "epoch": 0.18,
+      "learning_rate": 0.0002,
+      "loss": 0.5432,
+      "step": 450
+    },
+    {
+      "epoch": 0.19,
+      "learning_rate": 0.0002,
+      "loss": 0.5713,
+      "step": 460
+    },
+    {
+      "epoch": 0.19,
+      "learning_rate": 0.0002,
+      "loss": 0.5957,
+      "step": 470
+    },
+    {
+      "epoch": 0.19,
+      "learning_rate": 0.0002,
+      "loss": 0.6526,
+      "step": 480
+    },
+    {
+      "epoch": 0.2,
+      "learning_rate": 0.0002,
+      "loss": 0.57,
+      "step": 490
+    },
+    {
+      "epoch": 0.2,
+      "learning_rate": 0.0002,
+      "loss": 0.5938,
+      "step": 500
+    },
+    {
+      "epoch": 0.21,
+      "learning_rate": 0.0002,
+      "loss": 0.6141,
+      "step": 510
+    },
+    {
+      "epoch": 0.21,
+      "learning_rate": 0.0002,
+      "loss": 0.5262,
+      "step": 520
+    },
+    {
+      "epoch": 0.21,
+      "learning_rate": 0.0002,
+      "loss": 0.7055,
+      "step": 530
+    },
+    {
+      "epoch": 0.22,
+      "learning_rate": 0.0002,
+      "loss": 0.5412,
+      "step": 540
+    },
+    {
+      "epoch": 0.22,
+      "learning_rate": 0.0002,
+      "loss": 0.4956,
+      "step": 550
+    },
+    {
+      "epoch": 0.23,
+      "learning_rate": 0.0002,
+      "loss": 0.6345,
+      "step": 560
+    },
+    {
+      "epoch": 0.23,
+      "learning_rate": 0.0002,
+      "loss": 0.5665,
+      "step": 570
+    },
+    {
+      "epoch": 0.23,
+      "learning_rate": 0.0002,
+      "loss": 0.6687,
+      "step": 580
+    },
+    {
+      "epoch": 0.24,
+      "learning_rate": 0.0002,
+      "loss": 0.5994,
+      "step": 590
+    },
+    {
+      "epoch": 0.24,
+      "learning_rate": 0.0002,
+      "loss": 0.6209,
+      "step": 600
+    },
+    {
+      "epoch": 0.24,
+      "eval_loss": 0.6194782853126526,
+      "eval_runtime": 94.0475,
+      "eval_samples_per_second": 10.633,
+      "eval_steps_per_second": 5.316,
+      "step": 600
+    },
+    {
+      "epoch": 0.24,
+      "mmlu_eval_accuracy": 0.44690777926110636,
+      "mmlu_eval_accuracy_abstract_algebra": 0.18181818181818182,
+      "mmlu_eval_accuracy_anatomy": 0.42857142857142855,
+      "mmlu_eval_accuracy_astronomy": 0.375,
+      "mmlu_eval_accuracy_business_ethics": 0.6363636363636364,
+      "mmlu_eval_accuracy_clinical_knowledge": 0.41379310344827586,
+      "mmlu_eval_accuracy_college_biology": 0.375,
+      "mmlu_eval_accuracy_college_chemistry": 0.0,
+      "mmlu_eval_accuracy_college_computer_science": 0.45454545454545453,
+      "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
+      "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
+      "mmlu_eval_accuracy_college_physics": 0.5454545454545454,
+      "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
+      "mmlu_eval_accuracy_conceptual_physics": 0.3076923076923077,
+      "mmlu_eval_accuracy_econometrics": 0.25,
+      "mmlu_eval_accuracy_electrical_engineering": 0.3125,
+      "mmlu_eval_accuracy_elementary_mathematics": 0.36585365853658536,
+      "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
+      "mmlu_eval_accuracy_global_facts": 0.2,
+      "mmlu_eval_accuracy_high_school_biology": 0.28125,
+      "mmlu_eval_accuracy_high_school_chemistry": 0.4090909090909091,
+      "mmlu_eval_accuracy_high_school_computer_science": 0.6666666666666666,
+      "mmlu_eval_accuracy_high_school_european_history": 0.6666666666666666,
+      "mmlu_eval_accuracy_high_school_geography": 0.6818181818181818,
+      "mmlu_eval_accuracy_high_school_government_and_politics": 0.5714285714285714,
+      "mmlu_eval_accuracy_high_school_macroeconomics": 0.3488372093023256,
+      "mmlu_eval_accuracy_high_school_mathematics": 0.3103448275862069,
+      "mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
+      "mmlu_eval_accuracy_high_school_physics": 0.17647058823529413,
+      "mmlu_eval_accuracy_high_school_psychology": 0.6833333333333333,
+      "mmlu_eval_accuracy_high_school_statistics": 0.2608695652173913,
+      "mmlu_eval_accuracy_high_school_us_history": 0.7272727272727273,
+      "mmlu_eval_accuracy_high_school_world_history": 0.5384615384615384,
+      "mmlu_eval_accuracy_human_aging": 0.6521739130434783,
+      "mmlu_eval_accuracy_human_sexuality": 0.5,
+      "mmlu_eval_accuracy_international_law": 0.7692307692307693,
+      "mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
+      "mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556,
+      "mmlu_eval_accuracy_machine_learning": 0.09090909090909091,
+      "mmlu_eval_accuracy_management": 0.36363636363636365,
+      "mmlu_eval_accuracy_marketing": 0.72,
+      "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
+      "mmlu_eval_accuracy_miscellaneous": 0.6744186046511628,
+      "mmlu_eval_accuracy_moral_disputes": 0.42105263157894735,
+      "mmlu_eval_accuracy_moral_scenarios": 0.24,
+      "mmlu_eval_accuracy_nutrition": 0.5454545454545454,
+      "mmlu_eval_accuracy_philosophy": 0.5588235294117647,
+      "mmlu_eval_accuracy_prehistory": 0.42857142857142855,
+      "mmlu_eval_accuracy_professional_accounting": 0.25806451612903225,
+      "mmlu_eval_accuracy_professional_law": 0.3411764705882353,
+      "mmlu_eval_accuracy_professional_medicine": 0.3870967741935484,
+      "mmlu_eval_accuracy_professional_psychology": 0.391304347826087,
+      "mmlu_eval_accuracy_public_relations": 0.5,
+      "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
+      "mmlu_eval_accuracy_sociology": 0.6818181818181818,
+      "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
+      "mmlu_eval_accuracy_virology": 0.5555555555555556,
+      "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
+      "mmlu_loss": 0.9532866988613151,
+      "step": 600
+    },
+    {
+      "epoch": 0.25,
+      "learning_rate": 0.0002,
+      "loss": 0.6145,
+      "step": 610
+    },
+    {
+      "epoch": 0.25,
+      "learning_rate": 0.0002,
+      "loss": 0.6267,
+      "step": 620
+    },
+    {
+      "epoch": 0.26,
+      "learning_rate": 0.0002,
+      "loss": 0.5609,
+      "step": 630
+    },
+    {
+      "epoch": 0.26,
+      "learning_rate": 0.0002,
+      "loss": 0.4955,
+      "step": 640
+    },
+    {
+      "epoch": 0.26,
+      "learning_rate": 0.0002,
+      "loss": 0.551,
+      "step": 650
+    },
+    {
+      "epoch": 0.27,
+      "learning_rate": 0.0002,
+      "loss": 0.5323,
+      "step": 660
+    },
+    {
+      "epoch": 0.27,
+      "learning_rate": 0.0002,
+      "loss": 0.5905,
+      "step": 670
+    },
+    {
+      "epoch": 0.28,
+      "learning_rate": 0.0002,
+      "loss": 0.7368,
+      "step": 680
+    },
+    {
+      "epoch": 0.28,
+      "learning_rate": 0.0002,
+      "loss": 0.573,
+      "step": 690
+    },
+    {
+      "epoch": 0.28,
+      "learning_rate": 0.0002,
+      "loss": 0.5785,
+      "step": 700
+    },
+    {
+      "epoch": 0.29,
+      "learning_rate": 0.0002,
+      "loss": 0.5802,
+      "step": 710
+    },
+    {
+      "epoch": 0.29,
+      "learning_rate": 0.0002,
+      "loss": 0.5823,
+      "step": 720
+    },
+    {
+      "epoch": 0.3,
+      "learning_rate": 0.0002,
+      "loss": 0.5718,
+      "step": 730
+    },
+    {
+      "epoch": 0.3,
+      "learning_rate": 0.0002,
+      "loss": 0.5783,
+      "step": 740
+    },
+    {
+      "epoch": 0.3,
+      "learning_rate": 0.0002,
+      "loss": 0.5367,
+      "step": 750
+    },
+    {
+      "epoch": 0.31,
+      "learning_rate": 0.0002,
+      "loss": 0.6111,
+      "step": 760
+    },
+    {
+      "epoch": 0.31,
+      "learning_rate": 0.0002,
+      "loss": 0.5343,
+      "step": 770
+    },
+    {
+      "epoch": 0.32,
+      "learning_rate": 0.0002,
+      "loss": 0.6399,
+      "step": 780
+    },
+    {
+      "epoch": 0.32,
+      "learning_rate": 0.0002,
+      "loss": 0.6314,
+      "step": 790
+    },
+    {
+      "epoch": 0.32,
+      "learning_rate": 0.0002,
+      "loss": 0.506,
+      "step": 800
+    },
+    {
+      "epoch": 0.32,
+      "eval_loss": 0.612710177898407,
+      "eval_runtime": 94.0353,
+      "eval_samples_per_second": 10.634,
+      "eval_steps_per_second": 5.317,
+      "step": 800
+    },
+    {
+      "epoch": 0.32,
+      "mmlu_eval_accuracy": 0.451824690933893,
+      "mmlu_eval_accuracy_abstract_algebra": 0.45454545454545453,
+      "mmlu_eval_accuracy_anatomy": 0.5714285714285714,
+      "mmlu_eval_accuracy_astronomy": 0.4375,
+      "mmlu_eval_accuracy_business_ethics": 0.6363636363636364,
+      "mmlu_eval_accuracy_clinical_knowledge": 0.4482758620689655,
+      "mmlu_eval_accuracy_college_biology": 0.4375,
+      "mmlu_eval_accuracy_college_chemistry": 0.125,
+      "mmlu_eval_accuracy_college_computer_science": 0.18181818181818182,
+      "mmlu_eval_accuracy_college_mathematics": 0.36363636363636365,
+      "mmlu_eval_accuracy_college_medicine": 0.3181818181818182,
+      "mmlu_eval_accuracy_college_physics": 0.45454545454545453,
+      "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
+      "mmlu_eval_accuracy_conceptual_physics": 0.3076923076923077,
+      "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
+      "mmlu_eval_accuracy_electrical_engineering": 0.375,
+      "mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
+      "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
+      "mmlu_eval_accuracy_global_facts": 0.3,
+      "mmlu_eval_accuracy_high_school_biology": 0.40625,
+      "mmlu_eval_accuracy_high_school_chemistry": 0.36363636363636365,
+      "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
+      "mmlu_eval_accuracy_high_school_european_history": 0.4444444444444444,
+      "mmlu_eval_accuracy_high_school_geography": 0.6818181818181818,
+      "mmlu_eval_accuracy_high_school_government_and_politics": 0.5714285714285714,
+      "mmlu_eval_accuracy_high_school_macroeconomics": 0.37209302325581395,
+      "mmlu_eval_accuracy_high_school_mathematics": 0.3448275862068966,
+      "mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231,
+      "mmlu_eval_accuracy_high_school_physics": 0.35294117647058826,
+      "mmlu_eval_accuracy_high_school_psychology": 0.75,
+      "mmlu_eval_accuracy_high_school_statistics": 0.2608695652173913,
+      "mmlu_eval_accuracy_high_school_us_history": 0.6818181818181818,
+      "mmlu_eval_accuracy_high_school_world_history": 0.46153846153846156,
+      "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
+      "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
+      "mmlu_eval_accuracy_international_law": 0.8461538461538461,
+      "mmlu_eval_accuracy_jurisprudence": 0.2727272727272727,
+      "mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556,
+      "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
+      "mmlu_eval_accuracy_management": 0.36363636363636365,
+      "mmlu_eval_accuracy_marketing": 0.64,
+      "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
+      "mmlu_eval_accuracy_miscellaneous": 0.6395348837209303,
+      "mmlu_eval_accuracy_moral_disputes": 0.5263157894736842,
+      "mmlu_eval_accuracy_moral_scenarios": 0.23,
+      "mmlu_eval_accuracy_nutrition": 0.5454545454545454,
+      "mmlu_eval_accuracy_philosophy": 0.47058823529411764,
+      "mmlu_eval_accuracy_prehistory": 0.45714285714285713,
+      "mmlu_eval_accuracy_professional_accounting": 0.2903225806451613,
+      "mmlu_eval_accuracy_professional_law": 0.3411764705882353,
+      "mmlu_eval_accuracy_professional_medicine": 0.41935483870967744,
+      "mmlu_eval_accuracy_professional_psychology": 0.42028985507246375,
+      "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
+      "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
+      "mmlu_eval_accuracy_sociology": 0.5909090909090909,
+      "mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454,
+      "mmlu_eval_accuracy_virology": 0.5,
+      "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
+      "mmlu_loss": 0.9771831466113619,
+      "step": 800
+    },
+    {
+      "epoch": 0.33,
+      "learning_rate": 0.0002,
+      "loss": 0.604,
+      "step": 810
+    },
+    {
+      "epoch": 0.33,
+      "learning_rate": 0.0002,
+      "loss": 0.5633,
+      "step": 820
+    },
+    {
+      "epoch": 0.34,
+      "learning_rate": 0.0002,
+      "loss": 0.5965,
+      "step": 830
+    },
+    {
+      "epoch": 0.34,
+      "learning_rate": 0.0002,
+      "loss": 0.5563,
+      "step": 840
+    },
+    {
+      "epoch": 0.34,
+      "learning_rate": 0.0002,
+      "loss": 0.6227,
+      "step": 850
+    },
+    {
+      "epoch": 0.35,
+      "learning_rate": 0.0002,
+      "loss": 0.6758,
+      "step": 860
+    },
+    {
+      "epoch": 0.35,
+      "learning_rate": 0.0002,
+      "loss": 0.6293,
+      "step": 870
+    },
+    {
+      "epoch": 0.36,
+      "learning_rate": 0.0002,
+      "loss": 0.6711,
+      "step": 880
+    },
+    {
+      "epoch": 0.36,
+      "learning_rate": 0.0002,
+      "loss": 0.5607,
+      "step": 890
+    },
+    {
+      "epoch": 0.36,
+      "learning_rate": 0.0002,
+      "loss": 0.66,
+      "step": 900
+    },
+    {
+      "epoch": 0.37,
+      "learning_rate": 0.0002,
+      "loss": 0.5449,
+      "step": 910
+    },
+    {
+      "epoch": 0.37,
+      "learning_rate": 0.0002,
+      "loss": 0.5715,
+      "step": 920
+    },
+    {
+      "epoch": 0.38,
+      "learning_rate": 0.0002,
+      "loss": 0.5366,
+      "step": 930
+    },
+    {
+      "epoch": 0.38,
+      "learning_rate": 0.0002,
+      "loss": 0.4633,
+      "step": 940
+    },
+    {
+      "epoch": 0.38,
+      "learning_rate": 0.0002,
+      "loss": 0.5635,
+      "step": 950
+    },
+    {
+      "epoch": 0.39,
+      "learning_rate": 0.0002,
+      "loss": 0.5331,
+      "step": 960
+    },
+    {
+      "epoch": 0.39,
+      "learning_rate": 0.0002,
+      "loss": 0.5642,
+      "step": 970
+    },
+    {
+      "epoch": 0.4,
+      "learning_rate": 0.0002,
+      "loss": 0.6002,
+      "step": 980
+    },
+    {
+      "epoch": 0.4,
+      "learning_rate": 0.0002,
+      "loss": 0.5484,
+      "step": 990
+    },
+    {
+      "epoch": 0.4,
+      "learning_rate": 0.0002,
+      "loss": 0.591,
+      "step": 1000
+    },
+    {
+      "epoch": 0.4,
+      "eval_loss": 0.6107870936393738,
+      "eval_runtime": 94.1069,
+      "eval_samples_per_second": 10.626,
+      "eval_steps_per_second": 5.313,
+      "step": 1000
+    },
+    {
+      "epoch": 0.4,
+      "mmlu_eval_accuracy": 0.44434023866715483,
+      "mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
+      "mmlu_eval_accuracy_anatomy": 0.5,
+      "mmlu_eval_accuracy_astronomy": 0.3125,
+      "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
+      "mmlu_eval_accuracy_clinical_knowledge": 0.41379310344827586,
+      "mmlu_eval_accuracy_college_biology": 0.375,
+      "mmlu_eval_accuracy_college_chemistry": 0.0,
+      "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
+      "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
+      "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
+      "mmlu_eval_accuracy_college_physics": 0.5454545454545454,
+      "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
+      "mmlu_eval_accuracy_conceptual_physics": 0.34615384615384615,
+      "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
+      "mmlu_eval_accuracy_electrical_engineering": 0.375,
+      "mmlu_eval_accuracy_elementary_mathematics": 0.36585365853658536,
+      "mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
+      "mmlu_eval_accuracy_global_facts": 0.3,
+      "mmlu_eval_accuracy_high_school_biology": 0.375,
+      "mmlu_eval_accuracy_high_school_chemistry": 0.4090909090909091,
+      "mmlu_eval_accuracy_high_school_computer_science": 0.6666666666666666,
+      "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
+      "mmlu_eval_accuracy_high_school_geography": 0.6818181818181818,
+      "mmlu_eval_accuracy_high_school_government_and_politics": 0.5714285714285714,
+      "mmlu_eval_accuracy_high_school_macroeconomics": 0.32558139534883723,
+      "mmlu_eval_accuracy_high_school_mathematics": 0.3103448275862069,
+      "mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
+      "mmlu_eval_accuracy_high_school_physics": 0.29411764705882354,
+      "mmlu_eval_accuracy_high_school_psychology": 0.7,
+      "mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
+      "mmlu_eval_accuracy_high_school_us_history": 0.7272727272727273,
+      "mmlu_eval_accuracy_high_school_world_history": 0.46153846153846156,
+      "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
+      "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
+      "mmlu_eval_accuracy_international_law": 0.7692307692307693,
+      "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
+      "mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556,
+      "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
+      "mmlu_eval_accuracy_management": 0.36363636363636365,
+      "mmlu_eval_accuracy_marketing": 0.72,
+      "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
+      "mmlu_eval_accuracy_miscellaneous": 0.627906976744186,
+      "mmlu_eval_accuracy_moral_disputes": 0.42105263157894735,
+      "mmlu_eval_accuracy_moral_scenarios": 0.24,
+      "mmlu_eval_accuracy_nutrition": 0.5454545454545454,
+      "mmlu_eval_accuracy_philosophy": 0.47058823529411764,
+      "mmlu_eval_accuracy_prehistory": 0.4,
+      "mmlu_eval_accuracy_professional_accounting": 0.22580645161290322,
+      "mmlu_eval_accuracy_professional_law": 0.3411764705882353,
+      "mmlu_eval_accuracy_professional_medicine": 0.41935483870967744,
+      "mmlu_eval_accuracy_professional_psychology": 0.42028985507246375,
+      "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
+      "mmlu_eval_accuracy_security_studies": 0.5185185185185185,
+      "mmlu_eval_accuracy_sociology": 0.5,
+      "mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454,
+      "mmlu_eval_accuracy_virology": 0.6111111111111112,
+      "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
+      "mmlu_loss": 0.9966794518515585,
+      "step": 1000
+    },
+    {
+      "epoch": 0.41,
+      "learning_rate": 0.0002,
+      "loss": 0.5453,
+      "step": 1010
+    },
+    {
+      "epoch": 0.41,
+      "learning_rate": 0.0002,
+      "loss": 0.4825,
+      "step": 1020
+    },
+    {
+      "epoch": 0.42,
+      "learning_rate": 0.0002,
+      "loss": 0.5663,
+      "step": 1030
+    },
+    {
+      "epoch": 0.42,
+      "learning_rate": 0.0002,
+      "loss": 0.4528,
+      "step": 1040
+    },
+    {
+      "epoch": 0.43,
+      "learning_rate": 0.0002,
+      "loss": 0.6646,
+      "step": 1050
+    },
+    {
+      "epoch": 0.43,
+      "learning_rate": 0.0002,
+      "loss": 0.5613,
+      "step": 1060
+    },
+    {
+      "epoch": 0.43,
+      "learning_rate": 0.0002,
+      "loss": 0.6912,
+      "step": 1070
+    },
+    {
+      "epoch": 0.44,
+      "learning_rate": 0.0002,
+      "loss": 0.6891,
+      "step": 1080
+    },
+    {
+      "epoch": 0.44,
+      "learning_rate": 0.0002,
+      "loss": 0.5508,
+      "step": 1090
+    },
+    {
+      "epoch": 0.45,
+      "learning_rate": 0.0002,
+      "loss": 0.6595,
+      "step": 1100
+    },
+    {
+      "epoch": 0.45,
+      "learning_rate": 0.0002,
+      "loss": 0.5936,
+      "step": 1110
+    },
+    {
+      "epoch": 0.45,
+      "learning_rate": 0.0002,
+      "loss": 0.6558,
+      "step": 1120
+    },
+    {
+      "epoch": 0.46,
+      "learning_rate": 0.0002,
+      "loss": 0.6729,
+      "step": 1130
+    },
+    {
+      "epoch": 0.46,
+      "learning_rate": 0.0002,
+      "loss": 0.6205,
+      "step": 1140
+    },
+    {
+      "epoch": 0.47,
+      "learning_rate": 0.0002,
+      "loss": 0.6675,
+      "step": 1150
+    },
+    {
+      "epoch": 0.47,
+      "learning_rate": 0.0002,
+      "loss": 0.5649,
+      "step": 1160
+    },
+    {
+      "epoch": 0.47,
+      "learning_rate": 0.0002,
+      "loss": 0.5922,
+      "step": 1170
+    },
+    {
+      "epoch": 0.48,
+      "learning_rate": 0.0002,
+      "loss": 0.4905,
+      "step": 1180
+    },
+    {
+      "epoch": 0.48,
+      "learning_rate": 0.0002,
+      "loss": 0.6746,
+      "step": 1190
+    },
+    {
+      "epoch": 0.49,
+      "learning_rate": 0.0002,
+      "loss": 0.6171,
+      "step": 1200
+    },
+    {
+      "epoch": 0.49,
+      "eval_loss": 0.612968921661377,
+      "eval_runtime": 94.0813,
+      "eval_samples_per_second": 10.629,
+      "eval_steps_per_second": 5.315,
+      "step": 1200
+    },
+    {
+      "epoch": 0.49,
+      "mmlu_eval_accuracy": 0.4287111481518256,
+      "mmlu_eval_accuracy_abstract_algebra": 0.45454545454545453,
+      "mmlu_eval_accuracy_anatomy": 0.5,
+      "mmlu_eval_accuracy_astronomy": 0.375,
+      "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
+      "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
+      "mmlu_eval_accuracy_college_biology": 0.375,
+      "mmlu_eval_accuracy_college_chemistry": 0.0,
+      "mmlu_eval_accuracy_college_computer_science": 0.09090909090909091,
+      "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
+      "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
+      "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
+      "mmlu_eval_accuracy_computer_security": 0.2727272727272727,
+      "mmlu_eval_accuracy_conceptual_physics": 0.34615384615384615,
+      "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
+      "mmlu_eval_accuracy_electrical_engineering": 0.3125,
+      "mmlu_eval_accuracy_elementary_mathematics": 0.4146341463414634,
+      "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
+      "mmlu_eval_accuracy_global_facts": 0.3,
+      "mmlu_eval_accuracy_high_school_biology": 0.25,
+      "mmlu_eval_accuracy_high_school_chemistry": 0.36363636363636365,
+      "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
+      "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
+      "mmlu_eval_accuracy_high_school_geography": 0.7727272727272727,
+      "mmlu_eval_accuracy_high_school_government_and_politics": 0.5238095238095238,
+      "mmlu_eval_accuracy_high_school_macroeconomics": 0.3488372093023256,
+      "mmlu_eval_accuracy_high_school_mathematics": 0.27586206896551724,
+      "mmlu_eval_accuracy_high_school_microeconomics": 0.3076923076923077,
+      "mmlu_eval_accuracy_high_school_physics": 0.17647058823529413,
+      "mmlu_eval_accuracy_high_school_psychology": 0.7,
+      "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
+      "mmlu_eval_accuracy_high_school_us_history": 0.7272727272727273,
+      "mmlu_eval_accuracy_high_school_world_history": 0.4230769230769231,
+      "mmlu_eval_accuracy_human_aging": 0.6086956521739131,
+      "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
+      "mmlu_eval_accuracy_international_law": 0.7692307692307693,
+      "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
+      "mmlu_eval_accuracy_logical_fallacies": 0.5,
+      "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
+      "mmlu_eval_accuracy_management": 0.36363636363636365,
+      "mmlu_eval_accuracy_marketing": 0.76,
+      "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
+      "mmlu_eval_accuracy_miscellaneous": 0.6046511627906976,
+      "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
+      "mmlu_eval_accuracy_moral_scenarios": 0.23,
+      "mmlu_eval_accuracy_nutrition": 0.5151515151515151,
+      "mmlu_eval_accuracy_philosophy": 0.5,
+      "mmlu_eval_accuracy_prehistory": 0.4,
+      "mmlu_eval_accuracy_professional_accounting": 0.22580645161290322,
+      "mmlu_eval_accuracy_professional_law": 0.3235294117647059,
+      "mmlu_eval_accuracy_professional_medicine": 0.3548387096774194,
+      "mmlu_eval_accuracy_professional_psychology": 0.43478260869565216,
+      "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
+      "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
+      "mmlu_eval_accuracy_sociology": 0.45454545454545453,
+      "mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454,
+      "mmlu_eval_accuracy_virology": 0.6111111111111112,
+      "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
+      "mmlu_loss": 1.0120855229213717,
+      "step": 1200
+    },
+    {
+      "epoch": 0.49,
+      "learning_rate": 0.0002,
+      "loss": 0.5031,
+      "step": 1210
+    },
+    {
+      "epoch": 0.49,
+      "learning_rate": 0.0002,
+      "loss": 0.5928,
+      "step": 1220
+    },
+    {
+      "epoch": 0.5,
+      "learning_rate": 0.0002,
+      "loss": 0.5746,
+      "step": 1230
+    },
+    {
+      "epoch": 0.5,
+      "learning_rate": 0.0002,
+      "loss": 0.572,
+      "step": 1240
+    },
+    {
+      "epoch": 0.51,
+      "learning_rate": 0.0002,
+      "loss": 0.5716,
+      "step": 1250
+    },
+    {
+      "epoch": 0.51,
+      "learning_rate": 0.0002,
+      "loss": 0.4872,
+      "step": 1260
+    },
+    {
+      "epoch": 0.51,
+      "learning_rate": 0.0002,
+      "loss": 0.6716,
+      "step": 1270
+    },
+    {
+      "epoch": 0.52,
+      "learning_rate": 0.0002,
+      "loss": 0.6052,
+      "step": 1280
+    },
+    {
+      "epoch": 0.52,
+      "learning_rate": 0.0002,
+      "loss": 0.5711,
+      "step": 1290
+    },
+    {
+      "epoch": 0.53,
+      "learning_rate": 0.0002,
+      "loss": 0.7097,
+      "step": 1300
+    },
+    {
+      "epoch": 0.53,
+      "learning_rate": 0.0002,
+      "loss": 0.5536,
+      "step": 1310
+    },
+    {
+      "epoch": 0.53,
+      "learning_rate": 0.0002,
+      "loss": 0.7815,
+      "step": 1320
+    },
+    {
+      "epoch": 0.54,
+      "learning_rate": 0.0002,
+      "loss": 0.6709,
+      "step": 1330
+    },
+    {
+      "epoch": 0.54,
+      "learning_rate": 0.0002,
+      "loss": 0.5422,
+      "step": 1340
+    },
+    {
+      "epoch": 0.55,
+      "learning_rate": 0.0002,
+      "loss": 0.566,
+      "step": 1350
+    },
+    {
+      "epoch": 0.55,
+      "learning_rate": 0.0002,
+      "loss": 0.4571,
+      "step": 1360
+    },
+    {
+      "epoch": 0.55,
+      "learning_rate": 0.0002,
+      "loss": 0.6572,
+      "step": 1370
+    },
+    {
+      "epoch": 0.56,
+      "learning_rate": 0.0002,
+      "loss": 0.5951,
+      "step": 1380
+    },
+    {
+      "epoch": 0.56,
+      "learning_rate": 0.0002,
+      "loss": 0.6753,
+      "step": 1390
+    },
+    {
+      "epoch": 0.57,
+      "learning_rate": 0.0002,
+      "loss": 0.6247,
+      "step": 1400
+    },
+    {
+      "epoch": 0.57,
+      "eval_loss": 0.6076797842979431,
+      "eval_runtime": 94.144,
+      "eval_samples_per_second": 10.622,
+      "eval_steps_per_second": 5.311,
+      "step": 1400
+    },
+    {
+      "epoch": 0.57,
+      "mmlu_eval_accuracy": 0.434467608651013,
+      "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
+      "mmlu_eval_accuracy_anatomy": 0.5714285714285714,
+      "mmlu_eval_accuracy_astronomy": 0.375,
+      "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
+      "mmlu_eval_accuracy_clinical_knowledge": 0.41379310344827586,
+      "mmlu_eval_accuracy_college_biology": 0.25,
+      "mmlu_eval_accuracy_college_chemistry": 0.0,
+      "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
+      "mmlu_eval_accuracy_college_mathematics": 0.45454545454545453,
+      "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
+      "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
+      "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
+      "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
+      "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
+      "mmlu_eval_accuracy_electrical_engineering": 0.375,
+      "mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
+      "mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
+      "mmlu_eval_accuracy_global_facts": 0.2,
+      "mmlu_eval_accuracy_high_school_biology": 0.34375,
+      "mmlu_eval_accuracy_high_school_chemistry": 0.36363636363636365,
+      "mmlu_eval_accuracy_high_school_computer_science": 0.6666666666666666,
+      "mmlu_eval_accuracy_high_school_european_history": 0.4444444444444444,
+      "mmlu_eval_accuracy_high_school_geography": 0.7272727272727273,
+      "mmlu_eval_accuracy_high_school_government_and_politics": 0.5238095238095238,
+      "mmlu_eval_accuracy_high_school_macroeconomics": 0.3488372093023256,
+      "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
+      "mmlu_eval_accuracy_high_school_microeconomics": 0.34615384615384615,
+      "mmlu_eval_accuracy_high_school_physics": 0.29411764705882354,
+      "mmlu_eval_accuracy_high_school_psychology": 0.6833333333333333,
+      "mmlu_eval_accuracy_high_school_statistics": 0.391304347826087,
+      "mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
+      "mmlu_eval_accuracy_high_school_world_history": 0.46153846153846156,
+      "mmlu_eval_accuracy_human_aging": 0.6086956521739131,
+      "mmlu_eval_accuracy_human_sexuality": 0.25,
+      "mmlu_eval_accuracy_international_law": 0.7692307692307693,
+      "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
+      "mmlu_eval_accuracy_logical_fallacies": 0.5,
+      "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
+      "mmlu_eval_accuracy_management": 0.45454545454545453,
+      "mmlu_eval_accuracy_marketing": 0.68,
+      "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
+      "mmlu_eval_accuracy_miscellaneous": 0.6395348837209303,
+      "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
+      "mmlu_eval_accuracy_moral_scenarios": 0.23,
+      "mmlu_eval_accuracy_nutrition": 0.5454545454545454,
+      "mmlu_eval_accuracy_philosophy": 0.4411764705882353,
+      "mmlu_eval_accuracy_prehistory": 0.42857142857142855,
+      "mmlu_eval_accuracy_professional_accounting": 0.3225806451612903,
+      "mmlu_eval_accuracy_professional_law": 0.3235294117647059,
+      "mmlu_eval_accuracy_professional_medicine": 0.41935483870967744,
+      "mmlu_eval_accuracy_professional_psychology": 0.42028985507246375,
+      "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
+      "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
+      "mmlu_eval_accuracy_sociology": 0.5454545454545454,
+      "mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454,
+      "mmlu_eval_accuracy_virology": 0.5555555555555556,
+      "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
+      "mmlu_loss": 0.9527658471161641,
+      "step": 1400
+    },
+    {
+      "epoch": 0.57,
+      "learning_rate": 0.0002,
+      "loss": 0.6337,
+      "step": 1410
+    },
+    {
+      "epoch": 0.57,
+      "learning_rate": 0.0002,
+      "loss": 0.5077,
+      "step": 1420
+    },
+    {
+      "epoch": 0.58,
+      "learning_rate": 0.0002,
+      "loss": 0.5413,
+      "step": 1430
+    },
+    {
+      "epoch": 0.58,
+      "learning_rate": 0.0002,
+      "loss": 0.6527,
+      "step": 1440
+    },
+    {
+      "epoch": 0.59,
+      "learning_rate": 0.0002,
+      "loss": 0.6435,
+      "step": 1450
+    },
+    {
+      "epoch": 0.59,
+      "learning_rate": 0.0002,
+      "loss": 0.5503,
+      "step": 1460
+    },
+    {
+      "epoch": 0.6,
+      "learning_rate": 0.0002,
+      "loss": 0.5819,
+      "step": 1470
+    },
+    {
+      "epoch": 0.6,
+      "learning_rate": 0.0002,
+      "loss": 0.6342,
+      "step": 1480
+    },
+    {
+      "epoch": 0.6,
+      "learning_rate": 0.0002,
+      "loss": 0.5843,
+      "step": 1490
+    },
+    {
+      "epoch": 0.61,
+      "learning_rate": 0.0002,
+      "loss": 0.5134,
+      "step": 1500
+    },
+    {
+      "epoch": 0.61,
+      "learning_rate": 0.0002,
+      "loss": 0.5694,
+      "step": 1510
+    },
+    {
+      "epoch": 0.62,
+      "learning_rate": 0.0002,
+      "loss": 0.6172,
+      "step": 1520
+    },
+    {
+      "epoch": 0.62,
+      "learning_rate": 0.0002,
+      "loss": 0.5765,
+      "step": 1530
+    },
+    {
+      "epoch": 0.62,
+      "learning_rate": 0.0002,
+      "loss": 0.591,
+      "step": 1540
+    },
+    {
+      "epoch": 0.63,
+      "learning_rate": 0.0002,
+      "loss": 0.5039,
+      "step": 1550
+    },
+    {
+      "epoch": 0.63,
+      "learning_rate": 0.0002,
+      "loss": 0.6288,
+      "step": 1560
+    },
+    {
+      "epoch": 0.64,
+      "learning_rate": 0.0002,
+      "loss": 0.5196,
+      "step": 1570
+    },
+    {
+      "epoch": 0.64,
+      "learning_rate": 0.0002,
+      "loss": 0.5867,
+      "step": 1580
+    },
+    {
+      "epoch": 0.64,
+      "learning_rate": 0.0002,
+      "loss": 0.6002,
+      "step": 1590
+    },
+    {
+      "epoch": 0.65,
+      "learning_rate": 0.0002,
+      "loss": 0.6534,
+      "step": 1600
+    },
+    {
+      "epoch": 0.65,
+      "eval_loss": 0.6057603359222412,
+      "eval_runtime": 94.1839,
+      "eval_samples_per_second": 10.618,
+      "eval_steps_per_second": 5.309,
+      "step": 1600
+    },
+    {
+      "epoch": 0.65,
+      "mmlu_eval_accuracy": 0.4521968363152983,
+      "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
+      "mmlu_eval_accuracy_anatomy": 0.5714285714285714,
+      "mmlu_eval_accuracy_astronomy": 0.375,
+      "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
+      "mmlu_eval_accuracy_clinical_knowledge": 0.41379310344827586,
+      "mmlu_eval_accuracy_college_biology": 0.4375,
+      "mmlu_eval_accuracy_college_chemistry": 0.125,
+      "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
+      "mmlu_eval_accuracy_college_mathematics": 0.36363636363636365,
+      "mmlu_eval_accuracy_college_medicine": 0.3181818181818182,
+      "mmlu_eval_accuracy_college_physics": 0.45454545454545453,
+      "mmlu_eval_accuracy_computer_security": 0.5454545454545454,
+      "mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
+      "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
+      "mmlu_eval_accuracy_electrical_engineering": 0.375,
+      "mmlu_eval_accuracy_elementary_mathematics": 0.2926829268292683,
+      "mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
+      "mmlu_eval_accuracy_global_facts": 0.3,
+      "mmlu_eval_accuracy_high_school_biology": 0.375,
+      "mmlu_eval_accuracy_high_school_chemistry": 0.3181818181818182,
+      "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
+      "mmlu_eval_accuracy_high_school_european_history": 0.5,
+      "mmlu_eval_accuracy_high_school_geography": 0.7272727272727273,
+      "mmlu_eval_accuracy_high_school_government_and_politics": 0.5238095238095238,
+      "mmlu_eval_accuracy_high_school_macroeconomics": 0.46511627906976744,
+      "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
+      "mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
+      "mmlu_eval_accuracy_high_school_physics": 0.23529411764705882,
+      "mmlu_eval_accuracy_high_school_psychology": 0.75,
+      "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
+      "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
+      "mmlu_eval_accuracy_high_school_world_history": 0.4230769230769231,
+      "mmlu_eval_accuracy_human_aging": 0.6521739130434783,
+      "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
+      "mmlu_eval_accuracy_international_law": 0.7692307692307693,
+      "mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
+      "mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556,
+      "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
+      "mmlu_eval_accuracy_management": 0.5454545454545454,
+      "mmlu_eval_accuracy_marketing": 0.76,
+      "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
+      "mmlu_eval_accuracy_miscellaneous": 0.6395348837209303,
+      "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
+      "mmlu_eval_accuracy_moral_scenarios": 0.28,
+      "mmlu_eval_accuracy_nutrition": 0.5454545454545454,
1523
+ "mmlu_eval_accuracy_philosophy": 0.4411764705882353,
1524
+ "mmlu_eval_accuracy_prehistory": 0.5142857142857142,
1525
+ "mmlu_eval_accuracy_professional_accounting": 0.25806451612903225,
1526
+ "mmlu_eval_accuracy_professional_law": 0.3411764705882353,
1527
+ "mmlu_eval_accuracy_professional_medicine": 0.41935483870967744,
1528
+ "mmlu_eval_accuracy_professional_psychology": 0.42028985507246375,
1529
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
1530
+ "mmlu_eval_accuracy_security_studies": 0.5185185185185185,
1531
+ "mmlu_eval_accuracy_sociology": 0.5454545454545454,
1532
+ "mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454,
1533
+ "mmlu_eval_accuracy_virology": 0.5,
1534
+ "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
1535
+ "mmlu_loss": 0.8609263468582388,
1536
+ "step": 1600
1537
+ },
1538
+ {
1539
+ "epoch": 0.65,
1540
+ "learning_rate": 0.0002,
1541
+ "loss": 0.5552,
1542
+ "step": 1610
1543
+ },
1544
+ {
1545
+ "epoch": 0.66,
1546
+ "learning_rate": 0.0002,
1547
+ "loss": 0.4649,
1548
+ "step": 1620
1549
+ },
1550
+ {
1551
+ "epoch": 0.66,
1552
+ "learning_rate": 0.0002,
1553
+ "loss": 0.5148,
1554
+ "step": 1630
1555
+ },
1556
+ {
1557
+ "epoch": 0.66,
1558
+ "learning_rate": 0.0002,
1559
+ "loss": 0.4968,
1560
+ "step": 1640
1561
+ },
1562
+ {
1563
+ "epoch": 0.67,
1564
+ "learning_rate": 0.0002,
1565
+ "loss": 0.5822,
1566
+ "step": 1650
1567
+ },
1568
+ {
1569
+ "epoch": 0.67,
1570
+ "learning_rate": 0.0002,
1571
+ "loss": 0.4779,
1572
+ "step": 1660
1573
+ },
1574
+ {
1575
+ "epoch": 0.68,
1576
+ "learning_rate": 0.0002,
1577
+ "loss": 0.6367,
1578
+ "step": 1670
1579
+ },
1580
+ {
1581
+ "epoch": 0.68,
1582
+ "learning_rate": 0.0002,
1583
+ "loss": 0.7188,
1584
+ "step": 1680
1585
+ },
1586
+ {
1587
+ "epoch": 0.68,
1588
+ "learning_rate": 0.0002,
1589
+ "loss": 0.5493,
1590
+ "step": 1690
1591
+ },
1592
+ {
1593
+ "epoch": 0.69,
1594
+ "learning_rate": 0.0002,
1595
+ "loss": 0.5365,
1596
+ "step": 1700
1597
+ },
1598
+ {
1599
+ "epoch": 0.69,
1600
+ "learning_rate": 0.0002,
1601
+ "loss": 0.6451,
1602
+ "step": 1710
1603
+ },
1604
+ {
1605
+ "epoch": 0.7,
1606
+ "learning_rate": 0.0002,
1607
+ "loss": 0.5231,
1608
+ "step": 1720
1609
+ },
1610
+ {
1611
+ "epoch": 0.7,
1612
+ "learning_rate": 0.0002,
1613
+ "loss": 0.7517,
1614
+ "step": 1730
1615
+ },
1616
+ {
1617
+ "epoch": 0.7,
1618
+ "learning_rate": 0.0002,
1619
+ "loss": 0.5724,
1620
+ "step": 1740
1621
+ },
1622
+ {
1623
+ "epoch": 0.71,
1624
+ "learning_rate": 0.0002,
1625
+ "loss": 0.4755,
1626
+ "step": 1750
1627
+ },
1628
+ {
1629
+ "epoch": 0.71,
1630
+ "learning_rate": 0.0002,
1631
+ "loss": 0.672,
1632
+ "step": 1760
1633
+ },
1634
+ {
1635
+ "epoch": 0.72,
1636
+ "learning_rate": 0.0002,
1637
+ "loss": 0.6718,
1638
+ "step": 1770
1639
+ },
1640
+ {
1641
+ "epoch": 0.72,
1642
+ "learning_rate": 0.0002,
1643
+ "loss": 0.6726,
1644
+ "step": 1780
1645
+ },
1646
+ {
1647
+ "epoch": 0.72,
1648
+ "learning_rate": 0.0002,
1649
+ "loss": 0.5012,
1650
+ "step": 1790
1651
+ },
1652
+ {
1653
+ "epoch": 0.73,
1654
+ "learning_rate": 0.0002,
1655
+ "loss": 0.4542,
1656
+ "step": 1800
1657
+ },
1658
+ {
1659
+ "epoch": 0.73,
1660
+ "eval_loss": 0.6079343557357788,
1661
+ "eval_runtime": 94.5927,
1662
+ "eval_samples_per_second": 10.572,
1663
+ "eval_steps_per_second": 5.286,
1664
+ "step": 1800
1665
+ },
1666
+ {
1667
+ "epoch": 0.73,
1668
+ "mmlu_eval_accuracy": 0.45560649806753273,
1669
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
1670
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
1671
+ "mmlu_eval_accuracy_astronomy": 0.375,
1672
+ "mmlu_eval_accuracy_business_ethics": 0.6363636363636364,
1673
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4482758620689655,
1674
+ "mmlu_eval_accuracy_college_biology": 0.375,
1675
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
1676
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
1677
+ "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
1678
+ "mmlu_eval_accuracy_college_medicine": 0.3181818181818182,
1679
+ "mmlu_eval_accuracy_college_physics": 0.45454545454545453,
1680
+ "mmlu_eval_accuracy_computer_security": 0.18181818181818182,
1681
+ "mmlu_eval_accuracy_conceptual_physics": 0.3076923076923077,
1682
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
1683
+ "mmlu_eval_accuracy_electrical_engineering": 0.4375,
1684
+ "mmlu_eval_accuracy_elementary_mathematics": 0.4634146341463415,
1685
+ "mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
1686
+ "mmlu_eval_accuracy_global_facts": 0.5,
1687
+ "mmlu_eval_accuracy_high_school_biology": 0.375,
1688
+ "mmlu_eval_accuracy_high_school_chemistry": 0.36363636363636365,
1689
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
1690
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
1691
+ "mmlu_eval_accuracy_high_school_geography": 0.6818181818181818,
1692
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
1693
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.3023255813953488,
1694
+ "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
1695
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.46153846153846156,
1696
+ "mmlu_eval_accuracy_high_school_physics": 0.23529411764705882,
1697
+ "mmlu_eval_accuracy_high_school_psychology": 0.6666666666666666,
1698
+ "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
1699
+ "mmlu_eval_accuracy_high_school_us_history": 0.7272727272727273,
1700
+ "mmlu_eval_accuracy_high_school_world_history": 0.5769230769230769,
1701
+ "mmlu_eval_accuracy_human_aging": 0.6086956521739131,
1702
+ "mmlu_eval_accuracy_human_sexuality": 0.5,
1703
+ "mmlu_eval_accuracy_international_law": 0.7692307692307693,
1704
+ "mmlu_eval_accuracy_jurisprudence": 0.5454545454545454,
1705
+ "mmlu_eval_accuracy_logical_fallacies": 0.5,
1706
+ "mmlu_eval_accuracy_machine_learning": 0.09090909090909091,
1707
+ "mmlu_eval_accuracy_management": 0.5454545454545454,
1708
+ "mmlu_eval_accuracy_marketing": 0.8,
1709
+ "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
1710
+ "mmlu_eval_accuracy_miscellaneous": 0.6744186046511628,
1711
+ "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
1712
+ "mmlu_eval_accuracy_moral_scenarios": 0.23,
1713
+ "mmlu_eval_accuracy_nutrition": 0.5454545454545454,
1714
+ "mmlu_eval_accuracy_philosophy": 0.5882352941176471,
1715
+ "mmlu_eval_accuracy_prehistory": 0.37142857142857144,
1716
+ "mmlu_eval_accuracy_professional_accounting": 0.25806451612903225,
1717
+ "mmlu_eval_accuracy_professional_law": 0.3411764705882353,
1718
+ "mmlu_eval_accuracy_professional_medicine": 0.41935483870967744,
1719
+ "mmlu_eval_accuracy_professional_psychology": 0.391304347826087,
1720
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
1721
+ "mmlu_eval_accuracy_security_studies": 0.48148148148148145,
1722
+ "mmlu_eval_accuracy_sociology": 0.5454545454545454,
1723
+ "mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454,
1724
+ "mmlu_eval_accuracy_virology": 0.4444444444444444,
1725
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
1726
+ "mmlu_loss": 0.9425534363398352,
1727
+ "step": 1800
1728
+ },
1729
+ {
1730
+ "epoch": 0.73,
1731
+ "learning_rate": 0.0002,
1732
+ "loss": 0.4603,
1733
+ "step": 1810
1734
+ },
1735
+ {
1736
+ "epoch": 0.74,
1737
+ "learning_rate": 0.0002,
1738
+ "loss": 0.5112,
1739
+ "step": 1820
1740
+ },
1741
+ {
1742
+ "epoch": 0.74,
1743
+ "learning_rate": 0.0002,
1744
+ "loss": 0.6551,
1745
+ "step": 1830
1746
+ },
1747
+ {
1748
+ "epoch": 0.74,
1749
+ "learning_rate": 0.0002,
1750
+ "loss": 0.5428,
1751
+ "step": 1840
1752
+ },
1753
+ {
1754
+ "epoch": 0.75,
1755
+ "learning_rate": 0.0002,
1756
+ "loss": 0.634,
1757
+ "step": 1850
1758
+ },
1759
+ {
1760
+ "epoch": 0.75,
1761
+ "learning_rate": 0.0002,
1762
+ "loss": 0.538,
1763
+ "step": 1860
1764
+ },
1765
+ {
1766
+ "epoch": 0.76,
1767
+ "learning_rate": 0.0002,
1768
+ "loss": 0.5745,
1769
+ "step": 1870
1770
+ },
1771
+ {
1772
+ "epoch": 0.76,
1773
+ "learning_rate": 0.0002,
1774
+ "loss": 0.7127,
1775
+ "step": 1880
1776
+ },
1777
+ {
1778
+ "epoch": 0.77,
1779
+ "learning_rate": 0.0002,
1780
+ "loss": 0.6231,
1781
+ "step": 1890
1782
+ },
1783
+ {
1784
+ "epoch": 0.77,
1785
+ "learning_rate": 0.0002,
1786
+ "loss": 0.5608,
1787
+ "step": 1900
1788
+ },
1789
+ {
1790
+ "epoch": 0.77,
1791
+ "learning_rate": 0.0002,
1792
+ "loss": 0.6482,
1793
+ "step": 1910
1794
+ },
1795
+ {
1796
+ "epoch": 0.78,
1797
+ "learning_rate": 0.0002,
1798
+ "loss": 0.5111,
1799
+ "step": 1920
1800
+ },
1801
+ {
1802
+ "epoch": 0.78,
1803
+ "learning_rate": 0.0002,
1804
+ "loss": 0.6582,
1805
+ "step": 1930
1806
+ },
1807
+ {
1808
+ "epoch": 0.79,
1809
+ "learning_rate": 0.0002,
1810
+ "loss": 0.6121,
1811
+ "step": 1940
1812
+ },
1813
+ {
1814
+ "epoch": 0.79,
1815
+ "learning_rate": 0.0002,
1816
+ "loss": 0.6185,
1817
+ "step": 1950
1818
+ },
1819
+ {
1820
+ "epoch": 0.79,
1821
+ "learning_rate": 0.0002,
1822
+ "loss": 0.5918,
1823
+ "step": 1960
1824
+ },
1825
+ {
1826
+ "epoch": 0.8,
1827
+ "learning_rate": 0.0002,
1828
+ "loss": 0.5883,
1829
+ "step": 1970
1830
+ },
1831
+ {
1832
+ "epoch": 0.8,
1833
+ "learning_rate": 0.0002,
1834
+ "loss": 0.6027,
1835
+ "step": 1980
1836
+ },
1837
+ {
1838
+ "epoch": 0.81,
1839
+ "learning_rate": 0.0002,
1840
+ "loss": 0.4892,
1841
+ "step": 1990
1842
+ },
1843
+ {
1844
+ "epoch": 0.81,
1845
+ "learning_rate": 0.0002,
1846
+ "loss": 0.6467,
1847
+ "step": 2000
1848
+ },
1849
+ {
1850
+ "epoch": 0.81,
1851
+ "eval_loss": 0.6004832983016968,
1852
+ "eval_runtime": 93.9867,
1853
+ "eval_samples_per_second": 10.64,
1854
+ "eval_steps_per_second": 5.32,
1855
+ "step": 2000
1856
+ },
1857
+ {
1858
+ "epoch": 0.81,
1859
+ "mmlu_eval_accuracy": 0.4582647054610207,
1860
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
1861
+ "mmlu_eval_accuracy_anatomy": 0.5714285714285714,
1862
+ "mmlu_eval_accuracy_astronomy": 0.375,
1863
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
1864
+ "mmlu_eval_accuracy_clinical_knowledge": 0.41379310344827586,
1865
+ "mmlu_eval_accuracy_college_biology": 0.375,
1866
+ "mmlu_eval_accuracy_college_chemistry": 0.375,
1867
+ "mmlu_eval_accuracy_college_computer_science": 0.45454545454545453,
1868
+ "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
1869
+ "mmlu_eval_accuracy_college_medicine": 0.3181818181818182,
1870
+ "mmlu_eval_accuracy_college_physics": 0.45454545454545453,
1871
+ "mmlu_eval_accuracy_computer_security": 0.09090909090909091,
1872
+ "mmlu_eval_accuracy_conceptual_physics": 0.3076923076923077,
1873
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
1874
+ "mmlu_eval_accuracy_electrical_engineering": 0.375,
1875
+ "mmlu_eval_accuracy_elementary_mathematics": 0.4146341463414634,
1876
+ "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
1877
+ "mmlu_eval_accuracy_global_facts": 0.3,
1878
+ "mmlu_eval_accuracy_high_school_biology": 0.34375,
1879
+ "mmlu_eval_accuracy_high_school_chemistry": 0.4090909090909091,
1880
+ "mmlu_eval_accuracy_high_school_computer_science": 0.7777777777777778,
1881
+ "mmlu_eval_accuracy_high_school_european_history": 0.4444444444444444,
1882
+ "mmlu_eval_accuracy_high_school_geography": 0.7272727272727273,
1883
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
1884
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.37209302325581395,
1885
+ "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
1886
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231,
1887
+ "mmlu_eval_accuracy_high_school_physics": 0.29411764705882354,
1888
+ "mmlu_eval_accuracy_high_school_psychology": 0.6833333333333333,
1889
+ "mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
1890
+ "mmlu_eval_accuracy_high_school_us_history": 0.6818181818181818,
1891
+ "mmlu_eval_accuracy_high_school_world_history": 0.6153846153846154,
1892
+ "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
1893
+ "mmlu_eval_accuracy_human_sexuality": 0.5,
1894
+ "mmlu_eval_accuracy_international_law": 0.7692307692307693,
1895
+ "mmlu_eval_accuracy_jurisprudence": 0.2727272727272727,
1896
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
1897
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
1898
+ "mmlu_eval_accuracy_management": 0.5454545454545454,
1899
+ "mmlu_eval_accuracy_marketing": 0.8,
1900
+ "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
1901
+ "mmlu_eval_accuracy_miscellaneous": 0.6976744186046512,
1902
+ "mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
1903
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
1904
+ "mmlu_eval_accuracy_nutrition": 0.5454545454545454,
1905
+ "mmlu_eval_accuracy_philosophy": 0.5294117647058824,
1906
+ "mmlu_eval_accuracy_prehistory": 0.37142857142857144,
1907
+ "mmlu_eval_accuracy_professional_accounting": 0.41935483870967744,
1908
+ "mmlu_eval_accuracy_professional_law": 0.3235294117647059,
1909
+ "mmlu_eval_accuracy_professional_medicine": 0.41935483870967744,
1910
+ "mmlu_eval_accuracy_professional_psychology": 0.37681159420289856,
1911
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
1912
+ "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
1913
+ "mmlu_eval_accuracy_sociology": 0.5454545454545454,
1914
+ "mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454,
1915
+ "mmlu_eval_accuracy_virology": 0.5555555555555556,
1916
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
1917
+ "mmlu_loss": 0.8762637240612787,
1918
+ "step": 2000
1919
+ },
1920
+ {
1921
+ "epoch": 0.81,
1922
+ "learning_rate": 0.0002,
1923
+ "loss": 0.6325,
1924
+ "step": 2010
1925
+ },
1926
+ {
1927
+ "epoch": 0.82,
1928
+ "learning_rate": 0.0002,
1929
+ "loss": 0.5258,
1930
+ "step": 2020
1931
+ },
1932
+ {
1933
+ "epoch": 0.82,
1934
+ "learning_rate": 0.0002,
1935
+ "loss": 0.5538,
1936
+ "step": 2030
1937
+ },
1938
+ {
1939
+ "epoch": 0.83,
1940
+ "learning_rate": 0.0002,
1941
+ "loss": 0.598,
1942
+ "step": 2040
1943
+ },
1944
+ {
1945
+ "epoch": 0.83,
1946
+ "learning_rate": 0.0002,
1947
+ "loss": 0.5337,
1948
+ "step": 2050
1949
+ },
1950
+ {
1951
+ "epoch": 0.83,
1952
+ "learning_rate": 0.0002,
1953
+ "loss": 0.5749,
1954
+ "step": 2060
1955
+ },
1956
+ {
1957
+ "epoch": 0.84,
1958
+ "learning_rate": 0.0002,
1959
+ "loss": 0.664,
1960
+ "step": 2070
1961
+ },
1962
+ {
1963
+ "epoch": 0.84,
1964
+ "learning_rate": 0.0002,
1965
+ "loss": 0.6095,
1966
+ "step": 2080
1967
+ },
1968
+ {
1969
+ "epoch": 0.85,
1970
+ "learning_rate": 0.0002,
1971
+ "loss": 0.5729,
1972
+ "step": 2090
1973
+ },
1974
+ {
1975
+ "epoch": 0.85,
1976
+ "learning_rate": 0.0002,
1977
+ "loss": 0.6395,
1978
+ "step": 2100
1979
+ },
1980
+ {
1981
+ "epoch": 0.85,
1982
+ "learning_rate": 0.0002,
1983
+ "loss": 0.5581,
1984
+ "step": 2110
1985
+ },
1986
+ {
1987
+ "epoch": 0.86,
1988
+ "learning_rate": 0.0002,
1989
+ "loss": 0.6305,
1990
+ "step": 2120
1991
+ },
1992
+ {
1993
+ "epoch": 0.86,
1994
+ "learning_rate": 0.0002,
1995
+ "loss": 0.6186,
1996
+ "step": 2130
1997
+ },
1998
+ {
1999
+ "epoch": 0.87,
2000
+ "learning_rate": 0.0002,
2001
+ "loss": 0.4686,
2002
+ "step": 2140
2003
+ },
2004
+ {
2005
+ "epoch": 0.87,
2006
+ "learning_rate": 0.0002,
2007
+ "loss": 0.6395,
2008
+ "step": 2150
2009
+ },
2010
+ {
2011
+ "epoch": 0.87,
2012
+ "learning_rate": 0.0002,
2013
+ "loss": 0.5673,
2014
+ "step": 2160
2015
+ },
2016
+ {
2017
+ "epoch": 0.88,
2018
+ "learning_rate": 0.0002,
2019
+ "loss": 0.5648,
2020
+ "step": 2170
2021
+ },
2022
+ {
2023
+ "epoch": 0.88,
2024
+ "learning_rate": 0.0002,
2025
+ "loss": 0.5265,
2026
+ "step": 2180
2027
+ },
2028
+ {
2029
+ "epoch": 0.89,
2030
+ "learning_rate": 0.0002,
2031
+ "loss": 0.542,
2032
+ "step": 2190
2033
+ },
2034
+ {
2035
+ "epoch": 0.89,
2036
+ "learning_rate": 0.0002,
2037
+ "loss": 0.488,
2038
+ "step": 2200
2039
+ },
2040
+ {
2041
+ "epoch": 0.89,
2042
+ "eval_loss": 0.598466694355011,
2043
+ "eval_runtime": 93.9641,
2044
+ "eval_samples_per_second": 10.642,
2045
+ "eval_steps_per_second": 5.321,
2046
+ "step": 2200
2047
+ },
2048
+ {
2049
+ "epoch": 0.89,
2050
+ "mmlu_eval_accuracy": 0.453684883010787,
2051
+ "mmlu_eval_accuracy_abstract_algebra": 0.09090909090909091,
2052
+ "mmlu_eval_accuracy_anatomy": 0.5714285714285714,
2053
+ "mmlu_eval_accuracy_astronomy": 0.4375,
2054
+ "mmlu_eval_accuracy_business_ethics": 0.6363636363636364,
2055
+ "mmlu_eval_accuracy_clinical_knowledge": 0.41379310344827586,
2056
+ "mmlu_eval_accuracy_college_biology": 0.375,
2057
+ "mmlu_eval_accuracy_college_chemistry": 0.0,
2058
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
2059
+ "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
2060
+ "mmlu_eval_accuracy_college_medicine": 0.2727272727272727,
2061
+ "mmlu_eval_accuracy_college_physics": 0.5454545454545454,
2062
+ "mmlu_eval_accuracy_computer_security": 0.2727272727272727,
2063
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
2064
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
2065
+ "mmlu_eval_accuracy_electrical_engineering": 0.4375,
2066
+ "mmlu_eval_accuracy_elementary_mathematics": 0.36585365853658536,
2067
+ "mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
2068
+ "mmlu_eval_accuracy_global_facts": 0.6,
2069
+ "mmlu_eval_accuracy_high_school_biology": 0.40625,
2070
+ "mmlu_eval_accuracy_high_school_chemistry": 0.36363636363636365,
2071
+ "mmlu_eval_accuracy_high_school_computer_science": 0.6666666666666666,
2072
+ "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
2073
+ "mmlu_eval_accuracy_high_school_geography": 0.7272727272727273,
2074
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
2075
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.32558139534883723,
2076
+ "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
2077
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.46153846153846156,
2078
+ "mmlu_eval_accuracy_high_school_physics": 0.17647058823529413,
2079
+ "mmlu_eval_accuracy_high_school_psychology": 0.6833333333333333,
2080
+ "mmlu_eval_accuracy_high_school_statistics": 0.21739130434782608,
2081
+ "mmlu_eval_accuracy_high_school_us_history": 0.7272727272727273,
2082
+ "mmlu_eval_accuracy_high_school_world_history": 0.5384615384615384,
2083
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
2084
+ "mmlu_eval_accuracy_human_sexuality": 0.5,
2085
+ "mmlu_eval_accuracy_international_law": 0.7692307692307693,
2086
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
2087
+ "mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556,
2088
+ "mmlu_eval_accuracy_machine_learning": 0.09090909090909091,
2089
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
2090
+ "mmlu_eval_accuracy_marketing": 0.72,
2091
+ "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
2092
+ "mmlu_eval_accuracy_miscellaneous": 0.6744186046511628,
2093
+ "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
2094
+ "mmlu_eval_accuracy_moral_scenarios": 0.23,
2095
+ "mmlu_eval_accuracy_nutrition": 0.5151515151515151,
2096
+ "mmlu_eval_accuracy_philosophy": 0.47058823529411764,
2097
+ "mmlu_eval_accuracy_prehistory": 0.34285714285714286,
2098
+ "mmlu_eval_accuracy_professional_accounting": 0.2903225806451613,
2099
+ "mmlu_eval_accuracy_professional_law": 0.3588235294117647,
2100
+ "mmlu_eval_accuracy_professional_medicine": 0.45161290322580644,
2101
+ "mmlu_eval_accuracy_professional_psychology": 0.34782608695652173,
2102
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
2103
+ "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
2104
+ "mmlu_eval_accuracy_sociology": 0.6818181818181818,
2105
+ "mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454,
2106
+ "mmlu_eval_accuracy_virology": 0.6111111111111112,
2107
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
2108
+ "mmlu_loss": 0.9464586163158516,
2109
+ "step": 2200
2110
+ },
2111
+ {
2112
+ "epoch": 0.89,
2113
+ "learning_rate": 0.0002,
2114
+ "loss": 0.5428,
2115
+ "step": 2210
2116
+ },
2117
+ {
2118
+ "epoch": 0.9,
2119
+ "learning_rate": 0.0002,
2120
+ "loss": 0.6717,
2121
+ "step": 2220
2122
+ },
2123
+ {
2124
+ "epoch": 0.9,
2125
+ "learning_rate": 0.0002,
2126
+ "loss": 0.6128,
2127
+ "step": 2230
2128
+ },
2129
+ {
2130
+ "epoch": 0.91,
2131
+ "learning_rate": 0.0002,
2132
+ "loss": 0.5053,
2133
+ "step": 2240
2134
+ },
2135
+ {
2136
+ "epoch": 0.91,
2137
+ "learning_rate": 0.0002,
2138
+ "loss": 0.5135,
2139
+ "step": 2250
2140
+ },
2141
+ {
2142
+ "epoch": 0.91,
2143
+ "learning_rate": 0.0002,
2144
+ "loss": 0.5352,
2145
+ "step": 2260
2146
+ },
2147
+ {
2148
+ "epoch": 0.92,
2149
+ "learning_rate": 0.0002,
2150
+ "loss": 0.5411,
2151
+ "step": 2270
2152
+ },
2153
+ {
2154
+ "epoch": 0.92,
2155
+ "learning_rate": 0.0002,
2156
+ "loss": 0.7386,
2157
+ "step": 2280
2158
+ },
2159
+ {
2160
+ "epoch": 0.93,
2161
+ "learning_rate": 0.0002,
2162
+ "loss": 0.5334,
2163
+ "step": 2290
2164
+ },
2165
+ {
2166
+ "epoch": 0.93,
2167
+ "learning_rate": 0.0002,
2168
+ "loss": 0.5402,
2169
+ "step": 2300
2170
+ },
2171
+ {
2172
+ "epoch": 0.94,
2173
+ "learning_rate": 0.0002,
2174
+ "loss": 0.7309,
2175
+ "step": 2310
2176
+ },
2177
+ {
2178
+ "epoch": 0.94,
2179
+ "learning_rate": 0.0002,
2180
+ "loss": 0.7377,
2181
+ "step": 2320
2182
+ },
2183
+ {
2184
+ "epoch": 0.94,
2185
+ "learning_rate": 0.0002,
2186
+ "loss": 0.4948,
2187
+ "step": 2330
2188
+ },
2189
+ {
2190
+ "epoch": 0.95,
2191
+ "learning_rate": 0.0002,
2192
+ "loss": 0.5601,
2193
+ "step": 2340
2194
+ },
2195
+ {
2196
+ "epoch": 0.95,
2197
+ "learning_rate": 0.0002,
2198
+ "loss": 0.5611,
2199
+ "step": 2350
2200
+ },
2201
+ {
2202
+ "epoch": 0.96,
2203
+ "learning_rate": 0.0002,
2204
+ "loss": 0.5769,
2205
+ "step": 2360
2206
+ },
2207
+ {
2208
+ "epoch": 0.96,
2209
+ "learning_rate": 0.0002,
2210
+ "loss": 0.4425,
2211
+ "step": 2370
2212
+ },
2213
+ {
2214
+ "epoch": 0.96,
2215
+ "learning_rate": 0.0002,
2216
+ "loss": 0.5148,
2217
+ "step": 2380
2218
+ },
2219
+ {
2220
+ "epoch": 0.97,
2221
+ "learning_rate": 0.0002,
2222
+ "loss": 0.5422,
2223
+ "step": 2390
2224
+ },
2225
+ {
2226
+ "epoch": 0.97,
2227
+ "learning_rate": 0.0002,
2228
+ "loss": 0.5161,
2229
+ "step": 2400
2230
+ },
2231
+ {
2232
+ "epoch": 0.97,
2233
+ "eval_loss": 0.6037020683288574,
2234
+ "eval_runtime": 94.2295,
2235
+ "eval_samples_per_second": 10.612,
2236
+ "eval_steps_per_second": 5.306,
2237
+ "step": 2400
2238
+ },
2239
+ {
2240
+ "epoch": 0.97,
2241
+ "mmlu_eval_accuracy": 0.4454679892995579,
2242
+ "mmlu_eval_accuracy_abstract_algebra": 0.18181818181818182,
2243
+ "mmlu_eval_accuracy_anatomy": 0.5714285714285714,
2244
+ "mmlu_eval_accuracy_astronomy": 0.5625,
2245
+ "mmlu_eval_accuracy_business_ethics": 0.7272727272727273,
2246
+ "mmlu_eval_accuracy_clinical_knowledge": 0.3103448275862069,
2247
+ "mmlu_eval_accuracy_college_biology": 0.25,
2248
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
2249
+ "mmlu_eval_accuracy_college_computer_science": 0.5454545454545454,
2250
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
2251
+ "mmlu_eval_accuracy_college_medicine": 0.2727272727272727,
2252
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
2253
+ "mmlu_eval_accuracy_computer_security": 0.18181818181818182,
2254
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
2255
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
2256
+ "mmlu_eval_accuracy_electrical_engineering": 0.5,
2257
+ "mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
2258
+ "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
2259
+ "mmlu_eval_accuracy_global_facts": 0.2,
2260
+ "mmlu_eval_accuracy_high_school_biology": 0.28125,
2261
+ "mmlu_eval_accuracy_high_school_chemistry": 0.3181818181818182,
2262
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
2263
+ "mmlu_eval_accuracy_high_school_european_history": 0.6666666666666666,
2264
+ "mmlu_eval_accuracy_high_school_geography": 0.6363636363636364,
2265
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
2266
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.32558139534883723,
2267
+ "mmlu_eval_accuracy_high_school_mathematics": 0.3448275862068966,
2268
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.34615384615384615,
2269
+ "mmlu_eval_accuracy_high_school_physics": 0.23529411764705882,
2270
+ "mmlu_eval_accuracy_high_school_psychology": 0.65,
2271
+ "mmlu_eval_accuracy_high_school_statistics": 0.391304347826087,
2272
+ "mmlu_eval_accuracy_high_school_us_history": 0.5454545454545454,
2273
+ "mmlu_eval_accuracy_high_school_world_history": 0.6153846153846154,
2274
+ "mmlu_eval_accuracy_human_aging": 0.6086956521739131,
2275
+ "mmlu_eval_accuracy_human_sexuality": 0.5,
2276
+ "mmlu_eval_accuracy_international_law": 0.6923076923076923,
2277
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
2278
+ "mmlu_eval_accuracy_logical_fallacies": 0.5,
2279
+ "mmlu_eval_accuracy_machine_learning": 0.09090909090909091,
2280
+ "mmlu_eval_accuracy_management": 0.45454545454545453,
2281
+ "mmlu_eval_accuracy_marketing": 0.72,
2282
+ "mmlu_eval_accuracy_medical_genetics": 0.8181818181818182,
2283
+ "mmlu_eval_accuracy_miscellaneous": 0.6395348837209303,
2284
+ "mmlu_eval_accuracy_moral_disputes": 0.5,
2285
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
2286
+ "mmlu_eval_accuracy_nutrition": 0.5151515151515151,
2287
+ "mmlu_eval_accuracy_philosophy": 0.5588235294117647,
2288
+ "mmlu_eval_accuracy_prehistory": 0.2857142857142857,
2289
+ "mmlu_eval_accuracy_professional_accounting": 0.3870967741935484,
2290
+ "mmlu_eval_accuracy_professional_law": 0.3176470588235294,
2291
+ "mmlu_eval_accuracy_professional_medicine": 0.41935483870967744,
2292
+ "mmlu_eval_accuracy_professional_psychology": 0.42028985507246375,
2293
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
2294
+ "mmlu_eval_accuracy_security_studies": 0.37037037037037035,
2295
+ "mmlu_eval_accuracy_sociology": 0.5454545454545454,
2296
+ "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
2297
+ "mmlu_eval_accuracy_virology": 0.4444444444444444,
2298
+ "mmlu_eval_accuracy_world_religions": 0.8421052631578947,
2299
+ "mmlu_loss": 0.8906538285723554,
2300
+ "step": 2400
2301
+ },
2302
+ {
2303
+ "epoch": 0.98,
2304
+ "learning_rate": 0.0002,
2305
+ "loss": 0.5139,
2306
+ "step": 2410
2307
+ },
2308
+ {
2309
+ "epoch": 0.98,
2310
+ "learning_rate": 0.0002,
2311
+ "loss": 0.5291,
2312
+ "step": 2420
2313
+ },
2314
+ {
2315
+ "epoch": 0.98,
2316
+ "learning_rate": 0.0002,
2317
+ "loss": 0.6079,
2318
+ "step": 2430
2319
+ },
2320
+ {
2321
+ "epoch": 0.99,
2322
+ "learning_rate": 0.0002,
2323
+ "loss": 0.5692,
2324
+ "step": 2440
2325
+ },
2326
+ {
2327
+ "epoch": 0.99,
2328
+ "learning_rate": 0.0002,
2329
+ "loss": 0.6136,
2330
+ "step": 2450
2331
+ },
2332
+ {
2333
+ "epoch": 1.0,
2334
+ "learning_rate": 0.0002,
2335
+ "loss": 0.5858,
2336
+ "step": 2460
2337
+ },
2338
+ {
2339
+ "epoch": 1.0,
2340
+ "learning_rate": 0.0002,
2341
+ "loss": 0.4679,
2342
+ "step": 2470
2343
+ },
2344
+ {
2345
+ "epoch": 1.0,
2346
+ "learning_rate": 0.0002,
2347
+ "loss": 0.5018,
2348
+ "step": 2480
2349
+ },
2350
+ {
2351
+ "epoch": 1.01,
2352
+ "learning_rate": 0.0002,
2353
+ "loss": 0.551,
2354
+ "step": 2490
2355
+ },
2356
+ {
2357
+ "epoch": 1.01,
2358
+ "learning_rate": 0.0002,
2359
+ "loss": 0.528,
2360
+ "step": 2500
2361
+ },
2362
+ {
2363
+ "epoch": 1.02,
2364
+ "learning_rate": 0.0002,
2365
+ "loss": 0.4489,
2366
+ "step": 2510
2367
+ },
2368
+ {
2369
+ "epoch": 1.02,
2370
+ "learning_rate": 0.0002,
2371
+ "loss": 0.4718,
2372
+ "step": 2520
2373
+ },
2374
+ {
2375
+ "epoch": 1.02,
2376
+ "learning_rate": 0.0002,
2377
+ "loss": 0.4079,
2378
+ "step": 2530
2379
+ },
2380
+ {
2381
+ "epoch": 1.03,
2382
+ "learning_rate": 0.0002,
2383
+ "loss": 0.4827,
2384
+ "step": 2540
2385
+ },
2386
+ {
2387
+ "epoch": 1.03,
2388
+ "learning_rate": 0.0002,
2389
+ "loss": 0.5017,
2390
+ "step": 2550
2391
+ },
2392
+ {
2393
+ "epoch": 1.04,
2394
+ "learning_rate": 0.0002,
2395
+ "loss": 0.4425,
2396
+ "step": 2560
2397
+ },
2398
+ {
2399
+ "epoch": 1.04,
2400
+ "learning_rate": 0.0002,
2401
+ "loss": 0.4271,
2402
+ "step": 2570
2403
+ },
2404
+ {
2405
+ "epoch": 1.04,
2406
+ "learning_rate": 0.0002,
2407
+ "loss": 0.5164,
2408
+ "step": 2580
2409
+ },
2410
+ {
2411
+ "epoch": 1.05,
2412
+ "learning_rate": 0.0002,
2413
+ "loss": 0.3981,
2414
+ "step": 2590
2415
+ },
2416
+ {
2417
+ "epoch": 1.05,
2418
+ "learning_rate": 0.0002,
2419
+ "loss": 0.645,
2420
+ "step": 2600
2421
+ },
2422
+ {
2423
+ "epoch": 1.05,
2424
+ "eval_loss": 0.6178489327430725,
2425
+ "eval_runtime": 94.1423,
2426
+ "eval_samples_per_second": 10.622,
2427
+ "eval_steps_per_second": 5.311,
2428
+ "step": 2600
2429
+ },
2430
+ {
2431
+ "epoch": 1.05,
2432
+ "mmlu_eval_accuracy": 0.461544966521083,
2433
+ "mmlu_eval_accuracy_abstract_algebra": 0.18181818181818182,
2434
+ "mmlu_eval_accuracy_anatomy": 0.5714285714285714,
2435
+ "mmlu_eval_accuracy_astronomy": 0.4375,
2436
+ "mmlu_eval_accuracy_business_ethics": 0.6363636363636364,
2437
+ "mmlu_eval_accuracy_clinical_knowledge": 0.3793103448275862,
2438
+ "mmlu_eval_accuracy_college_biology": 0.3125,
2439
+ "mmlu_eval_accuracy_college_chemistry": 0.125,
2440
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
2441
+ "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
2442
+ "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
2443
+ "mmlu_eval_accuracy_college_physics": 0.5454545454545454,
2444
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
2445
+ "mmlu_eval_accuracy_conceptual_physics": 0.34615384615384615,
2446
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
2447
+ "mmlu_eval_accuracy_electrical_engineering": 0.4375,
2448
+ "mmlu_eval_accuracy_elementary_mathematics": 0.36585365853658536,
2449
+ "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
2450
+ "mmlu_eval_accuracy_global_facts": 0.3,
2451
+ "mmlu_eval_accuracy_high_school_biology": 0.3125,
2452
+ "mmlu_eval_accuracy_high_school_chemistry": 0.36363636363636365,
2453
+ "mmlu_eval_accuracy_high_school_computer_science": 0.6666666666666666,
2454
+ "mmlu_eval_accuracy_high_school_european_history": 0.5,
2455
+ "mmlu_eval_accuracy_high_school_geography": 0.6363636363636364,
2456
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
2457
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.37209302325581395,
2458
+ "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
2459
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231,
2460
+ "mmlu_eval_accuracy_high_school_physics": 0.17647058823529413,
2461
+ "mmlu_eval_accuracy_high_school_psychology": 0.8,
2462
+ "mmlu_eval_accuracy_high_school_statistics": 0.391304347826087,
2463
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
2464
+ "mmlu_eval_accuracy_high_school_world_history": 0.6153846153846154,
2465
+ "mmlu_eval_accuracy_human_aging": 0.6521739130434783,
2466
+ "mmlu_eval_accuracy_human_sexuality": 0.5,
2467
+ "mmlu_eval_accuracy_international_law": 0.6923076923076923,
2468
+ "mmlu_eval_accuracy_jurisprudence": 0.5454545454545454,
2469
+ "mmlu_eval_accuracy_logical_fallacies": 0.5,
2470
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
2471
+ "mmlu_eval_accuracy_management": 0.45454545454545453,
2472
+ "mmlu_eval_accuracy_marketing": 0.76,
2473
+ "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
2474
+ "mmlu_eval_accuracy_miscellaneous": 0.6976744186046512,
2475
+ "mmlu_eval_accuracy_moral_disputes": 0.39473684210526316,
2476
+ "mmlu_eval_accuracy_moral_scenarios": 0.23,
2477
+ "mmlu_eval_accuracy_nutrition": 0.5454545454545454,
2478
+ "mmlu_eval_accuracy_philosophy": 0.5882352941176471,
2479
+ "mmlu_eval_accuracy_prehistory": 0.37142857142857144,
2480
+ "mmlu_eval_accuracy_professional_accounting": 0.25806451612903225,
2481
+ "mmlu_eval_accuracy_professional_law": 0.3352941176470588,
2482
+ "mmlu_eval_accuracy_professional_medicine": 0.45161290322580644,
2483
+ "mmlu_eval_accuracy_professional_psychology": 0.391304347826087,
2484
+ "mmlu_eval_accuracy_public_relations": 0.5,
2485
+ "mmlu_eval_accuracy_security_studies": 0.48148148148148145,
2486
+ "mmlu_eval_accuracy_sociology": 0.6818181818181818,
2487
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
2488
+ "mmlu_eval_accuracy_virology": 0.5555555555555556,
2489
+ "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
2490
+ "mmlu_loss": 0.9348930491732711,
2491
+ "step": 2600
2492
+ },
2493
+ {
2494
+ "epoch": 1.06,
2495
+ "learning_rate": 0.0002,
2496
+ "loss": 0.5278,
2497
+ "step": 2610
2498
+ },
2499
+ {
2500
+ "epoch": 1.06,
2501
+ "learning_rate": 0.0002,
2502
+ "loss": 0.4388,
2503
+ "step": 2620
2504
+ },
2505
+ {
2506
+ "epoch": 1.06,
2507
+ "learning_rate": 0.0002,
2508
+ "loss": 0.5069,
2509
+ "step": 2630
2510
+ },
2511
+ {
2512
+ "epoch": 1.07,
2513
+ "learning_rate": 0.0002,
2514
+ "loss": 0.362,
2515
+ "step": 2640
2516
+ },
2517
+ {
2518
+ "epoch": 1.07,
2519
+ "learning_rate": 0.0002,
2520
+ "loss": 0.473,
2521
+ "step": 2650
2522
+ },
2523
+ {
2524
+ "epoch": 1.08,
2525
+ "learning_rate": 0.0002,
2526
+ "loss": 0.4701,
2527
+ "step": 2660
2528
+ },
2529
+ {
2530
+ "epoch": 1.08,
2531
+ "learning_rate": 0.0002,
2532
+ "loss": 0.3107,
2533
+ "step": 2670
2534
+ },
2535
+ {
2536
+ "epoch": 1.08,
2537
+ "learning_rate": 0.0002,
2538
+ "loss": 0.4745,
2539
+ "step": 2680
2540
+ },
2541
+ {
2542
+ "epoch": 1.09,
2543
+ "learning_rate": 0.0002,
2544
+ "loss": 0.5246,
2545
+ "step": 2690
2546
+ },
2547
+ {
2548
+ "epoch": 1.09,
2549
+ "learning_rate": 0.0002,
2550
+ "loss": 0.4257,
2551
+ "step": 2700
2552
+ },
2553
+ {
2554
+ "epoch": 1.1,
2555
+ "learning_rate": 0.0002,
2556
+ "loss": 0.5087,
2557
+ "step": 2710
2558
+ },
2559
+ {
2560
+ "epoch": 1.1,
2561
+ "learning_rate": 0.0002,
2562
+ "loss": 0.5063,
2563
+ "step": 2720
2564
+ },
2565
+ {
2566
+ "epoch": 1.11,
2567
+ "learning_rate": 0.0002,
2568
+ "loss": 0.3647,
2569
+ "step": 2730
2570
+ },
2571
+ {
2572
+ "epoch": 1.11,
2573
+ "learning_rate": 0.0002,
2574
+ "loss": 0.4308,
2575
+ "step": 2740
2576
+ },
2577
+ {
2578
+ "epoch": 1.11,
2579
+ "learning_rate": 0.0002,
2580
+ "loss": 0.5247,
2581
+ "step": 2750
2582
+ },
2583
+ {
2584
+ "epoch": 1.12,
2585
+ "learning_rate": 0.0002,
2586
+ "loss": 0.4857,
2587
+ "step": 2760
2588
+ },
2589
+ {
2590
+ "epoch": 1.12,
2591
+ "learning_rate": 0.0002,
2592
+ "loss": 0.3833,
2593
+ "step": 2770
2594
+ },
2595
+ {
2596
+ "epoch": 1.13,
2597
+ "learning_rate": 0.0002,
2598
+ "loss": 0.4455,
2599
+ "step": 2780
2600
+ },
2601
+ {
2602
+ "epoch": 1.13,
2603
+ "learning_rate": 0.0002,
2604
+ "loss": 0.3938,
2605
+ "step": 2790
2606
+ },
2607
+ {
2608
+ "epoch": 1.13,
2609
+ "learning_rate": 0.0002,
2610
+ "loss": 0.4697,
2611
+ "step": 2800
2612
+ },
2613
+ {
2614
+ "epoch": 1.13,
2615
+ "eval_loss": 0.6199526190757751,
2616
+ "eval_runtime": 94.2538,
2617
+ "eval_samples_per_second": 10.61,
2618
+ "eval_steps_per_second": 5.305,
2619
+ "step": 2800
2620
+ },
2621
+ {
2622
+ "epoch": 1.13,
2623
+ "mmlu_eval_accuracy": 0.46040539210666037,
2624
+ "mmlu_eval_accuracy_abstract_algebra": 0.18181818181818182,
2625
+ "mmlu_eval_accuracy_anatomy": 0.5714285714285714,
2626
+ "mmlu_eval_accuracy_astronomy": 0.4375,
2627
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
2628
+ "mmlu_eval_accuracy_clinical_knowledge": 0.41379310344827586,
2629
+ "mmlu_eval_accuracy_college_biology": 0.375,
2630
+ "mmlu_eval_accuracy_college_chemistry": 0.125,
2631
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
2632
+ "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
2633
+ "mmlu_eval_accuracy_college_medicine": 0.3181818181818182,
2634
+ "mmlu_eval_accuracy_college_physics": 0.45454545454545453,
2635
+ "mmlu_eval_accuracy_computer_security": 0.5454545454545454,
2636
+ "mmlu_eval_accuracy_conceptual_physics": 0.34615384615384615,
2637
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
2638
+ "mmlu_eval_accuracy_electrical_engineering": 0.4375,
2639
+ "mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
2640
+ "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
2641
+ "mmlu_eval_accuracy_global_facts": 0.5,
2642
+ "mmlu_eval_accuracy_high_school_biology": 0.375,
2643
+ "mmlu_eval_accuracy_high_school_chemistry": 0.36363636363636365,
2644
+ "mmlu_eval_accuracy_high_school_computer_science": 0.6666666666666666,
2645
+ "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
2646
+ "mmlu_eval_accuracy_high_school_geography": 0.6818181818181818,
2647
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
2648
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.3488372093023256,
2649
+ "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
2650
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231,
2651
+ "mmlu_eval_accuracy_high_school_physics": 0.23529411764705882,
2652
+ "mmlu_eval_accuracy_high_school_psychology": 0.7666666666666667,
2653
+ "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
2654
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
2655
+ "mmlu_eval_accuracy_high_school_world_history": 0.5384615384615384,
2656
+ "mmlu_eval_accuracy_human_aging": 0.6521739130434783,
2657
+ "mmlu_eval_accuracy_human_sexuality": 0.5,
2658
+ "mmlu_eval_accuracy_international_law": 0.7692307692307693,
2659
+ "mmlu_eval_accuracy_jurisprudence": 0.5454545454545454,
2660
+ "mmlu_eval_accuracy_logical_fallacies": 0.5,
2661
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
2662
+ "mmlu_eval_accuracy_management": 0.5454545454545454,
2663
+ "mmlu_eval_accuracy_marketing": 0.76,
2664
+ "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
2665
+ "mmlu_eval_accuracy_miscellaneous": 0.7093023255813954,
2666
+ "mmlu_eval_accuracy_moral_disputes": 0.39473684210526316,
2667
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
2668
+ "mmlu_eval_accuracy_nutrition": 0.5454545454545454,
2669
+ "mmlu_eval_accuracy_philosophy": 0.5294117647058824,
2670
+ "mmlu_eval_accuracy_prehistory": 0.4,
2671
+ "mmlu_eval_accuracy_professional_accounting": 0.1935483870967742,
2672
+ "mmlu_eval_accuracy_professional_law": 0.3352941176470588,
2673
+ "mmlu_eval_accuracy_professional_medicine": 0.3870967741935484,
2674
+ "mmlu_eval_accuracy_professional_psychology": 0.4057971014492754,
2675
+ "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
2676
+ "mmlu_eval_accuracy_security_studies": 0.48148148148148145,
2677
+ "mmlu_eval_accuracy_sociology": 0.5909090909090909,
2678
+ "mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454,
2679
+ "mmlu_eval_accuracy_virology": 0.6111111111111112,
2680
+ "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
2681
+ "mmlu_loss": 0.9342371760612213,
2682
+ "step": 2800
2683
+ }
2684
+ ],
2685
+ "max_steps": 5000,
2686
+ "num_train_epochs": 3,
2687
+ "total_flos": 2.3639068475958067e+17,
2688
+ "trial_name": null,
2689
+ "trial_params": null
2690
+ }
checkpoint-2800/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aa7040c79b74a7740d69d47609ddc5931289eb569fe83227d6ed0c6da6772a7a
+ size 6011