Farouk committed
Commit 3468485
1 Parent(s): 401e125

Training in progress, step 9600

adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:abba24d45820055cdb1a36be57900b8f99dbd797b91c0f9691bc7704955f5c8c
+oid sha256:e89c2ce43f512a93774b45f193b0dd939c1fb7cb618f77f34bb9707caf0f4b39
 size 319977229
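The block above is a Git LFS pointer: the repository only tracks the `oid sha256` and `size` of the binary adapter weights, which live in LFS storage. Not part of this commit, but as a minimal sketch (file path and expected hash taken from the `+` line above, everything else assumed), a downloaded copy of `adapter_model.bin` could be checked against the new pointer like this:

```python
# Hypothetical check, not part of this repository: compare a local copy of
# adapter_model.bin against the sha256 oid recorded in the LFS pointer above.
import hashlib

EXPECTED_OID = "e89c2ce43f512a93774b45f193b0dd939c1fb7cb618f77f34bb9707caf0f4b39"  # from the + line

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file in chunks so large weight files are not loaded into memory at once."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

if __name__ == "__main__":
    digest = sha256_of("adapter_model.bin")
    print("match" if digest == EXPECTED_OID else f"mismatch: {digest}")
```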
checkpoint-6200/adapter_model/adapter_model/README.md CHANGED
@@ -169,6 +169,17 @@ The following `bitsandbytes` quantization config was used during training:
 - bnb_4bit_use_double_quant: True
 - bnb_4bit_compute_dtype: bfloat16
 
+The following `bitsandbytes` quantization config was used during training:
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+
 The following `bitsandbytes` quantization config was used during training:
 - load_in_8bit: False
 - load_in_4bit: True
@@ -196,5 +207,6 @@ The following `bitsandbytes` quantization config was used during training:
 - PEFT 0.4.0
 - PEFT 0.4.0
 - PEFT 0.4.0
+- PEFT 0.4.0
 
 - PEFT 0.4.0
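The README hunk above records the `bitsandbytes` settings used for 4-bit (nf4) training. As a rough sketch only (the field values are copied from the `+` lines in the diff; expressing them through `transformers.BitsAndBytesConfig` is an assumption, not something this commit contains), the same config could be written as:

```python
# Sketch only: reconstructs the quantization config listed in the README diff.
# Values come from the + lines above; the surrounding code is assumed, not part of the commit.
import torch
from transformers import BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_8bit=False,
    load_in_4bit=True,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)
# bnb_config would typically be passed as quantization_config= to
# AutoModelForCausalLM.from_pretrained(...) when loading the base model in 4-bit.
```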
checkpoint-6200/adapter_model/adapter_model/adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3a9ec8442fecb441b16d48976920c04814ebaca7164fd4f7d1cb8602f5386e4a
+oid sha256:abba24d45820055cdb1a36be57900b8f99dbd797b91c0f9691bc7704955f5c8c
 size 319977229
{checkpoint-7600 → checkpoint-9600}/README.md RENAMED
File without changes
{checkpoint-7600 → checkpoint-9600}/adapter_config.json RENAMED
File without changes
{checkpoint-7600 → checkpoint-9600}/adapter_model.bin RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:07063fab085ce2edb38e992b21ba4ee3b66bff6934db04d38b023343348d2b28
+oid sha256:e89c2ce43f512a93774b45f193b0dd939c1fb7cb618f77f34bb9707caf0f4b39
 size 319977229
{checkpoint-7600 → checkpoint-9600}/added_tokens.json RENAMED
File without changes
{checkpoint-7600 → checkpoint-9600}/optimizer.pt RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a60377cf0f531038c66e8c709a0bd3308aad9106ae9c96a1d17de9e0bd111152
+oid sha256:b0051fb5fc978e831b1ff0721616e9fd09b6e7196299a0b25eef68fac5f50820
 size 1279539973
{checkpoint-7600 → checkpoint-9600}/rng_state.pth RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:825acf34a2b84662a9307ebc8e48fe8e0bfc7b2fa63d20786eeb2d10884cd18e
+oid sha256:e2e2fcf437fc79c176e41e3753ef7d8787002b2bbf34efa58afc34cd85f577e3
 size 14511
{checkpoint-7600 → checkpoint-9600}/scheduler.pt RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:28e84ce1056e951be7d81d2edd8521bf4fd1356b40fedd4b87bf74e02969be5b
+oid sha256:0b730b5465fd4d5b685fb0c0a2d7cb7400a28391a16b7e3e188b846bcfacab04
 size 627
{checkpoint-7600 → checkpoint-9600}/special_tokens_map.json RENAMED
File without changes
{checkpoint-7600 → checkpoint-9600}/tokenizer.model RENAMED
File without changes
{checkpoint-7600 → checkpoint-9600}/tokenizer_config.json RENAMED
File without changes
{checkpoint-7600 → checkpoint-9600}/trainer_state.json RENAMED
@@ -1,8 +1,8 @@
1
  {
2
  "best_metric": 0.7293602228164673,
3
  "best_model_checkpoint": "experts/expert-16/checkpoint-6200",
4
- "epoch": 2.4081115335868186,
5
- "global_step": 7600,
6
  "is_hyper_param_search": false,
7
  "is_local_process_zero": true,
8
  "is_world_process_zero": true,
@@ -7264,11 +7264,1921 @@
7264
  "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
7265
  "mmlu_loss": 1.584926052947891,
7266
  "step": 7600
7267
  }
7268
  ],
7269
  "max_steps": 10000,
7270
  "num_train_epochs": 4,
7271
- "total_flos": 2.3037492480033915e+18,
7272
  "trial_name": null,
7273
  "trial_params": null
7274
  }
 
1
  {
2
  "best_metric": 0.7293602228164673,
3
  "best_model_checkpoint": "experts/expert-16/checkpoint-6200",
4
+ "epoch": 3.041825095057034,
5
+ "global_step": 9600,
6
  "is_hyper_param_search": false,
7
  "is_local_process_zero": true,
8
  "is_world_process_zero": true,
 
7264
  "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
7265
  "mmlu_loss": 1.584926052947891,
7266
  "step": 7600
7267
+ },
7268
+ {
7269
+ "epoch": 2.41,
7270
+ "learning_rate": 0.0002,
7271
+ "loss": 0.5914,
7272
+ "step": 7610
7273
+ },
7274
+ {
7275
+ "epoch": 2.41,
7276
+ "learning_rate": 0.0002,
7277
+ "loss": 0.59,
7278
+ "step": 7620
7279
+ },
7280
+ {
7281
+ "epoch": 2.42,
7282
+ "learning_rate": 0.0002,
7283
+ "loss": 0.6179,
7284
+ "step": 7630
7285
+ },
7286
+ {
7287
+ "epoch": 2.42,
7288
+ "learning_rate": 0.0002,
7289
+ "loss": 0.6203,
7290
+ "step": 7640
7291
+ },
7292
+ {
7293
+ "epoch": 2.42,
7294
+ "learning_rate": 0.0002,
7295
+ "loss": 0.6113,
7296
+ "step": 7650
7297
+ },
7298
+ {
7299
+ "epoch": 2.43,
7300
+ "learning_rate": 0.0002,
7301
+ "loss": 0.5505,
7302
+ "step": 7660
7303
+ },
7304
+ {
7305
+ "epoch": 2.43,
7306
+ "learning_rate": 0.0002,
7307
+ "loss": 0.5664,
7308
+ "step": 7670
7309
+ },
7310
+ {
7311
+ "epoch": 2.43,
7312
+ "learning_rate": 0.0002,
7313
+ "loss": 0.596,
7314
+ "step": 7680
7315
+ },
7316
+ {
7317
+ "epoch": 2.44,
7318
+ "learning_rate": 0.0002,
7319
+ "loss": 0.6125,
7320
+ "step": 7690
7321
+ },
7322
+ {
7323
+ "epoch": 2.44,
7324
+ "learning_rate": 0.0002,
7325
+ "loss": 0.607,
7326
+ "step": 7700
7327
+ },
7328
+ {
7329
+ "epoch": 2.44,
7330
+ "learning_rate": 0.0002,
7331
+ "loss": 0.5657,
7332
+ "step": 7710
7333
+ },
7334
+ {
7335
+ "epoch": 2.45,
7336
+ "learning_rate": 0.0002,
7337
+ "loss": 0.5419,
7338
+ "step": 7720
7339
+ },
7340
+ {
7341
+ "epoch": 2.45,
7342
+ "learning_rate": 0.0002,
7343
+ "loss": 0.614,
7344
+ "step": 7730
7345
+ },
7346
+ {
7347
+ "epoch": 2.45,
7348
+ "learning_rate": 0.0002,
7349
+ "loss": 0.6107,
7350
+ "step": 7740
7351
+ },
7352
+ {
7353
+ "epoch": 2.46,
7354
+ "learning_rate": 0.0002,
7355
+ "loss": 0.6099,
7356
+ "step": 7750
7357
+ },
7358
+ {
7359
+ "epoch": 2.46,
7360
+ "learning_rate": 0.0002,
7361
+ "loss": 0.5994,
7362
+ "step": 7760
7363
+ },
7364
+ {
7365
+ "epoch": 2.46,
7366
+ "learning_rate": 0.0002,
7367
+ "loss": 0.6274,
7368
+ "step": 7770
7369
+ },
7370
+ {
7371
+ "epoch": 2.47,
7372
+ "learning_rate": 0.0002,
7373
+ "loss": 0.5902,
7374
+ "step": 7780
7375
+ },
7376
+ {
7377
+ "epoch": 2.47,
7378
+ "learning_rate": 0.0002,
7379
+ "loss": 0.5902,
7380
+ "step": 7790
7381
+ },
7382
+ {
7383
+ "epoch": 2.47,
7384
+ "learning_rate": 0.0002,
7385
+ "loss": 0.599,
7386
+ "step": 7800
7387
+ },
7388
+ {
7389
+ "epoch": 2.47,
7390
+ "eval_loss": 0.760485827922821,
7391
+ "eval_runtime": 111.1916,
7392
+ "eval_samples_per_second": 8.993,
7393
+ "eval_steps_per_second": 4.497,
7394
+ "step": 7800
7395
+ },
7396
+ {
7397
+ "epoch": 2.47,
7398
+ "mmlu_eval_accuracy": 0.48418694277386404,
7399
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
7400
+ "mmlu_eval_accuracy_anatomy": 0.5,
7401
+ "mmlu_eval_accuracy_astronomy": 0.4375,
7402
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
7403
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
7404
+ "mmlu_eval_accuracy_college_biology": 0.375,
7405
+ "mmlu_eval_accuracy_college_chemistry": 0.375,
7406
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
7407
+ "mmlu_eval_accuracy_college_mathematics": 0.09090909090909091,
7408
+ "mmlu_eval_accuracy_college_medicine": 0.5,
7409
+ "mmlu_eval_accuracy_college_physics": 0.2727272727272727,
7410
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
7411
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
7412
+ "mmlu_eval_accuracy_econometrics": 0.25,
7413
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
7414
+ "mmlu_eval_accuracy_elementary_mathematics": 0.36585365853658536,
7415
+ "mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
7416
+ "mmlu_eval_accuracy_global_facts": 0.5,
7417
+ "mmlu_eval_accuracy_high_school_biology": 0.40625,
7418
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
7419
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
7420
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
7421
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
7422
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
7423
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.46511627906976744,
7424
+ "mmlu_eval_accuracy_high_school_mathematics": 0.1724137931034483,
7425
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.5,
7426
+ "mmlu_eval_accuracy_high_school_physics": 0.0,
7427
+ "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
7428
+ "mmlu_eval_accuracy_high_school_statistics": 0.2608695652173913,
7429
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
7430
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
7431
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
7432
+ "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
7433
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
7434
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
7435
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
7436
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
7437
+ "mmlu_eval_accuracy_management": 0.7272727272727273,
7438
+ "mmlu_eval_accuracy_marketing": 0.84,
7439
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
7440
+ "mmlu_eval_accuracy_miscellaneous": 0.627906976744186,
7441
+ "mmlu_eval_accuracy_moral_disputes": 0.5263157894736842,
7442
+ "mmlu_eval_accuracy_moral_scenarios": 0.27,
7443
+ "mmlu_eval_accuracy_nutrition": 0.6666666666666666,
7444
+ "mmlu_eval_accuracy_philosophy": 0.5588235294117647,
7445
+ "mmlu_eval_accuracy_prehistory": 0.5142857142857142,
7446
+ "mmlu_eval_accuracy_professional_accounting": 0.3225806451612903,
7447
+ "mmlu_eval_accuracy_professional_law": 0.34705882352941175,
7448
+ "mmlu_eval_accuracy_professional_medicine": 0.6129032258064516,
7449
+ "mmlu_eval_accuracy_professional_psychology": 0.4782608695652174,
7450
+ "mmlu_eval_accuracy_public_relations": 0.5,
7451
+ "mmlu_eval_accuracy_security_studies": 0.4074074074074074,
7452
+ "mmlu_eval_accuracy_sociology": 0.6363636363636364,
7453
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
7454
+ "mmlu_eval_accuracy_virology": 0.5,
7455
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
7456
+ "mmlu_loss": 1.4828916400204128,
7457
+ "step": 7800
7458
+ },
7459
+ {
7460
+ "epoch": 2.47,
7461
+ "learning_rate": 0.0002,
7462
+ "loss": 0.6005,
7463
+ "step": 7810
7464
+ },
7465
+ {
7466
+ "epoch": 2.48,
7467
+ "learning_rate": 0.0002,
7468
+ "loss": 0.6662,
7469
+ "step": 7820
7470
+ },
7471
+ {
7472
+ "epoch": 2.48,
7473
+ "learning_rate": 0.0002,
7474
+ "loss": 0.5821,
7475
+ "step": 7830
7476
+ },
7477
+ {
7478
+ "epoch": 2.48,
7479
+ "learning_rate": 0.0002,
7480
+ "loss": 0.5826,
7481
+ "step": 7840
7482
+ },
7483
+ {
7484
+ "epoch": 2.49,
7485
+ "learning_rate": 0.0002,
7486
+ "loss": 0.5804,
7487
+ "step": 7850
7488
+ },
7489
+ {
7490
+ "epoch": 2.49,
7491
+ "learning_rate": 0.0002,
7492
+ "loss": 0.587,
7493
+ "step": 7860
7494
+ },
7495
+ {
7496
+ "epoch": 2.49,
7497
+ "learning_rate": 0.0002,
7498
+ "loss": 0.6062,
7499
+ "step": 7870
7500
+ },
7501
+ {
7502
+ "epoch": 2.5,
7503
+ "learning_rate": 0.0002,
7504
+ "loss": 0.5616,
7505
+ "step": 7880
7506
+ },
7507
+ {
7508
+ "epoch": 2.5,
7509
+ "learning_rate": 0.0002,
7510
+ "loss": 0.6351,
7511
+ "step": 7890
7512
+ },
7513
+ {
7514
+ "epoch": 2.5,
7515
+ "learning_rate": 0.0002,
7516
+ "loss": 0.5738,
7517
+ "step": 7900
7518
+ },
7519
+ {
7520
+ "epoch": 2.51,
7521
+ "learning_rate": 0.0002,
7522
+ "loss": 0.5564,
7523
+ "step": 7910
7524
+ },
7525
+ {
7526
+ "epoch": 2.51,
7527
+ "learning_rate": 0.0002,
7528
+ "loss": 0.5696,
7529
+ "step": 7920
7530
+ },
7531
+ {
7532
+ "epoch": 2.51,
7533
+ "learning_rate": 0.0002,
7534
+ "loss": 0.5812,
7535
+ "step": 7930
7536
+ },
7537
+ {
7538
+ "epoch": 2.52,
7539
+ "learning_rate": 0.0002,
7540
+ "loss": 0.5786,
7541
+ "step": 7940
7542
+ },
7543
+ {
7544
+ "epoch": 2.52,
7545
+ "learning_rate": 0.0002,
7546
+ "loss": 0.6053,
7547
+ "step": 7950
7548
+ },
7549
+ {
7550
+ "epoch": 2.52,
7551
+ "learning_rate": 0.0002,
7552
+ "loss": 0.5727,
7553
+ "step": 7960
7554
+ },
7555
+ {
7556
+ "epoch": 2.53,
7557
+ "learning_rate": 0.0002,
7558
+ "loss": 0.621,
7559
+ "step": 7970
7560
+ },
7561
+ {
7562
+ "epoch": 2.53,
7563
+ "learning_rate": 0.0002,
7564
+ "loss": 0.5679,
7565
+ "step": 7980
7566
+ },
7567
+ {
7568
+ "epoch": 2.53,
7569
+ "learning_rate": 0.0002,
7570
+ "loss": 0.6138,
7571
+ "step": 7990
7572
+ },
7573
+ {
7574
+ "epoch": 2.53,
7575
+ "learning_rate": 0.0002,
7576
+ "loss": 0.588,
7577
+ "step": 8000
7578
+ },
7579
+ {
7580
+ "epoch": 2.53,
7581
+ "eval_loss": 0.7585816979408264,
7582
+ "eval_runtime": 111.2835,
7583
+ "eval_samples_per_second": 8.986,
7584
+ "eval_steps_per_second": 4.493,
7585
+ "step": 8000
7586
+ },
7587
+ {
7588
+ "epoch": 2.53,
7589
+ "mmlu_eval_accuracy": 0.48589851563960756,
7590
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
7591
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
7592
+ "mmlu_eval_accuracy_astronomy": 0.4375,
7593
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
7594
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
7595
+ "mmlu_eval_accuracy_college_biology": 0.375,
7596
+ "mmlu_eval_accuracy_college_chemistry": 0.375,
7597
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
7598
+ "mmlu_eval_accuracy_college_mathematics": 0.09090909090909091,
7599
+ "mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
7600
+ "mmlu_eval_accuracy_college_physics": 0.18181818181818182,
7601
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
7602
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
7603
+ "mmlu_eval_accuracy_econometrics": 0.25,
7604
+ "mmlu_eval_accuracy_electrical_engineering": 0.375,
7605
+ "mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
7606
+ "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
7607
+ "mmlu_eval_accuracy_global_facts": 0.5,
7608
+ "mmlu_eval_accuracy_high_school_biology": 0.40625,
7609
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
7610
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
7611
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
7612
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
7613
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
7614
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4883720930232558,
7615
+ "mmlu_eval_accuracy_high_school_mathematics": 0.1724137931034483,
7616
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.5384615384615384,
7617
+ "mmlu_eval_accuracy_high_school_physics": 0.0,
7618
+ "mmlu_eval_accuracy_high_school_psychology": 0.9,
7619
+ "mmlu_eval_accuracy_high_school_statistics": 0.391304347826087,
7620
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
7621
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
7622
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
7623
+ "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
7624
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
7625
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
7626
+ "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
7627
+ "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
7628
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
7629
+ "mmlu_eval_accuracy_marketing": 0.68,
7630
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
7631
+ "mmlu_eval_accuracy_miscellaneous": 0.6627906976744186,
7632
+ "mmlu_eval_accuracy_moral_disputes": 0.5526315789473685,
7633
+ "mmlu_eval_accuracy_moral_scenarios": 0.25,
7634
+ "mmlu_eval_accuracy_nutrition": 0.48484848484848486,
7635
+ "mmlu_eval_accuracy_philosophy": 0.5,
7636
+ "mmlu_eval_accuracy_prehistory": 0.5428571428571428,
7637
+ "mmlu_eval_accuracy_professional_accounting": 0.3225806451612903,
7638
+ "mmlu_eval_accuracy_professional_law": 0.3588235294117647,
7639
+ "mmlu_eval_accuracy_professional_medicine": 0.5806451612903226,
7640
+ "mmlu_eval_accuracy_professional_psychology": 0.4927536231884058,
7641
+ "mmlu_eval_accuracy_public_relations": 0.5,
7642
+ "mmlu_eval_accuracy_security_studies": 0.4074074074074074,
7643
+ "mmlu_eval_accuracy_sociology": 0.7272727272727273,
7644
+ "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
7645
+ "mmlu_eval_accuracy_virology": 0.5,
7646
+ "mmlu_eval_accuracy_world_religions": 0.631578947368421,
7647
+ "mmlu_loss": 1.566373301120402,
7648
+ "step": 8000
7649
+ },
7650
+ {
7651
+ "epoch": 2.54,
7652
+ "learning_rate": 0.0002,
7653
+ "loss": 0.5624,
7654
+ "step": 8010
7655
+ },
7656
+ {
7657
+ "epoch": 2.54,
7658
+ "learning_rate": 0.0002,
7659
+ "loss": 0.6206,
7660
+ "step": 8020
7661
+ },
7662
+ {
7663
+ "epoch": 2.54,
7664
+ "learning_rate": 0.0002,
7665
+ "loss": 0.607,
7666
+ "step": 8030
7667
+ },
7668
+ {
7669
+ "epoch": 2.55,
7670
+ "learning_rate": 0.0002,
7671
+ "loss": 0.6344,
7672
+ "step": 8040
7673
+ },
7674
+ {
7675
+ "epoch": 2.55,
7676
+ "learning_rate": 0.0002,
7677
+ "loss": 0.6705,
7678
+ "step": 8050
7679
+ },
7680
+ {
7681
+ "epoch": 2.55,
7682
+ "learning_rate": 0.0002,
7683
+ "loss": 0.5679,
7684
+ "step": 8060
7685
+ },
7686
+ {
7687
+ "epoch": 2.56,
7688
+ "learning_rate": 0.0002,
7689
+ "loss": 0.6,
7690
+ "step": 8070
7691
+ },
7692
+ {
7693
+ "epoch": 2.56,
7694
+ "learning_rate": 0.0002,
7695
+ "loss": 0.6486,
7696
+ "step": 8080
7697
+ },
7698
+ {
7699
+ "epoch": 2.56,
7700
+ "learning_rate": 0.0002,
7701
+ "loss": 0.5959,
7702
+ "step": 8090
7703
+ },
7704
+ {
7705
+ "epoch": 2.57,
7706
+ "learning_rate": 0.0002,
7707
+ "loss": 0.6454,
7708
+ "step": 8100
7709
+ },
7710
+ {
7711
+ "epoch": 2.57,
7712
+ "learning_rate": 0.0002,
7713
+ "loss": 0.6085,
7714
+ "step": 8110
7715
+ },
7716
+ {
7717
+ "epoch": 2.57,
7718
+ "learning_rate": 0.0002,
7719
+ "loss": 0.5509,
7720
+ "step": 8120
7721
+ },
7722
+ {
7723
+ "epoch": 2.58,
7724
+ "learning_rate": 0.0002,
7725
+ "loss": 0.6267,
7726
+ "step": 8130
7727
+ },
7728
+ {
7729
+ "epoch": 2.58,
7730
+ "learning_rate": 0.0002,
7731
+ "loss": 0.5865,
7732
+ "step": 8140
7733
+ },
7734
+ {
7735
+ "epoch": 2.58,
7736
+ "learning_rate": 0.0002,
7737
+ "loss": 0.6002,
7738
+ "step": 8150
7739
+ },
7740
+ {
7741
+ "epoch": 2.59,
7742
+ "learning_rate": 0.0002,
7743
+ "loss": 0.6342,
7744
+ "step": 8160
7745
+ },
7746
+ {
7747
+ "epoch": 2.59,
7748
+ "learning_rate": 0.0002,
7749
+ "loss": 0.6312,
7750
+ "step": 8170
7751
+ },
7752
+ {
7753
+ "epoch": 2.59,
7754
+ "learning_rate": 0.0002,
7755
+ "loss": 0.6361,
7756
+ "step": 8180
7757
+ },
7758
+ {
7759
+ "epoch": 2.6,
7760
+ "learning_rate": 0.0002,
7761
+ "loss": 0.5676,
7762
+ "step": 8190
7763
+ },
7764
+ {
7765
+ "epoch": 2.6,
7766
+ "learning_rate": 0.0002,
7767
+ "loss": 0.6125,
7768
+ "step": 8200
7769
+ },
7770
+ {
7771
+ "epoch": 2.6,
7772
+ "eval_loss": 0.7568719387054443,
7773
+ "eval_runtime": 111.2374,
7774
+ "eval_samples_per_second": 8.99,
7775
+ "eval_steps_per_second": 4.495,
7776
+ "step": 8200
7777
+ },
7778
+ {
7779
+ "epoch": 2.6,
7780
+ "mmlu_eval_accuracy": 0.4699982014237092,
7781
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
7782
+ "mmlu_eval_accuracy_anatomy": 0.5714285714285714,
7783
+ "mmlu_eval_accuracy_astronomy": 0.4375,
7784
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
7785
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5517241379310345,
7786
+ "mmlu_eval_accuracy_college_biology": 0.375,
7787
+ "mmlu_eval_accuracy_college_chemistry": 0.375,
7788
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
7789
+ "mmlu_eval_accuracy_college_mathematics": 0.09090909090909091,
7790
+ "mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
7791
+ "mmlu_eval_accuracy_college_physics": 0.2727272727272727,
7792
+ "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
7793
+ "mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
7794
+ "mmlu_eval_accuracy_econometrics": 0.25,
7795
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
7796
+ "mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
7797
+ "mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
7798
+ "mmlu_eval_accuracy_global_facts": 0.3,
7799
+ "mmlu_eval_accuracy_high_school_biology": 0.375,
7800
+ "mmlu_eval_accuracy_high_school_chemistry": 0.2727272727272727,
7801
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
7802
+ "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
7803
+ "mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
7804
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
7805
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.46511627906976744,
7806
+ "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
7807
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.46153846153846156,
7808
+ "mmlu_eval_accuracy_high_school_physics": 0.058823529411764705,
7809
+ "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
7810
+ "mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
7811
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
7812
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
7813
+ "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
7814
+ "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
7815
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
7816
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
7817
+ "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
7818
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
7819
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
7820
+ "mmlu_eval_accuracy_marketing": 0.68,
7821
+ "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
7822
+ "mmlu_eval_accuracy_miscellaneous": 0.6744186046511628,
7823
+ "mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
7824
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
7825
+ "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
7826
+ "mmlu_eval_accuracy_philosophy": 0.47058823529411764,
7827
+ "mmlu_eval_accuracy_prehistory": 0.5142857142857142,
7828
+ "mmlu_eval_accuracy_professional_accounting": 0.2903225806451613,
7829
+ "mmlu_eval_accuracy_professional_law": 0.3588235294117647,
7830
+ "mmlu_eval_accuracy_professional_medicine": 0.5806451612903226,
7831
+ "mmlu_eval_accuracy_professional_psychology": 0.5072463768115942,
7832
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
7833
+ "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
7834
+ "mmlu_eval_accuracy_sociology": 0.6818181818181818,
7835
+ "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
7836
+ "mmlu_eval_accuracy_virology": 0.5,
7837
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
7838
+ "mmlu_loss": 1.3685555242527248,
7839
+ "step": 8200
7840
+ },
7841
+ {
7842
+ "epoch": 2.6,
7843
+ "learning_rate": 0.0002,
7844
+ "loss": 0.5992,
7845
+ "step": 8210
7846
+ },
7847
+ {
7848
+ "epoch": 2.6,
7849
+ "learning_rate": 0.0002,
7850
+ "loss": 0.6068,
7851
+ "step": 8220
7852
+ },
7853
+ {
7854
+ "epoch": 2.61,
7855
+ "learning_rate": 0.0002,
7856
+ "loss": 0.6986,
7857
+ "step": 8230
7858
+ },
7859
+ {
7860
+ "epoch": 2.61,
7861
+ "learning_rate": 0.0002,
7862
+ "loss": 0.5809,
7863
+ "step": 8240
7864
+ },
7865
+ {
7866
+ "epoch": 2.61,
7867
+ "learning_rate": 0.0002,
7868
+ "loss": 0.6368,
7869
+ "step": 8250
7870
+ },
7871
+ {
7872
+ "epoch": 2.62,
7873
+ "learning_rate": 0.0002,
7874
+ "loss": 0.5731,
7875
+ "step": 8260
7876
+ },
7877
+ {
7878
+ "epoch": 2.62,
7879
+ "learning_rate": 0.0002,
7880
+ "loss": 0.6439,
7881
+ "step": 8270
7882
+ },
7883
+ {
7884
+ "epoch": 2.62,
7885
+ "learning_rate": 0.0002,
7886
+ "loss": 0.5661,
7887
+ "step": 8280
7888
+ },
7889
+ {
7890
+ "epoch": 2.63,
7891
+ "learning_rate": 0.0002,
7892
+ "loss": 0.5816,
7893
+ "step": 8290
7894
+ },
7895
+ {
7896
+ "epoch": 2.63,
7897
+ "learning_rate": 0.0002,
7898
+ "loss": 0.5385,
7899
+ "step": 8300
7900
+ },
7901
+ {
7902
+ "epoch": 2.63,
7903
+ "learning_rate": 0.0002,
7904
+ "loss": 0.5913,
7905
+ "step": 8310
7906
+ },
7907
+ {
7908
+ "epoch": 2.64,
7909
+ "learning_rate": 0.0002,
7910
+ "loss": 0.5817,
7911
+ "step": 8320
7912
+ },
7913
+ {
7914
+ "epoch": 2.64,
7915
+ "learning_rate": 0.0002,
7916
+ "loss": 0.6098,
7917
+ "step": 8330
7918
+ },
7919
+ {
7920
+ "epoch": 2.64,
7921
+ "learning_rate": 0.0002,
7922
+ "loss": 0.558,
7923
+ "step": 8340
7924
+ },
7925
+ {
7926
+ "epoch": 2.65,
7927
+ "learning_rate": 0.0002,
7928
+ "loss": 0.6008,
7929
+ "step": 8350
7930
+ },
7931
+ {
7932
+ "epoch": 2.65,
7933
+ "learning_rate": 0.0002,
7934
+ "loss": 0.5921,
7935
+ "step": 8360
7936
+ },
7937
+ {
7938
+ "epoch": 2.65,
7939
+ "learning_rate": 0.0002,
7940
+ "loss": 0.6194,
7941
+ "step": 8370
7942
+ },
7943
+ {
7944
+ "epoch": 2.66,
7945
+ "learning_rate": 0.0002,
7946
+ "loss": 0.6849,
7947
+ "step": 8380
7948
+ },
7949
+ {
7950
+ "epoch": 2.66,
7951
+ "learning_rate": 0.0002,
7952
+ "loss": 0.5851,
7953
+ "step": 8390
7954
+ },
7955
+ {
7956
+ "epoch": 2.66,
7957
+ "learning_rate": 0.0002,
7958
+ "loss": 0.5574,
7959
+ "step": 8400
7960
+ },
7961
+ {
7962
+ "epoch": 2.66,
7963
+ "eval_loss": 0.7574586868286133,
7964
+ "eval_runtime": 111.0853,
7965
+ "eval_samples_per_second": 9.002,
7966
+ "eval_steps_per_second": 4.501,
7967
+ "step": 8400
7968
+ },
7969
+ {
7970
+ "epoch": 2.66,
7971
+ "mmlu_eval_accuracy": 0.47813110611906134,
7972
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
7973
+ "mmlu_eval_accuracy_anatomy": 0.5,
7974
+ "mmlu_eval_accuracy_astronomy": 0.4375,
7975
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
7976
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5517241379310345,
7977
+ "mmlu_eval_accuracy_college_biology": 0.375,
7978
+ "mmlu_eval_accuracy_college_chemistry": 0.375,
7979
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
7980
+ "mmlu_eval_accuracy_college_mathematics": 0.09090909090909091,
7981
+ "mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
7982
+ "mmlu_eval_accuracy_college_physics": 0.18181818181818182,
7983
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
7984
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
7985
+ "mmlu_eval_accuracy_econometrics": 0.3333333333333333,
7986
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
7987
+ "mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
7988
+ "mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
7989
+ "mmlu_eval_accuracy_global_facts": 0.3,
7990
+ "mmlu_eval_accuracy_high_school_biology": 0.375,
7991
+ "mmlu_eval_accuracy_high_school_chemistry": 0.2727272727272727,
7992
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
7993
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
7994
+ "mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
7995
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
7996
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4883720930232558,
7997
+ "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
7998
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.5384615384615384,
7999
+ "mmlu_eval_accuracy_high_school_physics": 0.058823529411764705,
8000
+ "mmlu_eval_accuracy_high_school_psychology": 0.8833333333333333,
8001
+ "mmlu_eval_accuracy_high_school_statistics": 0.391304347826087,
8002
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
8003
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
8004
+ "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
8005
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
8006
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
8007
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
8008
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
8009
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
8010
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
8011
+ "mmlu_eval_accuracy_marketing": 0.76,
8012
+ "mmlu_eval_accuracy_medical_genetics": 0.8181818181818182,
8013
+ "mmlu_eval_accuracy_miscellaneous": 0.6395348837209303,
8014
+ "mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
8015
+ "mmlu_eval_accuracy_moral_scenarios": 0.25,
8016
+ "mmlu_eval_accuracy_nutrition": 0.6363636363636364,
8017
+ "mmlu_eval_accuracy_philosophy": 0.47058823529411764,
8018
+ "mmlu_eval_accuracy_prehistory": 0.5142857142857142,
8019
+ "mmlu_eval_accuracy_professional_accounting": 0.3225806451612903,
8020
+ "mmlu_eval_accuracy_professional_law": 0.3588235294117647,
8021
+ "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
8022
+ "mmlu_eval_accuracy_professional_psychology": 0.5072463768115942,
8023
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
8024
+ "mmlu_eval_accuracy_security_studies": 0.4074074074074074,
8025
+ "mmlu_eval_accuracy_sociology": 0.6818181818181818,
8026
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
8027
+ "mmlu_eval_accuracy_virology": 0.5,
8028
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
8029
+ "mmlu_loss": 1.4071070384574622,
8030
+ "step": 8400
8031
+ },
8032
+ {
8033
+ "epoch": 2.66,
8034
+ "learning_rate": 0.0002,
8035
+ "loss": 0.5795,
8036
+ "step": 8410
8037
+ },
8038
+ {
8039
+ "epoch": 2.67,
8040
+ "learning_rate": 0.0002,
8041
+ "loss": 0.6109,
8042
+ "step": 8420
8043
+ },
8044
+ {
8045
+ "epoch": 2.67,
8046
+ "learning_rate": 0.0002,
8047
+ "loss": 0.6136,
8048
+ "step": 8430
8049
+ },
8050
+ {
8051
+ "epoch": 2.67,
8052
+ "learning_rate": 0.0002,
8053
+ "loss": 0.5795,
8054
+ "step": 8440
8055
+ },
8056
+ {
8057
+ "epoch": 2.68,
8058
+ "learning_rate": 0.0002,
8059
+ "loss": 0.5639,
8060
+ "step": 8450
8061
+ },
8062
+ {
8063
+ "epoch": 2.68,
8064
+ "learning_rate": 0.0002,
8065
+ "loss": 0.5869,
8066
+ "step": 8460
8067
+ },
8068
+ {
8069
+ "epoch": 2.68,
8070
+ "learning_rate": 0.0002,
8071
+ "loss": 0.5946,
8072
+ "step": 8470
8073
+ },
8074
+ {
8075
+ "epoch": 2.69,
8076
+ "learning_rate": 0.0002,
8077
+ "loss": 0.5745,
8078
+ "step": 8480
8079
+ },
8080
+ {
8081
+ "epoch": 2.69,
8082
+ "learning_rate": 0.0002,
8083
+ "loss": 0.573,
8084
+ "step": 8490
8085
+ },
8086
+ {
8087
+ "epoch": 2.69,
8088
+ "learning_rate": 0.0002,
8089
+ "loss": 0.5846,
8090
+ "step": 8500
8091
+ },
8092
+ {
8093
+ "epoch": 2.7,
8094
+ "learning_rate": 0.0002,
8095
+ "loss": 0.6058,
8096
+ "step": 8510
8097
+ },
8098
+ {
8099
+ "epoch": 2.7,
8100
+ "learning_rate": 0.0002,
8101
+ "loss": 0.5072,
8102
+ "step": 8520
8103
+ },
8104
+ {
8105
+ "epoch": 2.7,
8106
+ "learning_rate": 0.0002,
8107
+ "loss": 0.6296,
8108
+ "step": 8530
8109
+ },
8110
+ {
8111
+ "epoch": 2.71,
8112
+ "learning_rate": 0.0002,
8113
+ "loss": 0.6057,
8114
+ "step": 8540
8115
+ },
8116
+ {
8117
+ "epoch": 2.71,
8118
+ "learning_rate": 0.0002,
8119
+ "loss": 0.544,
8120
+ "step": 8550
8121
+ },
8122
+ {
8123
+ "epoch": 2.71,
8124
+ "learning_rate": 0.0002,
8125
+ "loss": 0.6256,
8126
+ "step": 8560
8127
+ },
8128
+ {
8129
+ "epoch": 2.72,
8130
+ "learning_rate": 0.0002,
8131
+ "loss": 0.6307,
8132
+ "step": 8570
8133
+ },
8134
+ {
8135
+ "epoch": 2.72,
8136
+ "learning_rate": 0.0002,
8137
+ "loss": 0.5717,
8138
+ "step": 8580
8139
+ },
8140
+ {
8141
+ "epoch": 2.72,
8142
+ "learning_rate": 0.0002,
8143
+ "loss": 0.5946,
8144
+ "step": 8590
8145
+ },
8146
+ {
8147
+ "epoch": 2.72,
8148
+ "learning_rate": 0.0002,
8149
+ "loss": 0.6025,
8150
+ "step": 8600
8151
+ },
8152
+ {
8153
+ "epoch": 2.72,
8154
+ "eval_loss": 0.7557567358016968,
8155
+ "eval_runtime": 111.5506,
8156
+ "eval_samples_per_second": 8.965,
8157
+ "eval_steps_per_second": 4.482,
8158
+ "step": 8600
8159
+ },
8160
+ {
8161
+ "epoch": 2.72,
8162
+ "mmlu_eval_accuracy": 0.4768331170577644,
8163
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
8164
+ "mmlu_eval_accuracy_anatomy": 0.5714285714285714,
8165
+ "mmlu_eval_accuracy_astronomy": 0.4375,
8166
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
8167
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5862068965517241,
8168
+ "mmlu_eval_accuracy_college_biology": 0.375,
8169
+ "mmlu_eval_accuracy_college_chemistry": 0.375,
8170
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
8171
+ "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
8172
+ "mmlu_eval_accuracy_college_medicine": 0.5,
8173
+ "mmlu_eval_accuracy_college_physics": 0.18181818181818182,
8174
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
8175
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
8176
+ "mmlu_eval_accuracy_econometrics": 0.3333333333333333,
8177
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
8178
+ "mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
8179
+ "mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
8180
+ "mmlu_eval_accuracy_global_facts": 0.3,
8181
+ "mmlu_eval_accuracy_high_school_biology": 0.40625,
8182
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
8183
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
8184
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
8185
+ "mmlu_eval_accuracy_high_school_geography": 0.7727272727272727,
8186
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
8187
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4883720930232558,
8188
+ "mmlu_eval_accuracy_high_school_mathematics": 0.1724137931034483,
8189
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.5,
8190
+ "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
8191
+ "mmlu_eval_accuracy_high_school_psychology": 0.9,
8192
+ "mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
8193
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
8194
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
8195
+ "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
8196
+ "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
8197
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
8198
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
8199
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
8200
+ "mmlu_eval_accuracy_machine_learning": 0.09090909090909091,
8201
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
8202
+ "mmlu_eval_accuracy_marketing": 0.76,
8203
+ "mmlu_eval_accuracy_medical_genetics": 0.8181818181818182,
8204
+ "mmlu_eval_accuracy_miscellaneous": 0.6395348837209303,
8205
+ "mmlu_eval_accuracy_moral_disputes": 0.5263157894736842,
8206
+ "mmlu_eval_accuracy_moral_scenarios": 0.23,
8207
+ "mmlu_eval_accuracy_nutrition": 0.6363636363636364,
8208
+ "mmlu_eval_accuracy_philosophy": 0.47058823529411764,
8209
+ "mmlu_eval_accuracy_prehistory": 0.5428571428571428,
8210
+ "mmlu_eval_accuracy_professional_accounting": 0.2903225806451613,
8211
+ "mmlu_eval_accuracy_professional_law": 0.3352941176470588,
8212
+ "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
8213
+ "mmlu_eval_accuracy_professional_psychology": 0.4782608695652174,
8214
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
8215
+ "mmlu_eval_accuracy_security_studies": 0.4074074074074074,
8216
+ "mmlu_eval_accuracy_sociology": 0.6363636363636364,
8217
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
8218
+ "mmlu_eval_accuracy_virology": 0.5,
8219
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
8220
+ "mmlu_loss": 1.4071306704152036,
8221
+ "step": 8600
8222
+ },
8223
+ {
8224
+ "epoch": 2.73,
8225
+ "learning_rate": 0.0002,
8226
+ "loss": 0.6607,
8227
+ "step": 8610
8228
+ },
8229
+ {
8230
+ "epoch": 2.73,
8231
+ "learning_rate": 0.0002,
8232
+ "loss": 0.6087,
8233
+ "step": 8620
8234
+ },
8235
+ {
8236
+ "epoch": 2.73,
8237
+ "learning_rate": 0.0002,
8238
+ "loss": 0.6077,
8239
+ "step": 8630
8240
+ },
8241
+ {
8242
+ "epoch": 2.74,
8243
+ "learning_rate": 0.0002,
8244
+ "loss": 0.5746,
8245
+ "step": 8640
8246
+ },
8247
+ {
8248
+ "epoch": 2.74,
8249
+ "learning_rate": 0.0002,
8250
+ "loss": 0.6372,
8251
+ "step": 8650
8252
+ },
8253
+ {
8254
+ "epoch": 2.74,
8255
+ "learning_rate": 0.0002,
8256
+ "loss": 0.603,
8257
+ "step": 8660
8258
+ },
8259
+ {
8260
+ "epoch": 2.75,
8261
+ "learning_rate": 0.0002,
8262
+ "loss": 0.5913,
8263
+ "step": 8670
8264
+ },
8265
+ {
8266
+ "epoch": 2.75,
8267
+ "learning_rate": 0.0002,
8268
+ "loss": 0.664,
8269
+ "step": 8680
8270
+ },
8271
+ {
8272
+ "epoch": 2.75,
8273
+ "learning_rate": 0.0002,
8274
+ "loss": 0.5766,
8275
+ "step": 8690
8276
+ },
8277
+ {
8278
+ "epoch": 2.76,
8279
+ "learning_rate": 0.0002,
8280
+ "loss": 0.6316,
8281
+ "step": 8700
8282
+ },
8283
+ {
8284
+ "epoch": 2.76,
8285
+ "learning_rate": 0.0002,
8286
+ "loss": 0.5913,
8287
+ "step": 8710
8288
+ },
8289
+ {
8290
+ "epoch": 2.76,
8291
+ "learning_rate": 0.0002,
8292
+ "loss": 0.5974,
8293
+ "step": 8720
8294
+ },
8295
+ {
8296
+ "epoch": 2.77,
8297
+ "learning_rate": 0.0002,
8298
+ "loss": 0.6519,
8299
+ "step": 8730
8300
+ },
8301
+ {
8302
+ "epoch": 2.77,
8303
+ "learning_rate": 0.0002,
8304
+ "loss": 0.6502,
8305
+ "step": 8740
8306
+ },
8307
+ {
8308
+ "epoch": 2.77,
8309
+ "learning_rate": 0.0002,
8310
+ "loss": 0.6069,
8311
+ "step": 8750
8312
+ },
8313
+ {
8314
+ "epoch": 2.78,
8315
+ "learning_rate": 0.0002,
8316
+ "loss": 0.6073,
8317
+ "step": 8760
8318
+ },
8319
+ {
8320
+ "epoch": 2.78,
8321
+ "learning_rate": 0.0002,
8322
+ "loss": 0.5314,
8323
+ "step": 8770
8324
+ },
8325
+ {
8326
+ "epoch": 2.78,
8327
+ "learning_rate": 0.0002,
8328
+ "loss": 0.6515,
8329
+ "step": 8780
8330
+ },
8331
+ {
8332
+ "epoch": 2.79,
8333
+ "learning_rate": 0.0002,
8334
+ "loss": 0.6515,
8335
+ "step": 8790
8336
+ },
8337
+ {
8338
+ "epoch": 2.79,
8339
+ "learning_rate": 0.0002,
8340
+ "loss": 0.6188,
8341
+ "step": 8800
8342
+ },
8343
+ {
8344
+ "epoch": 2.79,
8345
+ "eval_loss": 0.7575909495353699,
8346
+ "eval_runtime": 111.2733,
8347
+ "eval_samples_per_second": 8.987,
8348
+ "eval_steps_per_second": 4.493,
8349
+ "step": 8800
8350
+ },
8351
+ {
8352
+ "epoch": 2.79,
8353
+ "mmlu_eval_accuracy": 0.489029874622582,
8354
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
8355
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
8356
+ "mmlu_eval_accuracy_astronomy": 0.375,
8357
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
8358
+ "mmlu_eval_accuracy_clinical_knowledge": 0.6206896551724138,
8359
+ "mmlu_eval_accuracy_college_biology": 0.375,
8360
+ "mmlu_eval_accuracy_college_chemistry": 0.375,
8361
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
8362
+ "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
8363
+ "mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
8364
+ "mmlu_eval_accuracy_college_physics": 0.2727272727272727,
8365
+ "mmlu_eval_accuracy_computer_security": 0.5454545454545454,
8366
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
8367
+ "mmlu_eval_accuracy_econometrics": 0.25,
8368
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
8369
+ "mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
8370
+ "mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
8371
+ "mmlu_eval_accuracy_global_facts": 0.5,
8372
+ "mmlu_eval_accuracy_high_school_biology": 0.40625,
8373
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
8374
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
8375
+ "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
8376
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
8377
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
8378
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.5116279069767442,
8379
+ "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
8380
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231,
8381
+ "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
8382
+ "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
8383
+ "mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
8384
+ "mmlu_eval_accuracy_high_school_us_history": 0.6818181818181818,
8385
+ "mmlu_eval_accuracy_high_school_world_history": 0.6538461538461539,
8386
+ "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
8387
+ "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
8388
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
8389
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
8390
+ "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
8391
+ "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
8392
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
8393
+ "mmlu_eval_accuracy_marketing": 0.68,
8394
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
8395
+ "mmlu_eval_accuracy_miscellaneous": 0.6395348837209303,
8396
+ "mmlu_eval_accuracy_moral_disputes": 0.5,
8397
+ "mmlu_eval_accuracy_moral_scenarios": 0.27,
8398
+ "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
8399
+ "mmlu_eval_accuracy_philosophy": 0.5,
8400
+ "mmlu_eval_accuracy_prehistory": 0.6285714285714286,
8401
+ "mmlu_eval_accuracy_professional_accounting": 0.3548387096774194,
8402
+ "mmlu_eval_accuracy_professional_law": 0.3588235294117647,
8403
+ "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
8404
+ "mmlu_eval_accuracy_professional_psychology": 0.4927536231884058,
8405
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
8406
+ "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
8407
+ "mmlu_eval_accuracy_sociology": 0.6363636363636364,
8408
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
8409
+ "mmlu_eval_accuracy_virology": 0.5,
8410
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
8411
+ "mmlu_loss": 1.3448165364505105,
8412
+ "step": 8800
8413
+ },
8414
+ {
8415
+ "epoch": 2.79,
8416
+ "learning_rate": 0.0002,
8417
+ "loss": 0.574,
8418
+ "step": 8810
8419
+ },
8420
+ {
8421
+ "epoch": 2.79,
8422
+ "learning_rate": 0.0002,
8423
+ "loss": 0.5965,
8424
+ "step": 8820
8425
+ },
8426
+ {
8427
+ "epoch": 2.8,
8428
+ "learning_rate": 0.0002,
8429
+ "loss": 0.5669,
8430
+ "step": 8830
8431
+ },
8432
+ {
8433
+ "epoch": 2.8,
8434
+ "learning_rate": 0.0002,
8435
+ "loss": 0.6088,
8436
+ "step": 8840
8437
+ },
8438
+ {
8439
+ "epoch": 2.8,
8440
+ "learning_rate": 0.0002,
8441
+ "loss": 0.542,
8442
+ "step": 8850
8443
+ },
8444
+ {
8445
+ "epoch": 2.81,
8446
+ "learning_rate": 0.0002,
8447
+ "loss": 0.5791,
8448
+ "step": 8860
8449
+ },
8450
+ {
8451
+ "epoch": 2.81,
8452
+ "learning_rate": 0.0002,
8453
+ "loss": 0.5663,
8454
+ "step": 8870
8455
+ },
8456
+ {
8457
+ "epoch": 2.81,
8458
+ "learning_rate": 0.0002,
8459
+ "loss": 0.6236,
8460
+ "step": 8880
8461
+ },
8462
+ {
8463
+ "epoch": 2.82,
8464
+ "learning_rate": 0.0002,
8465
+ "loss": 0.5634,
8466
+ "step": 8890
8467
+ },
8468
+ {
8469
+ "epoch": 2.82,
8470
+ "learning_rate": 0.0002,
8471
+ "loss": 0.5925,
8472
+ "step": 8900
8473
+ },
8474
+ {
8475
+ "epoch": 2.82,
8476
+ "learning_rate": 0.0002,
8477
+ "loss": 0.6357,
8478
+ "step": 8910
8479
+ },
8480
+ {
8481
+ "epoch": 2.83,
8482
+ "learning_rate": 0.0002,
8483
+ "loss": 0.5965,
8484
+ "step": 8920
8485
+ },
8486
+ {
8487
+ "epoch": 2.83,
8488
+ "learning_rate": 0.0002,
8489
+ "loss": 0.6071,
8490
+ "step": 8930
8491
+ },
8492
+ {
8493
+ "epoch": 2.83,
8494
+ "learning_rate": 0.0002,
8495
+ "loss": 0.6561,
8496
+ "step": 8940
8497
+ },
8498
+ {
8499
+ "epoch": 2.84,
8500
+ "learning_rate": 0.0002,
8501
+ "loss": 0.556,
8502
+ "step": 8950
8503
+ },
8504
+ {
8505
+ "epoch": 2.84,
8506
+ "learning_rate": 0.0002,
8507
+ "loss": 0.6129,
8508
+ "step": 8960
8509
+ },
8510
+ {
8511
+ "epoch": 2.84,
8512
+ "learning_rate": 0.0002,
8513
+ "loss": 0.535,
8514
+ "step": 8970
8515
+ },
8516
+ {
8517
+ "epoch": 2.85,
8518
+ "learning_rate": 0.0002,
8519
+ "loss": 0.6206,
8520
+ "step": 8980
8521
+ },
8522
+ {
8523
+ "epoch": 2.85,
8524
+ "learning_rate": 0.0002,
8525
+ "loss": 0.571,
8526
+ "step": 8990
8527
+ },
8528
+ {
8529
+ "epoch": 2.85,
8530
+ "learning_rate": 0.0002,
8531
+ "loss": 0.6166,
8532
+ "step": 9000
8533
+ },
8534
+ {
8535
+ "epoch": 2.85,
8536
+ "eval_loss": 0.7525665163993835,
8537
+ "eval_runtime": 111.1851,
8538
+ "eval_samples_per_second": 8.994,
8539
+ "eval_steps_per_second": 4.497,
8540
+ "step": 9000
8541
+ },
8542
+ {
8543
+ "epoch": 2.85,
8544
+ "mmlu_eval_accuracy": 0.485164659462464,
8545
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
8546
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
8547
+ "mmlu_eval_accuracy_astronomy": 0.375,
8548
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
8549
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5517241379310345,
8550
+ "mmlu_eval_accuracy_college_biology": 0.375,
8551
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
8552
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
8553
+ "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
8554
+ "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
8555
+ "mmlu_eval_accuracy_college_physics": 0.18181818181818182,
8556
+ "mmlu_eval_accuracy_computer_security": 0.5454545454545454,
8557
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
8558
+ "mmlu_eval_accuracy_econometrics": 0.25,
8559
+ "mmlu_eval_accuracy_electrical_engineering": 0.3125,
8560
+ "mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
8561
+ "mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
8562
+ "mmlu_eval_accuracy_global_facts": 0.5,
8563
+ "mmlu_eval_accuracy_high_school_biology": 0.375,
8564
+ "mmlu_eval_accuracy_high_school_chemistry": 0.2727272727272727,
8565
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
8566
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
8567
+ "mmlu_eval_accuracy_high_school_geography": 0.7272727272727273,
8568
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.5714285714285714,
8569
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.5116279069767442,
8570
+ "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
8571
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.5384615384615384,
8572
+ "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
8573
+ "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
8574
+ "mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
8575
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
8576
+ "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
8577
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
8578
+ "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
8579
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
8580
+ "mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
8581
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
8582
+ "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
8583
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
8584
+ "mmlu_eval_accuracy_marketing": 0.64,
8585
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
8586
+ "mmlu_eval_accuracy_miscellaneous": 0.686046511627907,
8587
+ "mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
8588
+ "mmlu_eval_accuracy_moral_scenarios": 0.23,
8589
+ "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
8590
+ "mmlu_eval_accuracy_philosophy": 0.5,
8591
+ "mmlu_eval_accuracy_prehistory": 0.5714285714285714,
8592
+ "mmlu_eval_accuracy_professional_accounting": 0.3548387096774194,
8593
+ "mmlu_eval_accuracy_professional_law": 0.32941176470588235,
8594
+ "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
8595
+ "mmlu_eval_accuracy_professional_psychology": 0.4782608695652174,
8596
+ "mmlu_eval_accuracy_public_relations": 0.5,
8597
+ "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
8598
+ "mmlu_eval_accuracy_sociology": 0.6818181818181818,
8599
+ "mmlu_eval_accuracy_us_foreign_policy": 0.8181818181818182,
8600
+ "mmlu_eval_accuracy_virology": 0.5,
8601
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
8602
+ "mmlu_loss": 1.3841694026952,
8603
+ "step": 9000
8604
+ },
8605
+ {
8606
+ "epoch": 2.85,
8607
+ "learning_rate": 0.0002,
8608
+ "loss": 0.553,
8609
+ "step": 9010
8610
+ },
8611
+ {
8612
+ "epoch": 2.86,
8613
+ "learning_rate": 0.0002,
8614
+ "loss": 0.572,
8615
+ "step": 9020
8616
+ },
8617
+ {
8618
+ "epoch": 2.86,
8619
+ "learning_rate": 0.0002,
8620
+ "loss": 0.6215,
8621
+ "step": 9030
8622
+ },
8623
+ {
8624
+ "epoch": 2.86,
8625
+ "learning_rate": 0.0002,
8626
+ "loss": 0.545,
8627
+ "step": 9040
8628
+ },
8629
+ {
8630
+ "epoch": 2.87,
8631
+ "learning_rate": 0.0002,
8632
+ "loss": 0.6241,
8633
+ "step": 9050
8634
+ },
8635
+ {
8636
+ "epoch": 2.87,
8637
+ "learning_rate": 0.0002,
8638
+ "loss": 0.5556,
8639
+ "step": 9060
8640
+ },
8641
+ {
8642
+ "epoch": 2.87,
8643
+ "learning_rate": 0.0002,
8644
+ "loss": 0.5995,
8645
+ "step": 9070
8646
+ },
8647
+ {
8648
+ "epoch": 2.88,
8649
+ "learning_rate": 0.0002,
8650
+ "loss": 0.6454,
8651
+ "step": 9080
8652
+ },
8653
+ {
8654
+ "epoch": 2.88,
8655
+ "learning_rate": 0.0002,
8656
+ "loss": 0.619,
8657
+ "step": 9090
8658
+ },
8659
+ {
8660
+ "epoch": 2.88,
8661
+ "learning_rate": 0.0002,
8662
+ "loss": 0.5746,
8663
+ "step": 9100
8664
+ },
8665
+ {
8666
+ "epoch": 2.89,
8667
+ "learning_rate": 0.0002,
8668
+ "loss": 0.6706,
8669
+ "step": 9110
8670
+ },
8671
+ {
8672
+ "epoch": 2.89,
8673
+ "learning_rate": 0.0002,
8674
+ "loss": 0.631,
8675
+ "step": 9120
8676
+ },
8677
+ {
8678
+ "epoch": 2.89,
8679
+ "learning_rate": 0.0002,
8680
+ "loss": 0.6514,
8681
+ "step": 9130
8682
+ },
8683
+ {
8684
+ "epoch": 2.9,
8685
+ "learning_rate": 0.0002,
8686
+ "loss": 0.6435,
8687
+ "step": 9140
8688
+ },
8689
+ {
8690
+ "epoch": 2.9,
8691
+ "learning_rate": 0.0002,
8692
+ "loss": 0.6059,
8693
+ "step": 9150
8694
+ },
8695
+ {
8696
+ "epoch": 2.9,
8697
+ "learning_rate": 0.0002,
8698
+ "loss": 0.5627,
8699
+ "step": 9160
8700
+ },
8701
+ {
8702
+ "epoch": 2.91,
8703
+ "learning_rate": 0.0002,
8704
+ "loss": 0.5996,
8705
+ "step": 9170
8706
+ },
8707
+ {
8708
+ "epoch": 2.91,
8709
+ "learning_rate": 0.0002,
8710
+ "loss": 0.73,
8711
+ "step": 9180
8712
+ },
8713
+ {
8714
+ "epoch": 2.91,
8715
+ "learning_rate": 0.0002,
8716
+ "loss": 0.6054,
8717
+ "step": 9190
8718
+ },
8719
+ {
8720
+ "epoch": 2.92,
8721
+ "learning_rate": 0.0002,
8722
+ "loss": 0.6257,
8723
+ "step": 9200
8724
+ },
8725
+ {
8726
+ "epoch": 2.92,
8727
+ "eval_loss": 0.7559297680854797,
8728
+ "eval_runtime": 111.3501,
8729
+ "eval_samples_per_second": 8.981,
8730
+ "eval_steps_per_second": 4.49,
8731
+ "step": 9200
8732
+ },
8733
+ {
8734
+ "epoch": 2.92,
8735
+ "mmlu_eval_accuracy": 0.46939903146057577,
8736
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
8737
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
8738
+ "mmlu_eval_accuracy_astronomy": 0.4375,
8739
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
8740
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
8741
+ "mmlu_eval_accuracy_college_biology": 0.375,
8742
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
8743
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
8744
+ "mmlu_eval_accuracy_college_mathematics": 0.09090909090909091,
8745
+ "mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
8746
+ "mmlu_eval_accuracy_college_physics": 0.18181818181818182,
8747
+ "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
8748
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
8749
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
8750
+ "mmlu_eval_accuracy_electrical_engineering": 0.25,
8751
+ "mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
8752
+ "mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
8753
+ "mmlu_eval_accuracy_global_facts": 0.3,
8754
+ "mmlu_eval_accuracy_high_school_biology": 0.375,
8755
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
8756
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
8757
+ "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
8758
+ "mmlu_eval_accuracy_high_school_geography": 0.7272727272727273,
8759
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
8760
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.46511627906976744,
8761
+ "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
8762
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231,
8763
+ "mmlu_eval_accuracy_high_school_physics": 0.058823529411764705,
8764
+ "mmlu_eval_accuracy_high_school_psychology": 0.8333333333333334,
8765
+ "mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
8766
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
8767
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
8768
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
8769
+ "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
8770
+ "mmlu_eval_accuracy_international_law": 0.8461538461538461,
8771
+ "mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
8772
+ "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
8773
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
8774
+ "mmlu_eval_accuracy_management": 0.7272727272727273,
8775
+ "mmlu_eval_accuracy_marketing": 0.72,
8776
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
8777
+ "mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
8778
+ "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
8779
+ "mmlu_eval_accuracy_moral_scenarios": 0.27,
8780
+ "mmlu_eval_accuracy_nutrition": 0.5757575757575758,
8781
+ "mmlu_eval_accuracy_philosophy": 0.5588235294117647,
8782
+ "mmlu_eval_accuracy_prehistory": 0.5714285714285714,
8783
+ "mmlu_eval_accuracy_professional_accounting": 0.3548387096774194,
8784
+ "mmlu_eval_accuracy_professional_law": 0.3176470588235294,
8785
+ "mmlu_eval_accuracy_professional_medicine": 0.5806451612903226,
8786
+ "mmlu_eval_accuracy_professional_psychology": 0.4927536231884058,
8787
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
8788
+ "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
8789
+ "mmlu_eval_accuracy_sociology": 0.6363636363636364,
8790
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
8791
+ "mmlu_eval_accuracy_virology": 0.5,
8792
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
8793
+ "mmlu_loss": 1.4868696913628914,
8794
+ "step": 9200
8795
+ },
8796
+ {
8797
+ "epoch": 2.92,
8798
+ "learning_rate": 0.0002,
8799
+ "loss": 0.6179,
8800
+ "step": 9210
8801
+ },
8802
+ {
8803
+ "epoch": 2.92,
8804
+ "learning_rate": 0.0002,
8805
+ "loss": 0.5577,
8806
+ "step": 9220
8807
+ },
8808
+ {
8809
+ "epoch": 2.92,
8810
+ "learning_rate": 0.0002,
8811
+ "loss": 0.6043,
8812
+ "step": 9230
8813
+ },
8814
+ {
8815
+ "epoch": 2.93,
8816
+ "learning_rate": 0.0002,
8817
+ "loss": 0.6509,
8818
+ "step": 9240
8819
+ },
8820
+ {
8821
+ "epoch": 2.93,
8822
+ "learning_rate": 0.0002,
8823
+ "loss": 0.6184,
8824
+ "step": 9250
8825
+ },
8826
+ {
8827
+ "epoch": 2.93,
8828
+ "learning_rate": 0.0002,
8829
+ "loss": 0.6402,
8830
+ "step": 9260
8831
+ },
8832
+ {
8833
+ "epoch": 2.94,
8834
+ "learning_rate": 0.0002,
8835
+ "loss": 0.6614,
8836
+ "step": 9270
8837
+ },
8838
+ {
8839
+ "epoch": 2.94,
8840
+ "learning_rate": 0.0002,
8841
+ "loss": 0.6394,
8842
+ "step": 9280
8843
+ },
8844
+ {
8845
+ "epoch": 2.94,
8846
+ "learning_rate": 0.0002,
8847
+ "loss": 0.6085,
8848
+ "step": 9290
8849
+ },
8850
+ {
8851
+ "epoch": 2.95,
8852
+ "learning_rate": 0.0002,
8853
+ "loss": 0.6297,
8854
+ "step": 9300
8855
+ },
8856
+ {
8857
+ "epoch": 2.95,
8858
+ "learning_rate": 0.0002,
8859
+ "loss": 0.6099,
8860
+ "step": 9310
8861
+ },
8862
+ {
8863
+ "epoch": 2.95,
8864
+ "learning_rate": 0.0002,
8865
+ "loss": 0.6259,
8866
+ "step": 9320
8867
+ },
8868
+ {
8869
+ "epoch": 2.96,
8870
+ "learning_rate": 0.0002,
8871
+ "loss": 0.6226,
8872
+ "step": 9330
8873
+ },
8874
+ {
8875
+ "epoch": 2.96,
8876
+ "learning_rate": 0.0002,
8877
+ "loss": 0.6528,
8878
+ "step": 9340
8879
+ },
8880
+ {
8881
+ "epoch": 2.96,
8882
+ "learning_rate": 0.0002,
8883
+ "loss": 0.6388,
8884
+ "step": 9350
8885
+ },
8886
+ {
8887
+ "epoch": 2.97,
8888
+ "learning_rate": 0.0002,
8889
+ "loss": 0.6374,
8890
+ "step": 9360
8891
+ },
8892
+ {
8893
+ "epoch": 2.97,
8894
+ "learning_rate": 0.0002,
8895
+ "loss": 0.6096,
8896
+ "step": 9370
8897
+ },
8898
+ {
8899
+ "epoch": 2.97,
8900
+ "learning_rate": 0.0002,
8901
+ "loss": 0.598,
8902
+ "step": 9380
8903
+ },
8904
+ {
8905
+ "epoch": 2.98,
8906
+ "learning_rate": 0.0002,
8907
+ "loss": 0.5925,
8908
+ "step": 9390
8909
+ },
8910
+ {
8911
+ "epoch": 2.98,
8912
+ "learning_rate": 0.0002,
8913
+ "loss": 0.6067,
8914
+ "step": 9400
8915
+ },
8916
+ {
8917
+ "epoch": 2.98,
8918
+ "eval_loss": 0.754828929901123,
8919
+ "eval_runtime": 111.4146,
8920
+ "eval_samples_per_second": 8.975,
8921
+ "eval_steps_per_second": 4.488,
8922
+ "step": 9400
8923
+ },
8924
+ {
8925
+ "epoch": 2.98,
8926
+ "mmlu_eval_accuracy": 0.4664813810860653,
8927
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
8928
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
8929
+ "mmlu_eval_accuracy_astronomy": 0.4375,
8930
+ "mmlu_eval_accuracy_business_ethics": 0.6363636363636364,
8931
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
8932
+ "mmlu_eval_accuracy_college_biology": 0.375,
8933
+ "mmlu_eval_accuracy_college_chemistry": 0.25,
8934
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
8935
+ "mmlu_eval_accuracy_college_mathematics": 0.09090909090909091,
8936
+ "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
8937
+ "mmlu_eval_accuracy_college_physics": 0.18181818181818182,
8938
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
8939
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
8940
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
8941
+ "mmlu_eval_accuracy_electrical_engineering": 0.3125,
8942
+ "mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
8943
+ "mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
8944
+ "mmlu_eval_accuracy_global_facts": 0.5,
8945
+ "mmlu_eval_accuracy_high_school_biology": 0.375,
8946
+ "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
8947
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
8948
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
8949
+ "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
8950
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.5714285714285714,
8951
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4883720930232558,
8952
+ "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
8953
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.5,
8954
+ "mmlu_eval_accuracy_high_school_physics": 0.058823529411764705,
8955
+ "mmlu_eval_accuracy_high_school_psychology": 0.85,
8956
+ "mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
8957
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
8958
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
8959
+ "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
8960
+ "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
8961
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
8962
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
8963
+ "mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556,
8964
+ "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
8965
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
8966
+ "mmlu_eval_accuracy_marketing": 0.68,
8967
+ "mmlu_eval_accuracy_medical_genetics": 0.8181818181818182,
8968
+ "mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
8969
+ "mmlu_eval_accuracy_moral_disputes": 0.5526315789473685,
8970
+ "mmlu_eval_accuracy_moral_scenarios": 0.27,
8971
+ "mmlu_eval_accuracy_nutrition": 0.5757575757575758,
8972
+ "mmlu_eval_accuracy_philosophy": 0.5,
8973
+ "mmlu_eval_accuracy_prehistory": 0.5142857142857142,
8974
+ "mmlu_eval_accuracy_professional_accounting": 0.3548387096774194,
8975
+ "mmlu_eval_accuracy_professional_law": 0.31176470588235294,
8976
+ "mmlu_eval_accuracy_professional_medicine": 0.5161290322580645,
8977
+ "mmlu_eval_accuracy_professional_psychology": 0.4927536231884058,
8978
+ "mmlu_eval_accuracy_public_relations": 0.5,
8979
+ "mmlu_eval_accuracy_security_studies": 0.4074074074074074,
8980
+ "mmlu_eval_accuracy_sociology": 0.6363636363636364,
8981
+ "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
8982
+ "mmlu_eval_accuracy_virology": 0.4444444444444444,
8983
+ "mmlu_eval_accuracy_world_religions": 0.631578947368421,
8984
+ "mmlu_loss": 1.263844348665317,
8985
+ "step": 9400
8986
+ },
8987
+ {
8988
+ "epoch": 2.98,
8989
+ "learning_rate": 0.0002,
8990
+ "loss": 0.6571,
8991
+ "step": 9410
8992
+ },
8993
+ {
8994
+ "epoch": 2.98,
8995
+ "learning_rate": 0.0002,
8996
+ "loss": 0.6461,
8997
+ "step": 9420
8998
+ },
8999
+ {
9000
+ "epoch": 2.99,
9001
+ "learning_rate": 0.0002,
9002
+ "loss": 0.5997,
9003
+ "step": 9430
9004
+ },
9005
+ {
9006
+ "epoch": 2.99,
9007
+ "learning_rate": 0.0002,
9008
+ "loss": 0.5978,
9009
+ "step": 9440
9010
+ },
9011
+ {
9012
+ "epoch": 2.99,
9013
+ "learning_rate": 0.0002,
9014
+ "loss": 0.6302,
9015
+ "step": 9450
9016
+ },
9017
+ {
9018
+ "epoch": 3.0,
9019
+ "learning_rate": 0.0002,
9020
+ "loss": 0.5653,
9021
+ "step": 9460
9022
+ },
9023
+ {
9024
+ "epoch": 3.0,
9025
+ "learning_rate": 0.0002,
9026
+ "loss": 0.6311,
9027
+ "step": 9470
9028
+ },
9029
+ {
9030
+ "epoch": 3.0,
9031
+ "learning_rate": 0.0002,
9032
+ "loss": 0.5036,
9033
+ "step": 9480
9034
+ },
9035
+ {
9036
+ "epoch": 3.01,
9037
+ "learning_rate": 0.0002,
9038
+ "loss": 0.4517,
9039
+ "step": 9490
9040
+ },
9041
+ {
9042
+ "epoch": 3.01,
9043
+ "learning_rate": 0.0002,
9044
+ "loss": 0.4403,
9045
+ "step": 9500
9046
+ },
9047
+ {
9048
+ "epoch": 3.01,
9049
+ "learning_rate": 0.0002,
9050
+ "loss": 0.4921,
9051
+ "step": 9510
9052
+ },
9053
+ {
9054
+ "epoch": 3.02,
9055
+ "learning_rate": 0.0002,
9056
+ "loss": 0.464,
9057
+ "step": 9520
9058
+ },
9059
+ {
9060
+ "epoch": 3.02,
9061
+ "learning_rate": 0.0002,
9062
+ "loss": 0.4751,
9063
+ "step": 9530
9064
+ },
9065
+ {
9066
+ "epoch": 3.02,
9067
+ "learning_rate": 0.0002,
9068
+ "loss": 0.521,
9069
+ "step": 9540
9070
+ },
9071
+ {
9072
+ "epoch": 3.03,
9073
+ "learning_rate": 0.0002,
9074
+ "loss": 0.4261,
9075
+ "step": 9550
9076
+ },
9077
+ {
9078
+ "epoch": 3.03,
9079
+ "learning_rate": 0.0002,
9080
+ "loss": 0.4827,
9081
+ "step": 9560
9082
+ },
9083
+ {
9084
+ "epoch": 3.03,
9085
+ "learning_rate": 0.0002,
9086
+ "loss": 0.521,
9087
+ "step": 9570
9088
+ },
9089
+ {
9090
+ "epoch": 3.04,
9091
+ "learning_rate": 0.0002,
9092
+ "loss": 0.5273,
9093
+ "step": 9580
9094
+ },
9095
+ {
9096
+ "epoch": 3.04,
9097
+ "learning_rate": 0.0002,
9098
+ "loss": 0.5053,
9099
+ "step": 9590
9100
+ },
9101
+ {
9102
+ "epoch": 3.04,
9103
+ "learning_rate": 0.0002,
9104
+ "loss": 0.4598,
9105
+ "step": 9600
9106
+ },
9107
+ {
9108
+ "epoch": 3.04,
9109
+ "eval_loss": 0.8103159070014954,
9110
+ "eval_runtime": 111.1191,
9111
+ "eval_samples_per_second": 8.999,
9112
+ "eval_steps_per_second": 4.5,
9113
+ "step": 9600
9114
+ },
9115
+ {
9116
+ "epoch": 3.04,
9117
+ "mmlu_eval_accuracy": 0.4738733345661677,
9118
+ "mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
9119
+ "mmlu_eval_accuracy_anatomy": 0.5,
9120
+ "mmlu_eval_accuracy_astronomy": 0.375,
9121
+ "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
9122
+ "mmlu_eval_accuracy_clinical_knowledge": 0.5517241379310345,
9123
+ "mmlu_eval_accuracy_college_biology": 0.375,
9124
+ "mmlu_eval_accuracy_college_chemistry": 0.375,
9125
+ "mmlu_eval_accuracy_college_computer_science": 0.45454545454545453,
9126
+ "mmlu_eval_accuracy_college_mathematics": 0.09090909090909091,
9127
+ "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
9128
+ "mmlu_eval_accuracy_college_physics": 0.18181818181818182,
9129
+ "mmlu_eval_accuracy_computer_security": 0.2727272727272727,
9130
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
9131
+ "mmlu_eval_accuracy_econometrics": 0.25,
9132
+ "mmlu_eval_accuracy_electrical_engineering": 0.3125,
9133
+ "mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
9134
+ "mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
9135
+ "mmlu_eval_accuracy_global_facts": 0.5,
9136
+ "mmlu_eval_accuracy_high_school_biology": 0.375,
9137
+ "mmlu_eval_accuracy_high_school_chemistry": 0.2727272727272727,
9138
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
9139
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
9140
+ "mmlu_eval_accuracy_high_school_geography": 0.7272727272727273,
9141
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
9142
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4418604651162791,
9143
+ "mmlu_eval_accuracy_high_school_mathematics": 0.1724137931034483,
9144
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.6153846153846154,
9145
+ "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
9146
+ "mmlu_eval_accuracy_high_school_psychology": 0.85,
9147
+ "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
9148
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
9149
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
9150
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
9151
+ "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
9152
+ "mmlu_eval_accuracy_international_law": 1.0,
9153
+ "mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
9154
+ "mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556,
9155
+ "mmlu_eval_accuracy_machine_learning": 0.0,
9156
+ "mmlu_eval_accuracy_management": 0.6363636363636364,
9157
+ "mmlu_eval_accuracy_marketing": 0.64,
9158
+ "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
9159
+ "mmlu_eval_accuracy_miscellaneous": 0.686046511627907,
9160
+ "mmlu_eval_accuracy_moral_disputes": 0.5263157894736842,
9161
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
9162
+ "mmlu_eval_accuracy_nutrition": 0.5757575757575758,
9163
+ "mmlu_eval_accuracy_philosophy": 0.5,
9164
+ "mmlu_eval_accuracy_prehistory": 0.5428571428571428,
9165
+ "mmlu_eval_accuracy_professional_accounting": 0.3225806451612903,
9166
+ "mmlu_eval_accuracy_professional_law": 0.3235294117647059,
9167
+ "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
9168
+ "mmlu_eval_accuracy_professional_psychology": 0.463768115942029,
9169
+ "mmlu_eval_accuracy_public_relations": 0.5,
9170
+ "mmlu_eval_accuracy_security_studies": 0.4074074074074074,
9171
+ "mmlu_eval_accuracy_sociology": 0.7727272727272727,
9172
+ "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
9173
+ "mmlu_eval_accuracy_virology": 0.5,
9174
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
9175
+ "mmlu_loss": 1.2339814666551647,
9176
+ "step": 9600
9177
  }
9178
  ],
9179
  "max_steps": 10000,
9180
  "num_train_epochs": 4,
9181
+ "total_flos": 2.914930323192791e+18,
9182
  "trial_name": null,
9183
  "trial_params": null
9184
  }
{checkpoint-7600 → checkpoint-9600}/training_args.bin RENAMED
File without changes