expert-16/checkpoint-6400/trainer_state.json
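This is the serialized trainer state saved alongside the checkpoint at step 6400: `log_history` holds the training loss roughly every 10 steps plus, every 200 steps, an evaluation record (`eval_loss`, runtime, throughput) and a per-subject MMLU accuracy record. A minimal sketch of reading those curves back out of the file is shown below; the file path is an assumption inferred from `best_model_checkpoint`, and only field names that appear in the entries here are used.

```python
import json

# Sketch only: load the checkpoint's trainer_state.json and extract the logged curves.
# The path is assumed from "best_model_checkpoint"; adjust it to wherever the file lives.
with open("experts/expert-16/checkpoint-6400/trainer_state.json") as f:
    state = json.load(f)

# Evaluation entries carry "eval_loss"; MMLU entries carry "mmlu_eval_accuracy".
eval_curve = [(e["step"], e["eval_loss"])
              for e in state["log_history"] if "eval_loss" in e]
mmlu_curve = [(e["step"], e["mmlu_eval_accuracy"])
              for e in state["log_history"] if "mmlu_eval_accuracy" in e]

print("best checkpoint:", state["best_model_checkpoint"],
      "best metric:", state["best_metric"])
for step, loss in eval_curve:
    print(f"step {step:5d}  eval_loss {loss:.4f}")
```

The same pattern works for any of the per-subject keys (e.g. `mmlu_eval_accuracy_formal_logic`), since each MMLU record is a flat dict keyed by subject.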
{
"best_metric": 0.7293602228164673,
"best_model_checkpoint": "experts/expert-16/checkpoint-6200",
"epoch": 2.0278833967046896,
"global_step": 6400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 0.0002,
"loss": 0.8339,
"step": 10
},
{
"epoch": 0.01,
"learning_rate": 0.0002,
"loss": 0.8289,
"step": 20
},
{
"epoch": 0.01,
"learning_rate": 0.0002,
"loss": 0.9041,
"step": 30
},
{
"epoch": 0.01,
"learning_rate": 0.0002,
"loss": 0.8491,
"step": 40
},
{
"epoch": 0.02,
"learning_rate": 0.0002,
"loss": 0.8151,
"step": 50
},
{
"epoch": 0.02,
"learning_rate": 0.0002,
"loss": 0.79,
"step": 60
},
{
"epoch": 0.02,
"learning_rate": 0.0002,
"loss": 0.7835,
"step": 70
},
{
"epoch": 0.03,
"learning_rate": 0.0002,
"loss": 0.8831,
"step": 80
},
{
"epoch": 0.03,
"learning_rate": 0.0002,
"loss": 0.8607,
"step": 90
},
{
"epoch": 0.03,
"learning_rate": 0.0002,
"loss": 0.7876,
"step": 100
},
{
"epoch": 0.03,
"learning_rate": 0.0002,
"loss": 0.8031,
"step": 110
},
{
"epoch": 0.04,
"learning_rate": 0.0002,
"loss": 0.8207,
"step": 120
},
{
"epoch": 0.04,
"learning_rate": 0.0002,
"loss": 0.807,
"step": 130
},
{
"epoch": 0.04,
"learning_rate": 0.0002,
"loss": 0.9262,
"step": 140
},
{
"epoch": 0.05,
"learning_rate": 0.0002,
"loss": 0.7964,
"step": 150
},
{
"epoch": 0.05,
"learning_rate": 0.0002,
"loss": 0.7879,
"step": 160
},
{
"epoch": 0.05,
"learning_rate": 0.0002,
"loss": 0.7587,
"step": 170
},
{
"epoch": 0.06,
"learning_rate": 0.0002,
"loss": 0.8091,
"step": 180
},
{
"epoch": 0.06,
"learning_rate": 0.0002,
"loss": 0.8615,
"step": 190
},
{
"epoch": 0.06,
"learning_rate": 0.0002,
"loss": 0.8672,
"step": 200
},
{
"epoch": 0.06,
"eval_loss": 0.7779108881950378,
"eval_runtime": 110.9863,
"eval_samples_per_second": 9.01,
"eval_steps_per_second": 4.505,
"step": 200
},
{
"epoch": 0.06,
"mmlu_eval_accuracy": 0.4744171116325413,
"mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
"mmlu_eval_accuracy_anatomy": 0.7142857142857143,
"mmlu_eval_accuracy_astronomy": 0.4375,
"mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
"mmlu_eval_accuracy_clinical_knowledge": 0.4482758620689655,
"mmlu_eval_accuracy_college_biology": 0.4375,
"mmlu_eval_accuracy_college_chemistry": 0.125,
"mmlu_eval_accuracy_college_computer_science": 0.18181818181818182,
"mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
"mmlu_eval_accuracy_college_medicine": 0.45454545454545453,
"mmlu_eval_accuracy_college_physics": 0.36363636363636365,
"mmlu_eval_accuracy_computer_security": 0.36363636363636365,
"mmlu_eval_accuracy_conceptual_physics": 0.34615384615384615,
"mmlu_eval_accuracy_econometrics": 0.16666666666666666,
"mmlu_eval_accuracy_electrical_engineering": 0.25,
"mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
"mmlu_eval_accuracy_formal_logic": 0.07142857142857142,
"mmlu_eval_accuracy_global_facts": 0.4,
"mmlu_eval_accuracy_high_school_biology": 0.375,
"mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
"mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_european_history": 0.6666666666666666,
"mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.5238095238095238,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.4418604651162791,
"mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
"mmlu_eval_accuracy_high_school_microeconomics": 0.3076923076923077,
"mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
"mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
"mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
"mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
"mmlu_eval_accuracy_high_school_world_history": 0.7692307692307693,
"mmlu_eval_accuracy_human_aging": 0.6956521739130435,
"mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
"mmlu_eval_accuracy_international_law": 0.8461538461538461,
"mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
"mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
"mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
"mmlu_eval_accuracy_management": 0.6363636363636364,
"mmlu_eval_accuracy_marketing": 0.88,
"mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
"mmlu_eval_accuracy_miscellaneous": 0.6627906976744186,
"mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
"mmlu_eval_accuracy_moral_scenarios": 0.27,
"mmlu_eval_accuracy_nutrition": 0.6666666666666666,
"mmlu_eval_accuracy_philosophy": 0.5,
"mmlu_eval_accuracy_prehistory": 0.42857142857142855,
"mmlu_eval_accuracy_professional_accounting": 0.3225806451612903,
"mmlu_eval_accuracy_professional_law": 0.3176470588235294,
"mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
"mmlu_eval_accuracy_professional_psychology": 0.4927536231884058,
"mmlu_eval_accuracy_public_relations": 0.6666666666666666,
"mmlu_eval_accuracy_security_studies": 0.5185185185185185,
"mmlu_eval_accuracy_sociology": 0.6818181818181818,
"mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
"mmlu_eval_accuracy_virology": 0.5555555555555556,
"mmlu_eval_accuracy_world_religions": 0.7368421052631579,
"mmlu_loss": 1.5868234255450824,
"step": 200
},
{
"epoch": 0.07,
"learning_rate": 0.0002,
"loss": 0.8316,
"step": 210
},
{
"epoch": 0.07,
"learning_rate": 0.0002,
"loss": 0.8454,
"step": 220
},
{
"epoch": 0.07,
"learning_rate": 0.0002,
"loss": 0.8434,
"step": 230
},
{
"epoch": 0.08,
"learning_rate": 0.0002,
"loss": 0.821,
"step": 240
},
{
"epoch": 0.08,
"learning_rate": 0.0002,
"loss": 0.7893,
"step": 250
},
{
"epoch": 0.08,
"learning_rate": 0.0002,
"loss": 0.8242,
"step": 260
},
{
"epoch": 0.09,
"learning_rate": 0.0002,
"loss": 0.8128,
"step": 270
},
{
"epoch": 0.09,
"learning_rate": 0.0002,
"loss": 0.8344,
"step": 280
},
{
"epoch": 0.09,
"learning_rate": 0.0002,
"loss": 0.8338,
"step": 290
},
{
"epoch": 0.1,
"learning_rate": 0.0002,
"loss": 0.7981,
"step": 300
},
{
"epoch": 0.1,
"learning_rate": 0.0002,
"loss": 0.781,
"step": 310
},
{
"epoch": 0.1,
"learning_rate": 0.0002,
"loss": 0.7717,
"step": 320
},
{
"epoch": 0.1,
"learning_rate": 0.0002,
"loss": 0.767,
"step": 330
},
{
"epoch": 0.11,
"learning_rate": 0.0002,
"loss": 0.7925,
"step": 340
},
{
"epoch": 0.11,
"learning_rate": 0.0002,
"loss": 0.8226,
"step": 350
},
{
"epoch": 0.11,
"learning_rate": 0.0002,
"loss": 0.7912,
"step": 360
},
{
"epoch": 0.12,
"learning_rate": 0.0002,
"loss": 0.8093,
"step": 370
},
{
"epoch": 0.12,
"learning_rate": 0.0002,
"loss": 0.7648,
"step": 380
},
{
"epoch": 0.12,
"learning_rate": 0.0002,
"loss": 0.7866,
"step": 390
},
{
"epoch": 0.13,
"learning_rate": 0.0002,
"loss": 0.7976,
"step": 400
},
{
"epoch": 0.13,
"eval_loss": 0.7656086683273315,
"eval_runtime": 110.9802,
"eval_samples_per_second": 9.011,
"eval_steps_per_second": 4.505,
"step": 400
},
{
"epoch": 0.13,
"mmlu_eval_accuracy": 0.47124130233512024,
"mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
"mmlu_eval_accuracy_anatomy": 0.6428571428571429,
"mmlu_eval_accuracy_astronomy": 0.4375,
"mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
"mmlu_eval_accuracy_clinical_knowledge": 0.4482758620689655,
"mmlu_eval_accuracy_college_biology": 0.4375,
"mmlu_eval_accuracy_college_chemistry": 0.125,
"mmlu_eval_accuracy_college_computer_science": 0.18181818181818182,
"mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
"mmlu_eval_accuracy_college_medicine": 0.45454545454545453,
"mmlu_eval_accuracy_college_physics": 0.36363636363636365,
"mmlu_eval_accuracy_computer_security": 0.45454545454545453,
"mmlu_eval_accuracy_conceptual_physics": 0.34615384615384615,
"mmlu_eval_accuracy_econometrics": 0.16666666666666666,
"mmlu_eval_accuracy_electrical_engineering": 0.25,
"mmlu_eval_accuracy_elementary_mathematics": 0.2926829268292683,
"mmlu_eval_accuracy_formal_logic": 0.07142857142857142,
"mmlu_eval_accuracy_global_facts": 0.4,
"mmlu_eval_accuracy_high_school_biology": 0.40625,
"mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
"mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
"mmlu_eval_accuracy_high_school_geography": 0.9090909090909091,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.5238095238095238,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.46511627906976744,
"mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
"mmlu_eval_accuracy_high_school_microeconomics": 0.34615384615384615,
"mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
"mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
"mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
"mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
"mmlu_eval_accuracy_high_school_world_history": 0.7692307692307693,
"mmlu_eval_accuracy_human_aging": 0.6956521739130435,
"mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
"mmlu_eval_accuracy_international_law": 0.8461538461538461,
"mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
"mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
"mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
"mmlu_eval_accuracy_management": 0.6363636363636364,
"mmlu_eval_accuracy_marketing": 0.84,
"mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
"mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
"mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
"mmlu_eval_accuracy_moral_scenarios": 0.25,
"mmlu_eval_accuracy_nutrition": 0.6060606060606061,
"mmlu_eval_accuracy_philosophy": 0.5,
"mmlu_eval_accuracy_prehistory": 0.4857142857142857,
"mmlu_eval_accuracy_professional_accounting": 0.3870967741935484,
"mmlu_eval_accuracy_professional_law": 0.3058823529411765,
"mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
"mmlu_eval_accuracy_professional_psychology": 0.5217391304347826,
"mmlu_eval_accuracy_public_relations": 0.6666666666666666,
"mmlu_eval_accuracy_security_studies": 0.5185185185185185,
"mmlu_eval_accuracy_sociology": 0.6818181818181818,
"mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454,
"mmlu_eval_accuracy_virology": 0.5,
"mmlu_eval_accuracy_world_religions": 0.6842105263157895,
"mmlu_loss": 1.4339068503199297,
"step": 400
},
{
"epoch": 0.13,
"learning_rate": 0.0002,
"loss": 0.8182,
"step": 410
},
{
"epoch": 0.13,
"learning_rate": 0.0002,
"loss": 0.8438,
"step": 420
},
{
"epoch": 0.14,
"learning_rate": 0.0002,
"loss": 0.8184,
"step": 430
},
{
"epoch": 0.14,
"learning_rate": 0.0002,
"loss": 0.8202,
"step": 440
},
{
"epoch": 0.14,
"learning_rate": 0.0002,
"loss": 0.8264,
"step": 450
},
{
"epoch": 0.15,
"learning_rate": 0.0002,
"loss": 0.8384,
"step": 460
},
{
"epoch": 0.15,
"learning_rate": 0.0002,
"loss": 0.8372,
"step": 470
},
{
"epoch": 0.15,
"learning_rate": 0.0002,
"loss": 0.8072,
"step": 480
},
{
"epoch": 0.16,
"learning_rate": 0.0002,
"loss": 0.8214,
"step": 490
},
{
"epoch": 0.16,
"learning_rate": 0.0002,
"loss": 0.814,
"step": 500
},
{
"epoch": 0.16,
"learning_rate": 0.0002,
"loss": 0.847,
"step": 510
},
{
"epoch": 0.16,
"learning_rate": 0.0002,
"loss": 0.8444,
"step": 520
},
{
"epoch": 0.17,
"learning_rate": 0.0002,
"loss": 0.8096,
"step": 530
},
{
"epoch": 0.17,
"learning_rate": 0.0002,
"loss": 0.8496,
"step": 540
},
{
"epoch": 0.17,
"learning_rate": 0.0002,
"loss": 0.7729,
"step": 550
},
{
"epoch": 0.18,
"learning_rate": 0.0002,
"loss": 0.7826,
"step": 560
},
{
"epoch": 0.18,
"learning_rate": 0.0002,
"loss": 0.7478,
"step": 570
},
{
"epoch": 0.18,
"learning_rate": 0.0002,
"loss": 0.7953,
"step": 580
},
{
"epoch": 0.19,
"learning_rate": 0.0002,
"loss": 0.7363,
"step": 590
},
{
"epoch": 0.19,
"learning_rate": 0.0002,
"loss": 0.7971,
"step": 600
},
{
"epoch": 0.19,
"eval_loss": 0.7616064548492432,
"eval_runtime": 110.9404,
"eval_samples_per_second": 9.014,
"eval_steps_per_second": 4.507,
"step": 600
},
{
"epoch": 0.19,
"mmlu_eval_accuracy": 0.4749850916074463,
"mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
"mmlu_eval_accuracy_anatomy": 0.7142857142857143,
"mmlu_eval_accuracy_astronomy": 0.4375,
"mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
"mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
"mmlu_eval_accuracy_college_biology": 0.4375,
"mmlu_eval_accuracy_college_chemistry": 0.25,
"mmlu_eval_accuracy_college_computer_science": 0.18181818181818182,
"mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
"mmlu_eval_accuracy_college_medicine": 0.2727272727272727,
"mmlu_eval_accuracy_college_physics": 0.36363636363636365,
"mmlu_eval_accuracy_computer_security": 0.36363636363636365,
"mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
"mmlu_eval_accuracy_econometrics": 0.16666666666666666,
"mmlu_eval_accuracy_electrical_engineering": 0.25,
"mmlu_eval_accuracy_elementary_mathematics": 0.2682926829268293,
"mmlu_eval_accuracy_formal_logic": 0.07142857142857142,
"mmlu_eval_accuracy_global_facts": 0.3,
"mmlu_eval_accuracy_high_school_biology": 0.40625,
"mmlu_eval_accuracy_high_school_chemistry": 0.36363636363636365,
"mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
"mmlu_eval_accuracy_high_school_geography": 0.9090909090909091,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.47619047619047616,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.46511627906976744,
"mmlu_eval_accuracy_high_school_mathematics": 0.27586206896551724,
"mmlu_eval_accuracy_high_school_microeconomics": 0.3076923076923077,
"mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
"mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
"mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
"mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
"mmlu_eval_accuracy_high_school_world_history": 0.7692307692307693,
"mmlu_eval_accuracy_human_aging": 0.6521739130434783,
"mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
"mmlu_eval_accuracy_international_law": 0.8461538461538461,
"mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
"mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
"mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
"mmlu_eval_accuracy_management": 0.6363636363636364,
"mmlu_eval_accuracy_marketing": 0.84,
"mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
"mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
"mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
"mmlu_eval_accuracy_moral_scenarios": 0.26,
"mmlu_eval_accuracy_nutrition": 0.6060606060606061,
"mmlu_eval_accuracy_philosophy": 0.5294117647058824,
"mmlu_eval_accuracy_prehistory": 0.5142857142857142,
"mmlu_eval_accuracy_professional_accounting": 0.41935483870967744,
"mmlu_eval_accuracy_professional_law": 0.3,
"mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
"mmlu_eval_accuracy_professional_psychology": 0.5072463768115942,
"mmlu_eval_accuracy_public_relations": 0.6666666666666666,
"mmlu_eval_accuracy_security_studies": 0.5555555555555556,
"mmlu_eval_accuracy_sociology": 0.6818181818181818,
"mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
"mmlu_eval_accuracy_virology": 0.5,
"mmlu_eval_accuracy_world_religions": 0.6842105263157895,
"mmlu_loss": 1.5647042619341658,
"step": 600
},
{
"epoch": 0.19,
"learning_rate": 0.0002,
"loss": 0.7936,
"step": 610
},
{
"epoch": 0.2,
"learning_rate": 0.0002,
"loss": 0.7319,
"step": 620
},
{
"epoch": 0.2,
"learning_rate": 0.0002,
"loss": 0.79,
"step": 630
},
{
"epoch": 0.2,
"learning_rate": 0.0002,
"loss": 0.7806,
"step": 640
},
{
"epoch": 0.21,
"learning_rate": 0.0002,
"loss": 0.8833,
"step": 650
},
{
"epoch": 0.21,
"learning_rate": 0.0002,
"loss": 0.7711,
"step": 660
},
{
"epoch": 0.21,
"learning_rate": 0.0002,
"loss": 0.8242,
"step": 670
},
{
"epoch": 0.22,
"learning_rate": 0.0002,
"loss": 0.7948,
"step": 680
},
{
"epoch": 0.22,
"learning_rate": 0.0002,
"loss": 0.7417,
"step": 690
},
{
"epoch": 0.22,
"learning_rate": 0.0002,
"loss": 0.7275,
"step": 700
},
{
"epoch": 0.22,
"learning_rate": 0.0002,
"loss": 0.8137,
"step": 710
},
{
"epoch": 0.23,
"learning_rate": 0.0002,
"loss": 0.8568,
"step": 720
},
{
"epoch": 0.23,
"learning_rate": 0.0002,
"loss": 0.802,
"step": 730
},
{
"epoch": 0.23,
"learning_rate": 0.0002,
"loss": 0.8202,
"step": 740
},
{
"epoch": 0.24,
"learning_rate": 0.0002,
"loss": 0.8077,
"step": 750
},
{
"epoch": 0.24,
"learning_rate": 0.0002,
"loss": 0.814,
"step": 760
},
{
"epoch": 0.24,
"learning_rate": 0.0002,
"loss": 0.7971,
"step": 770
},
{
"epoch": 0.25,
"learning_rate": 0.0002,
"loss": 0.798,
"step": 780
},
{
"epoch": 0.25,
"learning_rate": 0.0002,
"loss": 0.7806,
"step": 790
},
{
"epoch": 0.25,
"learning_rate": 0.0002,
"loss": 0.8042,
"step": 800
},
{
"epoch": 0.25,
"eval_loss": 0.7563537359237671,
"eval_runtime": 111.023,
"eval_samples_per_second": 9.007,
"eval_steps_per_second": 4.504,
"step": 800
},
{
"epoch": 0.25,
"mmlu_eval_accuracy": 0.4796267144005645,
"mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
"mmlu_eval_accuracy_anatomy": 0.7142857142857143,
"mmlu_eval_accuracy_astronomy": 0.4375,
"mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
"mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
"mmlu_eval_accuracy_college_biology": 0.4375,
"mmlu_eval_accuracy_college_chemistry": 0.125,
"mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
"mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
"mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
"mmlu_eval_accuracy_college_physics": 0.36363636363636365,
"mmlu_eval_accuracy_computer_security": 0.45454545454545453,
"mmlu_eval_accuracy_conceptual_physics": 0.34615384615384615,
"mmlu_eval_accuracy_econometrics": 0.16666666666666666,
"mmlu_eval_accuracy_electrical_engineering": 0.25,
"mmlu_eval_accuracy_elementary_mathematics": 0.2926829268292683,
"mmlu_eval_accuracy_formal_logic": 0.07142857142857142,
"mmlu_eval_accuracy_global_facts": 0.4,
"mmlu_eval_accuracy_high_school_biology": 0.375,
"mmlu_eval_accuracy_high_school_chemistry": 0.2727272727272727,
"mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_european_history": 0.6666666666666666,
"mmlu_eval_accuracy_high_school_geography": 0.9090909090909091,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.5238095238095238,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.46511627906976744,
"mmlu_eval_accuracy_high_school_mathematics": 0.27586206896551724,
"mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
"mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
"mmlu_eval_accuracy_high_school_psychology": 0.8833333333333333,
"mmlu_eval_accuracy_high_school_statistics": 0.2608695652173913,
"mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
"mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
"mmlu_eval_accuracy_human_aging": 0.7391304347826086,
"mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
"mmlu_eval_accuracy_international_law": 0.8461538461538461,
"mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
"mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
"mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
"mmlu_eval_accuracy_management": 0.6363636363636364,
"mmlu_eval_accuracy_marketing": 0.84,
"mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
"mmlu_eval_accuracy_miscellaneous": 0.6744186046511628,
"mmlu_eval_accuracy_moral_disputes": 0.5,
"mmlu_eval_accuracy_moral_scenarios": 0.23,
"mmlu_eval_accuracy_nutrition": 0.6363636363636364,
"mmlu_eval_accuracy_philosophy": 0.4411764705882353,
"mmlu_eval_accuracy_prehistory": 0.45714285714285713,
"mmlu_eval_accuracy_professional_accounting": 0.3548387096774194,
"mmlu_eval_accuracy_professional_law": 0.3,
"mmlu_eval_accuracy_professional_medicine": 0.5806451612903226,
"mmlu_eval_accuracy_professional_psychology": 0.5072463768115942,
"mmlu_eval_accuracy_public_relations": 0.6666666666666666,
"mmlu_eval_accuracy_security_studies": 0.4074074074074074,
"mmlu_eval_accuracy_sociology": 0.6363636363636364,
"mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
"mmlu_eval_accuracy_virology": 0.5555555555555556,
"mmlu_eval_accuracy_world_religions": 0.6842105263157895,
"mmlu_loss": 1.4866046660796157,
"step": 800
},
{
"epoch": 0.26,
"learning_rate": 0.0002,
"loss": 0.8119,
"step": 810
},
{
"epoch": 0.26,
"learning_rate": 0.0002,
"loss": 0.8156,
"step": 820
},
{
"epoch": 0.26,
"learning_rate": 0.0002,
"loss": 0.8288,
"step": 830
},
{
"epoch": 0.27,
"learning_rate": 0.0002,
"loss": 0.8008,
"step": 840
},
{
"epoch": 0.27,
"learning_rate": 0.0002,
"loss": 0.8649,
"step": 850
},
{
"epoch": 0.27,
"learning_rate": 0.0002,
"loss": 0.8242,
"step": 860
},
{
"epoch": 0.28,
"learning_rate": 0.0002,
"loss": 0.8255,
"step": 870
},
{
"epoch": 0.28,
"learning_rate": 0.0002,
"loss": 0.8467,
"step": 880
},
{
"epoch": 0.28,
"learning_rate": 0.0002,
"loss": 0.8264,
"step": 890
},
{
"epoch": 0.29,
"learning_rate": 0.0002,
"loss": 0.7833,
"step": 900
},
{
"epoch": 0.29,
"learning_rate": 0.0002,
"loss": 0.8338,
"step": 910
},
{
"epoch": 0.29,
"learning_rate": 0.0002,
"loss": 0.8062,
"step": 920
},
{
"epoch": 0.29,
"learning_rate": 0.0002,
"loss": 0.8112,
"step": 930
},
{
"epoch": 0.3,
"learning_rate": 0.0002,
"loss": 0.7469,
"step": 940
},
{
"epoch": 0.3,
"learning_rate": 0.0002,
"loss": 0.7897,
"step": 950
},
{
"epoch": 0.3,
"learning_rate": 0.0002,
"loss": 0.8081,
"step": 960
},
{
"epoch": 0.31,
"learning_rate": 0.0002,
"loss": 0.7571,
"step": 970
},
{
"epoch": 0.31,
"learning_rate": 0.0002,
"loss": 0.8161,
"step": 980
},
{
"epoch": 0.31,
"learning_rate": 0.0002,
"loss": 0.7759,
"step": 990
},
{
"epoch": 0.32,
"learning_rate": 0.0002,
"loss": 0.7417,
"step": 1000
},
{
"epoch": 0.32,
"eval_loss": 0.754473865032196,
"eval_runtime": 111.0233,
"eval_samples_per_second": 9.007,
"eval_steps_per_second": 4.504,
"step": 1000
},
{
"epoch": 0.32,
"mmlu_eval_accuracy": 0.4749030525395577,
"mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
"mmlu_eval_accuracy_anatomy": 0.7142857142857143,
"mmlu_eval_accuracy_astronomy": 0.4375,
"mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
"mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
"mmlu_eval_accuracy_college_biology": 0.4375,
"mmlu_eval_accuracy_college_chemistry": 0.125,
"mmlu_eval_accuracy_college_computer_science": 0.18181818181818182,
"mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
"mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
"mmlu_eval_accuracy_college_physics": 0.36363636363636365,
"mmlu_eval_accuracy_computer_security": 0.45454545454545453,
"mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
"mmlu_eval_accuracy_econometrics": 0.16666666666666666,
"mmlu_eval_accuracy_electrical_engineering": 0.25,
"mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
"mmlu_eval_accuracy_formal_logic": 0.07142857142857142,
"mmlu_eval_accuracy_global_facts": 0.4,
"mmlu_eval_accuracy_high_school_biology": 0.375,
"mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
"mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
"mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.5714285714285714,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.4883720930232558,
"mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
"mmlu_eval_accuracy_high_school_microeconomics": 0.2692307692307692,
"mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
"mmlu_eval_accuracy_high_school_psychology": 0.8833333333333333,
"mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
"mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
"mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
"mmlu_eval_accuracy_human_aging": 0.6956521739130435,
"mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
"mmlu_eval_accuracy_international_law": 0.8461538461538461,
"mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
"mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
"mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
"mmlu_eval_accuracy_management": 0.5454545454545454,
"mmlu_eval_accuracy_marketing": 0.88,
"mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
"mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
"mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
"mmlu_eval_accuracy_moral_scenarios": 0.25,
"mmlu_eval_accuracy_nutrition": 0.6666666666666666,
"mmlu_eval_accuracy_philosophy": 0.47058823529411764,
"mmlu_eval_accuracy_prehistory": 0.4857142857142857,
"mmlu_eval_accuracy_professional_accounting": 0.3870967741935484,
"mmlu_eval_accuracy_professional_law": 0.29411764705882354,
"mmlu_eval_accuracy_professional_medicine": 0.5806451612903226,
"mmlu_eval_accuracy_professional_psychology": 0.5362318840579711,
"mmlu_eval_accuracy_public_relations": 0.6666666666666666,
"mmlu_eval_accuracy_security_studies": 0.4444444444444444,
"mmlu_eval_accuracy_sociology": 0.6363636363636364,
"mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
"mmlu_eval_accuracy_virology": 0.5555555555555556,
"mmlu_eval_accuracy_world_religions": 0.8421052631578947,
"mmlu_loss": 1.596783688734468,
"step": 1000
},
{
"epoch": 0.32,
"learning_rate": 0.0002,
"loss": 0.827,
"step": 1010
},
{
"epoch": 0.32,
"learning_rate": 0.0002,
"loss": 0.8345,
"step": 1020
},
{
"epoch": 0.33,
"learning_rate": 0.0002,
"loss": 0.7883,
"step": 1030
},
{
"epoch": 0.33,
"learning_rate": 0.0002,
"loss": 0.7774,
"step": 1040
},
{
"epoch": 0.33,
"learning_rate": 0.0002,
"loss": 0.8175,
"step": 1050
},
{
"epoch": 0.34,
"learning_rate": 0.0002,
"loss": 0.8,
"step": 1060
},
{
"epoch": 0.34,
"learning_rate": 0.0002,
"loss": 0.8049,
"step": 1070
},
{
"epoch": 0.34,
"learning_rate": 0.0002,
"loss": 0.8116,
"step": 1080
},
{
"epoch": 0.35,
"learning_rate": 0.0002,
"loss": 0.7852,
"step": 1090
},
{
"epoch": 0.35,
"learning_rate": 0.0002,
"loss": 0.7429,
"step": 1100
},
{
"epoch": 0.35,
"learning_rate": 0.0002,
"loss": 0.794,
"step": 1110
},
{
"epoch": 0.35,
"learning_rate": 0.0002,
"loss": 0.7549,
"step": 1120
},
{
"epoch": 0.36,
"learning_rate": 0.0002,
"loss": 0.7347,
"step": 1130
},
{
"epoch": 0.36,
"learning_rate": 0.0002,
"loss": 0.7482,
"step": 1140
},
{
"epoch": 0.36,
"learning_rate": 0.0002,
"loss": 0.7393,
"step": 1150
},
{
"epoch": 0.37,
"learning_rate": 0.0002,
"loss": 0.8103,
"step": 1160
},
{
"epoch": 0.37,
"learning_rate": 0.0002,
"loss": 0.8075,
"step": 1170
},
{
"epoch": 0.37,
"learning_rate": 0.0002,
"loss": 0.7831,
"step": 1180
},
{
"epoch": 0.38,
"learning_rate": 0.0002,
"loss": 0.792,
"step": 1190
},
{
"epoch": 0.38,
"learning_rate": 0.0002,
"loss": 0.7955,
"step": 1200
},
{
"epoch": 0.38,
"eval_loss": 0.7498393654823303,
"eval_runtime": 110.9719,
"eval_samples_per_second": 9.011,
"eval_steps_per_second": 4.506,
"step": 1200
},
{
"epoch": 0.38,
"mmlu_eval_accuracy": 0.4769718071089565,
"mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
"mmlu_eval_accuracy_anatomy": 0.7142857142857143,
"mmlu_eval_accuracy_astronomy": 0.4375,
"mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
"mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
"mmlu_eval_accuracy_college_biology": 0.4375,
"mmlu_eval_accuracy_college_chemistry": 0.25,
"mmlu_eval_accuracy_college_computer_science": 0.18181818181818182,
"mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
"mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
"mmlu_eval_accuracy_college_physics": 0.36363636363636365,
"mmlu_eval_accuracy_computer_security": 0.2727272727272727,
"mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
"mmlu_eval_accuracy_econometrics": 0.16666666666666666,
"mmlu_eval_accuracy_electrical_engineering": 0.25,
"mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
"mmlu_eval_accuracy_formal_logic": 0.07142857142857142,
"mmlu_eval_accuracy_global_facts": 0.4,
"mmlu_eval_accuracy_high_school_biology": 0.40625,
"mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
"mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
"mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.5238095238095238,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.46511627906976744,
"mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
"mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
"mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
"mmlu_eval_accuracy_high_school_psychology": 0.8833333333333333,
"mmlu_eval_accuracy_high_school_statistics": 0.391304347826087,
"mmlu_eval_accuracy_high_school_us_history": 0.6818181818181818,
"mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
"mmlu_eval_accuracy_human_aging": 0.7391304347826086,
"mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
"mmlu_eval_accuracy_international_law": 0.8461538461538461,
"mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
"mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
"mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
"mmlu_eval_accuracy_management": 0.6363636363636364,
"mmlu_eval_accuracy_marketing": 0.84,
"mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
"mmlu_eval_accuracy_miscellaneous": 0.6744186046511628,
"mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
"mmlu_eval_accuracy_moral_scenarios": 0.24,
"mmlu_eval_accuracy_nutrition": 0.6060606060606061,
"mmlu_eval_accuracy_philosophy": 0.5294117647058824,
"mmlu_eval_accuracy_prehistory": 0.4857142857142857,
"mmlu_eval_accuracy_professional_accounting": 0.41935483870967744,
"mmlu_eval_accuracy_professional_law": 0.3058823529411765,
"mmlu_eval_accuracy_professional_medicine": 0.5806451612903226,
"mmlu_eval_accuracy_professional_psychology": 0.5217391304347826,
"mmlu_eval_accuracy_public_relations": 0.6666666666666666,
"mmlu_eval_accuracy_security_studies": 0.4444444444444444,
"mmlu_eval_accuracy_sociology": 0.6363636363636364,
"mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454,
"mmlu_eval_accuracy_virology": 0.5,
"mmlu_eval_accuracy_world_religions": 0.6842105263157895,
"mmlu_loss": 1.6994269322166244,
"step": 1200
},
{
"epoch": 0.38,
"learning_rate": 0.0002,
"loss": 0.7821,
"step": 1210
},
{
"epoch": 0.39,
"learning_rate": 0.0002,
"loss": 0.8138,
"step": 1220
},
{
"epoch": 0.39,
"learning_rate": 0.0002,
"loss": 0.717,
"step": 1230
},
{
"epoch": 0.39,
"learning_rate": 0.0002,
"loss": 0.7406,
"step": 1240
},
{
"epoch": 0.4,
"learning_rate": 0.0002,
"loss": 0.8031,
"step": 1250
},
{
"epoch": 0.4,
"learning_rate": 0.0002,
"loss": 0.7974,
"step": 1260
},
{
"epoch": 0.4,
"learning_rate": 0.0002,
"loss": 0.8001,
"step": 1270
},
{
"epoch": 0.41,
"learning_rate": 0.0002,
"loss": 0.766,
"step": 1280
},
{
"epoch": 0.41,
"learning_rate": 0.0002,
"loss": 0.7679,
"step": 1290
},
{
"epoch": 0.41,
"learning_rate": 0.0002,
"loss": 0.8017,
"step": 1300
},
{
"epoch": 0.42,
"learning_rate": 0.0002,
"loss": 0.8027,
"step": 1310
},
{
"epoch": 0.42,
"learning_rate": 0.0002,
"loss": 0.7819,
"step": 1320
},
{
"epoch": 0.42,
"learning_rate": 0.0002,
"loss": 0.7558,
"step": 1330
},
{
"epoch": 0.42,
"learning_rate": 0.0002,
"loss": 0.8363,
"step": 1340
},
{
"epoch": 0.43,
"learning_rate": 0.0002,
"loss": 0.7809,
"step": 1350
},
{
"epoch": 0.43,
"learning_rate": 0.0002,
"loss": 0.8114,
"step": 1360
},
{
"epoch": 0.43,
"learning_rate": 0.0002,
"loss": 0.8446,
"step": 1370
},
{
"epoch": 0.44,
"learning_rate": 0.0002,
"loss": 0.7877,
"step": 1380
},
{
"epoch": 0.44,
"learning_rate": 0.0002,
"loss": 0.8309,
"step": 1390
},
{
"epoch": 0.44,
"learning_rate": 0.0002,
"loss": 0.8131,
"step": 1400
},
{
"epoch": 0.44,
"eval_loss": 0.7500243186950684,
"eval_runtime": 111.0409,
"eval_samples_per_second": 9.006,
"eval_steps_per_second": 4.503,
"step": 1400
},
{
"epoch": 0.44,
"mmlu_eval_accuracy": 0.4758070782649682,
"mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
"mmlu_eval_accuracy_anatomy": 0.7142857142857143,
"mmlu_eval_accuracy_astronomy": 0.4375,
"mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
"mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
"mmlu_eval_accuracy_college_biology": 0.5,
"mmlu_eval_accuracy_college_chemistry": 0.25,
"mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
"mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
"mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
"mmlu_eval_accuracy_college_physics": 0.2727272727272727,
"mmlu_eval_accuracy_computer_security": 0.36363636363636365,
"mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
"mmlu_eval_accuracy_econometrics": 0.16666666666666666,
"mmlu_eval_accuracy_electrical_engineering": 0.25,
"mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
"mmlu_eval_accuracy_formal_logic": 0.07142857142857142,
"mmlu_eval_accuracy_global_facts": 0.4,
"mmlu_eval_accuracy_high_school_biology": 0.375,
"mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
"mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.42857142857142855,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.46511627906976744,
"mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
"mmlu_eval_accuracy_high_school_microeconomics": 0.34615384615384615,
"mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
"mmlu_eval_accuracy_high_school_psychology": 0.8833333333333333,
"mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
"mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
"mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
"mmlu_eval_accuracy_human_aging": 0.6956521739130435,
"mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
"mmlu_eval_accuracy_international_law": 0.8461538461538461,
"mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
"mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
"mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
"mmlu_eval_accuracy_management": 0.6363636363636364,
"mmlu_eval_accuracy_marketing": 0.88,
"mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
"mmlu_eval_accuracy_miscellaneous": 0.6395348837209303,
"mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
"mmlu_eval_accuracy_moral_scenarios": 0.24,
"mmlu_eval_accuracy_nutrition": 0.6363636363636364,
"mmlu_eval_accuracy_philosophy": 0.5588235294117647,
"mmlu_eval_accuracy_prehistory": 0.4857142857142857,
"mmlu_eval_accuracy_professional_accounting": 0.45161290322580644,
"mmlu_eval_accuracy_professional_law": 0.32941176470588235,
"mmlu_eval_accuracy_professional_medicine": 0.6129032258064516,
"mmlu_eval_accuracy_professional_psychology": 0.5217391304347826,
"mmlu_eval_accuracy_public_relations": 0.6666666666666666,
"mmlu_eval_accuracy_security_studies": 0.4074074074074074,
"mmlu_eval_accuracy_sociology": 0.6363636363636364,
"mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
"mmlu_eval_accuracy_virology": 0.5555555555555556,
"mmlu_eval_accuracy_world_religions": 0.7368421052631579,
"mmlu_loss": 1.4974891513041355,
"step": 1400
},
{
"epoch": 0.45,
"learning_rate": 0.0002,
"loss": 0.8122,
"step": 1410
},
{
"epoch": 0.45,
"learning_rate": 0.0002,
"loss": 0.7754,
"step": 1420
},
{
"epoch": 0.45,
"learning_rate": 0.0002,
"loss": 0.8116,
"step": 1430
},
{
"epoch": 0.46,
"learning_rate": 0.0002,
"loss": 0.7442,
"step": 1440
},
{
"epoch": 0.46,
"learning_rate": 0.0002,
"loss": 0.7638,
"step": 1450
},
{
"epoch": 0.46,
"learning_rate": 0.0002,
"loss": 0.7746,
"step": 1460
},
{
"epoch": 0.47,
"learning_rate": 0.0002,
"loss": 0.7616,
"step": 1470
},
{
"epoch": 0.47,
"learning_rate": 0.0002,
"loss": 0.8144,
"step": 1480
},
{
"epoch": 0.47,
"learning_rate": 0.0002,
"loss": 0.7924,
"step": 1490
},
{
"epoch": 0.48,
"learning_rate": 0.0002,
"loss": 0.8075,
"step": 1500
},
{
"epoch": 0.48,
"learning_rate": 0.0002,
"loss": 0.769,
"step": 1510
},
{
"epoch": 0.48,
"learning_rate": 0.0002,
"loss": 0.7296,
"step": 1520
},
{
"epoch": 0.48,
"learning_rate": 0.0002,
"loss": 0.8284,
"step": 1530
},
{
"epoch": 0.49,
"learning_rate": 0.0002,
"loss": 0.82,
"step": 1540
},
{
"epoch": 0.49,
"learning_rate": 0.0002,
"loss": 0.7619,
"step": 1550
},
{
"epoch": 0.49,
"learning_rate": 0.0002,
"loss": 0.7862,
"step": 1560
},
{
"epoch": 0.5,
"learning_rate": 0.0002,
"loss": 0.7835,
"step": 1570
},
{
"epoch": 0.5,
"learning_rate": 0.0002,
"loss": 0.7624,
"step": 1580
},
{
"epoch": 0.5,
"learning_rate": 0.0002,
"loss": 0.8021,
"step": 1590
},
{
"epoch": 0.51,
"learning_rate": 0.0002,
"loss": 0.793,
"step": 1600
},
{
"epoch": 0.51,
"eval_loss": 0.7459111213684082,
"eval_runtime": 111.062,
"eval_samples_per_second": 9.004,
"eval_steps_per_second": 4.502,
"step": 1600
},
{
"epoch": 0.51,
"mmlu_eval_accuracy": 0.46888485292306403,
"mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
"mmlu_eval_accuracy_anatomy": 0.7142857142857143,
"mmlu_eval_accuracy_astronomy": 0.4375,
"mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
"mmlu_eval_accuracy_clinical_knowledge": 0.5517241379310345,
"mmlu_eval_accuracy_college_biology": 0.4375,
"mmlu_eval_accuracy_college_chemistry": 0.25,
"mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
"mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
"mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
"mmlu_eval_accuracy_college_physics": 0.2727272727272727,
"mmlu_eval_accuracy_computer_security": 0.36363636363636365,
"mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
"mmlu_eval_accuracy_econometrics": 0.16666666666666666,
"mmlu_eval_accuracy_electrical_engineering": 0.3125,
"mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
"mmlu_eval_accuracy_formal_logic": 0.07142857142857142,
"mmlu_eval_accuracy_global_facts": 0.3,
"mmlu_eval_accuracy_high_school_biology": 0.375,
"mmlu_eval_accuracy_high_school_chemistry": 0.2727272727272727,
"mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_european_history": 0.6666666666666666,
"mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.5238095238095238,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.46511627906976744,
"mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
"mmlu_eval_accuracy_high_school_microeconomics": 0.3076923076923077,
"mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
"mmlu_eval_accuracy_high_school_psychology": 0.8833333333333333,
"mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
"mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
"mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
"mmlu_eval_accuracy_human_aging": 0.7391304347826086,
"mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
"mmlu_eval_accuracy_international_law": 0.8461538461538461,
"mmlu_eval_accuracy_jurisprudence": 0.2727272727272727,
"mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556,
"mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
"mmlu_eval_accuracy_management": 0.5454545454545454,
"mmlu_eval_accuracy_marketing": 0.88,
"mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
"mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
"mmlu_eval_accuracy_moral_disputes": 0.5,
"mmlu_eval_accuracy_moral_scenarios": 0.24,
"mmlu_eval_accuracy_nutrition": 0.6363636363636364,
"mmlu_eval_accuracy_philosophy": 0.5,
"mmlu_eval_accuracy_prehistory": 0.42857142857142855,
"mmlu_eval_accuracy_professional_accounting": 0.41935483870967744,
"mmlu_eval_accuracy_professional_law": 0.31176470588235294,
"mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
"mmlu_eval_accuracy_professional_psychology": 0.4927536231884058,
"mmlu_eval_accuracy_public_relations": 0.6666666666666666,
"mmlu_eval_accuracy_security_studies": 0.48148148148148145,
"mmlu_eval_accuracy_sociology": 0.5909090909090909,
"mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454,
"mmlu_eval_accuracy_virology": 0.5555555555555556,
"mmlu_eval_accuracy_world_religions": 0.6842105263157895,
"mmlu_loss": 1.6951039852112453,
"step": 1600
},
{
"epoch": 0.51,
"learning_rate": 0.0002,
"loss": 0.7999,
"step": 1610
},
{
"epoch": 0.51,
"learning_rate": 0.0002,
"loss": 0.7959,
"step": 1620
},
{
"epoch": 0.52,
"learning_rate": 0.0002,
"loss": 0.7887,
"step": 1630
},
{
"epoch": 0.52,
"learning_rate": 0.0002,
"loss": 0.7186,
"step": 1640
},
{
"epoch": 0.52,
"learning_rate": 0.0002,
"loss": 0.8049,
"step": 1650
},
{
"epoch": 0.53,
"learning_rate": 0.0002,
"loss": 0.7934,
"step": 1660
},
{
"epoch": 0.53,
"learning_rate": 0.0002,
"loss": 0.8369,
"step": 1670
},
{
"epoch": 0.53,
"learning_rate": 0.0002,
"loss": 0.7567,
"step": 1680
},
{
"epoch": 0.54,
"learning_rate": 0.0002,
"loss": 0.8058,
"step": 1690
},
{
"epoch": 0.54,
"learning_rate": 0.0002,
"loss": 0.7818,
"step": 1700
},
{
"epoch": 0.54,
"learning_rate": 0.0002,
"loss": 0.7115,
"step": 1710
},
{
"epoch": 0.54,
"learning_rate": 0.0002,
"loss": 0.7434,
"step": 1720
},
{
"epoch": 0.55,
"learning_rate": 0.0002,
"loss": 0.7788,
"step": 1730
},
{
"epoch": 0.55,
"learning_rate": 0.0002,
"loss": 0.7824,
"step": 1740
},
{
"epoch": 0.55,
"learning_rate": 0.0002,
"loss": 0.7198,
"step": 1750
},
{
"epoch": 0.56,
"learning_rate": 0.0002,
"loss": 0.8059,
"step": 1760
},
{
"epoch": 0.56,
"learning_rate": 0.0002,
"loss": 0.7892,
"step": 1770
},
{
"epoch": 0.56,
"learning_rate": 0.0002,
"loss": 0.8048,
"step": 1780
},
{
"epoch": 0.57,
"learning_rate": 0.0002,
"loss": 0.7938,
"step": 1790
},
{
"epoch": 0.57,
"learning_rate": 0.0002,
"loss": 0.791,
"step": 1800
},
{
"epoch": 0.57,
"eval_loss": 0.744739830493927,
"eval_runtime": 111.1326,
"eval_samples_per_second": 8.998,
"eval_steps_per_second": 4.499,
"step": 1800
},
{
"epoch": 0.57,
"mmlu_eval_accuracy": 0.4764276491893982,
"mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
"mmlu_eval_accuracy_anatomy": 0.6428571428571429,
"mmlu_eval_accuracy_astronomy": 0.4375,
"mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
"mmlu_eval_accuracy_clinical_knowledge": 0.4482758620689655,
"mmlu_eval_accuracy_college_biology": 0.4375,
"mmlu_eval_accuracy_college_chemistry": 0.125,
"mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
"mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
"mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
"mmlu_eval_accuracy_college_physics": 0.36363636363636365,
"mmlu_eval_accuracy_computer_security": 0.45454545454545453,
"mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
"mmlu_eval_accuracy_econometrics": 0.16666666666666666,
"mmlu_eval_accuracy_electrical_engineering": 0.25,
"mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
"mmlu_eval_accuracy_formal_logic": 0.07142857142857142,
"mmlu_eval_accuracy_global_facts": 0.3,
"mmlu_eval_accuracy_high_school_biology": 0.375,
"mmlu_eval_accuracy_high_school_chemistry": 0.3181818181818182,
"mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
"mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.5238095238095238,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.4883720930232558,
"mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
"mmlu_eval_accuracy_high_school_microeconomics": 0.3076923076923077,
"mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
"mmlu_eval_accuracy_high_school_psychology": 0.9,
"mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
"mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
"mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
"mmlu_eval_accuracy_human_aging": 0.7391304347826086,
"mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
"mmlu_eval_accuracy_international_law": 0.8461538461538461,
"mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
"mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
"mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
"mmlu_eval_accuracy_management": 0.6363636363636364,
"mmlu_eval_accuracy_marketing": 0.88,
"mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
"mmlu_eval_accuracy_miscellaneous": 0.686046511627907,
"mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
"mmlu_eval_accuracy_moral_scenarios": 0.24,
"mmlu_eval_accuracy_nutrition": 0.6060606060606061,
"mmlu_eval_accuracy_philosophy": 0.5882352941176471,
"mmlu_eval_accuracy_prehistory": 0.42857142857142855,
"mmlu_eval_accuracy_professional_accounting": 0.3870967741935484,
"mmlu_eval_accuracy_professional_law": 0.3,
"mmlu_eval_accuracy_professional_medicine": 0.6451612903225806,
"mmlu_eval_accuracy_professional_psychology": 0.5072463768115942,
"mmlu_eval_accuracy_public_relations": 0.6666666666666666,
"mmlu_eval_accuracy_security_studies": 0.4444444444444444,
"mmlu_eval_accuracy_sociology": 0.5909090909090909,
"mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
"mmlu_eval_accuracy_virology": 0.5,
"mmlu_eval_accuracy_world_religions": 0.7368421052631579,
"mmlu_loss": 1.7224162761754218,
"step": 1800
},
{
"epoch": 0.57,
"learning_rate": 0.0002,
"loss": 0.7335,
"step": 1810
},
{
"epoch": 0.58,
"learning_rate": 0.0002,
"loss": 0.7762,
"step": 1820
},
{
"epoch": 0.58,
"learning_rate": 0.0002,
"loss": 0.75,
"step": 1830
},
{
"epoch": 0.58,
"learning_rate": 0.0002,
"loss": 0.7875,
"step": 1840
},
{
"epoch": 0.59,
"learning_rate": 0.0002,
"loss": 0.7749,
"step": 1850
},
{
"epoch": 0.59,
"learning_rate": 0.0002,
"loss": 0.8516,
"step": 1860
},
{
"epoch": 0.59,
"learning_rate": 0.0002,
"loss": 0.7729,
"step": 1870
},
{
"epoch": 0.6,
"learning_rate": 0.0002,
"loss": 0.7664,
"step": 1880
},
{
"epoch": 0.6,
"learning_rate": 0.0002,
"loss": 0.802,
"step": 1890
},
{
"epoch": 0.6,
"learning_rate": 0.0002,
"loss": 0.7791,
"step": 1900
},
{
"epoch": 0.61,
"learning_rate": 0.0002,
"loss": 0.8041,
"step": 1910
},
{
"epoch": 0.61,
"learning_rate": 0.0002,
"loss": 0.7671,
"step": 1920
},
{
"epoch": 0.61,
"learning_rate": 0.0002,
"loss": 0.7785,
"step": 1930
},
{
"epoch": 0.61,
"learning_rate": 0.0002,
"loss": 0.782,
"step": 1940
},
{
"epoch": 0.62,
"learning_rate": 0.0002,
"loss": 0.8032,
"step": 1950
},
{
"epoch": 0.62,
"learning_rate": 0.0002,
"loss": 0.8065,
"step": 1960
},
{
"epoch": 0.62,
"learning_rate": 0.0002,
"loss": 0.7713,
"step": 1970
},
{
"epoch": 0.63,
"learning_rate": 0.0002,
"loss": 0.7709,
"step": 1980
},
{
"epoch": 0.63,
"learning_rate": 0.0002,
"loss": 0.8036,
"step": 1990
},
{
"epoch": 0.63,
"learning_rate": 0.0002,
"loss": 0.7614,
"step": 2000
},
{
"epoch": 0.63,
"eval_loss": 0.7417653799057007,
"eval_runtime": 111.078,
"eval_samples_per_second": 9.003,
"eval_steps_per_second": 4.501,
"step": 2000
},
{
"epoch": 0.63,
"mmlu_eval_accuracy": 0.4656871532254676,
"mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
"mmlu_eval_accuracy_anatomy": 0.7142857142857143,
"mmlu_eval_accuracy_astronomy": 0.375,
"mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
"mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
"mmlu_eval_accuracy_college_biology": 0.4375,
"mmlu_eval_accuracy_college_chemistry": 0.25,
"mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
"mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
"mmlu_eval_accuracy_college_medicine": 0.3181818181818182,
"mmlu_eval_accuracy_college_physics": 0.36363636363636365,
"mmlu_eval_accuracy_computer_security": 0.2727272727272727,
"mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
"mmlu_eval_accuracy_econometrics": 0.16666666666666666,
"mmlu_eval_accuracy_electrical_engineering": 0.25,
"mmlu_eval_accuracy_elementary_mathematics": 0.2926829268292683,
"mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
"mmlu_eval_accuracy_global_facts": 0.5,
"mmlu_eval_accuracy_high_school_biology": 0.40625,
"mmlu_eval_accuracy_high_school_chemistry": 0.2727272727272727,
"mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
"mmlu_eval_accuracy_high_school_geography": 0.7727272727272727,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.47619047619047616,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.4418604651162791,
"mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
"mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
"mmlu_eval_accuracy_high_school_physics": 0.23529411764705882,
"mmlu_eval_accuracy_high_school_psychology": 0.8333333333333334,
"mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
"mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
"mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
"mmlu_eval_accuracy_human_aging": 0.6086956521739131,
"mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
"mmlu_eval_accuracy_international_law": 0.9230769230769231,
"mmlu_eval_accuracy_jurisprudence": 0.2727272727272727,
"mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
"mmlu_eval_accuracy_machine_learning": 0.09090909090909091,
"mmlu_eval_accuracy_management": 0.6363636363636364,
"mmlu_eval_accuracy_marketing": 0.76,
"mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
"mmlu_eval_accuracy_miscellaneous": 0.6627906976744186,
"mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
"mmlu_eval_accuracy_moral_scenarios": 0.25,
"mmlu_eval_accuracy_nutrition": 0.5757575757575758,
"mmlu_eval_accuracy_philosophy": 0.5588235294117647,
"mmlu_eval_accuracy_prehistory": 0.42857142857142855,
"mmlu_eval_accuracy_professional_accounting": 0.3225806451612903,
"mmlu_eval_accuracy_professional_law": 0.3058823529411765,
"mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
"mmlu_eval_accuracy_professional_psychology": 0.4927536231884058,
"mmlu_eval_accuracy_public_relations": 0.6666666666666666,
"mmlu_eval_accuracy_security_studies": 0.48148148148148145,
"mmlu_eval_accuracy_sociology": 0.6363636363636364,
"mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
"mmlu_eval_accuracy_virology": 0.5,
"mmlu_eval_accuracy_world_religions": 0.631578947368421,
"mmlu_loss": 1.632609389158204,
"step": 2000
},
{
"epoch": 0.64,
"learning_rate": 0.0002,
"loss": 0.8459,
"step": 2010
},
{
"epoch": 0.64,
"learning_rate": 0.0002,
"loss": 0.7348,
"step": 2020
},
{
"epoch": 0.64,
"learning_rate": 0.0002,
"loss": 0.811,
"step": 2030
},
{
"epoch": 0.65,
"learning_rate": 0.0002,
"loss": 0.7091,
"step": 2040
},
{
"epoch": 0.65,
"learning_rate": 0.0002,
"loss": 0.7715,
"step": 2050
},
{
"epoch": 0.65,
"learning_rate": 0.0002,
"loss": 0.8017,
"step": 2060
},
{
"epoch": 0.66,
"learning_rate": 0.0002,
"loss": 0.7734,
"step": 2070
},
{
"epoch": 0.66,
"learning_rate": 0.0002,
"loss": 0.8292,
"step": 2080
},
{
"epoch": 0.66,
"learning_rate": 0.0002,
"loss": 0.7873,
"step": 2090
},
{
"epoch": 0.67,
"learning_rate": 0.0002,
"loss": 0.757,
"step": 2100
},
{
"epoch": 0.67,
"learning_rate": 0.0002,
"loss": 0.7986,
"step": 2110
},
{
"epoch": 0.67,
"learning_rate": 0.0002,
"loss": 0.7848,
"step": 2120
},
{
"epoch": 0.67,
"learning_rate": 0.0002,
"loss": 0.7579,
"step": 2130
},
{
"epoch": 0.68,
"learning_rate": 0.0002,
"loss": 0.7683,
"step": 2140
},
{
"epoch": 0.68,
"learning_rate": 0.0002,
"loss": 0.7958,
"step": 2150
},
{
"epoch": 0.68,
"learning_rate": 0.0002,
"loss": 0.8009,
"step": 2160
},
{
"epoch": 0.69,
"learning_rate": 0.0002,
"loss": 0.7504,
"step": 2170
},
{
"epoch": 0.69,
"learning_rate": 0.0002,
"loss": 0.7558,
"step": 2180
},
{
"epoch": 0.69,
"learning_rate": 0.0002,
"loss": 0.7143,
"step": 2190
},
{
"epoch": 0.7,
"learning_rate": 0.0002,
"loss": 0.7767,
"step": 2200
},
{
"epoch": 0.7,
"eval_loss": 0.7396783232688904,
"eval_runtime": 111.0434,
"eval_samples_per_second": 9.005,
"eval_steps_per_second": 4.503,
"step": 2200
},
{
"epoch": 0.7,
"mmlu_eval_accuracy": 0.48937488654796385,
"mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
"mmlu_eval_accuracy_anatomy": 0.6428571428571429,
"mmlu_eval_accuracy_astronomy": 0.4375,
"mmlu_eval_accuracy_business_ethics": 0.6363636363636364,
"mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
"mmlu_eval_accuracy_college_biology": 0.4375,
"mmlu_eval_accuracy_college_chemistry": 0.25,
"mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
"mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
"mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
"mmlu_eval_accuracy_college_physics": 0.36363636363636365,
"mmlu_eval_accuracy_computer_security": 0.36363636363636365,
"mmlu_eval_accuracy_conceptual_physics": 0.46153846153846156,
"mmlu_eval_accuracy_econometrics": 0.16666666666666666,
"mmlu_eval_accuracy_electrical_engineering": 0.3125,
"mmlu_eval_accuracy_elementary_mathematics": 0.36585365853658536,
"mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
"mmlu_eval_accuracy_global_facts": 0.4,
"mmlu_eval_accuracy_high_school_biology": 0.375,
"mmlu_eval_accuracy_high_school_chemistry": 0.18181818181818182,
"mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
"mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.5714285714285714,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.4883720930232558,
"mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
"mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
"mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
"mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
"mmlu_eval_accuracy_high_school_statistics": 0.391304347826087,
"mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
"mmlu_eval_accuracy_high_school_world_history": 0.6538461538461539,
"mmlu_eval_accuracy_human_aging": 0.7391304347826086,
"mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
"mmlu_eval_accuracy_international_law": 0.8461538461538461,
"mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
"mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
"mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
"mmlu_eval_accuracy_management": 0.6363636363636364,
"mmlu_eval_accuracy_marketing": 0.88,
"mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
"mmlu_eval_accuracy_miscellaneous": 0.686046511627907,
"mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
"mmlu_eval_accuracy_moral_scenarios": 0.24,
"mmlu_eval_accuracy_nutrition": 0.6363636363636364,
"mmlu_eval_accuracy_philosophy": 0.5,
"mmlu_eval_accuracy_prehistory": 0.4,
"mmlu_eval_accuracy_professional_accounting": 0.3870967741935484,
"mmlu_eval_accuracy_professional_law": 0.3058823529411765,
"mmlu_eval_accuracy_professional_medicine": 0.6129032258064516,
"mmlu_eval_accuracy_professional_psychology": 0.5072463768115942,
"mmlu_eval_accuracy_public_relations": 0.5,
"mmlu_eval_accuracy_security_studies": 0.48148148148148145,
"mmlu_eval_accuracy_sociology": 0.6818181818181818,
"mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
"mmlu_eval_accuracy_virology": 0.5555555555555556,
"mmlu_eval_accuracy_world_religions": 0.7894736842105263,
"mmlu_loss": 1.464440327400327,
"step": 2200
},
{
"epoch": 0.7,
"learning_rate": 0.0002,
"loss": 0.7848,
"step": 2210
},
{
"epoch": 0.7,
"learning_rate": 0.0002,
"loss": 0.7864,
"step": 2220
},
{
"epoch": 0.71,
"learning_rate": 0.0002,
"loss": 0.7609,
"step": 2230
},
{
"epoch": 0.71,
"learning_rate": 0.0002,
"loss": 0.7782,
"step": 2240
},
{
"epoch": 0.71,
"learning_rate": 0.0002,
"loss": 0.7825,
"step": 2250
},
{
"epoch": 0.72,
"learning_rate": 0.0002,
"loss": 0.85,
"step": 2260
},
{
"epoch": 0.72,
"learning_rate": 0.0002,
"loss": 0.7802,
"step": 2270
},
{
"epoch": 0.72,
"learning_rate": 0.0002,
"loss": 0.7715,
"step": 2280
},
{
"epoch": 0.73,
"learning_rate": 0.0002,
"loss": 0.8032,
"step": 2290
},
{
"epoch": 0.73,
"learning_rate": 0.0002,
"loss": 0.854,
"step": 2300
},
{
"epoch": 0.73,
"learning_rate": 0.0002,
"loss": 0.8123,
"step": 2310
},
{
"epoch": 0.74,
"learning_rate": 0.0002,
"loss": 0.8101,
"step": 2320
},
{
"epoch": 0.74,
"learning_rate": 0.0002,
"loss": 0.8075,
"step": 2330
},
{
"epoch": 0.74,
"learning_rate": 0.0002,
"loss": 0.817,
"step": 2340
},
{
"epoch": 0.74,
"learning_rate": 0.0002,
"loss": 0.7747,
"step": 2350
},
{
"epoch": 0.75,
"learning_rate": 0.0002,
"loss": 0.8012,
"step": 2360
},
{
"epoch": 0.75,
"learning_rate": 0.0002,
"loss": 0.7893,
"step": 2370
},
{
"epoch": 0.75,
"learning_rate": 0.0002,
"loss": 0.7661,
"step": 2380
},
{
"epoch": 0.76,
"learning_rate": 0.0002,
"loss": 0.7711,
"step": 2390
},
{
"epoch": 0.76,
"learning_rate": 0.0002,
"loss": 0.8136,
"step": 2400
},
{
"epoch": 0.76,
"eval_loss": 0.7395493388175964,
"eval_runtime": 110.7923,
"eval_samples_per_second": 9.026,
"eval_steps_per_second": 4.513,
"step": 2400
},
{
"epoch": 0.76,
"mmlu_eval_accuracy": 0.4873047408851529,
"mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
"mmlu_eval_accuracy_anatomy": 0.7142857142857143,
"mmlu_eval_accuracy_astronomy": 0.4375,
"mmlu_eval_accuracy_business_ethics": 0.6363636363636364,
"mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
"mmlu_eval_accuracy_college_biology": 0.4375,
"mmlu_eval_accuracy_college_chemistry": 0.25,
"mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
"mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
"mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
"mmlu_eval_accuracy_college_physics": 0.2727272727272727,
"mmlu_eval_accuracy_computer_security": 0.36363636363636365,
"mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
"mmlu_eval_accuracy_econometrics": 0.16666666666666666,
"mmlu_eval_accuracy_electrical_engineering": 0.3125,
"mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
"mmlu_eval_accuracy_formal_logic": 0.07142857142857142,
"mmlu_eval_accuracy_global_facts": 0.4,
"mmlu_eval_accuracy_high_school_biology": 0.40625,
"mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
"mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
"mmlu_eval_accuracy_high_school_geography": 0.9090909090909091,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.5714285714285714,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.4418604651162791,
"mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
"mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
"mmlu_eval_accuracy_high_school_physics": 0.058823529411764705,
"mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
"mmlu_eval_accuracy_high_school_statistics": 0.391304347826087,
"mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
"mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
"mmlu_eval_accuracy_human_aging": 0.7391304347826086,
"mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
"mmlu_eval_accuracy_international_law": 0.8461538461538461,
"mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
"mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
"mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
"mmlu_eval_accuracy_management": 0.6363636363636364,
"mmlu_eval_accuracy_marketing": 0.88,
"mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
"mmlu_eval_accuracy_miscellaneous": 0.6744186046511628,
"mmlu_eval_accuracy_moral_disputes": 0.5,
"mmlu_eval_accuracy_moral_scenarios": 0.24,
"mmlu_eval_accuracy_nutrition": 0.6363636363636364,
"mmlu_eval_accuracy_philosophy": 0.5294117647058824,
"mmlu_eval_accuracy_prehistory": 0.4,
"mmlu_eval_accuracy_professional_accounting": 0.41935483870967744,
"mmlu_eval_accuracy_professional_law": 0.3235294117647059,
"mmlu_eval_accuracy_professional_medicine": 0.5806451612903226,
"mmlu_eval_accuracy_professional_psychology": 0.5072463768115942,
"mmlu_eval_accuracy_public_relations": 0.6666666666666666,
"mmlu_eval_accuracy_security_studies": 0.48148148148148145,
"mmlu_eval_accuracy_sociology": 0.6818181818181818,
"mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
"mmlu_eval_accuracy_virology": 0.5555555555555556,
"mmlu_eval_accuracy_world_religions": 0.7368421052631579,
"mmlu_loss": 1.3917097181237397,
"step": 2400
},
{
"epoch": 0.76,
"learning_rate": 0.0002,
"loss": 0.7579,
"step": 2410
},
{
"epoch": 0.77,
"learning_rate": 0.0002,
"loss": 0.8421,
"step": 2420
},
{
"epoch": 0.77,
"learning_rate": 0.0002,
"loss": 0.7957,
"step": 2430
},
{
"epoch": 0.77,
"learning_rate": 0.0002,
"loss": 0.7452,
"step": 2440
},
{
"epoch": 0.78,
"learning_rate": 0.0002,
"loss": 0.8478,
"step": 2450
},
{
"epoch": 0.78,
"learning_rate": 0.0002,
"loss": 0.8443,
"step": 2460
},
{
"epoch": 0.78,
"learning_rate": 0.0002,
"loss": 0.8409,
"step": 2470
},
{
"epoch": 0.79,
"learning_rate": 0.0002,
"loss": 0.8168,
"step": 2480
},
{
"epoch": 0.79,
"learning_rate": 0.0002,
"loss": 0.7648,
"step": 2490
},
{
"epoch": 0.79,
"learning_rate": 0.0002,
"loss": 0.7938,
"step": 2500
},
{
"epoch": 0.8,
"learning_rate": 0.0002,
"loss": 0.791,
"step": 2510
},
{
"epoch": 0.8,
"learning_rate": 0.0002,
"loss": 0.7691,
"step": 2520
},
{
"epoch": 0.8,
"learning_rate": 0.0002,
"loss": 0.7648,
"step": 2530
},
{
"epoch": 0.8,
"learning_rate": 0.0002,
"loss": 0.7575,
"step": 2540
},
{
"epoch": 0.81,
"learning_rate": 0.0002,
"loss": 0.7797,
"step": 2550
},
{
"epoch": 0.81,
"learning_rate": 0.0002,
"loss": 0.7742,
"step": 2560
},
{
"epoch": 0.81,
"learning_rate": 0.0002,
"loss": 0.8391,
"step": 2570
},
{
"epoch": 0.82,
"learning_rate": 0.0002,
"loss": 0.7746,
"step": 2580
},
{
"epoch": 0.82,
"learning_rate": 0.0002,
"loss": 0.7534,
"step": 2590
},
{
"epoch": 0.82,
"learning_rate": 0.0002,
"loss": 0.7395,
"step": 2600
},
{
"epoch": 0.82,
"eval_loss": 0.7380212545394897,
"eval_runtime": 111.0553,
"eval_samples_per_second": 9.005,
"eval_steps_per_second": 4.502,
"step": 2600
},
{
"epoch": 0.82,
"mmlu_eval_accuracy": 0.4979448031756729,
"mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
"mmlu_eval_accuracy_anatomy": 0.7142857142857143,
"mmlu_eval_accuracy_astronomy": 0.4375,
"mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
"mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
"mmlu_eval_accuracy_college_biology": 0.5,
"mmlu_eval_accuracy_college_chemistry": 0.25,
"mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
"mmlu_eval_accuracy_college_mathematics": 0.36363636363636365,
"mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
"mmlu_eval_accuracy_college_physics": 0.45454545454545453,
"mmlu_eval_accuracy_computer_security": 0.36363636363636365,
"mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
"mmlu_eval_accuracy_econometrics": 0.16666666666666666,
"mmlu_eval_accuracy_electrical_engineering": 0.25,
"mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
"mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
"mmlu_eval_accuracy_global_facts": 0.4,
"mmlu_eval_accuracy_high_school_biology": 0.40625,
"mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
"mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
"mmlu_eval_accuracy_high_school_geography": 0.9090909090909091,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.5116279069767442,
"mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
"mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231,
"mmlu_eval_accuracy_high_school_physics": 0.058823529411764705,
"mmlu_eval_accuracy_high_school_psychology": 0.8833333333333333,
"mmlu_eval_accuracy_high_school_statistics": 0.391304347826087,
"mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
"mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
"mmlu_eval_accuracy_human_aging": 0.7391304347826086,
"mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
"mmlu_eval_accuracy_international_law": 0.9230769230769231,
"mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
"mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
"mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
"mmlu_eval_accuracy_management": 0.6363636363636364,
"mmlu_eval_accuracy_marketing": 0.88,
"mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
"mmlu_eval_accuracy_miscellaneous": 0.6627906976744186,
"mmlu_eval_accuracy_moral_disputes": 0.5263157894736842,
"mmlu_eval_accuracy_moral_scenarios": 0.24,
"mmlu_eval_accuracy_nutrition": 0.6363636363636364,
"mmlu_eval_accuracy_philosophy": 0.5294117647058824,
"mmlu_eval_accuracy_prehistory": 0.45714285714285713,
"mmlu_eval_accuracy_professional_accounting": 0.45161290322580644,
"mmlu_eval_accuracy_professional_law": 0.34705882352941175,
"mmlu_eval_accuracy_professional_medicine": 0.6451612903225806,
"mmlu_eval_accuracy_professional_psychology": 0.5217391304347826,
"mmlu_eval_accuracy_public_relations": 0.6666666666666666,
"mmlu_eval_accuracy_security_studies": 0.5185185185185185,
"mmlu_eval_accuracy_sociology": 0.6818181818181818,
"mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
"mmlu_eval_accuracy_virology": 0.5555555555555556,
"mmlu_eval_accuracy_world_religions": 0.6842105263157895,
"mmlu_loss": 1.3912735815614696,
"step": 2600
},
{
"epoch": 0.83,
"learning_rate": 0.0002,
"loss": 0.7792,
"step": 2610
},
{
"epoch": 0.83,
"learning_rate": 0.0002,
"loss": 0.7228,
"step": 2620
},
{
"epoch": 0.83,
"learning_rate": 0.0002,
"loss": 0.7294,
"step": 2630
},
{
"epoch": 0.84,
"learning_rate": 0.0002,
"loss": 0.6968,
"step": 2640
},
{
"epoch": 0.84,
"learning_rate": 0.0002,
"loss": 0.7463,
"step": 2650
},
{
"epoch": 0.84,
"learning_rate": 0.0002,
"loss": 0.7588,
"step": 2660
},
{
"epoch": 0.85,
"learning_rate": 0.0002,
"loss": 0.7406,
"step": 2670
},
{
"epoch": 0.85,
"learning_rate": 0.0002,
"loss": 0.7817,
"step": 2680
},
{
"epoch": 0.85,
"learning_rate": 0.0002,
"loss": 0.808,
"step": 2690
},
{
"epoch": 0.86,
"learning_rate": 0.0002,
"loss": 0.771,
"step": 2700
},
{
"epoch": 0.86,
"learning_rate": 0.0002,
"loss": 0.7678,
"step": 2710
},
{
"epoch": 0.86,
"learning_rate": 0.0002,
"loss": 0.7885,
"step": 2720
},
{
"epoch": 0.87,
"learning_rate": 0.0002,
"loss": 0.8297,
"step": 2730
},
{
"epoch": 0.87,
"learning_rate": 0.0002,
"loss": 0.8218,
"step": 2740
},
{
"epoch": 0.87,
"learning_rate": 0.0002,
"loss": 0.7742,
"step": 2750
},
{
"epoch": 0.87,
"learning_rate": 0.0002,
"loss": 0.7512,
"step": 2760
},
{
"epoch": 0.88,
"learning_rate": 0.0002,
"loss": 0.7508,
"step": 2770
},
{
"epoch": 0.88,
"learning_rate": 0.0002,
"loss": 0.7947,
"step": 2780
},
{
"epoch": 0.88,
"learning_rate": 0.0002,
"loss": 0.7399,
"step": 2790
},
{
"epoch": 0.89,
"learning_rate": 0.0002,
"loss": 0.7589,
"step": 2800
},
{
"epoch": 0.89,
"eval_loss": 0.7355720400810242,
"eval_runtime": 110.8718,
"eval_samples_per_second": 9.019,
"eval_steps_per_second": 4.51,
"step": 2800
},
{
"epoch": 0.89,
"mmlu_eval_accuracy": 0.48346048137181885,
"mmlu_eval_accuracy_abstract_algebra": 0.18181818181818182,
"mmlu_eval_accuracy_anatomy": 0.7142857142857143,
"mmlu_eval_accuracy_astronomy": 0.4375,
"mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
"mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
"mmlu_eval_accuracy_college_biology": 0.5,
"mmlu_eval_accuracy_college_chemistry": 0.25,
"mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
"mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
"mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
"mmlu_eval_accuracy_college_physics": 0.36363636363636365,
"mmlu_eval_accuracy_computer_security": 0.36363636363636365,
"mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
"mmlu_eval_accuracy_econometrics": 0.16666666666666666,
"mmlu_eval_accuracy_electrical_engineering": 0.25,
"mmlu_eval_accuracy_elementary_mathematics": 0.3902439024390244,
"mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
"mmlu_eval_accuracy_global_facts": 0.3,
"mmlu_eval_accuracy_high_school_biology": 0.375,
"mmlu_eval_accuracy_high_school_chemistry": 0.2727272727272727,
"mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_european_history": 0.7222222222222222,
"mmlu_eval_accuracy_high_school_geography": 0.9090909090909091,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.4186046511627907,
"mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
"mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231,
"mmlu_eval_accuracy_high_school_physics": 0.17647058823529413,
"mmlu_eval_accuracy_high_school_psychology": 0.8833333333333333,
"mmlu_eval_accuracy_high_school_statistics": 0.391304347826087,
"mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
"mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
"mmlu_eval_accuracy_human_aging": 0.7391304347826086,
"mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
"mmlu_eval_accuracy_international_law": 0.8461538461538461,
"mmlu_eval_accuracy_jurisprudence": 0.2727272727272727,
"mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
"mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
"mmlu_eval_accuracy_management": 0.7272727272727273,
"mmlu_eval_accuracy_marketing": 0.84,
"mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
"mmlu_eval_accuracy_miscellaneous": 0.6627906976744186,
"mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
"mmlu_eval_accuracy_moral_scenarios": 0.24,
"mmlu_eval_accuracy_nutrition": 0.696969696969697,
"mmlu_eval_accuracy_philosophy": 0.47058823529411764,
"mmlu_eval_accuracy_prehistory": 0.42857142857142855,
"mmlu_eval_accuracy_professional_accounting": 0.45161290322580644,
"mmlu_eval_accuracy_professional_law": 0.3058823529411765,
"mmlu_eval_accuracy_professional_medicine": 0.5806451612903226,
"mmlu_eval_accuracy_professional_psychology": 0.5362318840579711,
"mmlu_eval_accuracy_public_relations": 0.5,
"mmlu_eval_accuracy_security_studies": 0.48148148148148145,
"mmlu_eval_accuracy_sociology": 0.6818181818181818,
"mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
"mmlu_eval_accuracy_virology": 0.5555555555555556,
"mmlu_eval_accuracy_world_religions": 0.7368421052631579,
"mmlu_loss": 1.3572583458831353,
"step": 2800
},
{
"epoch": 0.89,
"learning_rate": 0.0002,
"loss": 0.8099,
"step": 2810
},
{
"epoch": 0.89,
"learning_rate": 0.0002,
"loss": 0.7303,
"step": 2820
},
{
"epoch": 0.9,
"learning_rate": 0.0002,
"loss": 0.8154,
"step": 2830
},
{
"epoch": 0.9,
"learning_rate": 0.0002,
"loss": 0.8166,
"step": 2840
},
{
"epoch": 0.9,
"learning_rate": 0.0002,
"loss": 0.7425,
"step": 2850
},
{
"epoch": 0.91,
"learning_rate": 0.0002,
"loss": 0.8223,
"step": 2860
},
{
"epoch": 0.91,
"learning_rate": 0.0002,
"loss": 0.7443,
"step": 2870
},
{
"epoch": 0.91,
"learning_rate": 0.0002,
"loss": 0.7733,
"step": 2880
},
{
"epoch": 0.92,
"learning_rate": 0.0002,
"loss": 0.8092,
"step": 2890
},
{
"epoch": 0.92,
"learning_rate": 0.0002,
"loss": 0.7371,
"step": 2900
},
{
"epoch": 0.92,
"learning_rate": 0.0002,
"loss": 0.7323,
"step": 2910
},
{
"epoch": 0.93,
"learning_rate": 0.0002,
"loss": 0.7716,
"step": 2920
},
{
"epoch": 0.93,
"learning_rate": 0.0002,
"loss": 0.7824,
"step": 2930
},
{
"epoch": 0.93,
"learning_rate": 0.0002,
"loss": 0.7373,
"step": 2940
},
{
"epoch": 0.93,
"learning_rate": 0.0002,
"loss": 0.7384,
"step": 2950
},
{
"epoch": 0.94,
"learning_rate": 0.0002,
"loss": 0.7598,
"step": 2960
},
{
"epoch": 0.94,
"learning_rate": 0.0002,
"loss": 0.7211,
"step": 2970
},
{
"epoch": 0.94,
"learning_rate": 0.0002,
"loss": 0.7886,
"step": 2980
},
{
"epoch": 0.95,
"learning_rate": 0.0002,
"loss": 0.8107,
"step": 2990
},
{
"epoch": 0.95,
"learning_rate": 0.0002,
"loss": 0.8389,
"step": 3000
},
{
"epoch": 0.95,
"eval_loss": 0.7343361377716064,
"eval_runtime": 110.9061,
"eval_samples_per_second": 9.017,
"eval_steps_per_second": 4.508,
"step": 3000
},
{
"epoch": 0.95,
"mmlu_eval_accuracy": 0.5003901788212859,
"mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
"mmlu_eval_accuracy_anatomy": 0.7142857142857143,
"mmlu_eval_accuracy_astronomy": 0.375,
"mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
"mmlu_eval_accuracy_clinical_knowledge": 0.5517241379310345,
"mmlu_eval_accuracy_college_biology": 0.4375,
"mmlu_eval_accuracy_college_chemistry": 0.25,
"mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
"mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
"mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
"mmlu_eval_accuracy_college_physics": 0.36363636363636365,
"mmlu_eval_accuracy_computer_security": 0.5454545454545454,
"mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
"mmlu_eval_accuracy_econometrics": 0.25,
"mmlu_eval_accuracy_electrical_engineering": 0.25,
"mmlu_eval_accuracy_elementary_mathematics": 0.3902439024390244,
"mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
"mmlu_eval_accuracy_global_facts": 0.5,
"mmlu_eval_accuracy_high_school_biology": 0.53125,
"mmlu_eval_accuracy_high_school_chemistry": 0.18181818181818182,
"mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_european_history": 0.6666666666666666,
"mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.4186046511627907,
"mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
"mmlu_eval_accuracy_high_school_microeconomics": 0.5,
"mmlu_eval_accuracy_high_school_physics": 0.17647058823529413,
"mmlu_eval_accuracy_high_school_psychology": 0.8833333333333333,
"mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
"mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
"mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
"mmlu_eval_accuracy_human_aging": 0.7391304347826086,
"mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
"mmlu_eval_accuracy_international_law": 0.8461538461538461,
"mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
"mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
"mmlu_eval_accuracy_machine_learning": 0.36363636363636365,
"mmlu_eval_accuracy_management": 0.6363636363636364,
"mmlu_eval_accuracy_marketing": 0.84,
"mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
"mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
"mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
"mmlu_eval_accuracy_moral_scenarios": 0.22,
"mmlu_eval_accuracy_nutrition": 0.6363636363636364,
"mmlu_eval_accuracy_philosophy": 0.5,
"mmlu_eval_accuracy_prehistory": 0.5714285714285714,
"mmlu_eval_accuracy_professional_accounting": 0.3548387096774194,
"mmlu_eval_accuracy_professional_law": 0.35294117647058826,
"mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
"mmlu_eval_accuracy_professional_psychology": 0.4927536231884058,
"mmlu_eval_accuracy_public_relations": 0.5833333333333334,
"mmlu_eval_accuracy_security_studies": 0.4444444444444444,
"mmlu_eval_accuracy_sociology": 0.7272727272727273,
"mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
"mmlu_eval_accuracy_virology": 0.5555555555555556,
"mmlu_eval_accuracy_world_religions": 0.7368421052631579,
"mmlu_loss": 1.217419229584014,
"step": 3000
},
{
"epoch": 0.95,
"learning_rate": 0.0002,
"loss": 0.7964,
"step": 3010
},
{
"epoch": 0.96,
"learning_rate": 0.0002,
"loss": 0.7841,
"step": 3020
},
{
"epoch": 0.96,
"learning_rate": 0.0002,
"loss": 0.7951,
"step": 3030
},
{
"epoch": 0.96,
"learning_rate": 0.0002,
"loss": 0.7523,
"step": 3040
},
{
"epoch": 0.97,
"learning_rate": 0.0002,
"loss": 0.7729,
"step": 3050
},
{
"epoch": 0.97,
"learning_rate": 0.0002,
"loss": 0.705,
"step": 3060
},
{
"epoch": 0.97,
"learning_rate": 0.0002,
"loss": 0.7745,
"step": 3070
},
{
"epoch": 0.98,
"learning_rate": 0.0002,
"loss": 0.7992,
"step": 3080
},
{
"epoch": 0.98,
"learning_rate": 0.0002,
"loss": 0.7836,
"step": 3090
},
{
"epoch": 0.98,
"learning_rate": 0.0002,
"loss": 0.7347,
"step": 3100
},
{
"epoch": 0.99,
"learning_rate": 0.0002,
"loss": 0.7213,
"step": 3110
},
{
"epoch": 0.99,
"learning_rate": 0.0002,
"loss": 0.7427,
"step": 3120
},
{
"epoch": 0.99,
"learning_rate": 0.0002,
"loss": 0.7799,
"step": 3130
},
{
"epoch": 0.99,
"learning_rate": 0.0002,
"loss": 0.825,
"step": 3140
},
{
"epoch": 1.0,
"learning_rate": 0.0002,
"loss": 0.7389,
"step": 3150
},
{
"epoch": 1.0,
"learning_rate": 0.0002,
"loss": 0.8275,
"step": 3160
},
{
"epoch": 1.0,
"learning_rate": 0.0002,
"loss": 0.7484,
"step": 3170
},
{
"epoch": 1.01,
"learning_rate": 0.0002,
"loss": 0.7419,
"step": 3180
},
{
"epoch": 1.01,
"learning_rate": 0.0002,
"loss": 0.6543,
"step": 3190
},
{
"epoch": 1.01,
"learning_rate": 0.0002,
"loss": 0.6952,
"step": 3200
},
{
"epoch": 1.01,
"eval_loss": 0.7377473711967468,
"eval_runtime": 111.1786,
"eval_samples_per_second": 8.995,
"eval_steps_per_second": 4.497,
"step": 3200
},
{
"epoch": 1.01,
"mmlu_eval_accuracy": 0.48409598343583005,
"mmlu_eval_accuracy_abstract_algebra": 0.18181818181818182,
"mmlu_eval_accuracy_anatomy": 0.7142857142857143,
"mmlu_eval_accuracy_astronomy": 0.4375,
"mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
"mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
"mmlu_eval_accuracy_college_biology": 0.4375,
"mmlu_eval_accuracy_college_chemistry": 0.25,
"mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
"mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
"mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
"mmlu_eval_accuracy_college_physics": 0.36363636363636365,
"mmlu_eval_accuracy_computer_security": 0.36363636363636365,
"mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
"mmlu_eval_accuracy_econometrics": 0.25,
"mmlu_eval_accuracy_electrical_engineering": 0.25,
"mmlu_eval_accuracy_elementary_mathematics": 0.43902439024390244,
"mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
"mmlu_eval_accuracy_global_facts": 0.5,
"mmlu_eval_accuracy_high_school_biology": 0.375,
"mmlu_eval_accuracy_high_school_chemistry": 0.18181818181818182,
"mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_european_history": 0.7222222222222222,
"mmlu_eval_accuracy_high_school_geography": 0.9090909090909091,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.4883720930232558,
"mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
"mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231,
"mmlu_eval_accuracy_high_school_physics": 0.058823529411764705,
"mmlu_eval_accuracy_high_school_psychology": 0.85,
"mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
"mmlu_eval_accuracy_high_school_us_history": 0.5454545454545454,
"mmlu_eval_accuracy_high_school_world_history": 0.7692307692307693,
"mmlu_eval_accuracy_human_aging": 0.7391304347826086,
"mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
"mmlu_eval_accuracy_international_law": 0.7692307692307693,
"mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
"mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
"mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
"mmlu_eval_accuracy_management": 0.6363636363636364,
"mmlu_eval_accuracy_marketing": 0.88,
"mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
"mmlu_eval_accuracy_miscellaneous": 0.686046511627907,
"mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
"mmlu_eval_accuracy_moral_scenarios": 0.25,
"mmlu_eval_accuracy_nutrition": 0.6666666666666666,
"mmlu_eval_accuracy_philosophy": 0.5294117647058824,
"mmlu_eval_accuracy_prehistory": 0.5142857142857142,
"mmlu_eval_accuracy_professional_accounting": 0.41935483870967744,
"mmlu_eval_accuracy_professional_law": 0.35294117647058826,
"mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
"mmlu_eval_accuracy_professional_psychology": 0.4927536231884058,
"mmlu_eval_accuracy_public_relations": 0.5,
"mmlu_eval_accuracy_security_studies": 0.4444444444444444,
"mmlu_eval_accuracy_sociology": 0.6363636363636364,
"mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
"mmlu_eval_accuracy_virology": 0.5555555555555556,
"mmlu_eval_accuracy_world_religions": 0.6842105263157895,
"mmlu_loss": 1.4613330938486144,
"step": 3200
},
{
"epoch": 1.02,
"learning_rate": 0.0002,
"loss": 0.664,
"step": 3210
},
{
"epoch": 1.02,
"learning_rate": 0.0002,
"loss": 0.6673,
"step": 3220
},
{
"epoch": 1.02,
"learning_rate": 0.0002,
"loss": 0.703,
"step": 3230
},
{
"epoch": 1.03,
"learning_rate": 0.0002,
"loss": 0.763,
"step": 3240
},
{
"epoch": 1.03,
"learning_rate": 0.0002,
"loss": 0.6587,
"step": 3250
},
{
"epoch": 1.03,
"learning_rate": 0.0002,
"loss": 0.6725,
"step": 3260
},
{
"epoch": 1.04,
"learning_rate": 0.0002,
"loss": 0.7518,
"step": 3270
},
{
"epoch": 1.04,
"learning_rate": 0.0002,
"loss": 0.7182,
"step": 3280
},
{
"epoch": 1.04,
"learning_rate": 0.0002,
"loss": 0.6655,
"step": 3290
},
{
"epoch": 1.05,
"learning_rate": 0.0002,
"loss": 0.6333,
"step": 3300
},
{
"epoch": 1.05,
"learning_rate": 0.0002,
"loss": 0.6699,
"step": 3310
},
{
"epoch": 1.05,
"learning_rate": 0.0002,
"loss": 0.659,
"step": 3320
},
{
"epoch": 1.06,
"learning_rate": 0.0002,
"loss": 0.7138,
"step": 3330
},
{
"epoch": 1.06,
"learning_rate": 0.0002,
"loss": 0.7309,
"step": 3340
},
{
"epoch": 1.06,
"learning_rate": 0.0002,
"loss": 0.7251,
"step": 3350
},
{
"epoch": 1.06,
"learning_rate": 0.0002,
"loss": 0.6712,
"step": 3360
},
{
"epoch": 1.07,
"learning_rate": 0.0002,
"loss": 0.6527,
"step": 3370
},
{
"epoch": 1.07,
"learning_rate": 0.0002,
"loss": 0.7752,
"step": 3380
},
{
"epoch": 1.07,
"learning_rate": 0.0002,
"loss": 0.6896,
"step": 3390
},
{
"epoch": 1.08,
"learning_rate": 0.0002,
"loss": 0.7441,
"step": 3400
},
{
"epoch": 1.08,
"eval_loss": 0.7388539910316467,
"eval_runtime": 111.0879,
"eval_samples_per_second": 9.002,
"eval_steps_per_second": 4.501,
"step": 3400
},
{
"epoch": 1.08,
"mmlu_eval_accuracy": 0.49153955280819217,
"mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
"mmlu_eval_accuracy_anatomy": 0.6428571428571429,
"mmlu_eval_accuracy_astronomy": 0.4375,
"mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
"mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
"mmlu_eval_accuracy_college_biology": 0.375,
"mmlu_eval_accuracy_college_chemistry": 0.25,
"mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
"mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
"mmlu_eval_accuracy_college_medicine": 0.3181818181818182,
"mmlu_eval_accuracy_college_physics": 0.36363636363636365,
"mmlu_eval_accuracy_computer_security": 0.36363636363636365,
"mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
"mmlu_eval_accuracy_econometrics": 0.25,
"mmlu_eval_accuracy_electrical_engineering": 0.3125,
"mmlu_eval_accuracy_elementary_mathematics": 0.3902439024390244,
"mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
"mmlu_eval_accuracy_global_facts": 0.5,
"mmlu_eval_accuracy_high_school_biology": 0.40625,
"mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
"mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_european_history": 0.6666666666666666,
"mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.4418604651162791,
"mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
"mmlu_eval_accuracy_high_school_microeconomics": 0.46153846153846156,
"mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
"mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
"mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
"mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
"mmlu_eval_accuracy_high_school_world_history": 0.7692307692307693,
"mmlu_eval_accuracy_human_aging": 0.7391304347826086,
"mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
"mmlu_eval_accuracy_international_law": 0.7692307692307693,
"mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
"mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
"mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
"mmlu_eval_accuracy_management": 0.6363636363636364,
"mmlu_eval_accuracy_marketing": 0.88,
"mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
"mmlu_eval_accuracy_miscellaneous": 0.6976744186046512,
"mmlu_eval_accuracy_moral_disputes": 0.5,
"mmlu_eval_accuracy_moral_scenarios": 0.24,
"mmlu_eval_accuracy_nutrition": 0.6363636363636364,
"mmlu_eval_accuracy_philosophy": 0.5294117647058824,
"mmlu_eval_accuracy_prehistory": 0.4857142857142857,
"mmlu_eval_accuracy_professional_accounting": 0.3548387096774194,
"mmlu_eval_accuracy_professional_law": 0.3235294117647059,
"mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
"mmlu_eval_accuracy_professional_psychology": 0.5217391304347826,
"mmlu_eval_accuracy_public_relations": 0.5833333333333334,
"mmlu_eval_accuracy_security_studies": 0.4444444444444444,
"mmlu_eval_accuracy_sociology": 0.5909090909090909,
"mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
"mmlu_eval_accuracy_virology": 0.5555555555555556,
"mmlu_eval_accuracy_world_religions": 0.7894736842105263,
"mmlu_loss": 1.3683368821990707,
"step": 3400
},
{
"epoch": 1.08,
"learning_rate": 0.0002,
"loss": 0.723,
"step": 3410
},
{
"epoch": 1.08,
"learning_rate": 0.0002,
"loss": 0.7545,
"step": 3420
},
{
"epoch": 1.09,
"learning_rate": 0.0002,
"loss": 0.6885,
"step": 3430
},
{
"epoch": 1.09,
"learning_rate": 0.0002,
"loss": 0.7021,
"step": 3440
},
{
"epoch": 1.09,
"learning_rate": 0.0002,
"loss": 0.7284,
"step": 3450
},
{
"epoch": 1.1,
"learning_rate": 0.0002,
"loss": 0.6811,
"step": 3460
},
{
"epoch": 1.1,
"learning_rate": 0.0002,
"loss": 0.7076,
"step": 3470
},
{
"epoch": 1.1,
"learning_rate": 0.0002,
"loss": 0.7074,
"step": 3480
},
{
"epoch": 1.11,
"learning_rate": 0.0002,
"loss": 0.6734,
"step": 3490
},
{
"epoch": 1.11,
"learning_rate": 0.0002,
"loss": 0.7243,
"step": 3500
},
{
"epoch": 1.11,
"learning_rate": 0.0002,
"loss": 0.7347,
"step": 3510
},
{
"epoch": 1.12,
"learning_rate": 0.0002,
"loss": 0.6888,
"step": 3520
},
{
"epoch": 1.12,
"learning_rate": 0.0002,
"loss": 0.7332,
"step": 3530
},
{
"epoch": 1.12,
"learning_rate": 0.0002,
"loss": 0.7117,
"step": 3540
},
{
"epoch": 1.12,
"learning_rate": 0.0002,
"loss": 0.6575,
"step": 3550
},
{
"epoch": 1.13,
"learning_rate": 0.0002,
"loss": 0.729,
"step": 3560
},
{
"epoch": 1.13,
"learning_rate": 0.0002,
"loss": 0.6825,
"step": 3570
},
{
"epoch": 1.13,
"learning_rate": 0.0002,
"loss": 0.6935,
"step": 3580
},
{
"epoch": 1.14,
"learning_rate": 0.0002,
"loss": 0.7004,
"step": 3590
},
{
"epoch": 1.14,
"learning_rate": 0.0002,
"loss": 0.7237,
"step": 3600
},
{
"epoch": 1.14,
"eval_loss": 0.7381147742271423,
"eval_runtime": 111.0101,
"eval_samples_per_second": 9.008,
"eval_steps_per_second": 4.504,
"step": 3600
},
{
"epoch": 1.14,
"mmlu_eval_accuracy": 0.49167050353968145,
"mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
"mmlu_eval_accuracy_anatomy": 0.5714285714285714,
"mmlu_eval_accuracy_astronomy": 0.4375,
"mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
"mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
"mmlu_eval_accuracy_college_biology": 0.5,
"mmlu_eval_accuracy_college_chemistry": 0.25,
"mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
"mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
"mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
"mmlu_eval_accuracy_college_physics": 0.36363636363636365,
"mmlu_eval_accuracy_computer_security": 0.36363636363636365,
"mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
"mmlu_eval_accuracy_econometrics": 0.16666666666666666,
"mmlu_eval_accuracy_electrical_engineering": 0.25,
"mmlu_eval_accuracy_elementary_mathematics": 0.3902439024390244,
"mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
"mmlu_eval_accuracy_global_facts": 0.5,
"mmlu_eval_accuracy_high_school_biology": 0.46875,
"mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
"mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
"mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.5116279069767442,
"mmlu_eval_accuracy_high_school_mathematics": 0.13793103448275862,
"mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231,
"mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
"mmlu_eval_accuracy_high_school_psychology": 0.85,
"mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
"mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
"mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
"mmlu_eval_accuracy_human_aging": 0.7391304347826086,
"mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
"mmlu_eval_accuracy_international_law": 0.8461538461538461,
"mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
"mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
"mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
"mmlu_eval_accuracy_management": 0.7272727272727273,
"mmlu_eval_accuracy_marketing": 0.84,
"mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
"mmlu_eval_accuracy_miscellaneous": 0.7209302325581395,
"mmlu_eval_accuracy_moral_disputes": 0.5263157894736842,
"mmlu_eval_accuracy_moral_scenarios": 0.24,
"mmlu_eval_accuracy_nutrition": 0.6666666666666666,
"mmlu_eval_accuracy_philosophy": 0.5,
"mmlu_eval_accuracy_prehistory": 0.4857142857142857,
"mmlu_eval_accuracy_professional_accounting": 0.3225806451612903,
"mmlu_eval_accuracy_professional_law": 0.3352941176470588,
"mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
"mmlu_eval_accuracy_professional_psychology": 0.5362318840579711,
"mmlu_eval_accuracy_public_relations": 0.5,
"mmlu_eval_accuracy_security_studies": 0.4444444444444444,
"mmlu_eval_accuracy_sociology": 0.7272727272727273,
"mmlu_eval_accuracy_us_foreign_policy": 0.8181818181818182,
"mmlu_eval_accuracy_virology": 0.5555555555555556,
"mmlu_eval_accuracy_world_religions": 0.7894736842105263,
"mmlu_loss": 1.5044772917545806,
"step": 3600
},
{
"epoch": 1.14,
"learning_rate": 0.0002,
"loss": 0.7361,
"step": 3610
},
{
"epoch": 1.15,
"learning_rate": 0.0002,
"loss": 0.7179,
"step": 3620
},
{
"epoch": 1.15,
"learning_rate": 0.0002,
"loss": 0.7499,
"step": 3630
},
{
"epoch": 1.15,
"learning_rate": 0.0002,
"loss": 0.7319,
"step": 3640
},
{
"epoch": 1.16,
"learning_rate": 0.0002,
"loss": 0.7104,
"step": 3650
},
{
"epoch": 1.16,
"learning_rate": 0.0002,
"loss": 0.6892,
"step": 3660
},
{
"epoch": 1.16,
"learning_rate": 0.0002,
"loss": 0.7666,
"step": 3670
},
{
"epoch": 1.17,
"learning_rate": 0.0002,
"loss": 0.632,
"step": 3680
},
{
"epoch": 1.17,
"learning_rate": 0.0002,
"loss": 0.713,
"step": 3690
},
{
"epoch": 1.17,
"learning_rate": 0.0002,
"loss": 0.6958,
"step": 3700
},
{
"epoch": 1.18,
"learning_rate": 0.0002,
"loss": 0.7253,
"step": 3710
},
{
"epoch": 1.18,
"learning_rate": 0.0002,
"loss": 0.7608,
"step": 3720
},
{
"epoch": 1.18,
"learning_rate": 0.0002,
"loss": 0.7277,
"step": 3730
},
{
"epoch": 1.19,
"learning_rate": 0.0002,
"loss": 0.7346,
"step": 3740
},
{
"epoch": 1.19,
"learning_rate": 0.0002,
"loss": 0.7075,
"step": 3750
},
{
"epoch": 1.19,
"learning_rate": 0.0002,
"loss": 0.6278,
"step": 3760
},
{
"epoch": 1.19,
"learning_rate": 0.0002,
"loss": 0.7088,
"step": 3770
},
{
"epoch": 1.2,
"learning_rate": 0.0002,
"loss": 0.7667,
"step": 3780
},
{
"epoch": 1.2,
"learning_rate": 0.0002,
"loss": 0.7051,
"step": 3790
},
{
"epoch": 1.2,
"learning_rate": 0.0002,
"loss": 0.699,
"step": 3800
},
{
"epoch": 1.2,
"eval_loss": 0.7395787239074707,
"eval_runtime": 110.7949,
"eval_samples_per_second": 9.026,
"eval_steps_per_second": 4.513,
"step": 3800
},
{
"epoch": 1.2,
"mmlu_eval_accuracy": 0.48410138439418055,
"mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
"mmlu_eval_accuracy_anatomy": 0.5,
"mmlu_eval_accuracy_astronomy": 0.4375,
"mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
"mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
"mmlu_eval_accuracy_college_biology": 0.4375,
"mmlu_eval_accuracy_college_chemistry": 0.25,
"mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
"mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
"mmlu_eval_accuracy_college_medicine": 0.3181818181818182,
"mmlu_eval_accuracy_college_physics": 0.36363636363636365,
"mmlu_eval_accuracy_computer_security": 0.36363636363636365,
"mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
"mmlu_eval_accuracy_econometrics": 0.16666666666666666,
"mmlu_eval_accuracy_electrical_engineering": 0.25,
"mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
"mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
"mmlu_eval_accuracy_global_facts": 0.4,
"mmlu_eval_accuracy_high_school_biology": 0.40625,
"mmlu_eval_accuracy_high_school_chemistry": 0.18181818181818182,
"mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_european_history": 0.6666666666666666,
"mmlu_eval_accuracy_high_school_geography": 0.9090909090909091,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.7619047619047619,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.4883720930232558,
"mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
"mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
"mmlu_eval_accuracy_high_school_physics": 0.058823529411764705,
"mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
"mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
"mmlu_eval_accuracy_high_school_us_history": 0.6818181818181818,
"mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
"mmlu_eval_accuracy_human_aging": 0.7391304347826086,
"mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
"mmlu_eval_accuracy_international_law": 0.7692307692307693,
"mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
"mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
"mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
"mmlu_eval_accuracy_management": 0.7272727272727273,
"mmlu_eval_accuracy_marketing": 0.8,
"mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
"mmlu_eval_accuracy_miscellaneous": 0.686046511627907,
"mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
"mmlu_eval_accuracy_moral_scenarios": 0.24,
"mmlu_eval_accuracy_nutrition": 0.6363636363636364,
"mmlu_eval_accuracy_philosophy": 0.5294117647058824,
"mmlu_eval_accuracy_prehistory": 0.4857142857142857,
"mmlu_eval_accuracy_professional_accounting": 0.3870967741935484,
"mmlu_eval_accuracy_professional_law": 0.3411764705882353,
"mmlu_eval_accuracy_professional_medicine": 0.5806451612903226,
"mmlu_eval_accuracy_professional_psychology": 0.5217391304347826,
"mmlu_eval_accuracy_public_relations": 0.5,
"mmlu_eval_accuracy_security_studies": 0.4444444444444444,
"mmlu_eval_accuracy_sociology": 0.6818181818181818,
"mmlu_eval_accuracy_us_foreign_policy": 0.9090909090909091,
"mmlu_eval_accuracy_virology": 0.5555555555555556,
"mmlu_eval_accuracy_world_religions": 0.7368421052631579,
"mmlu_loss": 1.414894336547615,
"step": 3800
},
{
"epoch": 1.21,
"learning_rate": 0.0002,
"loss": 0.6892,
"step": 3810
},
{
"epoch": 1.21,
"learning_rate": 0.0002,
"loss": 0.6753,
"step": 3820
},
{
"epoch": 1.21,
"learning_rate": 0.0002,
"loss": 0.6998,
"step": 3830
},
{
"epoch": 1.22,
"learning_rate": 0.0002,
"loss": 0.686,
"step": 3840
},
{
"epoch": 1.22,
"learning_rate": 0.0002,
"loss": 0.7254,
"step": 3850
},
{
"epoch": 1.22,
"learning_rate": 0.0002,
"loss": 0.6942,
"step": 3860
},
{
"epoch": 1.23,
"learning_rate": 0.0002,
"loss": 0.6729,
"step": 3870
},
{
"epoch": 1.23,
"learning_rate": 0.0002,
"loss": 0.7486,
"step": 3880
},
{
"epoch": 1.23,
"learning_rate": 0.0002,
"loss": 0.6997,
"step": 3890
},
{
"epoch": 1.24,
"learning_rate": 0.0002,
"loss": 0.7308,
"step": 3900
},
{
"epoch": 1.24,
"learning_rate": 0.0002,
"loss": 0.7214,
"step": 3910
},
{
"epoch": 1.24,
"learning_rate": 0.0002,
"loss": 0.6879,
"step": 3920
},
{
"epoch": 1.25,
"learning_rate": 0.0002,
"loss": 0.6662,
"step": 3930
},
{
"epoch": 1.25,
"learning_rate": 0.0002,
"loss": 0.7045,
"step": 3940
},
{
"epoch": 1.25,
"learning_rate": 0.0002,
"loss": 0.7908,
"step": 3950
},
{
"epoch": 1.25,
"learning_rate": 0.0002,
"loss": 0.72,
"step": 3960
},
{
"epoch": 1.26,
"learning_rate": 0.0002,
"loss": 0.6646,
"step": 3970
},
{
"epoch": 1.26,
"learning_rate": 0.0002,
"loss": 0.7421,
"step": 3980
},
{
"epoch": 1.26,
"learning_rate": 0.0002,
"loss": 0.7489,
"step": 3990
},
{
"epoch": 1.27,
"learning_rate": 0.0002,
"loss": 0.7082,
"step": 4000
},
{
"epoch": 1.27,
"eval_loss": 0.7381725907325745,
"eval_runtime": 111.1345,
"eval_samples_per_second": 8.998,
"eval_steps_per_second": 4.499,
"step": 4000
},
{
"epoch": 1.27,
"mmlu_eval_accuracy": 0.48533511185669687,
"mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
"mmlu_eval_accuracy_anatomy": 0.5714285714285714,
"mmlu_eval_accuracy_astronomy": 0.4375,
"mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
"mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
"mmlu_eval_accuracy_college_biology": 0.375,
"mmlu_eval_accuracy_college_chemistry": 0.25,
"mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
"mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
"mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
"mmlu_eval_accuracy_college_physics": 0.36363636363636365,
"mmlu_eval_accuracy_computer_security": 0.45454545454545453,
"mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
"mmlu_eval_accuracy_econometrics": 0.16666666666666666,
"mmlu_eval_accuracy_electrical_engineering": 0.25,
"mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
"mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
"mmlu_eval_accuracy_global_facts": 0.6,
"mmlu_eval_accuracy_high_school_biology": 0.375,
"mmlu_eval_accuracy_high_school_chemistry": 0.13636363636363635,
"mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_european_history": 0.7222222222222222,
"mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.7142857142857143,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.3953488372093023,
"mmlu_eval_accuracy_high_school_mathematics": 0.1724137931034483,
"mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
"mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
"mmlu_eval_accuracy_high_school_psychology": 0.85,
"mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
"mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
"mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
"mmlu_eval_accuracy_human_aging": 0.7391304347826086,
"mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
"mmlu_eval_accuracy_international_law": 0.9230769230769231,
"mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
"mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
"mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
"mmlu_eval_accuracy_management": 0.6363636363636364,
"mmlu_eval_accuracy_marketing": 0.8,
"mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
"mmlu_eval_accuracy_miscellaneous": 0.7093023255813954,
"mmlu_eval_accuracy_moral_disputes": 0.5,
"mmlu_eval_accuracy_moral_scenarios": 0.24,
"mmlu_eval_accuracy_nutrition": 0.696969696969697,
"mmlu_eval_accuracy_philosophy": 0.5294117647058824,
"mmlu_eval_accuracy_prehistory": 0.42857142857142855,
"mmlu_eval_accuracy_professional_accounting": 0.45161290322580644,
"mmlu_eval_accuracy_professional_law": 0.31176470588235294,
"mmlu_eval_accuracy_professional_medicine": 0.5806451612903226,
"mmlu_eval_accuracy_professional_psychology": 0.5072463768115942,
"mmlu_eval_accuracy_public_relations": 0.5833333333333334,
"mmlu_eval_accuracy_security_studies": 0.48148148148148145,
"mmlu_eval_accuracy_sociology": 0.6818181818181818,
"mmlu_eval_accuracy_us_foreign_policy": 0.8181818181818182,
"mmlu_eval_accuracy_virology": 0.5555555555555556,
"mmlu_eval_accuracy_world_religions": 0.6842105263157895,
"mmlu_loss": 1.3075970206652858,
"step": 4000
},
{
"epoch": 1.27,
"learning_rate": 0.0002,
"loss": 0.6578,
"step": 4010
},
{
"epoch": 1.27,
"learning_rate": 0.0002,
"loss": 0.7462,
"step": 4020
},
{
"epoch": 1.28,
"learning_rate": 0.0002,
"loss": 0.699,
"step": 4030
},
{
"epoch": 1.28,
"learning_rate": 0.0002,
"loss": 0.7144,
"step": 4040
},
{
"epoch": 1.28,
"learning_rate": 0.0002,
"loss": 0.6771,
"step": 4050
},
{
"epoch": 1.29,
"learning_rate": 0.0002,
"loss": 0.7198,
"step": 4060
},
{
"epoch": 1.29,
"learning_rate": 0.0002,
"loss": 0.6848,
"step": 4070
},
{
"epoch": 1.29,
"learning_rate": 0.0002,
"loss": 0.762,
"step": 4080
},
{
"epoch": 1.3,
"learning_rate": 0.0002,
"loss": 0.7354,
"step": 4090
},
{
"epoch": 1.3,
"learning_rate": 0.0002,
"loss": 0.6529,
"step": 4100
},
{
"epoch": 1.3,
"learning_rate": 0.0002,
"loss": 0.6373,
"step": 4110
},
{
"epoch": 1.31,
"learning_rate": 0.0002,
"loss": 0.7415,
"step": 4120
},
{
"epoch": 1.31,
"learning_rate": 0.0002,
"loss": 0.6646,
"step": 4130
},
{
"epoch": 1.31,
"learning_rate": 0.0002,
"loss": 0.6904,
"step": 4140
},
{
"epoch": 1.31,
"learning_rate": 0.0002,
"loss": 0.7462,
"step": 4150
},
{
"epoch": 1.32,
"learning_rate": 0.0002,
"loss": 0.7261,
"step": 4160
},
{
"epoch": 1.32,
"learning_rate": 0.0002,
"loss": 0.6866,
"step": 4170
},
{
"epoch": 1.32,
"learning_rate": 0.0002,
"loss": 0.6789,
"step": 4180
},
{
"epoch": 1.33,
"learning_rate": 0.0002,
"loss": 0.6943,
"step": 4190
},
{
"epoch": 1.33,
"learning_rate": 0.0002,
"loss": 0.6644,
"step": 4200
},
{
"epoch": 1.33,
"eval_loss": 0.7391716241836548,
"eval_runtime": 111.1279,
"eval_samples_per_second": 8.999,
"eval_steps_per_second": 4.499,
"step": 4200
},
{
"epoch": 1.33,
"mmlu_eval_accuracy": 0.48595716946128337,
"mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
"mmlu_eval_accuracy_anatomy": 0.6428571428571429,
"mmlu_eval_accuracy_astronomy": 0.4375,
"mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
"mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
"mmlu_eval_accuracy_college_biology": 0.4375,
"mmlu_eval_accuracy_college_chemistry": 0.375,
"mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
"mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
"mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
"mmlu_eval_accuracy_college_physics": 0.2727272727272727,
"mmlu_eval_accuracy_computer_security": 0.45454545454545453,
"mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
"mmlu_eval_accuracy_econometrics": 0.16666666666666666,
"mmlu_eval_accuracy_electrical_engineering": 0.1875,
"mmlu_eval_accuracy_elementary_mathematics": 0.3902439024390244,
"mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
"mmlu_eval_accuracy_global_facts": 0.4,
"mmlu_eval_accuracy_high_school_biology": 0.375,
"mmlu_eval_accuracy_high_school_chemistry": 0.18181818181818182,
"mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
"mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.4883720930232558,
"mmlu_eval_accuracy_high_school_mathematics": 0.1724137931034483,
"mmlu_eval_accuracy_high_school_microeconomics": 0.5,
"mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
"mmlu_eval_accuracy_high_school_psychology": 0.85,
"mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
"mmlu_eval_accuracy_high_school_us_history": 0.5454545454545454,
"mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
"mmlu_eval_accuracy_human_aging": 0.7391304347826086,
"mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
"mmlu_eval_accuracy_international_law": 0.9230769230769231,
"mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
"mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
"mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
"mmlu_eval_accuracy_management": 0.7272727272727273,
"mmlu_eval_accuracy_marketing": 0.72,
"mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
"mmlu_eval_accuracy_miscellaneous": 0.6976744186046512,
"mmlu_eval_accuracy_moral_disputes": 0.5,
"mmlu_eval_accuracy_moral_scenarios": 0.24,
"mmlu_eval_accuracy_nutrition": 0.7272727272727273,
"mmlu_eval_accuracy_philosophy": 0.5,
"mmlu_eval_accuracy_prehistory": 0.4857142857142857,
"mmlu_eval_accuracy_professional_accounting": 0.3548387096774194,
"mmlu_eval_accuracy_professional_law": 0.3235294117647059,
"mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
"mmlu_eval_accuracy_professional_psychology": 0.5072463768115942,
"mmlu_eval_accuracy_public_relations": 0.5833333333333334,
"mmlu_eval_accuracy_security_studies": 0.48148148148148145,
"mmlu_eval_accuracy_sociology": 0.7272727272727273,
"mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
"mmlu_eval_accuracy_virology": 0.5555555555555556,
"mmlu_eval_accuracy_world_religions": 0.6842105263157895,
"mmlu_loss": 1.2436322906776758,
"step": 4200
},
{
"epoch": 1.33,
"learning_rate": 0.0002,
"loss": 0.7427,
"step": 4210
},
{
"epoch": 1.34,
"learning_rate": 0.0002,
"loss": 0.628,
"step": 4220
},
{
"epoch": 1.34,
"learning_rate": 0.0002,
"loss": 0.6656,
"step": 4230
},
{
"epoch": 1.34,
"learning_rate": 0.0002,
"loss": 0.6631,
"step": 4240
},
{
"epoch": 1.35,
"learning_rate": 0.0002,
"loss": 0.7031,
"step": 4250
},
{
"epoch": 1.35,
"learning_rate": 0.0002,
"loss": 0.7102,
"step": 4260
},
{
"epoch": 1.35,
"learning_rate": 0.0002,
"loss": 0.7077,
"step": 4270
},
{
"epoch": 1.36,
"learning_rate": 0.0002,
"loss": 0.7679,
"step": 4280
},
{
"epoch": 1.36,
"learning_rate": 0.0002,
"loss": 0.6569,
"step": 4290
},
{
"epoch": 1.36,
"learning_rate": 0.0002,
"loss": 0.6911,
"step": 4300
},
{
"epoch": 1.37,
"learning_rate": 0.0002,
"loss": 0.7468,
"step": 4310
},
{
"epoch": 1.37,
"learning_rate": 0.0002,
"loss": 0.6641,
"step": 4320
},
{
"epoch": 1.37,
"learning_rate": 0.0002,
"loss": 0.7248,
"step": 4330
},
{
"epoch": 1.38,
"learning_rate": 0.0002,
"loss": 0.706,
"step": 4340
},
{
"epoch": 1.38,
"learning_rate": 0.0002,
"loss": 0.717,
"step": 4350
},
{
"epoch": 1.38,
"learning_rate": 0.0002,
"loss": 0.6462,
"step": 4360
},
{
"epoch": 1.38,
"learning_rate": 0.0002,
"loss": 0.6752,
"step": 4370
},
{
"epoch": 1.39,
"learning_rate": 0.0002,
"loss": 0.7239,
"step": 4380
},
{
"epoch": 1.39,
"learning_rate": 0.0002,
"loss": 0.6665,
"step": 4390
},
{
"epoch": 1.39,
"learning_rate": 0.0002,
"loss": 0.7077,
"step": 4400
},
{
"epoch": 1.39,
"eval_loss": 0.7374858260154724,
"eval_runtime": 111.3021,
"eval_samples_per_second": 8.985,
"eval_steps_per_second": 4.492,
"step": 4400
},
{
"epoch": 1.39,
"mmlu_eval_accuracy": 0.49250240895964725,
"mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
"mmlu_eval_accuracy_anatomy": 0.7857142857142857,
"mmlu_eval_accuracy_astronomy": 0.4375,
"mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
"mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
"mmlu_eval_accuracy_college_biology": 0.4375,
"mmlu_eval_accuracy_college_chemistry": 0.25,
"mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
"mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
"mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
"mmlu_eval_accuracy_college_physics": 0.36363636363636365,
"mmlu_eval_accuracy_computer_security": 0.5454545454545454,
"mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
"mmlu_eval_accuracy_econometrics": 0.3333333333333333,
"mmlu_eval_accuracy_electrical_engineering": 0.25,
"mmlu_eval_accuracy_elementary_mathematics": 0.2926829268292683,
"mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
"mmlu_eval_accuracy_global_facts": 0.6,
"mmlu_eval_accuracy_high_school_biology": 0.4375,
"mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
"mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
"mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.4418604651162791,
"mmlu_eval_accuracy_high_school_mathematics": 0.1724137931034483,
"mmlu_eval_accuracy_high_school_microeconomics": 0.5,
"mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
"mmlu_eval_accuracy_high_school_psychology": 0.8833333333333333,
"mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
"mmlu_eval_accuracy_high_school_us_history": 0.5454545454545454,
"mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
"mmlu_eval_accuracy_human_aging": 0.7391304347826086,
"mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
"mmlu_eval_accuracy_international_law": 0.8461538461538461,
"mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
"mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
"mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
"mmlu_eval_accuracy_management": 0.7272727272727273,
"mmlu_eval_accuracy_marketing": 0.76,
"mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
"mmlu_eval_accuracy_miscellaneous": 0.6627906976744186,
"mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
"mmlu_eval_accuracy_moral_scenarios": 0.23,
"mmlu_eval_accuracy_nutrition": 0.7575757575757576,
"mmlu_eval_accuracy_philosophy": 0.47058823529411764,
"mmlu_eval_accuracy_prehistory": 0.4857142857142857,
"mmlu_eval_accuracy_professional_accounting": 0.25806451612903225,
"mmlu_eval_accuracy_professional_law": 0.3352941176470588,
"mmlu_eval_accuracy_professional_medicine": 0.5161290322580645,
"mmlu_eval_accuracy_professional_psychology": 0.4927536231884058,
"mmlu_eval_accuracy_public_relations": 0.6666666666666666,
"mmlu_eval_accuracy_security_studies": 0.4444444444444444,
"mmlu_eval_accuracy_sociology": 0.5909090909090909,
"mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
"mmlu_eval_accuracy_virology": 0.5,
"mmlu_eval_accuracy_world_religions": 0.7368421052631579,
"mmlu_loss": 1.2793094400637455,
"step": 4400
},
{
"epoch": 1.4,
"learning_rate": 0.0002,
"loss": 0.7042,
"step": 4410
},
{
"epoch": 1.4,
"learning_rate": 0.0002,
"loss": 0.7554,
"step": 4420
},
{
"epoch": 1.4,
"learning_rate": 0.0002,
"loss": 0.757,
"step": 4430
},
{
"epoch": 1.41,
"learning_rate": 0.0002,
"loss": 0.7173,
"step": 4440
},
{
"epoch": 1.41,
"learning_rate": 0.0002,
"loss": 0.6655,
"step": 4450
},
{
"epoch": 1.41,
"learning_rate": 0.0002,
"loss": 0.6991,
"step": 4460
},
{
"epoch": 1.42,
"learning_rate": 0.0002,
"loss": 0.7148,
"step": 4470
},
{
"epoch": 1.42,
"learning_rate": 0.0002,
"loss": 0.7085,
"step": 4480
},
{
"epoch": 1.42,
"learning_rate": 0.0002,
"loss": 0.6955,
"step": 4490
},
{
"epoch": 1.43,
"learning_rate": 0.0002,
"loss": 0.7139,
"step": 4500
},
{
"epoch": 1.43,
"learning_rate": 0.0002,
"loss": 0.7262,
"step": 4510
},
{
"epoch": 1.43,
"learning_rate": 0.0002,
"loss": 0.7705,
"step": 4520
},
{
"epoch": 1.44,
"learning_rate": 0.0002,
"loss": 0.7028,
"step": 4530
},
{
"epoch": 1.44,
"learning_rate": 0.0002,
"loss": 0.7146,
"step": 4540
},
{
"epoch": 1.44,
"learning_rate": 0.0002,
"loss": 0.6868,
"step": 4550
},
{
"epoch": 1.44,
"learning_rate": 0.0002,
"loss": 0.6591,
"step": 4560
},
{
"epoch": 1.45,
"learning_rate": 0.0002,
"loss": 0.7019,
"step": 4570
},
{
"epoch": 1.45,
"learning_rate": 0.0002,
"loss": 0.6676,
"step": 4580
},
{
"epoch": 1.45,
"learning_rate": 0.0002,
"loss": 0.7085,
"step": 4590
},
{
"epoch": 1.46,
"learning_rate": 0.0002,
"loss": 0.664,
"step": 4600
},
{
"epoch": 1.46,
"eval_loss": 0.7358158230781555,
"eval_runtime": 111.2934,
"eval_samples_per_second": 8.985,
"eval_steps_per_second": 4.493,
"step": 4600
},
{
"epoch": 1.46,
"mmlu_eval_accuracy": 0.4861300261183481,
"mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
"mmlu_eval_accuracy_anatomy": 0.7142857142857143,
"mmlu_eval_accuracy_astronomy": 0.4375,
"mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
"mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
"mmlu_eval_accuracy_college_biology": 0.375,
"mmlu_eval_accuracy_college_chemistry": 0.25,
"mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
"mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
"mmlu_eval_accuracy_college_medicine": 0.45454545454545453,
"mmlu_eval_accuracy_college_physics": 0.36363636363636365,
"mmlu_eval_accuracy_computer_security": 0.45454545454545453,
"mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
"mmlu_eval_accuracy_econometrics": 0.3333333333333333,
"mmlu_eval_accuracy_electrical_engineering": 0.25,
"mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
"mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
"mmlu_eval_accuracy_global_facts": 0.6,
"mmlu_eval_accuracy_high_school_biology": 0.46875,
"mmlu_eval_accuracy_high_school_chemistry": 0.18181818181818182,
"mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.5714285714285714,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.4883720930232558,
"mmlu_eval_accuracy_high_school_mathematics": 0.1724137931034483,
"mmlu_eval_accuracy_high_school_microeconomics": 0.46153846153846156,
"mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
"mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
"mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
"mmlu_eval_accuracy_high_school_us_history": 0.5454545454545454,
"mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
"mmlu_eval_accuracy_human_aging": 0.7391304347826086,
"mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
"mmlu_eval_accuracy_international_law": 0.8461538461538461,
"mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
"mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
"mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
"mmlu_eval_accuracy_management": 0.6363636363636364,
"mmlu_eval_accuracy_marketing": 0.8,
"mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
"mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
"mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
"mmlu_eval_accuracy_moral_scenarios": 0.24,
"mmlu_eval_accuracy_nutrition": 0.6666666666666666,
"mmlu_eval_accuracy_philosophy": 0.5,
"mmlu_eval_accuracy_prehistory": 0.5142857142857142,
"mmlu_eval_accuracy_professional_accounting": 0.25806451612903225,
"mmlu_eval_accuracy_professional_law": 0.3352941176470588,
"mmlu_eval_accuracy_professional_medicine": 0.5161290322580645,
"mmlu_eval_accuracy_professional_psychology": 0.463768115942029,
"mmlu_eval_accuracy_public_relations": 0.6666666666666666,
"mmlu_eval_accuracy_security_studies": 0.4444444444444444,
"mmlu_eval_accuracy_sociology": 0.6818181818181818,
"mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
"mmlu_eval_accuracy_virology": 0.5,
"mmlu_eval_accuracy_world_religions": 0.6842105263157895,
"mmlu_loss": 1.3095654961567946,
"step": 4600
},
{
"epoch": 1.46,
"learning_rate": 0.0002,
"loss": 0.6486,
"step": 4610
},
{
"epoch": 1.46,
"learning_rate": 0.0002,
"loss": 0.6999,
"step": 4620
},
{
"epoch": 1.47,
"learning_rate": 0.0002,
"loss": 0.6458,
"step": 4630
},
{
"epoch": 1.47,
"learning_rate": 0.0002,
"loss": 0.6762,
"step": 4640
},
{
"epoch": 1.47,
"learning_rate": 0.0002,
"loss": 0.6924,
"step": 4650
},
{
"epoch": 1.48,
"learning_rate": 0.0002,
"loss": 0.682,
"step": 4660
},
{
"epoch": 1.48,
"learning_rate": 0.0002,
"loss": 0.7081,
"step": 4670
},
{
"epoch": 1.48,
"learning_rate": 0.0002,
"loss": 0.7506,
"step": 4680
},
{
"epoch": 1.49,
"learning_rate": 0.0002,
"loss": 0.7311,
"step": 4690
},
{
"epoch": 1.49,
"learning_rate": 0.0002,
"loss": 0.6463,
"step": 4700
},
{
"epoch": 1.49,
"learning_rate": 0.0002,
"loss": 0.6741,
"step": 4710
},
{
"epoch": 1.5,
"learning_rate": 0.0002,
"loss": 0.6626,
"step": 4720
},
{
"epoch": 1.5,
"learning_rate": 0.0002,
"loss": 0.712,
"step": 4730
},
{
"epoch": 1.5,
"learning_rate": 0.0002,
"loss": 0.6676,
"step": 4740
},
{
"epoch": 1.51,
"learning_rate": 0.0002,
"loss": 0.7193,
"step": 4750
},
{
"epoch": 1.51,
"learning_rate": 0.0002,
"loss": 0.6699,
"step": 4760
},
{
"epoch": 1.51,
"learning_rate": 0.0002,
"loss": 0.6718,
"step": 4770
},
{
"epoch": 1.51,
"learning_rate": 0.0002,
"loss": 0.6899,
"step": 4780
},
{
"epoch": 1.52,
"learning_rate": 0.0002,
"loss": 0.6954,
"step": 4790
},
{
"epoch": 1.52,
"learning_rate": 0.0002,
"loss": 0.7187,
"step": 4800
},
{
"epoch": 1.52,
"eval_loss": 0.7387924790382385,
"eval_runtime": 111.1141,
"eval_samples_per_second": 9.0,
"eval_steps_per_second": 4.5,
"step": 4800
},
{
"epoch": 1.52,
"mmlu_eval_accuracy": 0.4879926358283337,
"mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
"mmlu_eval_accuracy_anatomy": 0.7142857142857143,
"mmlu_eval_accuracy_astronomy": 0.4375,
"mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
"mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
"mmlu_eval_accuracy_college_biology": 0.375,
"mmlu_eval_accuracy_college_chemistry": 0.375,
"mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
"mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
"mmlu_eval_accuracy_college_medicine": 0.3181818181818182,
"mmlu_eval_accuracy_college_physics": 0.36363636363636365,
"mmlu_eval_accuracy_computer_security": 0.45454545454545453,
"mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
"mmlu_eval_accuracy_econometrics": 0.25,
"mmlu_eval_accuracy_electrical_engineering": 0.3125,
"mmlu_eval_accuracy_elementary_mathematics": 0.3902439024390244,
"mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
"mmlu_eval_accuracy_global_facts": 0.5,
"mmlu_eval_accuracy_high_school_biology": 0.46875,
"mmlu_eval_accuracy_high_school_chemistry": 0.18181818181818182,
"mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.5238095238095238,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.5116279069767442,
"mmlu_eval_accuracy_high_school_mathematics": 0.13793103448275862,
"mmlu_eval_accuracy_high_school_microeconomics": 0.5384615384615384,
"mmlu_eval_accuracy_high_school_physics": 0.058823529411764705,
"mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
"mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
"mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
"mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
"mmlu_eval_accuracy_human_aging": 0.7391304347826086,
"mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
"mmlu_eval_accuracy_international_law": 0.8461538461538461,
"mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
"mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
"mmlu_eval_accuracy_machine_learning": 0.36363636363636365,
"mmlu_eval_accuracy_management": 0.7272727272727273,
"mmlu_eval_accuracy_marketing": 0.8,
"mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
"mmlu_eval_accuracy_miscellaneous": 0.6744186046511628,
"mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
"mmlu_eval_accuracy_moral_scenarios": 0.23,
"mmlu_eval_accuracy_nutrition": 0.696969696969697,
"mmlu_eval_accuracy_philosophy": 0.47058823529411764,
"mmlu_eval_accuracy_prehistory": 0.5428571428571428,
"mmlu_eval_accuracy_professional_accounting": 0.3225806451612903,
"mmlu_eval_accuracy_professional_law": 0.31176470588235294,
"mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
"mmlu_eval_accuracy_professional_psychology": 0.463768115942029,
"mmlu_eval_accuracy_public_relations": 0.6666666666666666,
"mmlu_eval_accuracy_security_studies": 0.4444444444444444,
"mmlu_eval_accuracy_sociology": 0.5909090909090909,
"mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
"mmlu_eval_accuracy_virology": 0.5,
"mmlu_eval_accuracy_world_religions": 0.6842105263157895,
"mmlu_loss": 1.4884750641901874,
"step": 4800
},
{
"epoch": 1.52,
"learning_rate": 0.0002,
"loss": 0.6733,
"step": 4810
},
{
"epoch": 1.53,
"learning_rate": 0.0002,
"loss": 0.6607,
"step": 4820
},
{
"epoch": 1.53,
"learning_rate": 0.0002,
"loss": 0.6933,
"step": 4830
},
{
"epoch": 1.53,
"learning_rate": 0.0002,
"loss": 0.7517,
"step": 4840
},
{
"epoch": 1.54,
"learning_rate": 0.0002,
"loss": 0.7391,
"step": 4850
},
{
"epoch": 1.54,
"learning_rate": 0.0002,
"loss": 0.6636,
"step": 4860
},
{
"epoch": 1.54,
"learning_rate": 0.0002,
"loss": 0.7221,
"step": 4870
},
{
"epoch": 1.55,
"learning_rate": 0.0002,
"loss": 0.6967,
"step": 4880
},
{
"epoch": 1.55,
"learning_rate": 0.0002,
"loss": 0.7117,
"step": 4890
},
{
"epoch": 1.55,
"learning_rate": 0.0002,
"loss": 0.6256,
"step": 4900
},
{
"epoch": 1.56,
"learning_rate": 0.0002,
"loss": 0.7923,
"step": 4910
},
{
"epoch": 1.56,
"learning_rate": 0.0002,
"loss": 0.7151,
"step": 4920
},
{
"epoch": 1.56,
"learning_rate": 0.0002,
"loss": 0.7119,
"step": 4930
},
{
"epoch": 1.57,
"learning_rate": 0.0002,
"loss": 0.7105,
"step": 4940
},
{
"epoch": 1.57,
"learning_rate": 0.0002,
"loss": 0.6653,
"step": 4950
},
{
"epoch": 1.57,
"learning_rate": 0.0002,
"loss": 0.7084,
"step": 4960
},
{
"epoch": 1.57,
"learning_rate": 0.0002,
"loss": 0.6644,
"step": 4970
},
{
"epoch": 1.58,
"learning_rate": 0.0002,
"loss": 0.6665,
"step": 4980
},
{
"epoch": 1.58,
"learning_rate": 0.0002,
"loss": 0.6746,
"step": 4990
},
{
"epoch": 1.58,
"learning_rate": 0.0002,
"loss": 0.7223,
"step": 5000
},
{
"epoch": 1.58,
"eval_loss": 0.7373215556144714,
"eval_runtime": 111.2649,
"eval_samples_per_second": 8.988,
"eval_steps_per_second": 4.494,
"step": 5000
},
{
"epoch": 1.58,
"mmlu_eval_accuracy": 0.46701126611778865,
"mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
"mmlu_eval_accuracy_anatomy": 0.6428571428571429,
"mmlu_eval_accuracy_astronomy": 0.4375,
"mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
"mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
"mmlu_eval_accuracy_college_biology": 0.375,
"mmlu_eval_accuracy_college_chemistry": 0.25,
"mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
"mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
"mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
"mmlu_eval_accuracy_college_physics": 0.36363636363636365,
"mmlu_eval_accuracy_computer_security": 0.36363636363636365,
"mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
"mmlu_eval_accuracy_econometrics": 0.16666666666666666,
"mmlu_eval_accuracy_electrical_engineering": 0.25,
"mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
"mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
"mmlu_eval_accuracy_global_facts": 0.3,
"mmlu_eval_accuracy_high_school_biology": 0.34375,
"mmlu_eval_accuracy_high_school_chemistry": 0.18181818181818182,
"mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_geography": 0.7727272727272727,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.5714285714285714,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.4883720930232558,
"mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
"mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
"mmlu_eval_accuracy_high_school_physics": 0.058823529411764705,
"mmlu_eval_accuracy_high_school_psychology": 0.8833333333333333,
"mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
"mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
"mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
"mmlu_eval_accuracy_human_aging": 0.6956521739130435,
"mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
"mmlu_eval_accuracy_international_law": 0.8461538461538461,
"mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
"mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
"mmlu_eval_accuracy_machine_learning": 0.09090909090909091,
"mmlu_eval_accuracy_management": 0.7272727272727273,
"mmlu_eval_accuracy_marketing": 0.72,
"mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
"mmlu_eval_accuracy_miscellaneous": 0.6744186046511628,
"mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
"mmlu_eval_accuracy_moral_scenarios": 0.23,
"mmlu_eval_accuracy_nutrition": 0.6363636363636364,
"mmlu_eval_accuracy_philosophy": 0.47058823529411764,
"mmlu_eval_accuracy_prehistory": 0.4857142857142857,
"mmlu_eval_accuracy_professional_accounting": 0.3225806451612903,
"mmlu_eval_accuracy_professional_law": 0.3235294117647059,
"mmlu_eval_accuracy_professional_medicine": 0.5806451612903226,
"mmlu_eval_accuracy_professional_psychology": 0.4927536231884058,
"mmlu_eval_accuracy_public_relations": 0.6666666666666666,
"mmlu_eval_accuracy_security_studies": 0.4444444444444444,
"mmlu_eval_accuracy_sociology": 0.6363636363636364,
"mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
"mmlu_eval_accuracy_virology": 0.5,
"mmlu_eval_accuracy_world_religions": 0.6842105263157895,
"mmlu_loss": 1.4578603010031324,
"step": 5000
},
{
"epoch": 1.59,
"learning_rate": 0.0002,
"loss": 0.6833,
"step": 5010
},
{
"epoch": 1.59,
"learning_rate": 0.0002,
"loss": 0.7323,
"step": 5020
},
{
"epoch": 1.59,
"learning_rate": 0.0002,
"loss": 0.7224,
"step": 5030
},
{
"epoch": 1.6,
"learning_rate": 0.0002,
"loss": 0.734,
"step": 5040
},
{
"epoch": 1.6,
"learning_rate": 0.0002,
"loss": 0.692,
"step": 5050
},
{
"epoch": 1.6,
"learning_rate": 0.0002,
"loss": 0.7083,
"step": 5060
},
{
"epoch": 1.61,
"learning_rate": 0.0002,
"loss": 0.6993,
"step": 5070
},
{
"epoch": 1.61,
"learning_rate": 0.0002,
"loss": 0.755,
"step": 5080
},
{
"epoch": 1.61,
"learning_rate": 0.0002,
"loss": 0.7323,
"step": 5090
},
{
"epoch": 1.62,
"learning_rate": 0.0002,
"loss": 0.6725,
"step": 5100
},
{
"epoch": 1.62,
"learning_rate": 0.0002,
"loss": 0.6989,
"step": 5110
},
{
"epoch": 1.62,
"learning_rate": 0.0002,
"loss": 0.6938,
"step": 5120
},
{
"epoch": 1.63,
"learning_rate": 0.0002,
"loss": 0.6895,
"step": 5130
},
{
"epoch": 1.63,
"learning_rate": 0.0002,
"loss": 0.6915,
"step": 5140
},
{
"epoch": 1.63,
"learning_rate": 0.0002,
"loss": 0.7672,
"step": 5150
},
{
"epoch": 1.63,
"learning_rate": 0.0002,
"loss": 0.6413,
"step": 5160
},
{
"epoch": 1.64,
"learning_rate": 0.0002,
"loss": 0.7195,
"step": 5170
},
{
"epoch": 1.64,
"learning_rate": 0.0002,
"loss": 0.6783,
"step": 5180
},
{
"epoch": 1.64,
"learning_rate": 0.0002,
"loss": 0.6457,
"step": 5190
},
{
"epoch": 1.65,
"learning_rate": 0.0002,
"loss": 0.6959,
"step": 5200
},
{
"epoch": 1.65,
"eval_loss": 0.736714243888855,
"eval_runtime": 111.0389,
"eval_samples_per_second": 9.006,
"eval_steps_per_second": 4.503,
"step": 5200
},
{
"epoch": 1.65,
"mmlu_eval_accuracy": 0.4835750759985151,
"mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
"mmlu_eval_accuracy_anatomy": 0.7857142857142857,
"mmlu_eval_accuracy_astronomy": 0.375,
"mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
"mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
"mmlu_eval_accuracy_college_biology": 0.4375,
"mmlu_eval_accuracy_college_chemistry": 0.25,
"mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
"mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
"mmlu_eval_accuracy_college_medicine": 0.45454545454545453,
"mmlu_eval_accuracy_college_physics": 0.36363636363636365,
"mmlu_eval_accuracy_computer_security": 0.5454545454545454,
"mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
"mmlu_eval_accuracy_econometrics": 0.25,
"mmlu_eval_accuracy_electrical_engineering": 0.25,
"mmlu_eval_accuracy_elementary_mathematics": 0.36585365853658536,
"mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
"mmlu_eval_accuracy_global_facts": 0.5,
"mmlu_eval_accuracy_high_school_biology": 0.375,
"mmlu_eval_accuracy_high_school_chemistry": 0.18181818181818182,
"mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
"mmlu_eval_accuracy_high_school_geography": 0.7727272727272727,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.5714285714285714,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.4883720930232558,
"mmlu_eval_accuracy_high_school_mathematics": 0.1724137931034483,
"mmlu_eval_accuracy_high_school_microeconomics": 0.5,
"mmlu_eval_accuracy_high_school_physics": 0.17647058823529413,
"mmlu_eval_accuracy_high_school_psychology": 0.85,
"mmlu_eval_accuracy_high_school_statistics": 0.2608695652173913,
"mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
"mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
"mmlu_eval_accuracy_human_aging": 0.7391304347826086,
"mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
"mmlu_eval_accuracy_international_law": 0.8461538461538461,
"mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
"mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
"mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
"mmlu_eval_accuracy_management": 0.6363636363636364,
"mmlu_eval_accuracy_marketing": 0.76,
"mmlu_eval_accuracy_medical_genetics": 0.8181818181818182,
"mmlu_eval_accuracy_miscellaneous": 0.6395348837209303,
"mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
"mmlu_eval_accuracy_moral_scenarios": 0.23,
"mmlu_eval_accuracy_nutrition": 0.696969696969697,
"mmlu_eval_accuracy_philosophy": 0.4411764705882353,
"mmlu_eval_accuracy_prehistory": 0.5142857142857142,
"mmlu_eval_accuracy_professional_accounting": 0.22580645161290322,
"mmlu_eval_accuracy_professional_law": 0.3058823529411765,
"mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
"mmlu_eval_accuracy_professional_psychology": 0.4927536231884058,
"mmlu_eval_accuracy_public_relations": 0.5833333333333334,
"mmlu_eval_accuracy_security_studies": 0.4074074074074074,
"mmlu_eval_accuracy_sociology": 0.6818181818181818,
"mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
"mmlu_eval_accuracy_virology": 0.5,
"mmlu_eval_accuracy_world_religions": 0.631578947368421,
"mmlu_loss": 1.2928575183564004,
"step": 5200
},
{
"epoch": 1.65,
"learning_rate": 0.0002,
"loss": 0.654,
"step": 5210
},
{
"epoch": 1.65,
"learning_rate": 0.0002,
"loss": 0.692,
"step": 5220
},
{
"epoch": 1.66,
"learning_rate": 0.0002,
"loss": 0.6774,
"step": 5230
},
{
"epoch": 1.66,
"learning_rate": 0.0002,
"loss": 0.6383,
"step": 5240
},
{
"epoch": 1.66,
"learning_rate": 0.0002,
"loss": 0.6949,
"step": 5250
},
{
"epoch": 1.67,
"learning_rate": 0.0002,
"loss": 0.6992,
"step": 5260
},
{
"epoch": 1.67,
"learning_rate": 0.0002,
"loss": 0.6612,
"step": 5270
},
{
"epoch": 1.67,
"learning_rate": 0.0002,
"loss": 0.7651,
"step": 5280
},
{
"epoch": 1.68,
"learning_rate": 0.0002,
"loss": 0.6994,
"step": 5290
},
{
"epoch": 1.68,
"learning_rate": 0.0002,
"loss": 0.7105,
"step": 5300
},
{
"epoch": 1.68,
"learning_rate": 0.0002,
"loss": 0.6972,
"step": 5310
},
{
"epoch": 1.69,
"learning_rate": 0.0002,
"loss": 0.7531,
"step": 5320
},
{
"epoch": 1.69,
"learning_rate": 0.0002,
"loss": 0.7072,
"step": 5330
},
{
"epoch": 1.69,
"learning_rate": 0.0002,
"loss": 0.6964,
"step": 5340
},
{
"epoch": 1.7,
"learning_rate": 0.0002,
"loss": 0.7574,
"step": 5350
},
{
"epoch": 1.7,
"learning_rate": 0.0002,
"loss": 0.7155,
"step": 5360
},
{
"epoch": 1.7,
"learning_rate": 0.0002,
"loss": 0.7104,
"step": 5370
},
{
"epoch": 1.7,
"learning_rate": 0.0002,
"loss": 0.7495,
"step": 5380
},
{
"epoch": 1.71,
"learning_rate": 0.0002,
"loss": 0.7259,
"step": 5390
},
{
"epoch": 1.71,
"learning_rate": 0.0002,
"loss": 0.7394,
"step": 5400
},
{
"epoch": 1.71,
"eval_loss": 0.7311118245124817,
"eval_runtime": 111.2623,
"eval_samples_per_second": 8.988,
"eval_steps_per_second": 4.494,
"step": 5400
},
{
"epoch": 1.71,
"mmlu_eval_accuracy": 0.4848351410303045,
"mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
"mmlu_eval_accuracy_anatomy": 0.7142857142857143,
"mmlu_eval_accuracy_astronomy": 0.4375,
"mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
"mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
"mmlu_eval_accuracy_college_biology": 0.375,
"mmlu_eval_accuracy_college_chemistry": 0.25,
"mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
"mmlu_eval_accuracy_college_mathematics": 0.36363636363636365,
"mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
"mmlu_eval_accuracy_college_physics": 0.36363636363636365,
"mmlu_eval_accuracy_computer_security": 0.45454545454545453,
"mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
"mmlu_eval_accuracy_econometrics": 0.16666666666666666,
"mmlu_eval_accuracy_electrical_engineering": 0.25,
"mmlu_eval_accuracy_elementary_mathematics": 0.3902439024390244,
"mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
"mmlu_eval_accuracy_global_facts": 0.4,
"mmlu_eval_accuracy_high_school_biology": 0.40625,
"mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
"mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_european_history": 0.6666666666666666,
"mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.4883720930232558,
"mmlu_eval_accuracy_high_school_mathematics": 0.1724137931034483,
"mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
"mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
"mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
"mmlu_eval_accuracy_high_school_statistics": 0.391304347826087,
"mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
"mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
"mmlu_eval_accuracy_human_aging": 0.7391304347826086,
"mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
"mmlu_eval_accuracy_international_law": 0.7692307692307693,
"mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
"mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
"mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
"mmlu_eval_accuracy_management": 0.7272727272727273,
"mmlu_eval_accuracy_marketing": 0.76,
"mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
"mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
"mmlu_eval_accuracy_moral_disputes": 0.5526315789473685,
"mmlu_eval_accuracy_moral_scenarios": 0.22,
"mmlu_eval_accuracy_nutrition": 0.6060606060606061,
"mmlu_eval_accuracy_philosophy": 0.4411764705882353,
"mmlu_eval_accuracy_prehistory": 0.4857142857142857,
"mmlu_eval_accuracy_professional_accounting": 0.3548387096774194,
"mmlu_eval_accuracy_professional_law": 0.3058823529411765,
"mmlu_eval_accuracy_professional_medicine": 0.6129032258064516,
"mmlu_eval_accuracy_professional_psychology": 0.4927536231884058,
"mmlu_eval_accuracy_public_relations": 0.5833333333333334,
"mmlu_eval_accuracy_security_studies": 0.4074074074074074,
"mmlu_eval_accuracy_sociology": 0.6363636363636364,
"mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
"mmlu_eval_accuracy_virology": 0.5,
"mmlu_eval_accuracy_world_religions": 0.7368421052631579,
"mmlu_loss": 1.348453690776937,
"step": 5400
},
{
"epoch": 1.71,
"learning_rate": 0.0002,
"loss": 0.7047,
"step": 5410
},
{
"epoch": 1.72,
"learning_rate": 0.0002,
"loss": 0.7001,
"step": 5420
},
{
"epoch": 1.72,
"learning_rate": 0.0002,
"loss": 0.6759,
"step": 5430
},
{
"epoch": 1.72,
"learning_rate": 0.0002,
"loss": 0.707,
"step": 5440
},
{
"epoch": 1.73,
"learning_rate": 0.0002,
"loss": 0.6648,
"step": 5450
},
{
"epoch": 1.73,
"learning_rate": 0.0002,
"loss": 0.7223,
"step": 5460
},
{
"epoch": 1.73,
"learning_rate": 0.0002,
"loss": 0.722,
"step": 5470
},
{
"epoch": 1.74,
"learning_rate": 0.0002,
"loss": 0.7848,
"step": 5480
},
{
"epoch": 1.74,
"learning_rate": 0.0002,
"loss": 0.6956,
"step": 5490
},
{
"epoch": 1.74,
"learning_rate": 0.0002,
"loss": 0.6584,
"step": 5500
},
{
"epoch": 1.75,
"learning_rate": 0.0002,
"loss": 0.7522,
"step": 5510
},
{
"epoch": 1.75,
"learning_rate": 0.0002,
"loss": 0.7374,
"step": 5520
},
{
"epoch": 1.75,
"learning_rate": 0.0002,
"loss": 0.635,
"step": 5530
},
{
"epoch": 1.76,
"learning_rate": 0.0002,
"loss": 0.6947,
"step": 5540
},
{
"epoch": 1.76,
"learning_rate": 0.0002,
"loss": 0.6948,
"step": 5550
},
{
"epoch": 1.76,
"learning_rate": 0.0002,
"loss": 0.676,
"step": 5560
},
{
"epoch": 1.76,
"learning_rate": 0.0002,
"loss": 0.7053,
"step": 5570
},
{
"epoch": 1.77,
"learning_rate": 0.0002,
"loss": 0.6868,
"step": 5580
},
{
"epoch": 1.77,
"learning_rate": 0.0002,
"loss": 0.7307,
"step": 5590
},
{
"epoch": 1.77,
"learning_rate": 0.0002,
"loss": 0.6902,
"step": 5600
},
{
"epoch": 1.77,
"eval_loss": 0.7314637899398804,
"eval_runtime": 111.0487,
"eval_samples_per_second": 9.005,
"eval_steps_per_second": 4.503,
"step": 5600
},
{
"epoch": 1.77,
"mmlu_eval_accuracy": 0.48467107795368586,
"mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
"mmlu_eval_accuracy_anatomy": 0.7142857142857143,
"mmlu_eval_accuracy_astronomy": 0.5,
"mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
"mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
"mmlu_eval_accuracy_college_biology": 0.375,
"mmlu_eval_accuracy_college_chemistry": 0.375,
"mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
"mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
"mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
"mmlu_eval_accuracy_college_physics": 0.36363636363636365,
"mmlu_eval_accuracy_computer_security": 0.5454545454545454,
"mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
"mmlu_eval_accuracy_econometrics": 0.25,
"mmlu_eval_accuracy_electrical_engineering": 0.375,
"mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
"mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
"mmlu_eval_accuracy_global_facts": 0.3,
"mmlu_eval_accuracy_high_school_biology": 0.375,
"mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
"mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
"mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.4418604651162791,
"mmlu_eval_accuracy_high_school_mathematics": 0.13793103448275862,
"mmlu_eval_accuracy_high_school_microeconomics": 0.46153846153846156,
"mmlu_eval_accuracy_high_school_physics": 0.058823529411764705,
"mmlu_eval_accuracy_high_school_psychology": 0.85,
"mmlu_eval_accuracy_high_school_statistics": 0.2608695652173913,
"mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
"mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
"mmlu_eval_accuracy_human_aging": 0.782608695652174,
"mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
"mmlu_eval_accuracy_international_law": 0.8461538461538461,
"mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
"mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
"mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
"mmlu_eval_accuracy_management": 0.6363636363636364,
"mmlu_eval_accuracy_marketing": 0.76,
"mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
"mmlu_eval_accuracy_miscellaneous": 0.627906976744186,
"mmlu_eval_accuracy_moral_disputes": 0.5,
"mmlu_eval_accuracy_moral_scenarios": 0.25,
"mmlu_eval_accuracy_nutrition": 0.7272727272727273,
"mmlu_eval_accuracy_philosophy": 0.5,
"mmlu_eval_accuracy_prehistory": 0.5142857142857142,
"mmlu_eval_accuracy_professional_accounting": 0.1935483870967742,
"mmlu_eval_accuracy_professional_law": 0.3411764705882353,
"mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
"mmlu_eval_accuracy_professional_psychology": 0.4782608695652174,
"mmlu_eval_accuracy_public_relations": 0.5,
"mmlu_eval_accuracy_security_studies": 0.48148148148148145,
"mmlu_eval_accuracy_sociology": 0.6818181818181818,
"mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
"mmlu_eval_accuracy_virology": 0.5,
"mmlu_eval_accuracy_world_religions": 0.6842105263157895,
"mmlu_loss": 1.3874250733665636,
"step": 5600
},
{
"epoch": 1.78,
"learning_rate": 0.0002,
"loss": 0.6558,
"step": 5610
},
{
"epoch": 1.78,
"learning_rate": 0.0002,
"loss": 0.714,
"step": 5620
},
{
"epoch": 1.78,
"learning_rate": 0.0002,
"loss": 0.7019,
"step": 5630
},
{
"epoch": 1.79,
"learning_rate": 0.0002,
"loss": 0.7084,
"step": 5640
},
{
"epoch": 1.79,
"learning_rate": 0.0002,
"loss": 0.7184,
"step": 5650
},
{
"epoch": 1.79,
"learning_rate": 0.0002,
"loss": 0.6524,
"step": 5660
},
{
"epoch": 1.8,
"learning_rate": 0.0002,
"loss": 0.7265,
"step": 5670
},
{
"epoch": 1.8,
"learning_rate": 0.0002,
"loss": 0.7164,
"step": 5680
},
{
"epoch": 1.8,
"learning_rate": 0.0002,
"loss": 0.6825,
"step": 5690
},
{
"epoch": 1.81,
"learning_rate": 0.0002,
"loss": 0.7427,
"step": 5700
},
{
"epoch": 1.81,
"learning_rate": 0.0002,
"loss": 0.7416,
"step": 5710
},
{
"epoch": 1.81,
"learning_rate": 0.0002,
"loss": 0.7027,
"step": 5720
},
{
"epoch": 1.82,
"learning_rate": 0.0002,
"loss": 0.7039,
"step": 5730
},
{
"epoch": 1.82,
"learning_rate": 0.0002,
"loss": 0.7108,
"step": 5740
},
{
"epoch": 1.82,
"learning_rate": 0.0002,
"loss": 0.6257,
"step": 5750
},
{
"epoch": 1.83,
"learning_rate": 0.0002,
"loss": 0.6665,
"step": 5760
},
{
"epoch": 1.83,
"learning_rate": 0.0002,
"loss": 0.7371,
"step": 5770
},
{
"epoch": 1.83,
"learning_rate": 0.0002,
"loss": 0.7194,
"step": 5780
},
{
"epoch": 1.83,
"learning_rate": 0.0002,
"loss": 0.7164,
"step": 5790
},
{
"epoch": 1.84,
"learning_rate": 0.0002,
"loss": 0.6887,
"step": 5800
},
{
"epoch": 1.84,
"eval_loss": 0.732559084892273,
"eval_runtime": 111.5342,
"eval_samples_per_second": 8.966,
"eval_steps_per_second": 4.483,
"step": 5800
},
{
"epoch": 1.84,
"mmlu_eval_accuracy": 0.4740066355704332,
"mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
"mmlu_eval_accuracy_anatomy": 0.6428571428571429,
"mmlu_eval_accuracy_astronomy": 0.4375,
"mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
"mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
"mmlu_eval_accuracy_college_biology": 0.375,
"mmlu_eval_accuracy_college_chemistry": 0.375,
"mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
"mmlu_eval_accuracy_college_mathematics": 0.36363636363636365,
"mmlu_eval_accuracy_college_medicine": 0.3181818181818182,
"mmlu_eval_accuracy_college_physics": 0.2727272727272727,
"mmlu_eval_accuracy_computer_security": 0.45454545454545453,
"mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
"mmlu_eval_accuracy_econometrics": 0.16666666666666666,
"mmlu_eval_accuracy_electrical_engineering": 0.25,
"mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
"mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
"mmlu_eval_accuracy_global_facts": 0.4,
"mmlu_eval_accuracy_high_school_biology": 0.375,
"mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
"mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.5714285714285714,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.4186046511627907,
"mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
"mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231,
"mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
"mmlu_eval_accuracy_high_school_psychology": 0.8333333333333334,
"mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
"mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
"mmlu_eval_accuracy_high_school_world_history": 0.6538461538461539,
"mmlu_eval_accuracy_human_aging": 0.6956521739130435,
"mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
"mmlu_eval_accuracy_international_law": 0.9230769230769231,
"mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
"mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
"mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
"mmlu_eval_accuracy_management": 0.6363636363636364,
"mmlu_eval_accuracy_marketing": 0.72,
"mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
"mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
"mmlu_eval_accuracy_moral_disputes": 0.42105263157894735,
"mmlu_eval_accuracy_moral_scenarios": 0.25,
"mmlu_eval_accuracy_nutrition": 0.6363636363636364,
"mmlu_eval_accuracy_philosophy": 0.5,
"mmlu_eval_accuracy_prehistory": 0.5142857142857142,
"mmlu_eval_accuracy_professional_accounting": 0.3870967741935484,
"mmlu_eval_accuracy_professional_law": 0.29411764705882354,
"mmlu_eval_accuracy_professional_medicine": 0.6451612903225806,
"mmlu_eval_accuracy_professional_psychology": 0.4927536231884058,
"mmlu_eval_accuracy_public_relations": 0.6666666666666666,
"mmlu_eval_accuracy_security_studies": 0.4444444444444444,
"mmlu_eval_accuracy_sociology": 0.6363636363636364,
"mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
"mmlu_eval_accuracy_virology": 0.5555555555555556,
"mmlu_eval_accuracy_world_religions": 0.631578947368421,
"mmlu_loss": 1.3845557023822173,
"step": 5800
},
{
"epoch": 1.84,
"learning_rate": 0.0002,
"loss": 0.668,
"step": 5810
},
{
"epoch": 1.84,
"learning_rate": 0.0002,
"loss": 0.6993,
"step": 5820
},
{
"epoch": 1.85,
"learning_rate": 0.0002,
"loss": 0.7418,
"step": 5830
},
{
"epoch": 1.85,
"learning_rate": 0.0002,
"loss": 0.6916,
"step": 5840
},
{
"epoch": 1.85,
"learning_rate": 0.0002,
"loss": 0.7564,
"step": 5850
},
{
"epoch": 1.86,
"learning_rate": 0.0002,
"loss": 0.641,
"step": 5860
},
{
"epoch": 1.86,
"learning_rate": 0.0002,
"loss": 0.7593,
"step": 5870
},
{
"epoch": 1.86,
"learning_rate": 0.0002,
"loss": 0.6886,
"step": 5880
},
{
"epoch": 1.87,
"learning_rate": 0.0002,
"loss": 0.7053,
"step": 5890
},
{
"epoch": 1.87,
"learning_rate": 0.0002,
"loss": 0.6201,
"step": 5900
},
{
"epoch": 1.87,
"learning_rate": 0.0002,
"loss": 0.6998,
"step": 5910
},
{
"epoch": 1.88,
"learning_rate": 0.0002,
"loss": 0.6768,
"step": 5920
},
{
"epoch": 1.88,
"learning_rate": 0.0002,
"loss": 0.711,
"step": 5930
},
{
"epoch": 1.88,
"learning_rate": 0.0002,
"loss": 0.681,
"step": 5940
},
{
"epoch": 1.89,
"learning_rate": 0.0002,
"loss": 0.7145,
"step": 5950
},
{
"epoch": 1.89,
"learning_rate": 0.0002,
"loss": 0.7513,
"step": 5960
},
{
"epoch": 1.89,
"learning_rate": 0.0002,
"loss": 0.6817,
"step": 5970
},
{
"epoch": 1.89,
"learning_rate": 0.0002,
"loss": 0.6757,
"step": 5980
},
{
"epoch": 1.9,
"learning_rate": 0.0002,
"loss": 0.6899,
"step": 5990
},
{
"epoch": 1.9,
"learning_rate": 0.0002,
"loss": 0.6821,
"step": 6000
},
{
"epoch": 1.9,
"eval_loss": 0.7302425503730774,
"eval_runtime": 111.0525,
"eval_samples_per_second": 9.005,
"eval_steps_per_second": 4.502,
"step": 6000
},
{
"epoch": 1.9,
"mmlu_eval_accuracy": 0.47023094937776666,
"mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
"mmlu_eval_accuracy_anatomy": 0.6428571428571429,
"mmlu_eval_accuracy_astronomy": 0.4375,
"mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
"mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
"mmlu_eval_accuracy_college_biology": 0.375,
"mmlu_eval_accuracy_college_chemistry": 0.125,
"mmlu_eval_accuracy_college_computer_science": 0.18181818181818182,
"mmlu_eval_accuracy_college_mathematics": 0.36363636363636365,
"mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
"mmlu_eval_accuracy_college_physics": 0.36363636363636365,
"mmlu_eval_accuracy_computer_security": 0.36363636363636365,
"mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
"mmlu_eval_accuracy_econometrics": 0.25,
"mmlu_eval_accuracy_electrical_engineering": 0.25,
"mmlu_eval_accuracy_elementary_mathematics": 0.2682926829268293,
"mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
"mmlu_eval_accuracy_global_facts": 0.4,
"mmlu_eval_accuracy_high_school_biology": 0.34375,
"mmlu_eval_accuracy_high_school_chemistry": 0.3181818181818182,
"mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
"mmlu_eval_accuracy_high_school_geography": 0.7727272727272727,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.5714285714285714,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.4186046511627907,
"mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
"mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
"mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
"mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
"mmlu_eval_accuracy_high_school_statistics": 0.21739130434782608,
"mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
"mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
"mmlu_eval_accuracy_human_aging": 0.6956521739130435,
"mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
"mmlu_eval_accuracy_international_law": 0.9230769230769231,
"mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
"mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
"mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
"mmlu_eval_accuracy_management": 0.6363636363636364,
"mmlu_eval_accuracy_marketing": 0.72,
"mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
"mmlu_eval_accuracy_miscellaneous": 0.627906976744186,
"mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
"mmlu_eval_accuracy_moral_scenarios": 0.24,
"mmlu_eval_accuracy_nutrition": 0.5757575757575758,
"mmlu_eval_accuracy_philosophy": 0.47058823529411764,
"mmlu_eval_accuracy_prehistory": 0.4857142857142857,
"mmlu_eval_accuracy_professional_accounting": 0.3870967741935484,
"mmlu_eval_accuracy_professional_law": 0.3,
"mmlu_eval_accuracy_professional_medicine": 0.6451612903225806,
"mmlu_eval_accuracy_professional_psychology": 0.5072463768115942,
"mmlu_eval_accuracy_public_relations": 0.6666666666666666,
"mmlu_eval_accuracy_security_studies": 0.48148148148148145,
"mmlu_eval_accuracy_sociology": 0.6363636363636364,
"mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
"mmlu_eval_accuracy_virology": 0.5,
"mmlu_eval_accuracy_world_religions": 0.631578947368421,
"mmlu_loss": 1.4106916200087525,
"step": 6000
},
{
"epoch": 1.9,
"learning_rate": 0.0002,
"loss": 0.7115,
"step": 6010
},
{
"epoch": 1.91,
"learning_rate": 0.0002,
"loss": 0.6862,
"step": 6020
},
{
"epoch": 1.91,
"learning_rate": 0.0002,
"loss": 0.6705,
"step": 6030
},
{
"epoch": 1.91,
"learning_rate": 0.0002,
"loss": 0.6848,
"step": 6040
},
{
"epoch": 1.92,
"learning_rate": 0.0002,
"loss": 0.7765,
"step": 6050
},
{
"epoch": 1.92,
"learning_rate": 0.0002,
"loss": 0.6801,
"step": 6060
},
{
"epoch": 1.92,
"learning_rate": 0.0002,
"loss": 0.6648,
"step": 6070
},
{
"epoch": 1.93,
"learning_rate": 0.0002,
"loss": 0.6847,
"step": 6080
},
{
"epoch": 1.93,
"learning_rate": 0.0002,
"loss": 0.665,
"step": 6090
},
{
"epoch": 1.93,
"learning_rate": 0.0002,
"loss": 0.7627,
"step": 6100
},
{
"epoch": 1.94,
"learning_rate": 0.0002,
"loss": 0.6874,
"step": 6110
},
{
"epoch": 1.94,
"learning_rate": 0.0002,
"loss": 0.6907,
"step": 6120
},
{
"epoch": 1.94,
"learning_rate": 0.0002,
"loss": 0.6369,
"step": 6130
},
{
"epoch": 1.95,
"learning_rate": 0.0002,
"loss": 0.7289,
"step": 6140
},
{
"epoch": 1.95,
"learning_rate": 0.0002,
"loss": 0.7233,
"step": 6150
},
{
"epoch": 1.95,
"learning_rate": 0.0002,
"loss": 0.68,
"step": 6160
},
{
"epoch": 1.96,
"learning_rate": 0.0002,
"loss": 0.6842,
"step": 6170
},
{
"epoch": 1.96,
"learning_rate": 0.0002,
"loss": 0.7125,
"step": 6180
},
{
"epoch": 1.96,
"learning_rate": 0.0002,
"loss": 0.683,
"step": 6190
},
{
"epoch": 1.96,
"learning_rate": 0.0002,
"loss": 0.7097,
"step": 6200
},
{
"epoch": 1.96,
"eval_loss": 0.7293602228164673,
"eval_runtime": 111.0579,
"eval_samples_per_second": 9.004,
"eval_steps_per_second": 4.502,
"step": 6200
},
{
"epoch": 1.96,
"mmlu_eval_accuracy": 0.4704848103487601,
"mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
"mmlu_eval_accuracy_anatomy": 0.6428571428571429,
"mmlu_eval_accuracy_astronomy": 0.4375,
"mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
"mmlu_eval_accuracy_clinical_knowledge": 0.4482758620689655,
"mmlu_eval_accuracy_college_biology": 0.375,
"mmlu_eval_accuracy_college_chemistry": 0.25,
"mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
"mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
"mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
"mmlu_eval_accuracy_college_physics": 0.2727272727272727,
"mmlu_eval_accuracy_computer_security": 0.36363636363636365,
"mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
"mmlu_eval_accuracy_econometrics": 0.25,
"mmlu_eval_accuracy_electrical_engineering": 0.25,
"mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
"mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
"mmlu_eval_accuracy_global_facts": 0.3,
"mmlu_eval_accuracy_high_school_biology": 0.375,
"mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
"mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.5714285714285714,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.4883720930232558,
"mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
"mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231,
"mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
"mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
"mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
"mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
"mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
"mmlu_eval_accuracy_human_aging": 0.782608695652174,
"mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
"mmlu_eval_accuracy_international_law": 0.9230769230769231,
"mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
"mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
"mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
"mmlu_eval_accuracy_management": 0.6363636363636364,
"mmlu_eval_accuracy_marketing": 0.68,
"mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
"mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
"mmlu_eval_accuracy_moral_disputes": 0.5526315789473685,
"mmlu_eval_accuracy_moral_scenarios": 0.28,
"mmlu_eval_accuracy_nutrition": 0.6060606060606061,
"mmlu_eval_accuracy_philosophy": 0.4411764705882353,
"mmlu_eval_accuracy_prehistory": 0.45714285714285713,
"mmlu_eval_accuracy_professional_accounting": 0.41935483870967744,
"mmlu_eval_accuracy_professional_law": 0.31176470588235294,
"mmlu_eval_accuracy_professional_medicine": 0.5806451612903226,
"mmlu_eval_accuracy_professional_psychology": 0.463768115942029,
"mmlu_eval_accuracy_public_relations": 0.6666666666666666,
"mmlu_eval_accuracy_security_studies": 0.48148148148148145,
"mmlu_eval_accuracy_sociology": 0.6363636363636364,
"mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
"mmlu_eval_accuracy_virology": 0.5555555555555556,
"mmlu_eval_accuracy_world_religions": 0.631578947368421,
"mmlu_loss": 1.374586288985011,
"step": 6200
},
{
"epoch": 1.97,
"learning_rate": 0.0002,
"loss": 0.7095,
"step": 6210
},
{
"epoch": 1.97,
"learning_rate": 0.0002,
"loss": 0.7681,
"step": 6220
},
{
"epoch": 1.97,
"learning_rate": 0.0002,
"loss": 0.7356,
"step": 6230
},
{
"epoch": 1.98,
"learning_rate": 0.0002,
"loss": 0.6956,
"step": 6240
},
{
"epoch": 1.98,
"learning_rate": 0.0002,
"loss": 0.7034,
"step": 6250
},
{
"epoch": 1.98,
"learning_rate": 0.0002,
"loss": 0.6532,
"step": 6260
},
{
"epoch": 1.99,
"learning_rate": 0.0002,
"loss": 0.6917,
"step": 6270
},
{
"epoch": 1.99,
"learning_rate": 0.0002,
"loss": 0.6392,
"step": 6280
},
{
"epoch": 1.99,
"learning_rate": 0.0002,
"loss": 0.6656,
"step": 6290
},
{
"epoch": 2.0,
"learning_rate": 0.0002,
"loss": 0.6829,
"step": 6300
},
{
"epoch": 2.0,
"learning_rate": 0.0002,
"loss": 0.675,
"step": 6310
},
{
"epoch": 2.0,
"learning_rate": 0.0002,
"loss": 0.6321,
"step": 6320
},
{
"epoch": 2.01,
"learning_rate": 0.0002,
"loss": 0.6109,
"step": 6330
},
{
"epoch": 2.01,
"learning_rate": 0.0002,
"loss": 0.6065,
"step": 6340
},
{
"epoch": 2.01,
"learning_rate": 0.0002,
"loss": 0.5912,
"step": 6350
},
{
"epoch": 2.02,
"learning_rate": 0.0002,
"loss": 0.613,
"step": 6360
},
{
"epoch": 2.02,
"learning_rate": 0.0002,
"loss": 0.586,
"step": 6370
},
{
"epoch": 2.02,
"learning_rate": 0.0002,
"loss": 0.6383,
"step": 6380
},
{
"epoch": 2.02,
"learning_rate": 0.0002,
"loss": 0.5629,
"step": 6390
},
{
"epoch": 2.03,
"learning_rate": 0.0002,
"loss": 0.6048,
"step": 6400
},
{
"epoch": 2.03,
"eval_loss": 0.7574472427368164,
"eval_runtime": 110.9511,
"eval_samples_per_second": 9.013,
"eval_steps_per_second": 4.506,
"step": 6400
},
{
"epoch": 2.03,
"mmlu_eval_accuracy": 0.470592564742188,
"mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
"mmlu_eval_accuracy_anatomy": 0.6428571428571429,
"mmlu_eval_accuracy_astronomy": 0.375,
"mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
"mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
"mmlu_eval_accuracy_college_biology": 0.375,
"mmlu_eval_accuracy_college_chemistry": 0.125,
"mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
"mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
"mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
"mmlu_eval_accuracy_college_physics": 0.36363636363636365,
"mmlu_eval_accuracy_computer_security": 0.36363636363636365,
"mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
"mmlu_eval_accuracy_econometrics": 0.25,
"mmlu_eval_accuracy_electrical_engineering": 0.25,
"mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
"mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
"mmlu_eval_accuracy_global_facts": 0.3,
"mmlu_eval_accuracy_high_school_biology": 0.4375,
"mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
"mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
"mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.5116279069767442,
"mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
"mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
"mmlu_eval_accuracy_high_school_physics": 0.0,
"mmlu_eval_accuracy_high_school_psychology": 0.8333333333333334,
"mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
"mmlu_eval_accuracy_high_school_us_history": 0.6818181818181818,
"mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
"mmlu_eval_accuracy_human_aging": 0.7391304347826086,
"mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
"mmlu_eval_accuracy_international_law": 0.9230769230769231,
"mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
"mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
"mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
"mmlu_eval_accuracy_management": 0.6363636363636364,
"mmlu_eval_accuracy_marketing": 0.76,
"mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
"mmlu_eval_accuracy_miscellaneous": 0.6395348837209303,
"mmlu_eval_accuracy_moral_disputes": 0.5,
"mmlu_eval_accuracy_moral_scenarios": 0.26,
"mmlu_eval_accuracy_nutrition": 0.6060606060606061,
"mmlu_eval_accuracy_philosophy": 0.4411764705882353,
"mmlu_eval_accuracy_prehistory": 0.4857142857142857,
"mmlu_eval_accuracy_professional_accounting": 0.2903225806451613,
"mmlu_eval_accuracy_professional_law": 0.32941176470588235,
"mmlu_eval_accuracy_professional_medicine": 0.6129032258064516,
"mmlu_eval_accuracy_professional_psychology": 0.4927536231884058,
"mmlu_eval_accuracy_public_relations": 0.5833333333333334,
"mmlu_eval_accuracy_security_studies": 0.48148148148148145,
"mmlu_eval_accuracy_sociology": 0.6363636363636364,
"mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
"mmlu_eval_accuracy_virology": 0.5555555555555556,
"mmlu_eval_accuracy_world_religions": 0.631578947368421,
"mmlu_loss": 1.3004325469542422,
"step": 6400
}
],
"max_steps": 10000,
"num_train_epochs": 4,
"total_flos": 1.9426737309114532e+18,
"trial_name": null,
"trial_params": null
}