{
"best_metric": 0.6084713339805603,
"best_model_checkpoint": "./output_v2/7b_cluster028_Nous-Hermes-llama-2-7b_partitioned_v3_standardized_028/checkpoint-1400",
"epoch": 1.6251904520060945,
"global_step": 1600,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 0.0002,
"loss": 0.8709,
"step": 10
},
{
"epoch": 0.02,
"learning_rate": 0.0002,
"loss": 0.9602,
"step": 20
},
{
"epoch": 0.03,
"learning_rate": 0.0002,
"loss": 0.7606,
"step": 30
},
{
"epoch": 0.04,
"learning_rate": 0.0002,
"loss": 0.7942,
"step": 40
},
{
"epoch": 0.05,
"learning_rate": 0.0002,
"loss": 0.5849,
"step": 50
},
{
"epoch": 0.06,
"learning_rate": 0.0002,
"loss": 0.7161,
"step": 60
},
{
"epoch": 0.07,
"learning_rate": 0.0002,
"loss": 0.7699,
"step": 70
},
{
"epoch": 0.08,
"learning_rate": 0.0002,
"loss": 0.7264,
"step": 80
},
{
"epoch": 0.09,
"learning_rate": 0.0002,
"loss": 0.6845,
"step": 90
},
{
"epoch": 0.1,
"learning_rate": 0.0002,
"loss": 0.6638,
"step": 100
},
{
"epoch": 0.11,
"learning_rate": 0.0002,
"loss": 0.6089,
"step": 110
},
{
"epoch": 0.12,
"learning_rate": 0.0002,
"loss": 0.7681,
"step": 120
},
{
"epoch": 0.13,
"learning_rate": 0.0002,
"loss": 0.7489,
"step": 130
},
{
"epoch": 0.14,
"learning_rate": 0.0002,
"loss": 0.7472,
"step": 140
},
{
"epoch": 0.15,
"learning_rate": 0.0002,
"loss": 0.8521,
"step": 150
},
{
"epoch": 0.16,
"learning_rate": 0.0002,
"loss": 0.7223,
"step": 160
},
{
"epoch": 0.17,
"learning_rate": 0.0002,
"loss": 0.6727,
"step": 170
},
{
"epoch": 0.18,
"learning_rate": 0.0002,
"loss": 0.6434,
"step": 180
},
{
"epoch": 0.19,
"learning_rate": 0.0002,
"loss": 0.6754,
"step": 190
},
{
"epoch": 0.2,
"learning_rate": 0.0002,
"loss": 0.6945,
"step": 200
},
{
"epoch": 0.2,
"eval_loss": 0.6316617727279663,
"eval_runtime": 120.7896,
"eval_samples_per_second": 8.279,
"eval_steps_per_second": 4.139,
"step": 200
},
{
"epoch": 0.2,
"mmlu_eval_accuracy": 0.46933615423997516,
"mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
"mmlu_eval_accuracy_anatomy": 0.5714285714285714,
"mmlu_eval_accuracy_astronomy": 0.375,
"mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
"mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
"mmlu_eval_accuracy_college_biology": 0.4375,
"mmlu_eval_accuracy_college_chemistry": 0.25,
"mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
"mmlu_eval_accuracy_college_mathematics": 0.36363636363636365,
"mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
"mmlu_eval_accuracy_college_physics": 0.5454545454545454,
"mmlu_eval_accuracy_computer_security": 0.36363636363636365,
"mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
"mmlu_eval_accuracy_econometrics": 0.16666666666666666,
"mmlu_eval_accuracy_electrical_engineering": 0.375,
"mmlu_eval_accuracy_elementary_mathematics": 0.24390243902439024,
"mmlu_eval_accuracy_formal_logic": 0.35714285714285715,
"mmlu_eval_accuracy_global_facts": 0.5,
"mmlu_eval_accuracy_high_school_biology": 0.34375,
"mmlu_eval_accuracy_high_school_chemistry": 0.36363636363636365,
"mmlu_eval_accuracy_high_school_computer_science": 0.4444444444444444,
"mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
"mmlu_eval_accuracy_high_school_geography": 0.6818181818181818,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.4186046511627907,
"mmlu_eval_accuracy_high_school_mathematics": 0.27586206896551724,
"mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231,
"mmlu_eval_accuracy_high_school_physics": 0.29411764705882354,
"mmlu_eval_accuracy_high_school_psychology": 0.7,
"mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
"mmlu_eval_accuracy_high_school_us_history": 0.6818181818181818,
"mmlu_eval_accuracy_high_school_world_history": 0.5,
"mmlu_eval_accuracy_human_aging": 0.6956521739130435,
"mmlu_eval_accuracy_human_sexuality": 0.5,
"mmlu_eval_accuracy_international_law": 0.7692307692307693,
"mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
"mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556,
"mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
"mmlu_eval_accuracy_management": 0.5454545454545454,
"mmlu_eval_accuracy_marketing": 0.72,
"mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
"mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
"mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
"mmlu_eval_accuracy_moral_scenarios": 0.23,
"mmlu_eval_accuracy_nutrition": 0.6060606060606061,
"mmlu_eval_accuracy_philosophy": 0.47058823529411764,
"mmlu_eval_accuracy_prehistory": 0.45714285714285713,
"mmlu_eval_accuracy_professional_accounting": 0.2903225806451613,
"mmlu_eval_accuracy_professional_law": 0.3411764705882353,
"mmlu_eval_accuracy_professional_medicine": 0.45161290322580644,
"mmlu_eval_accuracy_professional_psychology": 0.3188405797101449,
"mmlu_eval_accuracy_public_relations": 0.5,
"mmlu_eval_accuracy_security_studies": 0.48148148148148145,
"mmlu_eval_accuracy_sociology": 0.6818181818181818,
"mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
"mmlu_eval_accuracy_virology": 0.3333333333333333,
"mmlu_eval_accuracy_world_religions": 0.7368421052631579,
"mmlu_loss": 0.9820772503398106,
"step": 200
},
{
"epoch": 0.21,
"learning_rate": 0.0002,
"loss": 0.6532,
"step": 210
},
{
"epoch": 0.22,
"learning_rate": 0.0002,
"loss": 0.7207,
"step": 220
},
{
"epoch": 0.23,
"learning_rate": 0.0002,
"loss": 0.7092,
"step": 230
},
{
"epoch": 0.24,
"learning_rate": 0.0002,
"loss": 0.6561,
"step": 240
},
{
"epoch": 0.25,
"learning_rate": 0.0002,
"loss": 0.6516,
"step": 250
},
{
"epoch": 0.26,
"learning_rate": 0.0002,
"loss": 0.6293,
"step": 260
},
{
"epoch": 0.27,
"learning_rate": 0.0002,
"loss": 0.6238,
"step": 270
},
{
"epoch": 0.28,
"learning_rate": 0.0002,
"loss": 0.6484,
"step": 280
},
{
"epoch": 0.29,
"learning_rate": 0.0002,
"loss": 0.6795,
"step": 290
},
{
"epoch": 0.3,
"learning_rate": 0.0002,
"loss": 0.5931,
"step": 300
},
{
"epoch": 0.31,
"learning_rate": 0.0002,
"loss": 0.7188,
"step": 310
},
{
"epoch": 0.33,
"learning_rate": 0.0002,
"loss": 0.6823,
"step": 320
},
{
"epoch": 0.34,
"learning_rate": 0.0002,
"loss": 0.7286,
"step": 330
},
{
"epoch": 0.35,
"learning_rate": 0.0002,
"loss": 0.7396,
"step": 340
},
{
"epoch": 0.36,
"learning_rate": 0.0002,
"loss": 0.6779,
"step": 350
},
{
"epoch": 0.37,
"learning_rate": 0.0002,
"loss": 0.7003,
"step": 360
},
{
"epoch": 0.38,
"learning_rate": 0.0002,
"loss": 0.6721,
"step": 370
},
{
"epoch": 0.39,
"learning_rate": 0.0002,
"loss": 0.736,
"step": 380
},
{
"epoch": 0.4,
"learning_rate": 0.0002,
"loss": 0.6221,
"step": 390
},
{
"epoch": 0.41,
"learning_rate": 0.0002,
"loss": 0.6736,
"step": 400
},
{
"epoch": 0.41,
"eval_loss": 0.6207628846168518,
"eval_runtime": 120.8451,
"eval_samples_per_second": 8.275,
"eval_steps_per_second": 4.138,
"step": 400
},
{
"epoch": 0.41,
"mmlu_eval_accuracy": 0.4837331454649875,
"mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
"mmlu_eval_accuracy_anatomy": 0.7142857142857143,
"mmlu_eval_accuracy_astronomy": 0.3125,
"mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
"mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
"mmlu_eval_accuracy_college_biology": 0.5625,
"mmlu_eval_accuracy_college_chemistry": 0.125,
"mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
"mmlu_eval_accuracy_college_mathematics": 0.36363636363636365,
"mmlu_eval_accuracy_college_medicine": 0.45454545454545453,
"mmlu_eval_accuracy_college_physics": 0.5454545454545454,
"mmlu_eval_accuracy_computer_security": 0.5454545454545454,
"mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
"mmlu_eval_accuracy_econometrics": 0.08333333333333333,
"mmlu_eval_accuracy_electrical_engineering": 0.375,
"mmlu_eval_accuracy_elementary_mathematics": 0.24390243902439024,
"mmlu_eval_accuracy_formal_logic": 0.42857142857142855,
"mmlu_eval_accuracy_global_facts": 0.5,
"mmlu_eval_accuracy_high_school_biology": 0.375,
"mmlu_eval_accuracy_high_school_chemistry": 0.3181818181818182,
"mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.37209302325581395,
"mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
"mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
"mmlu_eval_accuracy_high_school_physics": 0.4117647058823529,
"mmlu_eval_accuracy_high_school_psychology": 0.7666666666666667,
"mmlu_eval_accuracy_high_school_statistics": 0.391304347826087,
"mmlu_eval_accuracy_high_school_us_history": 0.7272727272727273,
"mmlu_eval_accuracy_high_school_world_history": 0.5,
"mmlu_eval_accuracy_human_aging": 0.6521739130434783,
"mmlu_eval_accuracy_human_sexuality": 0.5,
"mmlu_eval_accuracy_international_law": 0.7692307692307693,
"mmlu_eval_accuracy_jurisprudence": 0.5454545454545454,
"mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556,
"mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
"mmlu_eval_accuracy_management": 0.6363636363636364,
"mmlu_eval_accuracy_marketing": 0.76,
"mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
"mmlu_eval_accuracy_miscellaneous": 0.6627906976744186,
"mmlu_eval_accuracy_moral_disputes": 0.5263157894736842,
"mmlu_eval_accuracy_moral_scenarios": 0.25,
"mmlu_eval_accuracy_nutrition": 0.5757575757575758,
"mmlu_eval_accuracy_philosophy": 0.5,
"mmlu_eval_accuracy_prehistory": 0.4857142857142857,
"mmlu_eval_accuracy_professional_accounting": 0.2903225806451613,
"mmlu_eval_accuracy_professional_law": 0.3235294117647059,
"mmlu_eval_accuracy_professional_medicine": 0.45161290322580644,
"mmlu_eval_accuracy_professional_psychology": 0.37681159420289856,
"mmlu_eval_accuracy_public_relations": 0.4166666666666667,
"mmlu_eval_accuracy_security_studies": 0.5185185185185185,
"mmlu_eval_accuracy_sociology": 0.6818181818181818,
"mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454,
"mmlu_eval_accuracy_virology": 0.3333333333333333,
"mmlu_eval_accuracy_world_religions": 0.7368421052631579,
"mmlu_loss": 1.0501697772321128,
"step": 400
},
{
"epoch": 0.42,
"learning_rate": 0.0002,
"loss": 0.6737,
"step": 410
},
{
"epoch": 0.43,
"learning_rate": 0.0002,
"loss": 0.6234,
"step": 420
},
{
"epoch": 0.44,
"learning_rate": 0.0002,
"loss": 0.6819,
"step": 430
},
{
"epoch": 0.45,
"learning_rate": 0.0002,
"loss": 0.6338,
"step": 440
},
{
"epoch": 0.46,
"learning_rate": 0.0002,
"loss": 0.8598,
"step": 450
},
{
"epoch": 0.47,
"learning_rate": 0.0002,
"loss": 0.6242,
"step": 460
},
{
"epoch": 0.48,
"learning_rate": 0.0002,
"loss": 0.6475,
"step": 470
},
{
"epoch": 0.49,
"learning_rate": 0.0002,
"loss": 0.6648,
"step": 480
},
{
"epoch": 0.5,
"learning_rate": 0.0002,
"loss": 0.6701,
"step": 490
},
{
"epoch": 0.51,
"learning_rate": 0.0002,
"loss": 0.6111,
"step": 500
},
{
"epoch": 0.52,
"learning_rate": 0.0002,
"loss": 0.7534,
"step": 510
},
{
"epoch": 0.53,
"learning_rate": 0.0002,
"loss": 0.6295,
"step": 520
},
{
"epoch": 0.54,
"learning_rate": 0.0002,
"loss": 0.6684,
"step": 530
},
{
"epoch": 0.55,
"learning_rate": 0.0002,
"loss": 0.6345,
"step": 540
},
{
"epoch": 0.56,
"learning_rate": 0.0002,
"loss": 0.6401,
"step": 550
},
{
"epoch": 0.57,
"learning_rate": 0.0002,
"loss": 0.6682,
"step": 560
},
{
"epoch": 0.58,
"learning_rate": 0.0002,
"loss": 0.7064,
"step": 570
},
{
"epoch": 0.59,
"learning_rate": 0.0002,
"loss": 0.5483,
"step": 580
},
{
"epoch": 0.6,
"learning_rate": 0.0002,
"loss": 0.6306,
"step": 590
},
{
"epoch": 0.61,
"learning_rate": 0.0002,
"loss": 0.624,
"step": 600
},
{
"epoch": 0.61,
"eval_loss": 0.6136035323143005,
"eval_runtime": 120.795,
"eval_samples_per_second": 8.278,
"eval_steps_per_second": 4.139,
"step": 600
},
{
"epoch": 0.61,
"mmlu_eval_accuracy": 0.4829167430977062,
"mmlu_eval_accuracy_abstract_algebra": 0.09090909090909091,
"mmlu_eval_accuracy_anatomy": 0.5714285714285714,
"mmlu_eval_accuracy_astronomy": 0.5,
"mmlu_eval_accuracy_business_ethics": 0.6363636363636364,
"mmlu_eval_accuracy_clinical_knowledge": 0.4482758620689655,
"mmlu_eval_accuracy_college_biology": 0.375,
"mmlu_eval_accuracy_college_chemistry": 0.375,
"mmlu_eval_accuracy_college_computer_science": 0.5454545454545454,
"mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
"mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
"mmlu_eval_accuracy_college_physics": 0.45454545454545453,
"mmlu_eval_accuracy_computer_security": 0.36363636363636365,
"mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
"mmlu_eval_accuracy_econometrics": 0.25,
"mmlu_eval_accuracy_electrical_engineering": 0.4375,
"mmlu_eval_accuracy_elementary_mathematics": 0.36585365853658536,
"mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
"mmlu_eval_accuracy_global_facts": 0.6,
"mmlu_eval_accuracy_high_school_biology": 0.34375,
"mmlu_eval_accuracy_high_school_chemistry": 0.4090909090909091,
"mmlu_eval_accuracy_high_school_computer_science": 0.7777777777777778,
"mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
"mmlu_eval_accuracy_high_school_geography": 0.7727272727272727,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.3488372093023256,
"mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
"mmlu_eval_accuracy_high_school_microeconomics": 0.46153846153846156,
"mmlu_eval_accuracy_high_school_physics": 0.29411764705882354,
"mmlu_eval_accuracy_high_school_psychology": 0.6833333333333333,
"mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
"mmlu_eval_accuracy_high_school_us_history": 0.6818181818181818,
"mmlu_eval_accuracy_high_school_world_history": 0.6538461538461539,
"mmlu_eval_accuracy_human_aging": 0.6956521739130435,
"mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
"mmlu_eval_accuracy_international_law": 0.7692307692307693,
"mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
"mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556,
"mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
"mmlu_eval_accuracy_management": 0.6363636363636364,
"mmlu_eval_accuracy_marketing": 0.8,
"mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
"mmlu_eval_accuracy_miscellaneous": 0.6627906976744186,
"mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
"mmlu_eval_accuracy_moral_scenarios": 0.23,
"mmlu_eval_accuracy_nutrition": 0.5757575757575758,
"mmlu_eval_accuracy_philosophy": 0.5294117647058824,
"mmlu_eval_accuracy_prehistory": 0.4,
"mmlu_eval_accuracy_professional_accounting": 0.3225806451612903,
"mmlu_eval_accuracy_professional_law": 0.3588235294117647,
"mmlu_eval_accuracy_professional_medicine": 0.45161290322580644,
"mmlu_eval_accuracy_professional_psychology": 0.36231884057971014,
"mmlu_eval_accuracy_public_relations": 0.5,
"mmlu_eval_accuracy_security_studies": 0.5185185185185185,
"mmlu_eval_accuracy_sociology": 0.6363636363636364,
"mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
"mmlu_eval_accuracy_virology": 0.4444444444444444,
"mmlu_eval_accuracy_world_religions": 0.7368421052631579,
"mmlu_loss": 1.063391035281647,
"step": 600
},
{
"epoch": 0.62,
"learning_rate": 0.0002,
"loss": 0.6211,
"step": 610
},
{
"epoch": 0.63,
"learning_rate": 0.0002,
"loss": 0.6347,
"step": 620
},
{
"epoch": 0.64,
"learning_rate": 0.0002,
"loss": 0.727,
"step": 630
},
{
"epoch": 0.65,
"learning_rate": 0.0002,
"loss": 0.6753,
"step": 640
},
{
"epoch": 0.66,
"learning_rate": 0.0002,
"loss": 0.674,
"step": 650
},
{
"epoch": 0.67,
"learning_rate": 0.0002,
"loss": 0.7054,
"step": 660
},
{
"epoch": 0.68,
"learning_rate": 0.0002,
"loss": 0.7221,
"step": 670
},
{
"epoch": 0.69,
"learning_rate": 0.0002,
"loss": 0.6147,
"step": 680
},
{
"epoch": 0.7,
"learning_rate": 0.0002,
"loss": 0.693,
"step": 690
},
{
"epoch": 0.71,
"learning_rate": 0.0002,
"loss": 0.6348,
"step": 700
},
{
"epoch": 0.72,
"learning_rate": 0.0002,
"loss": 0.604,
"step": 710
},
{
"epoch": 0.73,
"learning_rate": 0.0002,
"loss": 0.5798,
"step": 720
},
{
"epoch": 0.74,
"learning_rate": 0.0002,
"loss": 0.5844,
"step": 730
},
{
"epoch": 0.75,
"learning_rate": 0.0002,
"loss": 0.6679,
"step": 740
},
{
"epoch": 0.76,
"learning_rate": 0.0002,
"loss": 0.6564,
"step": 750
},
{
"epoch": 0.77,
"learning_rate": 0.0002,
"loss": 0.7336,
"step": 760
},
{
"epoch": 0.78,
"learning_rate": 0.0002,
"loss": 0.7271,
"step": 770
},
{
"epoch": 0.79,
"learning_rate": 0.0002,
"loss": 0.6606,
"step": 780
},
{
"epoch": 0.8,
"learning_rate": 0.0002,
"loss": 0.6415,
"step": 790
},
{
"epoch": 0.81,
"learning_rate": 0.0002,
"loss": 0.6775,
"step": 800
},
{
"epoch": 0.81,
"eval_loss": 0.6129981875419617,
"eval_runtime": 120.8205,
"eval_samples_per_second": 8.277,
"eval_steps_per_second": 4.138,
"step": 800
},
{
"epoch": 0.81,
"mmlu_eval_accuracy": 0.46517376517531633,
"mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
"mmlu_eval_accuracy_anatomy": 0.6428571428571429,
"mmlu_eval_accuracy_astronomy": 0.4375,
"mmlu_eval_accuracy_business_ethics": 0.6363636363636364,
"mmlu_eval_accuracy_clinical_knowledge": 0.4482758620689655,
"mmlu_eval_accuracy_college_biology": 0.4375,
"mmlu_eval_accuracy_college_chemistry": 0.0,
"mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
"mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
"mmlu_eval_accuracy_college_medicine": 0.3181818181818182,
"mmlu_eval_accuracy_college_physics": 0.45454545454545453,
"mmlu_eval_accuracy_computer_security": 0.45454545454545453,
"mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
"mmlu_eval_accuracy_econometrics": 0.16666666666666666,
"mmlu_eval_accuracy_electrical_engineering": 0.3125,
"mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
"mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
"mmlu_eval_accuracy_global_facts": 0.5,
"mmlu_eval_accuracy_high_school_biology": 0.46875,
"mmlu_eval_accuracy_high_school_chemistry": 0.36363636363636365,
"mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_geography": 0.7727272727272727,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.5714285714285714,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.37209302325581395,
"mmlu_eval_accuracy_high_school_mathematics": 0.1724137931034483,
"mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
"mmlu_eval_accuracy_high_school_physics": 0.23529411764705882,
"mmlu_eval_accuracy_high_school_psychology": 0.7,
"mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
"mmlu_eval_accuracy_high_school_us_history": 0.7272727272727273,
"mmlu_eval_accuracy_high_school_world_history": 0.5384615384615384,
"mmlu_eval_accuracy_human_aging": 0.6956521739130435,
"mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
"mmlu_eval_accuracy_international_law": 0.7692307692307693,
"mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
"mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556,
"mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
"mmlu_eval_accuracy_management": 0.6363636363636364,
"mmlu_eval_accuracy_marketing": 0.72,
"mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
"mmlu_eval_accuracy_miscellaneous": 0.686046511627907,
"mmlu_eval_accuracy_moral_disputes": 0.42105263157894735,
"mmlu_eval_accuracy_moral_scenarios": 0.25,
"mmlu_eval_accuracy_nutrition": 0.5757575757575758,
"mmlu_eval_accuracy_philosophy": 0.5588235294117647,
"mmlu_eval_accuracy_prehistory": 0.42857142857142855,
"mmlu_eval_accuracy_professional_accounting": 0.2903225806451613,
"mmlu_eval_accuracy_professional_law": 0.3588235294117647,
"mmlu_eval_accuracy_professional_medicine": 0.4838709677419355,
"mmlu_eval_accuracy_professional_psychology": 0.37681159420289856,
"mmlu_eval_accuracy_public_relations": 0.5833333333333334,
"mmlu_eval_accuracy_security_studies": 0.5185185185185185,
"mmlu_eval_accuracy_sociology": 0.5454545454545454,
"mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
"mmlu_eval_accuracy_virology": 0.5,
"mmlu_eval_accuracy_world_religions": 0.7368421052631579,
"mmlu_loss": 1.0104573983553184,
"step": 800
},
{
"epoch": 0.82,
"learning_rate": 0.0002,
"loss": 0.6279,
"step": 810
},
{
"epoch": 0.83,
"learning_rate": 0.0002,
"loss": 0.736,
"step": 820
},
{
"epoch": 0.84,
"learning_rate": 0.0002,
"loss": 0.6651,
"step": 830
},
{
"epoch": 0.85,
"learning_rate": 0.0002,
"loss": 0.7022,
"step": 840
},
{
"epoch": 0.86,
"learning_rate": 0.0002,
"loss": 0.6589,
"step": 850
},
{
"epoch": 0.87,
"learning_rate": 0.0002,
"loss": 0.6898,
"step": 860
},
{
"epoch": 0.88,
"learning_rate": 0.0002,
"loss": 0.6577,
"step": 870
},
{
"epoch": 0.89,
"learning_rate": 0.0002,
"loss": 0.6923,
"step": 880
},
{
"epoch": 0.9,
"learning_rate": 0.0002,
"loss": 0.6111,
"step": 890
},
{
"epoch": 0.91,
"learning_rate": 0.0002,
"loss": 0.7108,
"step": 900
},
{
"epoch": 0.92,
"learning_rate": 0.0002,
"loss": 0.6572,
"step": 910
},
{
"epoch": 0.93,
"learning_rate": 0.0002,
"loss": 0.6671,
"step": 920
},
{
"epoch": 0.94,
"learning_rate": 0.0002,
"loss": 0.601,
"step": 930
},
{
"epoch": 0.95,
"learning_rate": 0.0002,
"loss": 0.6132,
"step": 940
},
{
"epoch": 0.96,
"learning_rate": 0.0002,
"loss": 0.6888,
"step": 950
},
{
"epoch": 0.98,
"learning_rate": 0.0002,
"loss": 0.6872,
"step": 960
},
{
"epoch": 0.99,
"learning_rate": 0.0002,
"loss": 0.7862,
"step": 970
},
{
"epoch": 1.0,
"learning_rate": 0.0002,
"loss": 0.7163,
"step": 980
},
{
"epoch": 1.01,
"learning_rate": 0.0002,
"loss": 0.6389,
"step": 990
},
{
"epoch": 1.02,
"learning_rate": 0.0002,
"loss": 0.5417,
"step": 1000
},
{
"epoch": 1.02,
"eval_loss": 0.6085913181304932,
"eval_runtime": 122.2286,
"eval_samples_per_second": 8.181,
"eval_steps_per_second": 4.091,
"step": 1000
},
{
"epoch": 1.02,
"mmlu_eval_accuracy": 0.4658396099751714,
"mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
"mmlu_eval_accuracy_anatomy": 0.5714285714285714,
"mmlu_eval_accuracy_astronomy": 0.4375,
"mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
"mmlu_eval_accuracy_clinical_knowledge": 0.4482758620689655,
"mmlu_eval_accuracy_college_biology": 0.5,
"mmlu_eval_accuracy_college_chemistry": 0.25,
"mmlu_eval_accuracy_college_computer_science": 0.45454545454545453,
"mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
"mmlu_eval_accuracy_college_medicine": 0.3181818181818182,
"mmlu_eval_accuracy_college_physics": 0.45454545454545453,
"mmlu_eval_accuracy_computer_security": 0.36363636363636365,
"mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
"mmlu_eval_accuracy_econometrics": 0.08333333333333333,
"mmlu_eval_accuracy_electrical_engineering": 0.3125,
"mmlu_eval_accuracy_elementary_mathematics": 0.36585365853658536,
"mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
"mmlu_eval_accuracy_global_facts": 0.5,
"mmlu_eval_accuracy_high_school_biology": 0.375,
"mmlu_eval_accuracy_high_school_chemistry": 0.3181818181818182,
"mmlu_eval_accuracy_high_school_computer_science": 0.6666666666666666,
"mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
"mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.32558139534883723,
"mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
"mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
"mmlu_eval_accuracy_high_school_physics": 0.23529411764705882,
"mmlu_eval_accuracy_high_school_psychology": 0.6833333333333333,
"mmlu_eval_accuracy_high_school_statistics": 0.391304347826087,
"mmlu_eval_accuracy_high_school_us_history": 0.7272727272727273,
"mmlu_eval_accuracy_high_school_world_history": 0.5769230769230769,
"mmlu_eval_accuracy_human_aging": 0.6521739130434783,
"mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
"mmlu_eval_accuracy_international_law": 0.7692307692307693,
"mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
"mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556,
"mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
"mmlu_eval_accuracy_management": 0.6363636363636364,
"mmlu_eval_accuracy_marketing": 0.76,
"mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
"mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
"mmlu_eval_accuracy_moral_disputes": 0.42105263157894735,
"mmlu_eval_accuracy_moral_scenarios": 0.24,
"mmlu_eval_accuracy_nutrition": 0.5151515151515151,
"mmlu_eval_accuracy_philosophy": 0.5294117647058824,
"mmlu_eval_accuracy_prehistory": 0.45714285714285713,
"mmlu_eval_accuracy_professional_accounting": 0.41935483870967744,
"mmlu_eval_accuracy_professional_law": 0.3352941176470588,
"mmlu_eval_accuracy_professional_medicine": 0.45161290322580644,
"mmlu_eval_accuracy_professional_psychology": 0.34782608695652173,
"mmlu_eval_accuracy_public_relations": 0.5833333333333334,
"mmlu_eval_accuracy_security_studies": 0.5185185185185185,
"mmlu_eval_accuracy_sociology": 0.6363636363636364,
"mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454,
"mmlu_eval_accuracy_virology": 0.5555555555555556,
"mmlu_eval_accuracy_world_religions": 0.6842105263157895,
"mmlu_loss": 1.0640303809049858,
"step": 1000
},
{
"epoch": 1.03,
"learning_rate": 0.0002,
"loss": 0.4706,
"step": 1010
},
{
"epoch": 1.04,
"learning_rate": 0.0002,
"loss": 0.661,
"step": 1020
},
{
"epoch": 1.05,
"learning_rate": 0.0002,
"loss": 0.6559,
"step": 1030
},
{
"epoch": 1.06,
"learning_rate": 0.0002,
"loss": 0.4843,
"step": 1040
},
{
"epoch": 1.07,
"learning_rate": 0.0002,
"loss": 0.5342,
"step": 1050
},
{
"epoch": 1.08,
"learning_rate": 0.0002,
"loss": 0.5832,
"step": 1060
},
{
"epoch": 1.09,
"learning_rate": 0.0002,
"loss": 0.5623,
"step": 1070
},
{
"epoch": 1.1,
"learning_rate": 0.0002,
"loss": 0.5994,
"step": 1080
},
{
"epoch": 1.11,
"learning_rate": 0.0002,
"loss": 0.5743,
"step": 1090
},
{
"epoch": 1.12,
"learning_rate": 0.0002,
"loss": 0.5526,
"step": 1100
},
{
"epoch": 1.13,
"learning_rate": 0.0002,
"loss": 0.5922,
"step": 1110
},
{
"epoch": 1.14,
"learning_rate": 0.0002,
"loss": 0.6261,
"step": 1120
},
{
"epoch": 1.15,
"learning_rate": 0.0002,
"loss": 0.625,
"step": 1130
},
{
"epoch": 1.16,
"learning_rate": 0.0002,
"loss": 0.5208,
"step": 1140
},
{
"epoch": 1.17,
"learning_rate": 0.0002,
"loss": 0.5345,
"step": 1150
},
{
"epoch": 1.18,
"learning_rate": 0.0002,
"loss": 0.5428,
"step": 1160
},
{
"epoch": 1.19,
"learning_rate": 0.0002,
"loss": 0.5958,
"step": 1170
},
{
"epoch": 1.2,
"learning_rate": 0.0002,
"loss": 0.5782,
"step": 1180
},
{
"epoch": 1.21,
"learning_rate": 0.0002,
"loss": 0.5753,
"step": 1190
},
{
"epoch": 1.22,
"learning_rate": 0.0002,
"loss": 0.5718,
"step": 1200
},
{
"epoch": 1.22,
"eval_loss": 0.6110609173774719,
"eval_runtime": 120.8304,
"eval_samples_per_second": 8.276,
"eval_steps_per_second": 4.138,
"step": 1200
},
{
"epoch": 1.22,
"mmlu_eval_accuracy": 0.4750496515745048,
"mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
"mmlu_eval_accuracy_anatomy": 0.5714285714285714,
"mmlu_eval_accuracy_astronomy": 0.3125,
"mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
"mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
"mmlu_eval_accuracy_college_biology": 0.5,
"mmlu_eval_accuracy_college_chemistry": 0.375,
"mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
"mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
"mmlu_eval_accuracy_college_medicine": 0.3181818181818182,
"mmlu_eval_accuracy_college_physics": 0.45454545454545453,
"mmlu_eval_accuracy_computer_security": 0.5454545454545454,
"mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
"mmlu_eval_accuracy_econometrics": 0.08333333333333333,
"mmlu_eval_accuracy_electrical_engineering": 0.375,
"mmlu_eval_accuracy_elementary_mathematics": 0.36585365853658536,
"mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
"mmlu_eval_accuracy_global_facts": 0.5,
"mmlu_eval_accuracy_high_school_biology": 0.4375,
"mmlu_eval_accuracy_high_school_chemistry": 0.36363636363636365,
"mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_european_history": 0.5,
"mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.3488372093023256,
"mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
"mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231,
"mmlu_eval_accuracy_high_school_physics": 0.29411764705882354,
"mmlu_eval_accuracy_high_school_psychology": 0.7,
"mmlu_eval_accuracy_high_school_statistics": 0.4782608695652174,
"mmlu_eval_accuracy_high_school_us_history": 0.7272727272727273,
"mmlu_eval_accuracy_high_school_world_history": 0.5769230769230769,
"mmlu_eval_accuracy_human_aging": 0.6086956521739131,
"mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
"mmlu_eval_accuracy_international_law": 0.7692307692307693,
"mmlu_eval_accuracy_jurisprudence": 0.2727272727272727,
"mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556,
"mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
"mmlu_eval_accuracy_management": 0.7272727272727273,
"mmlu_eval_accuracy_marketing": 0.8,
"mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
"mmlu_eval_accuracy_miscellaneous": 0.686046511627907,
"mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
"mmlu_eval_accuracy_moral_scenarios": 0.25,
"mmlu_eval_accuracy_nutrition": 0.6363636363636364,
"mmlu_eval_accuracy_philosophy": 0.47058823529411764,
"mmlu_eval_accuracy_prehistory": 0.4857142857142857,
"mmlu_eval_accuracy_professional_accounting": 0.2903225806451613,
"mmlu_eval_accuracy_professional_law": 0.3411764705882353,
"mmlu_eval_accuracy_professional_medicine": 0.4838709677419355,
"mmlu_eval_accuracy_professional_psychology": 0.34782608695652173,
"mmlu_eval_accuracy_public_relations": 0.5,
"mmlu_eval_accuracy_security_studies": 0.5185185185185185,
"mmlu_eval_accuracy_sociology": 0.6363636363636364,
"mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454,
"mmlu_eval_accuracy_virology": 0.4444444444444444,
"mmlu_eval_accuracy_world_religions": 0.7368421052631579,
"mmlu_loss": 0.9734070353774116,
"step": 1200
},
{
"epoch": 1.23,
"learning_rate": 0.0002,
"loss": 0.6083,
"step": 1210
},
{
"epoch": 1.24,
"learning_rate": 0.0002,
"loss": 0.5525,
"step": 1220
},
{
"epoch": 1.25,
"learning_rate": 0.0002,
"loss": 0.5798,
"step": 1230
},
{
"epoch": 1.26,
"learning_rate": 0.0002,
"loss": 0.5675,
"step": 1240
},
{
"epoch": 1.27,
"learning_rate": 0.0002,
"loss": 0.532,
"step": 1250
},
{
"epoch": 1.28,
"learning_rate": 0.0002,
"loss": 0.6019,
"step": 1260
},
{
"epoch": 1.29,
"learning_rate": 0.0002,
"loss": 0.5088,
"step": 1270
},
{
"epoch": 1.3,
"learning_rate": 0.0002,
"loss": 0.5697,
"step": 1280
},
{
"epoch": 1.31,
"learning_rate": 0.0002,
"loss": 0.6149,
"step": 1290
},
{
"epoch": 1.32,
"learning_rate": 0.0002,
"loss": 0.5419,
"step": 1300
},
{
"epoch": 1.33,
"learning_rate": 0.0002,
"loss": 0.5393,
"step": 1310
},
{
"epoch": 1.34,
"learning_rate": 0.0002,
"loss": 0.5745,
"step": 1320
},
{
"epoch": 1.35,
"learning_rate": 0.0002,
"loss": 0.6289,
"step": 1330
},
{
"epoch": 1.36,
"learning_rate": 0.0002,
"loss": 0.5299,
"step": 1340
},
{
"epoch": 1.37,
"learning_rate": 0.0002,
"loss": 0.6407,
"step": 1350
},
{
"epoch": 1.38,
"learning_rate": 0.0002,
"loss": 0.5473,
"step": 1360
},
{
"epoch": 1.39,
"learning_rate": 0.0002,
"loss": 0.6333,
"step": 1370
},
{
"epoch": 1.4,
"learning_rate": 0.0002,
"loss": 0.524,
"step": 1380
},
{
"epoch": 1.41,
"learning_rate": 0.0002,
"loss": 0.5794,
"step": 1390
},
{
"epoch": 1.42,
"learning_rate": 0.0002,
"loss": 0.613,
"step": 1400
},
{
"epoch": 1.42,
"eval_loss": 0.6084713339805603,
"eval_runtime": 120.6833,
"eval_samples_per_second": 8.286,
"eval_steps_per_second": 4.143,
"step": 1400
},
{
"epoch": 1.42,
"mmlu_eval_accuracy": 0.4752108639623148,
"mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
"mmlu_eval_accuracy_anatomy": 0.5714285714285714,
"mmlu_eval_accuracy_astronomy": 0.5,
"mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
"mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
"mmlu_eval_accuracy_college_biology": 0.4375,
"mmlu_eval_accuracy_college_chemistry": 0.375,
"mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
"mmlu_eval_accuracy_college_mathematics": 0.36363636363636365,
"mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
"mmlu_eval_accuracy_college_physics": 0.45454545454545453,
"mmlu_eval_accuracy_computer_security": 0.7272727272727273,
"mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
"mmlu_eval_accuracy_econometrics": 0.16666666666666666,
"mmlu_eval_accuracy_electrical_engineering": 0.3125,
"mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
"mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
"mmlu_eval_accuracy_global_facts": 0.5,
"mmlu_eval_accuracy_high_school_biology": 0.5,
"mmlu_eval_accuracy_high_school_chemistry": 0.3181818181818182,
"mmlu_eval_accuracy_high_school_computer_science": 0.4444444444444444,
"mmlu_eval_accuracy_high_school_european_history": 0.4444444444444444,
"mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.3023255813953488,
"mmlu_eval_accuracy_high_school_mathematics": 0.13793103448275862,
"mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231,
"mmlu_eval_accuracy_high_school_physics": 0.35294117647058826,
"mmlu_eval_accuracy_high_school_psychology": 0.7166666666666667,
"mmlu_eval_accuracy_high_school_statistics": 0.391304347826087,
"mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
"mmlu_eval_accuracy_high_school_world_history": 0.5,
"mmlu_eval_accuracy_human_aging": 0.6521739130434783,
"mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
"mmlu_eval_accuracy_international_law": 0.7692307692307693,
"mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
"mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
"mmlu_eval_accuracy_machine_learning": 0.36363636363636365,
"mmlu_eval_accuracy_management": 0.6363636363636364,
"mmlu_eval_accuracy_marketing": 0.8,
"mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
"mmlu_eval_accuracy_miscellaneous": 0.6744186046511628,
"mmlu_eval_accuracy_moral_disputes": 0.5263157894736842,
"mmlu_eval_accuracy_moral_scenarios": 0.23,
"mmlu_eval_accuracy_nutrition": 0.6666666666666666,
"mmlu_eval_accuracy_philosophy": 0.5294117647058824,
"mmlu_eval_accuracy_prehistory": 0.5428571428571428,
"mmlu_eval_accuracy_professional_accounting": 0.25806451612903225,
"mmlu_eval_accuracy_professional_law": 0.3588235294117647,
"mmlu_eval_accuracy_professional_medicine": 0.41935483870967744,
"mmlu_eval_accuracy_professional_psychology": 0.37681159420289856,
"mmlu_eval_accuracy_public_relations": 0.4166666666666667,
"mmlu_eval_accuracy_security_studies": 0.5185185185185185,
"mmlu_eval_accuracy_sociology": 0.6818181818181818,
"mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454,
"mmlu_eval_accuracy_virology": 0.3333333333333333,
"mmlu_eval_accuracy_world_religions": 0.6842105263157895,
"mmlu_loss": 0.9900610424266938,
"step": 1400
},
{
"epoch": 1.43,
"learning_rate": 0.0002,
"loss": 0.5765,
"step": 1410
},
{
"epoch": 1.44,
"learning_rate": 0.0002,
"loss": 0.5524,
"step": 1420
},
{
"epoch": 1.45,
"learning_rate": 0.0002,
"loss": 0.5906,
"step": 1430
},
{
"epoch": 1.46,
"learning_rate": 0.0002,
"loss": 0.6262,
"step": 1440
},
{
"epoch": 1.47,
"learning_rate": 0.0002,
"loss": 0.548,
"step": 1450
},
{
"epoch": 1.48,
"learning_rate": 0.0002,
"loss": 0.572,
"step": 1460
},
{
"epoch": 1.49,
"learning_rate": 0.0002,
"loss": 0.6067,
"step": 1470
},
{
"epoch": 1.5,
"learning_rate": 0.0002,
"loss": 0.613,
"step": 1480
},
{
"epoch": 1.51,
"learning_rate": 0.0002,
"loss": 0.5768,
"step": 1490
},
{
"epoch": 1.52,
"learning_rate": 0.0002,
"loss": 0.6391,
"step": 1500
},
{
"epoch": 1.53,
"learning_rate": 0.0002,
"loss": 0.5603,
"step": 1510
},
{
"epoch": 1.54,
"learning_rate": 0.0002,
"loss": 0.606,
"step": 1520
},
{
"epoch": 1.55,
"learning_rate": 0.0002,
"loss": 0.5004,
"step": 1530
},
{
"epoch": 1.56,
"learning_rate": 0.0002,
"loss": 0.5381,
"step": 1540
},
{
"epoch": 1.57,
"learning_rate": 0.0002,
"loss": 0.5827,
"step": 1550
},
{
"epoch": 1.58,
"learning_rate": 0.0002,
"loss": 0.6144,
"step": 1560
},
{
"epoch": 1.59,
"learning_rate": 0.0002,
"loss": 0.6166,
"step": 1570
},
{
"epoch": 1.6,
"learning_rate": 0.0002,
"loss": 0.5488,
"step": 1580
},
{
"epoch": 1.62,
"learning_rate": 0.0002,
"loss": 0.6256,
"step": 1590
},
{
"epoch": 1.63,
"learning_rate": 0.0002,
"loss": 0.6024,
"step": 1600
},
{
"epoch": 1.63,
"eval_loss": 0.609889805316925,
"eval_runtime": 121.8569,
"eval_samples_per_second": 8.206,
"eval_steps_per_second": 4.103,
"step": 1600
},
{
"epoch": 1.63,
"mmlu_eval_accuracy": 0.46909942336079086,
"mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
"mmlu_eval_accuracy_anatomy": 0.6428571428571429,
"mmlu_eval_accuracy_astronomy": 0.4375,
"mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
"mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
"mmlu_eval_accuracy_college_biology": 0.4375,
"mmlu_eval_accuracy_college_chemistry": 0.375,
"mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
"mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
"mmlu_eval_accuracy_college_medicine": 0.4090909090909091,
"mmlu_eval_accuracy_college_physics": 0.45454545454545453,
"mmlu_eval_accuracy_computer_security": 0.45454545454545453,
"mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
"mmlu_eval_accuracy_econometrics": 0.08333333333333333,
"mmlu_eval_accuracy_electrical_engineering": 0.375,
"mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
"mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
"mmlu_eval_accuracy_global_facts": 0.5,
"mmlu_eval_accuracy_high_school_biology": 0.46875,
"mmlu_eval_accuracy_high_school_chemistry": 0.3181818181818182,
"mmlu_eval_accuracy_high_school_computer_science": 0.4444444444444444,
"mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_geography": 0.7727272727272727,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.5714285714285714,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.32558139534883723,
"mmlu_eval_accuracy_high_school_mathematics": 0.10344827586206896,
"mmlu_eval_accuracy_high_school_microeconomics": 0.46153846153846156,
"mmlu_eval_accuracy_high_school_physics": 0.35294117647058826,
"mmlu_eval_accuracy_high_school_psychology": 0.7166666666666667,
"mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
"mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
"mmlu_eval_accuracy_high_school_world_history": 0.5769230769230769,
"mmlu_eval_accuracy_human_aging": 0.7391304347826086,
"mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
"mmlu_eval_accuracy_international_law": 0.7692307692307693,
"mmlu_eval_accuracy_jurisprudence": 0.2727272727272727,
"mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556,
"mmlu_eval_accuracy_machine_learning": 0.36363636363636365,
"mmlu_eval_accuracy_management": 0.6363636363636364,
"mmlu_eval_accuracy_marketing": 0.76,
"mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
"mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
"mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
"mmlu_eval_accuracy_moral_scenarios": 0.26,
"mmlu_eval_accuracy_nutrition": 0.5151515151515151,
"mmlu_eval_accuracy_philosophy": 0.5294117647058824,
"mmlu_eval_accuracy_prehistory": 0.45714285714285713,
"mmlu_eval_accuracy_professional_accounting": 0.25806451612903225,
"mmlu_eval_accuracy_professional_law": 0.3235294117647059,
"mmlu_eval_accuracy_professional_medicine": 0.45161290322580644,
"mmlu_eval_accuracy_professional_psychology": 0.391304347826087,
"mmlu_eval_accuracy_public_relations": 0.5,
"mmlu_eval_accuracy_security_studies": 0.5185185185185185,
"mmlu_eval_accuracy_sociology": 0.6363636363636364,
"mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
"mmlu_eval_accuracy_virology": 0.5,
"mmlu_eval_accuracy_world_religions": 0.6842105263157895,
"mmlu_loss": 1.002792890900965,
"step": 1600
}
],
"max_steps": 5000,
"num_train_epochs": 6,
"total_flos": 2.0483545271146906e+17,
"trial_name": null,
"trial_params": null
}