{
  "best_metric": 0.7457320690155029,
  "best_model_checkpoint": "experts/mistralic-expert-16/checkpoint-400",
  "epoch": 0.1267427122940431,
  "eval_steps": 200,
  "global_step": 400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 0.0002,
      "loss": 0.7873,
      "step": 10
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.0002,
      "loss": 0.8128,
      "step": 20
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.0002,
      "loss": 0.8641,
      "step": 30
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.0002,
      "loss": 0.8246,
      "step": 40
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.0002,
      "loss": 0.7867,
      "step": 50
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.0002,
      "loss": 0.7705,
      "step": 60
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.0002,
      "loss": 0.7671,
      "step": 70
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.0002,
      "loss": 0.8725,
      "step": 80
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.0002,
      "loss": 0.8337,
      "step": 90
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.0002,
      "loss": 0.7819,
      "step": 100
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.0002,
      "loss": 0.7729,
      "step": 110
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.0002,
      "loss": 0.8169,
      "step": 120
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.0002,
      "loss": 0.7988,
      "step": 130
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.0002,
      "loss": 0.8958,
      "step": 140
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.0002,
      "loss": 0.7682,
      "step": 150
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.0002,
      "loss": 0.7729,
      "step": 160
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.0002,
      "loss": 0.7375,
      "step": 170
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.0002,
      "loss": 0.7756,
      "step": 180
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.0002,
      "loss": 0.8256,
      "step": 190
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.0002,
      "loss": 0.8504,
      "step": 200
    },
    {
      "epoch": 0.06,
      "eval_loss": 0.7519773244857788,
      "eval_runtime": 153.135,
      "eval_samples_per_second": 6.53,
      "eval_steps_per_second": 3.265,
      "step": 200
    },
    {
      "epoch": 0.06,
      "mmlu_eval_accuracy": 0.5986624199855438,
      "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
      "mmlu_eval_accuracy_anatomy": 0.42857142857142855,
      "mmlu_eval_accuracy_astronomy": 0.6875,
      "mmlu_eval_accuracy_business_ethics": 0.6363636363636364,
      "mmlu_eval_accuracy_clinical_knowledge": 0.5862068965517241,
      "mmlu_eval_accuracy_college_biology": 0.5625,
      "mmlu_eval_accuracy_college_chemistry": 0.375,
      "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
      "mmlu_eval_accuracy_college_mathematics": 0.6363636363636364,
      "mmlu_eval_accuracy_college_medicine": 0.6363636363636364,
      "mmlu_eval_accuracy_college_physics": 0.45454545454545453,
      "mmlu_eval_accuracy_computer_security": 0.5454545454545454,
      "mmlu_eval_accuracy_conceptual_physics": 0.5384615384615384,
      "mmlu_eval_accuracy_econometrics": 0.5833333333333334,
      "mmlu_eval_accuracy_electrical_engineering": 0.625,
      "mmlu_eval_accuracy_elementary_mathematics": 0.43902439024390244,
      "mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
      "mmlu_eval_accuracy_global_facts": 0.4,
      "mmlu_eval_accuracy_high_school_biology": 0.59375,
      "mmlu_eval_accuracy_high_school_chemistry": 0.36363636363636365,
      "mmlu_eval_accuracy_high_school_computer_science": 0.6666666666666666,
      "mmlu_eval_accuracy_high_school_european_history": 0.8333333333333334,
      "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
      "mmlu_eval_accuracy_high_school_government_and_politics": 0.7142857142857143,
      "mmlu_eval_accuracy_high_school_macroeconomics": 0.6046511627906976,
      "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
      "mmlu_eval_accuracy_high_school_microeconomics": 0.5769230769230769,
      "mmlu_eval_accuracy_high_school_physics": 0.23529411764705882,
      "mmlu_eval_accuracy_high_school_psychology": 0.85,
      "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
      "mmlu_eval_accuracy_high_school_us_history": 0.7727272727272727,
      "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
      "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
      "mmlu_eval_accuracy_human_sexuality": 0.5,
      "mmlu_eval_accuracy_international_law": 0.9230769230769231,
      "mmlu_eval_accuracy_jurisprudence": 0.6363636363636364,
      "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
      "mmlu_eval_accuracy_machine_learning": 0.45454545454545453,
      "mmlu_eval_accuracy_management": 0.9090909090909091,
      "mmlu_eval_accuracy_marketing": 0.88,
      "mmlu_eval_accuracy_medical_genetics": 1.0,
      "mmlu_eval_accuracy_miscellaneous": 0.7558139534883721,
      "mmlu_eval_accuracy_moral_disputes": 0.5526315789473685,
      "mmlu_eval_accuracy_moral_scenarios": 0.27,
      "mmlu_eval_accuracy_nutrition": 0.696969696969697,
      "mmlu_eval_accuracy_philosophy": 0.7647058823529411,
      "mmlu_eval_accuracy_prehistory": 0.5142857142857142,
      "mmlu_eval_accuracy_professional_accounting": 0.6451612903225806,
      "mmlu_eval_accuracy_professional_law": 0.4,
      "mmlu_eval_accuracy_professional_medicine": 0.6129032258064516,
      "mmlu_eval_accuracy_professional_psychology": 0.6231884057971014,
      "mmlu_eval_accuracy_public_relations": 0.5,
      "mmlu_eval_accuracy_security_studies": 0.6296296296296297,
      "mmlu_eval_accuracy_sociology": 0.8181818181818182,
      "mmlu_eval_accuracy_us_foreign_policy": 0.9090909090909091,
      "mmlu_eval_accuracy_virology": 0.5,
      "mmlu_eval_accuracy_world_religions": 0.8421052631578947,
      "mmlu_loss": 1.2932990876174781,
      "step": 200
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.0002,
      "loss": 0.823,
      "step": 210
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.0002,
      "loss": 0.8241,
      "step": 220
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.0002,
      "loss": 0.8277,
      "step": 230
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.0002,
      "loss": 0.8068,
      "step": 240
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.0002,
      "loss": 0.7698,
      "step": 250
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.0002,
      "loss": 0.8068,
      "step": 260
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.0002,
      "loss": 0.7913,
      "step": 270
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.0002,
      "loss": 0.8086,
      "step": 280
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.0002,
      "loss": 0.8127,
      "step": 290
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.0002,
      "loss": 0.7804,
      "step": 300
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.0002,
      "loss": 0.7667,
      "step": 310
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.0002,
      "loss": 0.757,
      "step": 320
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.0002,
      "loss": 0.7438,
      "step": 330
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.0002,
      "loss": 0.768,
      "step": 340
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.0002,
      "loss": 0.8151,
      "step": 350
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.0002,
      "loss": 0.7718,
      "step": 360
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.0002,
      "loss": 0.7903,
      "step": 370
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.0002,
      "loss": 0.7447,
      "step": 380
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.0002,
      "loss": 0.7712,
      "step": 390
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.0002,
      "loss": 0.7808,
      "step": 400
    },
    {
      "epoch": 0.13,
      "eval_loss": 0.7457320690155029,
      "eval_runtime": 152.8304,
      "eval_samples_per_second": 6.543,
      "eval_steps_per_second": 3.272,
      "step": 400
    },
    {
      "epoch": 0.13,
      "mmlu_eval_accuracy": 0.5941853117334526,
      "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
      "mmlu_eval_accuracy_anatomy": 0.5,
      "mmlu_eval_accuracy_astronomy": 0.75,
      "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
      "mmlu_eval_accuracy_clinical_knowledge": 0.6206896551724138,
      "mmlu_eval_accuracy_college_biology": 0.5625,
      "mmlu_eval_accuracy_college_chemistry": 0.25,
      "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
      "mmlu_eval_accuracy_college_mathematics": 0.5454545454545454,
      "mmlu_eval_accuracy_college_medicine": 0.6363636363636364,
      "mmlu_eval_accuracy_college_physics": 0.5454545454545454,
      "mmlu_eval_accuracy_computer_security": 0.7272727272727273,
      "mmlu_eval_accuracy_conceptual_physics": 0.5384615384615384,
      "mmlu_eval_accuracy_econometrics": 0.5,
      "mmlu_eval_accuracy_electrical_engineering": 0.5625,
      "mmlu_eval_accuracy_elementary_mathematics": 0.4634146341463415,
      "mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
      "mmlu_eval_accuracy_global_facts": 0.4,
      "mmlu_eval_accuracy_high_school_biology": 0.625,
      "mmlu_eval_accuracy_high_school_chemistry": 0.45454545454545453,
      "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
      "mmlu_eval_accuracy_high_school_european_history": 0.7777777777777778,
      "mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
      "mmlu_eval_accuracy_high_school_government_and_politics": 0.7619047619047619,
      "mmlu_eval_accuracy_high_school_macroeconomics": 0.6046511627906976,
      "mmlu_eval_accuracy_high_school_mathematics": 0.1724137931034483,
      "mmlu_eval_accuracy_high_school_microeconomics": 0.6923076923076923,
      "mmlu_eval_accuracy_high_school_physics": 0.17647058823529413,
      "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
      "mmlu_eval_accuracy_high_school_statistics": 0.43478260869565216,
      "mmlu_eval_accuracy_high_school_us_history": 0.7727272727272727,
      "mmlu_eval_accuracy_high_school_world_history": 0.6538461538461539,
      "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
      "mmlu_eval_accuracy_human_sexuality": 0.5,
      "mmlu_eval_accuracy_international_law": 1.0,
      "mmlu_eval_accuracy_jurisprudence": 0.6363636363636364,
      "mmlu_eval_accuracy_logical_fallacies": 0.7222222222222222,
      "mmlu_eval_accuracy_machine_learning": 0.5454545454545454,
      "mmlu_eval_accuracy_management": 0.9090909090909091,
      "mmlu_eval_accuracy_marketing": 0.88,
      "mmlu_eval_accuracy_medical_genetics": 1.0,
      "mmlu_eval_accuracy_miscellaneous": 0.7441860465116279,
      "mmlu_eval_accuracy_moral_disputes": 0.6052631578947368,
      "mmlu_eval_accuracy_moral_scenarios": 0.32,
      "mmlu_eval_accuracy_nutrition": 0.6363636363636364,
      "mmlu_eval_accuracy_philosophy": 0.7058823529411765,
      "mmlu_eval_accuracy_prehistory": 0.6285714285714286,
      "mmlu_eval_accuracy_professional_accounting": 0.5483870967741935,
      "mmlu_eval_accuracy_professional_law": 0.4294117647058823,
      "mmlu_eval_accuracy_professional_medicine": 0.6129032258064516,
      "mmlu_eval_accuracy_professional_psychology": 0.5797101449275363,
      "mmlu_eval_accuracy_public_relations": 0.5,
      "mmlu_eval_accuracy_security_studies": 0.5555555555555556,
      "mmlu_eval_accuracy_sociology": 0.8181818181818182,
      "mmlu_eval_accuracy_us_foreign_policy": 0.8181818181818182,
      "mmlu_eval_accuracy_virology": 0.3888888888888889,
      "mmlu_eval_accuracy_world_religions": 0.8421052631578947,
      "mmlu_loss": 1.0765447558606573,
      "step": 400
    }
  ],
  "logging_steps": 10,
  "max_steps": 9468,
  "num_train_epochs": 3,
  "save_steps": 200,
  "total_flos": 2.446105870364836e+17,
  "trial_name": null,
  "trial_params": null
}