{
  "best_metric": 0.5078858137130737,
  "best_model_checkpoint": "./output_v2/7b_cluster05_Nous-Hermes-llama-2-7b_partitioned_v3_standardized_05/checkpoint-600",
  "epoch": 2.973977695167286,
  "global_step": 1000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "learning_rate": 0.0002,
      "loss": 0.6832,
      "step": 10
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.0002,
      "loss": 0.5389,
      "step": 20
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.0002,
      "loss": 0.5072,
      "step": 30
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.0002,
      "loss": 0.5211,
      "step": 40
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.0002,
      "loss": 0.5888,
      "step": 50
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.0002,
      "loss": 0.534,
      "step": 60
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.0002,
      "loss": 0.5684,
      "step": 70
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.0002,
      "loss": 0.514,
      "step": 80
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.0002,
      "loss": 0.5518,
      "step": 90
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.0002,
      "loss": 0.5034,
      "step": 100
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.0002,
      "loss": 0.542,
      "step": 110
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.0002,
      "loss": 0.4847,
      "step": 120
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.0002,
      "loss": 0.4772,
      "step": 130
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.0002,
      "loss": 0.5196,
      "step": 140
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.0002,
      "loss": 0.4672,
      "step": 150
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.0002,
      "loss": 0.4913,
      "step": 160
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.0002,
      "loss": 0.5498,
      "step": 170
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.0002,
      "loss": 0.5328,
      "step": 180
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.0002,
      "loss": 0.5313,
      "step": 190
    },
    {
      "epoch": 0.59,
      "learning_rate": 0.0002,
      "loss": 0.515,
      "step": 200
    },
    {
      "epoch": 0.59,
      "eval_loss": 0.531296968460083,
      "eval_runtime": 174.4771,
      "eval_samples_per_second": 5.731,
      "eval_steps_per_second": 2.866,
      "step": 200
    },
    {
      "epoch": 0.59,
      "mmlu_eval_accuracy": 0.46292469330066577,
      "mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
      "mmlu_eval_accuracy_anatomy": 0.5,
      "mmlu_eval_accuracy_astronomy": 0.5,
      "mmlu_eval_accuracy_business_ethics": 0.6363636363636364,
      "mmlu_eval_accuracy_clinical_knowledge": 0.41379310344827586,
      "mmlu_eval_accuracy_college_biology": 0.3125,
      "mmlu_eval_accuracy_college_chemistry": 0.125,
      "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
      "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
      "mmlu_eval_accuracy_college_medicine": 0.3181818181818182,
      "mmlu_eval_accuracy_college_physics": 0.5454545454545454,
      "mmlu_eval_accuracy_computer_security": 0.18181818181818182,
      "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
      "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
      "mmlu_eval_accuracy_electrical_engineering": 0.5,
      "mmlu_eval_accuracy_elementary_mathematics": 0.36585365853658536,
      "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
      "mmlu_eval_accuracy_global_facts": 0.6,
      "mmlu_eval_accuracy_high_school_biology": 0.34375,
      "mmlu_eval_accuracy_high_school_chemistry": 0.45454545454545453,
      "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
      "mmlu_eval_accuracy_high_school_european_history": 0.6666666666666666,
      "mmlu_eval_accuracy_high_school_geography": 0.7272727272727273,
      "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
      "mmlu_eval_accuracy_high_school_macroeconomics": 0.32558139534883723,
      "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
      "mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
      "mmlu_eval_accuracy_high_school_physics": 0.35294117647058826,
      "mmlu_eval_accuracy_high_school_psychology": 0.65,
      "mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
      "mmlu_eval_accuracy_high_school_us_history": 0.7272727272727273,
      "mmlu_eval_accuracy_high_school_world_history": 0.46153846153846156,
      "mmlu_eval_accuracy_human_aging": 0.6521739130434783,
      "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
      "mmlu_eval_accuracy_international_law": 0.8461538461538461,
      "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
      "mmlu_eval_accuracy_logical_fallacies": 0.5,
      "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
      "mmlu_eval_accuracy_management": 0.6363636363636364,
      "mmlu_eval_accuracy_marketing": 0.72,
      "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
      "mmlu_eval_accuracy_miscellaneous": 0.6744186046511628,
      "mmlu_eval_accuracy_moral_disputes": 0.39473684210526316,
      "mmlu_eval_accuracy_moral_scenarios": 0.24,
      "mmlu_eval_accuracy_nutrition": 0.5454545454545454,
      "mmlu_eval_accuracy_philosophy": 0.5,
      "mmlu_eval_accuracy_prehistory": 0.4,
      "mmlu_eval_accuracy_professional_accounting": 0.3548387096774194,
      "mmlu_eval_accuracy_professional_law": 0.3352941176470588,
      "mmlu_eval_accuracy_professional_medicine": 0.45161290322580644,
      "mmlu_eval_accuracy_professional_psychology": 0.42028985507246375,
      "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
      "mmlu_eval_accuracy_security_studies": 0.5185185185185185,
      "mmlu_eval_accuracy_sociology": 0.5909090909090909,
      "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
      "mmlu_eval_accuracy_virology": 0.3888888888888889,
      "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
      "mmlu_loss": 1.3857738420406147,
      "step": 200
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.0002,
      "loss": 0.4582,
      "step": 210
    },
    {
      "epoch": 0.65,
      "learning_rate": 0.0002,
      "loss": 0.4796,
      "step": 220
    },
    {
      "epoch": 0.68,
      "learning_rate": 0.0002,
      "loss": 0.5081,
      "step": 230
    },
    {
      "epoch": 0.71,
      "learning_rate": 0.0002,
      "loss": 0.5206,
      "step": 240
    },
    {
      "epoch": 0.74,
      "learning_rate": 0.0002,
      "loss": 0.4961,
      "step": 250
    },
    {
      "epoch": 0.77,
      "learning_rate": 0.0002,
      "loss": 0.5219,
      "step": 260
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.0002,
      "loss": 0.5311,
      "step": 270
    },
    {
      "epoch": 0.83,
      "learning_rate": 0.0002,
      "loss": 0.5039,
      "step": 280
    },
    {
      "epoch": 0.86,
      "learning_rate": 0.0002,
      "loss": 0.4994,
      "step": 290
    },
    {
      "epoch": 0.89,
      "learning_rate": 0.0002,
      "loss": 0.4804,
      "step": 300
    },
    {
      "epoch": 0.92,
      "learning_rate": 0.0002,
      "loss": 0.4791,
      "step": 310
    },
    {
      "epoch": 0.95,
      "learning_rate": 0.0002,
      "loss": 0.4822,
      "step": 320
    },
    {
      "epoch": 0.98,
      "learning_rate": 0.0002,
      "loss": 0.5157,
      "step": 330
    },
    {
      "epoch": 1.01,
      "learning_rate": 0.0002,
      "loss": 0.5184,
      "step": 340
    },
    {
      "epoch": 1.04,
      "learning_rate": 0.0002,
      "loss": 0.4562,
      "step": 350
    },
    {
      "epoch": 1.07,
      "learning_rate": 0.0002,
      "loss": 0.4117,
      "step": 360
    },
    {
      "epoch": 1.1,
      "learning_rate": 0.0002,
      "loss": 0.451,
      "step": 370
    },
    {
      "epoch": 1.13,
      "learning_rate": 0.0002,
      "loss": 0.4237,
      "step": 380
    },
    {
      "epoch": 1.16,
      "learning_rate": 0.0002,
      "loss": 0.4243,
      "step": 390
    },
    {
      "epoch": 1.19,
      "learning_rate": 0.0002,
      "loss": 0.4409,
      "step": 400
    },
    {
      "epoch": 1.19,
      "eval_loss": 0.5141976475715637,
      "eval_runtime": 174.8657,
      "eval_samples_per_second": 5.719,
      "eval_steps_per_second": 2.859,
      "step": 400
    },
    {
      "epoch": 1.19,
      "mmlu_eval_accuracy": 0.45097298030174665,
      "mmlu_eval_accuracy_abstract_algebra": 0.18181818181818182,
      "mmlu_eval_accuracy_anatomy": 0.5,
      "mmlu_eval_accuracy_astronomy": 0.5,
      "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
      "mmlu_eval_accuracy_clinical_knowledge": 0.41379310344827586,
      "mmlu_eval_accuracy_college_biology": 0.4375,
      "mmlu_eval_accuracy_college_chemistry": 0.0,
      "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
      "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
      "mmlu_eval_accuracy_college_medicine": 0.2727272727272727,
      "mmlu_eval_accuracy_college_physics": 0.5454545454545454,
      "mmlu_eval_accuracy_computer_security": 0.2727272727272727,
      "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
      "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
      "mmlu_eval_accuracy_electrical_engineering": 0.3125,
      "mmlu_eval_accuracy_elementary_mathematics": 0.3902439024390244,
      "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
      "mmlu_eval_accuracy_global_facts": 0.6,
      "mmlu_eval_accuracy_high_school_biology": 0.3125,
      "mmlu_eval_accuracy_high_school_chemistry": 0.36363636363636365,
      "mmlu_eval_accuracy_high_school_computer_science": 0.4444444444444444,
      "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
      "mmlu_eval_accuracy_high_school_geography": 0.7272727272727273,
      "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
      "mmlu_eval_accuracy_high_school_macroeconomics": 0.3488372093023256,
      "mmlu_eval_accuracy_high_school_mathematics": 0.27586206896551724,
      "mmlu_eval_accuracy_high_school_microeconomics": 0.5,
      "mmlu_eval_accuracy_high_school_physics": 0.29411764705882354,
      "mmlu_eval_accuracy_high_school_psychology": 0.6833333333333333,
      "mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
      "mmlu_eval_accuracy_high_school_us_history": 0.6818181818181818,
      "mmlu_eval_accuracy_high_school_world_history": 0.5384615384615384,
      "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
      "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
      "mmlu_eval_accuracy_international_law": 0.7692307692307693,
      "mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
      "mmlu_eval_accuracy_logical_fallacies": 0.5,
      "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
      "mmlu_eval_accuracy_management": 0.5454545454545454,
      "mmlu_eval_accuracy_marketing": 0.68,
      "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
      "mmlu_eval_accuracy_miscellaneous": 0.6627906976744186,
      "mmlu_eval_accuracy_moral_disputes": 0.5,
      "mmlu_eval_accuracy_moral_scenarios": 0.24,
      "mmlu_eval_accuracy_nutrition": 0.5151515151515151,
      "mmlu_eval_accuracy_philosophy": 0.47058823529411764,
      "mmlu_eval_accuracy_prehistory": 0.45714285714285713,
      "mmlu_eval_accuracy_professional_accounting": 0.2903225806451613,
      "mmlu_eval_accuracy_professional_law": 0.3411764705882353,
      "mmlu_eval_accuracy_professional_medicine": 0.45161290322580644,
      "mmlu_eval_accuracy_professional_psychology": 0.391304347826087,
      "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
      "mmlu_eval_accuracy_security_studies": 0.48148148148148145,
      "mmlu_eval_accuracy_sociology": 0.6363636363636364,
      "mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454,
      "mmlu_eval_accuracy_virology": 0.3888888888888889,
      "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
      "mmlu_loss": 1.0380505950970687,
      "step": 400
    },
    {
      "epoch": 1.22,
      "learning_rate": 0.0002,
      "loss": 0.4176,
      "step": 410
    },
    {
      "epoch": 1.25,
      "learning_rate": 0.0002,
      "loss": 0.445,
      "step": 420
    },
    {
      "epoch": 1.28,
      "learning_rate": 0.0002,
      "loss": 0.4968,
      "step": 430
    },
    {
      "epoch": 1.31,
      "learning_rate": 0.0002,
      "loss": 0.4573,
      "step": 440
    },
    {
      "epoch": 1.34,
      "learning_rate": 0.0002,
      "loss": 0.4097,
      "step": 450
    },
    {
      "epoch": 1.37,
      "learning_rate": 0.0002,
      "loss": 0.4215,
      "step": 460
    },
    {
      "epoch": 1.4,
      "learning_rate": 0.0002,
      "loss": 0.4754,
      "step": 470
    },
    {
      "epoch": 1.43,
      "learning_rate": 0.0002,
      "loss": 0.4463,
      "step": 480
    },
    {
      "epoch": 1.46,
      "learning_rate": 0.0002,
      "loss": 0.4027,
      "step": 490
    },
    {
      "epoch": 1.49,
      "learning_rate": 0.0002,
      "loss": 0.4361,
      "step": 500
    },
    {
      "epoch": 1.52,
      "learning_rate": 0.0002,
      "loss": 0.4458,
      "step": 510
    },
    {
      "epoch": 1.55,
      "learning_rate": 0.0002,
      "loss": 0.4445,
      "step": 520
    },
    {
      "epoch": 1.58,
      "learning_rate": 0.0002,
      "loss": 0.4117,
      "step": 530
    },
    {
      "epoch": 1.61,
      "learning_rate": 0.0002,
      "loss": 0.4609,
      "step": 540
    },
    {
      "epoch": 1.64,
      "learning_rate": 0.0002,
      "loss": 0.4511,
      "step": 550
    },
    {
      "epoch": 1.67,
      "learning_rate": 0.0002,
      "loss": 0.4385,
      "step": 560
    },
    {
      "epoch": 1.7,
      "learning_rate": 0.0002,
      "loss": 0.4451,
      "step": 570
    },
    {
      "epoch": 1.72,
      "learning_rate": 0.0002,
      "loss": 0.4414,
      "step": 580
    },
    {
      "epoch": 1.75,
      "learning_rate": 0.0002,
      "loss": 0.4235,
      "step": 590
    },
    {
      "epoch": 1.78,
      "learning_rate": 0.0002,
      "loss": 0.4954,
      "step": 600
    },
    {
      "epoch": 1.78,
      "eval_loss": 0.5078858137130737,
      "eval_runtime": 174.9277,
      "eval_samples_per_second": 5.717,
      "eval_steps_per_second": 2.858,
      "step": 600
    },
    {
      "epoch": 1.78,
      "mmlu_eval_accuracy": 0.4596214580927039,
      "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
      "mmlu_eval_accuracy_anatomy": 0.5714285714285714,
      "mmlu_eval_accuracy_astronomy": 0.5,
      "mmlu_eval_accuracy_business_ethics": 0.6363636363636364,
      "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
      "mmlu_eval_accuracy_college_biology": 0.4375,
      "mmlu_eval_accuracy_college_chemistry": 0.0,
      "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
      "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
      "mmlu_eval_accuracy_college_medicine": 0.2727272727272727,
      "mmlu_eval_accuracy_college_physics": 0.5454545454545454,
      "mmlu_eval_accuracy_computer_security": 0.2727272727272727,
      "mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
      "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
      "mmlu_eval_accuracy_electrical_engineering": 0.375,
      "mmlu_eval_accuracy_elementary_mathematics": 0.43902439024390244,
      "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
      "mmlu_eval_accuracy_global_facts": 0.5,
      "mmlu_eval_accuracy_high_school_biology": 0.34375,
      "mmlu_eval_accuracy_high_school_chemistry": 0.45454545454545453,
      "mmlu_eval_accuracy_high_school_computer_science": 0.4444444444444444,
      "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
      "mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
      "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
      "mmlu_eval_accuracy_high_school_macroeconomics": 0.3488372093023256,
      "mmlu_eval_accuracy_high_school_mathematics": 0.3103448275862069,
      "mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
      "mmlu_eval_accuracy_high_school_physics": 0.23529411764705882,
      "mmlu_eval_accuracy_high_school_psychology": 0.6833333333333333,
      "mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
      "mmlu_eval_accuracy_high_school_us_history": 0.6818181818181818,
      "mmlu_eval_accuracy_high_school_world_history": 0.46153846153846156,
      "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
      "mmlu_eval_accuracy_human_sexuality": 0.5,
      "mmlu_eval_accuracy_international_law": 0.7692307692307693,
      "mmlu_eval_accuracy_jurisprudence": 0.2727272727272727,
      "mmlu_eval_accuracy_logical_fallacies": 0.5,
      "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
      "mmlu_eval_accuracy_management": 0.5454545454545454,
      "mmlu_eval_accuracy_marketing": 0.72,
      "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
      "mmlu_eval_accuracy_miscellaneous": 0.6627906976744186,
      "mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
      "mmlu_eval_accuracy_moral_scenarios": 0.23,
      "mmlu_eval_accuracy_nutrition": 0.5757575757575758,
      "mmlu_eval_accuracy_philosophy": 0.47058823529411764,
      "mmlu_eval_accuracy_prehistory": 0.4857142857142857,
      "mmlu_eval_accuracy_professional_accounting": 0.25806451612903225,
      "mmlu_eval_accuracy_professional_law": 0.3588235294117647,
      "mmlu_eval_accuracy_professional_medicine": 0.45161290322580644,
      "mmlu_eval_accuracy_professional_psychology": 0.42028985507246375,
      "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
      "mmlu_eval_accuracy_security_studies": 0.5555555555555556,
      "mmlu_eval_accuracy_sociology": 0.6363636363636364,
      "mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454,
      "mmlu_eval_accuracy_virology": 0.3333333333333333,
      "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
      "mmlu_loss": 1.0377603609980552,
      "step": 600
    },
    {
      "epoch": 1.81,
      "learning_rate": 0.0002,
      "loss": 0.4865,
      "step": 610
    },
    {
      "epoch": 1.84,
      "learning_rate": 0.0002,
      "loss": 0.4451,
      "step": 620
    },
    {
      "epoch": 1.87,
      "learning_rate": 0.0002,
      "loss": 0.424,
      "step": 630
    },
    {
      "epoch": 1.9,
      "learning_rate": 0.0002,
      "loss": 0.4473,
      "step": 640
    },
    {
      "epoch": 1.93,
      "learning_rate": 0.0002,
      "loss": 0.4627,
      "step": 650
    },
    {
      "epoch": 1.96,
      "learning_rate": 0.0002,
      "loss": 0.4298,
      "step": 660
    },
    {
      "epoch": 1.99,
      "learning_rate": 0.0002,
      "loss": 0.4561,
      "step": 670
    },
    {
      "epoch": 2.02,
      "learning_rate": 0.0002,
      "loss": 0.3726,
      "step": 680
    },
    {
      "epoch": 2.05,
      "learning_rate": 0.0002,
      "loss": 0.3548,
      "step": 690
    },
    {
      "epoch": 2.08,
      "learning_rate": 0.0002,
      "loss": 0.3565,
      "step": 700
    },
    {
      "epoch": 2.11,
      "learning_rate": 0.0002,
      "loss": 0.3133,
      "step": 710
    },
    {
      "epoch": 2.14,
      "learning_rate": 0.0002,
      "loss": 0.3475,
      "step": 720
    },
    {
      "epoch": 2.17,
      "learning_rate": 0.0002,
      "loss": 0.3761,
      "step": 730
    },
    {
      "epoch": 2.2,
      "learning_rate": 0.0002,
      "loss": 0.3336,
      "step": 740
    },
    {
      "epoch": 2.23,
      "learning_rate": 0.0002,
      "loss": 0.392,
      "step": 750
    },
    {
      "epoch": 2.26,
      "learning_rate": 0.0002,
      "loss": 0.3556,
      "step": 760
    },
    {
      "epoch": 2.29,
      "learning_rate": 0.0002,
      "loss": 0.3706,
      "step": 770
    },
    {
      "epoch": 2.32,
      "learning_rate": 0.0002,
      "loss": 0.3426,
      "step": 780
    },
    {
      "epoch": 2.35,
      "learning_rate": 0.0002,
      "loss": 0.3273,
      "step": 790
    },
    {
      "epoch": 2.38,
      "learning_rate": 0.0002,
      "loss": 0.3772,
      "step": 800
    },
    {
      "epoch": 2.38,
      "eval_loss": 0.5177344083786011,
      "eval_runtime": 175.3396,
      "eval_samples_per_second": 5.703,
      "eval_steps_per_second": 2.852,
      "step": 800
    },
    {
      "epoch": 2.38,
      "mmlu_eval_accuracy": 0.4452625151934351,
      "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
      "mmlu_eval_accuracy_anatomy": 0.5714285714285714,
      "mmlu_eval_accuracy_astronomy": 0.5,
      "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
      "mmlu_eval_accuracy_clinical_knowledge": 0.4482758620689655,
      "mmlu_eval_accuracy_college_biology": 0.375,
      "mmlu_eval_accuracy_college_chemistry": 0.0,
      "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
      "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
      "mmlu_eval_accuracy_college_medicine": 0.2727272727272727,
      "mmlu_eval_accuracy_college_physics": 0.45454545454545453,
      "mmlu_eval_accuracy_computer_security": 0.2727272727272727,
      "mmlu_eval_accuracy_conceptual_physics": 0.46153846153846156,
      "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
      "mmlu_eval_accuracy_electrical_engineering": 0.3125,
      "mmlu_eval_accuracy_elementary_mathematics": 0.4634146341463415,
      "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
      "mmlu_eval_accuracy_global_facts": 0.5,
      "mmlu_eval_accuracy_high_school_biology": 0.34375,
      "mmlu_eval_accuracy_high_school_chemistry": 0.36363636363636365,
      "mmlu_eval_accuracy_high_school_computer_science": 0.4444444444444444,
      "mmlu_eval_accuracy_high_school_european_history": 0.4444444444444444,
      "mmlu_eval_accuracy_high_school_geography": 0.7727272727272727,
      "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
      "mmlu_eval_accuracy_high_school_macroeconomics": 0.37209302325581395,
      "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
      "mmlu_eval_accuracy_high_school_microeconomics": 0.2692307692307692,
      "mmlu_eval_accuracy_high_school_physics": 0.23529411764705882,
      "mmlu_eval_accuracy_high_school_psychology": 0.7,
      "mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
      "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
      "mmlu_eval_accuracy_high_school_world_history": 0.46153846153846156,
      "mmlu_eval_accuracy_human_aging": 0.6521739130434783,
      "mmlu_eval_accuracy_human_sexuality": 0.5,
      "mmlu_eval_accuracy_international_law": 0.7692307692307693,
      "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
      "mmlu_eval_accuracy_logical_fallacies": 0.5,
      "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
      "mmlu_eval_accuracy_management": 0.45454545454545453,
      "mmlu_eval_accuracy_marketing": 0.76,
      "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
      "mmlu_eval_accuracy_miscellaneous": 0.6744186046511628,
      "mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
      "mmlu_eval_accuracy_moral_scenarios": 0.24,
      "mmlu_eval_accuracy_nutrition": 0.5151515151515151,
      "mmlu_eval_accuracy_philosophy": 0.4411764705882353,
      "mmlu_eval_accuracy_prehistory": 0.45714285714285713,
      "mmlu_eval_accuracy_professional_accounting": 0.25806451612903225,
      "mmlu_eval_accuracy_professional_law": 0.3588235294117647,
      "mmlu_eval_accuracy_professional_medicine": 0.41935483870967744,
      "mmlu_eval_accuracy_professional_psychology": 0.42028985507246375,
      "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
      "mmlu_eval_accuracy_security_studies": 0.48148148148148145,
      "mmlu_eval_accuracy_sociology": 0.6818181818181818,
      "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
      "mmlu_eval_accuracy_virology": 0.4444444444444444,
      "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
      "mmlu_loss": 0.9420785120841573,
      "step": 800
    },
    {
      "epoch": 2.41,
      "learning_rate": 0.0002,
      "loss": 0.3678,
      "step": 810
    },
    {
      "epoch": 2.44,
      "learning_rate": 0.0002,
      "loss": 0.316,
      "step": 820
    },
    {
      "epoch": 2.47,
      "learning_rate": 0.0002,
      "loss": 0.3669,
      "step": 830
    },
    {
      "epoch": 2.5,
      "learning_rate": 0.0002,
      "loss": 0.3955,
      "step": 840
    },
    {
      "epoch": 2.53,
      "learning_rate": 0.0002,
      "loss": 0.3854,
      "step": 850
    },
    {
      "epoch": 2.56,
      "learning_rate": 0.0002,
      "loss": 0.3514,
      "step": 860
    },
    {
      "epoch": 2.59,
      "learning_rate": 0.0002,
      "loss": 0.3491,
      "step": 870
    },
    {
      "epoch": 2.62,
      "learning_rate": 0.0002,
      "loss": 0.3567,
      "step": 880
    },
    {
      "epoch": 2.65,
      "learning_rate": 0.0002,
      "loss": 0.3839,
      "step": 890
    },
    {
      "epoch": 2.68,
      "learning_rate": 0.0002,
      "loss": 0.3291,
      "step": 900
    },
    {
      "epoch": 2.71,
      "learning_rate": 0.0002,
      "loss": 0.3917,
      "step": 910
    },
    {
      "epoch": 2.74,
      "learning_rate": 0.0002,
      "loss": 0.3812,
      "step": 920
    },
    {
      "epoch": 2.77,
      "learning_rate": 0.0002,
      "loss": 0.3496,
      "step": 930
    },
    {
      "epoch": 2.8,
      "learning_rate": 0.0002,
      "loss": 0.3339,
      "step": 940
    },
    {
      "epoch": 2.83,
      "learning_rate": 0.0002,
      "loss": 0.3565,
      "step": 950
    },
    {
      "epoch": 2.86,
      "learning_rate": 0.0002,
      "loss": 0.3825,
      "step": 960
    },
    {
      "epoch": 2.88,
      "learning_rate": 0.0002,
      "loss": 0.4028,
      "step": 970
    },
    {
      "epoch": 2.91,
      "learning_rate": 0.0002,
      "loss": 0.3621,
      "step": 980
    },
    {
      "epoch": 2.94,
      "learning_rate": 0.0002,
      "loss": 0.3345,
      "step": 990
    },
    {
      "epoch": 2.97,
      "learning_rate": 0.0002,
      "loss": 0.4121,
      "step": 1000
    },
    {
      "epoch": 2.97,
      "eval_loss": 0.5176346898078918,
      "eval_runtime": 175.3431,
      "eval_samples_per_second": 5.703,
      "eval_steps_per_second": 2.852,
      "step": 1000
    },
    {
      "epoch": 2.97,
      "mmlu_eval_accuracy": 0.43483776787791517,
      "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
      "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
      "mmlu_eval_accuracy_astronomy": 0.4375,
      "mmlu_eval_accuracy_business_ethics": 0.6363636363636364,
      "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
      "mmlu_eval_accuracy_college_biology": 0.3125,
      "mmlu_eval_accuracy_college_chemistry": 0.0,
      "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
      "mmlu_eval_accuracy_college_mathematics": 0.36363636363636365,
      "mmlu_eval_accuracy_college_medicine": 0.3181818181818182,
      "mmlu_eval_accuracy_college_physics": 0.45454545454545453,
      "mmlu_eval_accuracy_computer_security": 0.18181818181818182,
      "mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
      "mmlu_eval_accuracy_econometrics": 0.25,
      "mmlu_eval_accuracy_electrical_engineering": 0.375,
      "mmlu_eval_accuracy_elementary_mathematics": 0.43902439024390244,
      "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
      "mmlu_eval_accuracy_global_facts": 0.3,
      "mmlu_eval_accuracy_high_school_biology": 0.25,
      "mmlu_eval_accuracy_high_school_chemistry": 0.36363636363636365,
      "mmlu_eval_accuracy_high_school_computer_science": 0.4444444444444444,
      "mmlu_eval_accuracy_high_school_european_history": 0.4444444444444444,
      "mmlu_eval_accuracy_high_school_geography": 0.7272727272727273,
      "mmlu_eval_accuracy_high_school_government_and_politics": 0.5714285714285714,
      "mmlu_eval_accuracy_high_school_macroeconomics": 0.4186046511627907,
      "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
      "mmlu_eval_accuracy_high_school_microeconomics": 0.34615384615384615,
      "mmlu_eval_accuracy_high_school_physics": 0.17647058823529413,
      "mmlu_eval_accuracy_high_school_psychology": 0.6666666666666666,
      "mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
      "mmlu_eval_accuracy_high_school_us_history": 0.7272727272727273,
      "mmlu_eval_accuracy_high_school_world_history": 0.46153846153846156,
      "mmlu_eval_accuracy_human_aging": 0.6521739130434783,
      "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
      "mmlu_eval_accuracy_international_law": 0.7692307692307693,
      "mmlu_eval_accuracy_jurisprudence": 0.2727272727272727,
      "mmlu_eval_accuracy_logical_fallacies": 0.5,
      "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
      "mmlu_eval_accuracy_management": 0.36363636363636365,
      "mmlu_eval_accuracy_marketing": 0.68,
      "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
      "mmlu_eval_accuracy_miscellaneous": 0.6395348837209303,
      "mmlu_eval_accuracy_moral_disputes": 0.5263157894736842,
      "mmlu_eval_accuracy_moral_scenarios": 0.24,
      "mmlu_eval_accuracy_nutrition": 0.45454545454545453,
      "mmlu_eval_accuracy_philosophy": 0.4117647058823529,
      "mmlu_eval_accuracy_prehistory": 0.42857142857142855,
      "mmlu_eval_accuracy_professional_accounting": 0.22580645161290322,
      "mmlu_eval_accuracy_professional_law": 0.32941176470588235,
      "mmlu_eval_accuracy_professional_medicine": 0.45161290322580644,
      "mmlu_eval_accuracy_professional_psychology": 0.42028985507246375,
      "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
      "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
      "mmlu_eval_accuracy_sociology": 0.5909090909090909,
      "mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454,
      "mmlu_eval_accuracy_virology": 0.5,
      "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
      "mmlu_loss": 0.9825846157073197,
      "step": 1000
    }
  ],
  "max_steps": 5000,
  "num_train_epochs": 15,
  "total_flos": 2.0310520321046938e+17,
  "trial_name": null,
  "trial_params": null
}