Farouk committed
Commit bb0c7cf · 1 Parent(s): 3fe2bb9

commit files to HF hub

Files changed (5)
  1. adapter_model.bin +1 -1
  2. optimizer.pt +1 -1
  3. rng_state.pth +1 -1
  4. scheduler.pt +1 -1
  5. trainer_state.json +576 -3
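Together, these five files are one mid-training checkpoint update: the (likely PEFT/LoRA) adapter weights, the optimizer, LR-scheduler, and RNG state needed to resume, and the trainer's log. A minimal sketch of fetching one of them with huggingface_hub; the repo id below is a placeholder, not the actual repository name:

    # Hypothetical repo id; substitute the real repository for this commit.
    from huggingface_hub import hf_hub_download

    path = hf_hub_download(
        repo_id="farouk/7b_cluster06_checkpoints",  # placeholder
        filename="trainer_state.json",
        revision="bb0c7cf",  # the commit shown above
    )
    print(path)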
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:aec5947a0f655266a6242c15aa7d3bfab2f173eceeb83b317cc5031f573e81bf
+oid sha256:698f7f4394137c9f9a027f61094a137c19b351bca2776822ea338ffdc3b048db
 size 319977229
optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:993cceec25e1c3b2e79a7d7ec11c798e22767dbfe914178aff26ddfdb3abb198
+oid sha256:2e9b62486989ca29406a18af203ed5b33e5c27bed51df27f20aa9786e46628ff
 size 1279539973
rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:913416833317e281273f93debba0939efb5222bd5771a61896604e4043bd9f27
+oid sha256:c027db1dc669a948797121e90c1a982d60d14a868af0519ca5f007f6183bbdd0
 size 14511
scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:de7840bcb72f2f480fd301578d289cdfa174589e831b0d33e5772f3956b6beae
+oid sha256:d1351871b6e069a7a4437616d40b913152ecc2cd046e996fcfeb300a3cf60638
 size 627
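All four binary files above are Git LFS pointers: only the sha256 oid of each object changed, while every size is identical, i.e. the same-shaped tensors were re-serialized at a later training step. A minimal sketch, assuming a file has already been downloaded locally, of checking it against the oid in its pointer:

    # Verify a downloaded file against the sha256 oid from its LFS pointer.
    import hashlib

    def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
        h = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(chunk_size), b""):
                h.update(chunk)
        return h.hexdigest()

    # New adapter_model.bin oid from the diff above.
    expected = "698f7f4394137c9f9a027f61094a137c19b351bca2776822ea338ffdc3b048db"
    assert sha256_of("adapter_model.bin") == expected, "oid mismatch"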
trainer_state.json CHANGED
@@ -1,8 +1,8 @@
 {
   "best_metric": 0.6964578628540039,
   "best_model_checkpoint": "./output_v2/7b_cluster06_Nous-Hermes-llama-2-7b_partitioned_v3_standardized_06/checkpoint-800",
-  "epoch": 1.839080459770115,
-  "global_step": 800,
+  "epoch": 3.218390804597701,
+  "global_step": 1400,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -770,11 +770,584 @@
       "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
       "mmlu_loss": 1.069855675548704,
       "step": 800
+    },
+    {
+      "epoch": 1.86,
+      "learning_rate": 0.0002,
+      "loss": 0.6798,
+      "step": 810
+    },
+    {
+      "epoch": 1.89,
+      "learning_rate": 0.0002,
+      "loss": 0.6485,
+      "step": 820
+    },
+    {
+      "epoch": 1.91,
+      "learning_rate": 0.0002,
+      "loss": 0.6419,
+      "step": 830
+    },
+    {
+      "epoch": 1.93,
+      "learning_rate": 0.0002,
+      "loss": 0.6528,
+      "step": 840
+    },
+    {
+      "epoch": 1.95,
+      "learning_rate": 0.0002,
+      "loss": 0.6674,
+      "step": 850
+    },
+    {
+      "epoch": 1.98,
+      "learning_rate": 0.0002,
+      "loss": 0.6487,
+      "step": 860
+    },
+    {
+      "epoch": 2.0,
+      "learning_rate": 0.0002,
+      "loss": 0.6742,
+      "step": 870
+    },
+    {
+      "epoch": 2.02,
+      "learning_rate": 0.0002,
+      "loss": 0.5303,
+      "step": 880
+    },
+    {
+      "epoch": 2.05,
+      "learning_rate": 0.0002,
+      "loss": 0.5264,
+      "step": 890
+    },
+    {
+      "epoch": 2.07,
+      "learning_rate": 0.0002,
+      "loss": 0.5578,
+      "step": 900
+    },
+    {
+      "epoch": 2.09,
+      "learning_rate": 0.0002,
+      "loss": 0.5399,
+      "step": 910
+    },
+    {
+      "epoch": 2.11,
+      "learning_rate": 0.0002,
+      "loss": 0.6028,
+      "step": 920
+    },
+    {
+      "epoch": 2.14,
+      "learning_rate": 0.0002,
+      "loss": 0.5292,
+      "step": 930
+    },
+    {
+      "epoch": 2.16,
+      "learning_rate": 0.0002,
+      "loss": 0.5715,
+      "step": 940
+    },
+    {
+      "epoch": 2.18,
+      "learning_rate": 0.0002,
+      "loss": 0.514,
+      "step": 950
+    },
+    {
+      "epoch": 2.21,
+      "learning_rate": 0.0002,
+      "loss": 0.5381,
+      "step": 960
+    },
+    {
+      "epoch": 2.23,
+      "learning_rate": 0.0002,
+      "loss": 0.5259,
+      "step": 970
+    },
+    {
+      "epoch": 2.25,
+      "learning_rate": 0.0002,
+      "loss": 0.5476,
+      "step": 980
+    },
+    {
+      "epoch": 2.28,
+      "learning_rate": 0.0002,
+      "loss": 0.5369,
+      "step": 990
+    },
+    {
+      "epoch": 2.3,
+      "learning_rate": 0.0002,
+      "loss": 0.5541,
+      "step": 1000
+    },
+    {
+      "epoch": 2.3,
+      "eval_loss": 0.7225061058998108,
+      "eval_runtime": 248.2663,
+      "eval_samples_per_second": 4.028,
+      "eval_steps_per_second": 2.014,
+      "step": 1000
+    },
+    {
+      "epoch": 2.3,
+      "mmlu_eval_accuracy": 0.46504233740978407,
+      "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
+      "mmlu_eval_accuracy_anatomy": 0.5714285714285714,
+      "mmlu_eval_accuracy_astronomy": 0.375,
+      "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
+      "mmlu_eval_accuracy_clinical_knowledge": 0.4482758620689655,
+      "mmlu_eval_accuracy_college_biology": 0.4375,
+      "mmlu_eval_accuracy_college_chemistry": 0.125,
+      "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
+      "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
+      "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
+      "mmlu_eval_accuracy_college_physics": 0.45454545454545453,
+      "mmlu_eval_accuracy_computer_security": 0.2727272727272727,
+      "mmlu_eval_accuracy_conceptual_physics": 0.3076923076923077,
+      "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
+      "mmlu_eval_accuracy_electrical_engineering": 0.5,
+      "mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
+      "mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
+      "mmlu_eval_accuracy_global_facts": 0.4,
+      "mmlu_eval_accuracy_high_school_biology": 0.34375,
+      "mmlu_eval_accuracy_high_school_chemistry": 0.4090909090909091,
+      "mmlu_eval_accuracy_high_school_computer_science": 0.6666666666666666,
+      "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
+      "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
+      "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191,
+      "mmlu_eval_accuracy_high_school_macroeconomics": 0.32558139534883723,
+      "mmlu_eval_accuracy_high_school_mathematics": 0.27586206896551724,
+      "mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
+      "mmlu_eval_accuracy_high_school_physics": 0.29411764705882354,
+      "mmlu_eval_accuracy_high_school_psychology": 0.7,
+      "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
+      "mmlu_eval_accuracy_high_school_us_history": 0.7272727272727273,
+      "mmlu_eval_accuracy_high_school_world_history": 0.6538461538461539,
+      "mmlu_eval_accuracy_human_aging": 0.6521739130434783,
+      "mmlu_eval_accuracy_human_sexuality": 0.5,
+      "mmlu_eval_accuracy_international_law": 0.9230769230769231,
+      "mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
+      "mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556,
+      "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
+      "mmlu_eval_accuracy_management": 0.6363636363636364,
+      "mmlu_eval_accuracy_marketing": 0.72,
+      "mmlu_eval_accuracy_medical_genetics": 0.6363636363636364,
+      "mmlu_eval_accuracy_miscellaneous": 0.686046511627907,
+      "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
+      "mmlu_eval_accuracy_moral_scenarios": 0.24,
+      "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
+      "mmlu_eval_accuracy_philosophy": 0.35294117647058826,
+      "mmlu_eval_accuracy_prehistory": 0.42857142857142855,
+      "mmlu_eval_accuracy_professional_accounting": 0.25806451612903225,
+      "mmlu_eval_accuracy_professional_law": 0.3588235294117647,
+      "mmlu_eval_accuracy_professional_medicine": 0.41935483870967744,
+      "mmlu_eval_accuracy_professional_psychology": 0.37681159420289856,
+      "mmlu_eval_accuracy_public_relations": 0.5,
+      "mmlu_eval_accuracy_security_studies": 0.48148148148148145,
+      "mmlu_eval_accuracy_sociology": 0.6818181818181818,
+      "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
+      "mmlu_eval_accuracy_virology": 0.4444444444444444,
+      "mmlu_eval_accuracy_world_religions": 0.7894736842105263,
+      "mmlu_loss": 1.1883746445879302,
+      "step": 1000
+    },
+    {
+      "epoch": 2.32,
+      "learning_rate": 0.0002,
+      "loss": 0.5264,
+      "step": 1010
+    },
+    {
+      "epoch": 2.34,
+      "learning_rate": 0.0002,
+      "loss": 0.5494,
+      "step": 1020
+    },
+    {
+      "epoch": 2.37,
+      "learning_rate": 0.0002,
+      "loss": 0.5728,
+      "step": 1030
+    },
+    {
+      "epoch": 2.39,
+      "learning_rate": 0.0002,
+      "loss": 0.5291,
+      "step": 1040
+    },
+    {
+      "epoch": 2.41,
+      "learning_rate": 0.0002,
+      "loss": 0.523,
+      "step": 1050
+    },
+    {
+      "epoch": 2.44,
+      "learning_rate": 0.0002,
+      "loss": 0.5893,
+      "step": 1060
+    },
+    {
+      "epoch": 2.46,
+      "learning_rate": 0.0002,
+      "loss": 0.5839,
+      "step": 1070
+    },
+    {
+      "epoch": 2.48,
+      "learning_rate": 0.0002,
+      "loss": 0.5653,
+      "step": 1080
+    },
+    {
+      "epoch": 2.51,
+      "learning_rate": 0.0002,
+      "loss": 0.5518,
+      "step": 1090
+    },
+    {
+      "epoch": 2.53,
+      "learning_rate": 0.0002,
+      "loss": 0.5497,
+      "step": 1100
+    },
+    {
+      "epoch": 2.55,
+      "learning_rate": 0.0002,
+      "loss": 0.5789,
+      "step": 1110
+    },
+    {
+      "epoch": 2.57,
+      "learning_rate": 0.0002,
+      "loss": 0.5358,
+      "step": 1120
+    },
+    {
+      "epoch": 2.6,
+      "learning_rate": 0.0002,
+      "loss": 0.5576,
+      "step": 1130
+    },
+    {
+      "epoch": 2.62,
+      "learning_rate": 0.0002,
+      "loss": 0.5015,
+      "step": 1140
+    },
+    {
+      "epoch": 2.64,
+      "learning_rate": 0.0002,
+      "loss": 0.5494,
+      "step": 1150
+    },
+    {
+      "epoch": 2.67,
+      "learning_rate": 0.0002,
+      "loss": 0.5482,
+      "step": 1160
+    },
+    {
+      "epoch": 2.69,
+      "learning_rate": 0.0002,
+      "loss": 0.5882,
+      "step": 1170
+    },
+    {
+      "epoch": 2.71,
+      "learning_rate": 0.0002,
+      "loss": 0.5525,
+      "step": 1180
+    },
+    {
+      "epoch": 2.74,
+      "learning_rate": 0.0002,
+      "loss": 0.5455,
+      "step": 1190
+    },
+    {
+      "epoch": 2.76,
+      "learning_rate": 0.0002,
+      "loss": 0.5813,
+      "step": 1200
+    },
+    {
+      "epoch": 2.76,
+      "eval_loss": 0.7202425003051758,
+      "eval_runtime": 248.3045,
+      "eval_samples_per_second": 4.027,
+      "eval_steps_per_second": 2.014,
+      "step": 1200
+    },
+    {
+      "epoch": 2.76,
+      "mmlu_eval_accuracy": 0.4683330314670407,
+      "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
+      "mmlu_eval_accuracy_anatomy": 0.5714285714285714,
+      "mmlu_eval_accuracy_astronomy": 0.3125,
+      "mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
+      "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
+      "mmlu_eval_accuracy_college_biology": 0.3125,
+      "mmlu_eval_accuracy_college_chemistry": 0.25,
+      "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
+      "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
+      "mmlu_eval_accuracy_college_medicine": 0.5,
+      "mmlu_eval_accuracy_college_physics": 0.45454545454545453,
+      "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
+      "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
+      "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
+      "mmlu_eval_accuracy_electrical_engineering": 0.4375,
+      "mmlu_eval_accuracy_elementary_mathematics": 0.2682926829268293,
+      "mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
+      "mmlu_eval_accuracy_global_facts": 0.4,
+      "mmlu_eval_accuracy_high_school_biology": 0.375,
+      "mmlu_eval_accuracy_high_school_chemistry": 0.4090909090909091,
+      "mmlu_eval_accuracy_high_school_computer_science": 0.6666666666666666,
+      "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
+      "mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
+      "mmlu_eval_accuracy_high_school_government_and_politics": 0.5714285714285714,
+      "mmlu_eval_accuracy_high_school_macroeconomics": 0.32558139534883723,
+      "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
+      "mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
+      "mmlu_eval_accuracy_high_school_physics": 0.23529411764705882,
+      "mmlu_eval_accuracy_high_school_psychology": 0.8,
+      "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
+      "mmlu_eval_accuracy_high_school_us_history": 0.7272727272727273,
+      "mmlu_eval_accuracy_high_school_world_history": 0.5769230769230769,
+      "mmlu_eval_accuracy_human_aging": 0.6086956521739131,
+      "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
+      "mmlu_eval_accuracy_international_law": 0.9230769230769231,
+      "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
+      "mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556,
+      "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
+      "mmlu_eval_accuracy_management": 0.7272727272727273,
+      "mmlu_eval_accuracy_marketing": 0.72,
+      "mmlu_eval_accuracy_medical_genetics": 0.6363636363636364,
+      "mmlu_eval_accuracy_miscellaneous": 0.6627906976744186,
+      "mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
+      "mmlu_eval_accuracy_moral_scenarios": 0.24,
+      "mmlu_eval_accuracy_nutrition": 0.6363636363636364,
+      "mmlu_eval_accuracy_philosophy": 0.4117647058823529,
+      "mmlu_eval_accuracy_prehistory": 0.4857142857142857,
+      "mmlu_eval_accuracy_professional_accounting": 0.1935483870967742,
+      "mmlu_eval_accuracy_professional_law": 0.3058823529411765,
+      "mmlu_eval_accuracy_professional_medicine": 0.41935483870967744,
+      "mmlu_eval_accuracy_professional_psychology": 0.4057971014492754,
+      "mmlu_eval_accuracy_public_relations": 0.4166666666666667,
+      "mmlu_eval_accuracy_security_studies": 0.48148148148148145,
+      "mmlu_eval_accuracy_sociology": 0.7272727272727273,
+      "mmlu_eval_accuracy_us_foreign_policy": 0.8181818181818182,
+      "mmlu_eval_accuracy_virology": 0.5,
+      "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
+      "mmlu_loss": 1.1609547404528597,
+      "step": 1200
+    },
+    {
+      "epoch": 2.78,
+      "learning_rate": 0.0002,
+      "loss": 0.5819,
+      "step": 1210
+    },
+    {
+      "epoch": 2.8,
+      "learning_rate": 0.0002,
+      "loss": 0.5756,
+      "step": 1220
+    },
+    {
+      "epoch": 2.83,
+      "learning_rate": 0.0002,
+      "loss": 0.5345,
+      "step": 1230
+    },
+    {
+      "epoch": 2.85,
+      "learning_rate": 0.0002,
+      "loss": 0.5642,
+      "step": 1240
+    },
+    {
+      "epoch": 2.87,
+      "learning_rate": 0.0002,
+      "loss": 0.5226,
+      "step": 1250
+    },
+    {
+      "epoch": 2.9,
+      "learning_rate": 0.0002,
+      "loss": 0.5812,
+      "step": 1260
+    },
+    {
+      "epoch": 2.92,
+      "learning_rate": 0.0002,
+      "loss": 0.5701,
+      "step": 1270
+    },
+    {
+      "epoch": 2.94,
+      "learning_rate": 0.0002,
+      "loss": 0.5534,
+      "step": 1280
+    },
+    {
+      "epoch": 2.97,
+      "learning_rate": 0.0002,
+      "loss": 0.526,
+      "step": 1290
+    },
+    {
+      "epoch": 2.99,
+      "learning_rate": 0.0002,
+      "loss": 0.5366,
+      "step": 1300
+    },
+    {
+      "epoch": 3.01,
+      "learning_rate": 0.0002,
+      "loss": 0.4988,
+      "step": 1310
+    },
+    {
+      "epoch": 3.03,
+      "learning_rate": 0.0002,
+      "loss": 0.4567,
+      "step": 1320
+    },
+    {
+      "epoch": 3.06,
+      "learning_rate": 0.0002,
+      "loss": 0.4366,
+      "step": 1330
+    },
+    {
+      "epoch": 3.08,
+      "learning_rate": 0.0002,
+      "loss": 0.4387,
+      "step": 1340
+    },
+    {
+      "epoch": 3.1,
+      "learning_rate": 0.0002,
+      "loss": 0.3887,
+      "step": 1350
+    },
+    {
+      "epoch": 3.13,
+      "learning_rate": 0.0002,
+      "loss": 0.4344,
+      "step": 1360
+    },
+    {
+      "epoch": 3.15,
+      "learning_rate": 0.0002,
+      "loss": 0.4543,
+      "step": 1370
+    },
+    {
+      "epoch": 3.17,
+      "learning_rate": 0.0002,
+      "loss": 0.4104,
+      "step": 1380
+    },
+    {
+      "epoch": 3.2,
+      "learning_rate": 0.0002,
+      "loss": 0.4564,
+      "step": 1390
+    },
+    {
+      "epoch": 3.22,
+      "learning_rate": 0.0002,
+      "loss": 0.4217,
+      "step": 1400
+    },
+    {
+      "epoch": 3.22,
+      "eval_loss": 0.7734031081199646,
+      "eval_runtime": 248.2207,
+      "eval_samples_per_second": 4.029,
+      "eval_steps_per_second": 2.014,
+      "step": 1400
+    },
+    {
+      "epoch": 3.22,
+      "mmlu_eval_accuracy": 0.4660104484440945,
+      "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
+      "mmlu_eval_accuracy_anatomy": 0.5714285714285714,
+      "mmlu_eval_accuracy_astronomy": 0.375,
+      "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
+      "mmlu_eval_accuracy_clinical_knowledge": 0.4482758620689655,
+      "mmlu_eval_accuracy_college_biology": 0.375,
+      "mmlu_eval_accuracy_college_chemistry": 0.125,
+      "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
+      "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
+      "mmlu_eval_accuracy_college_medicine": 0.45454545454545453,
+      "mmlu_eval_accuracy_college_physics": 0.45454545454545453,
+      "mmlu_eval_accuracy_computer_security": 0.2727272727272727,
+      "mmlu_eval_accuracy_conceptual_physics": 0.3076923076923077,
+      "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
+      "mmlu_eval_accuracy_electrical_engineering": 0.4375,
+      "mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
+      "mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
+      "mmlu_eval_accuracy_global_facts": 0.4,
+      "mmlu_eval_accuracy_high_school_biology": 0.34375,
+      "mmlu_eval_accuracy_high_school_chemistry": 0.45454545454545453,
+      "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
+      "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
+      "mmlu_eval_accuracy_high_school_geography": 0.7727272727272727,
+      "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
+      "mmlu_eval_accuracy_high_school_macroeconomics": 0.27906976744186046,
+      "mmlu_eval_accuracy_high_school_mathematics": 0.3448275862068966,
+      "mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231,
+      "mmlu_eval_accuracy_high_school_physics": 0.29411764705882354,
+      "mmlu_eval_accuracy_high_school_psychology": 0.7333333333333333,
+      "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
+      "mmlu_eval_accuracy_high_school_us_history": 0.7272727272727273,
+      "mmlu_eval_accuracy_high_school_world_history": 0.6538461538461539,
+      "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
+      "mmlu_eval_accuracy_human_sexuality": 0.5,
+      "mmlu_eval_accuracy_international_law": 0.8461538461538461,
+      "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
+      "mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556,
+      "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
+      "mmlu_eval_accuracy_management": 0.6363636363636364,
+      "mmlu_eval_accuracy_marketing": 0.72,
+      "mmlu_eval_accuracy_medical_genetics": 0.6363636363636364,
+      "mmlu_eval_accuracy_miscellaneous": 0.6976744186046512,
+      "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
+      "mmlu_eval_accuracy_moral_scenarios": 0.24,
+      "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
+      "mmlu_eval_accuracy_philosophy": 0.38235294117647056,
+      "mmlu_eval_accuracy_prehistory": 0.4,
+      "mmlu_eval_accuracy_professional_accounting": 0.3548387096774194,
+      "mmlu_eval_accuracy_professional_law": 0.3411764705882353,
+      "mmlu_eval_accuracy_professional_medicine": 0.41935483870967744,
+      "mmlu_eval_accuracy_professional_psychology": 0.4057971014492754,
+      "mmlu_eval_accuracy_public_relations": 0.5,
+      "mmlu_eval_accuracy_security_studies": 0.4074074074074074,
+      "mmlu_eval_accuracy_sociology": 0.6818181818181818,
+      "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
+      "mmlu_eval_accuracy_virology": 0.5,
+      "mmlu_eval_accuracy_world_religions": 0.7894736842105263,
+      "mmlu_loss": 1.0775621755841351,
+      "step": 1400
+    }
   ],
   "max_steps": 5000,
   "num_train_epochs": 12,
-  "total_flos": 1.8923843187612058e+17,
+  "total_flos": 3.3098129260712755e+17,
   "trial_name": null,
   "trial_params": null
 }
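The trainer_state.json diff extends log_history from step 800 (epoch 1.84) to step 1400 (epoch 3.22). Training loss keeps falling (about 0.68 at step 810 down to about 0.42 at step 1400), but eval_loss climbs from the best 0.6965 at step 800 to 0.7225, 0.7202, and 0.7734 at steps 1000, 1200, and 1400, and best_model_checkpoint still points at checkpoint-800; the run appears to be past its best generalization point. A minimal sketch of pulling that summary out of the file:

    # Summarize eval metrics from a Trainer state file.
    import json

    with open("trainer_state.json") as f:
        state = json.load(f)

    print("best:", state["best_metric"], "at", state["best_model_checkpoint"])
    for entry in state["log_history"]:
        if "eval_loss" in entry:
            print(f"step {entry['step']:>5}  eval_loss {entry['eval_loss']:.4f}")
        elif "mmlu_eval_accuracy" in entry:
            print(f"step {entry['step']:>5}  mmlu_acc  {entry['mmlu_eval_accuracy']:.4f}")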