dwadden committed
Commit 3389d64
1 Parent(s): a94bea7

Upload 8 files

Evaluation metrics
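Each uploaded file below is a flat JSON record: run metadata (`name`, `uuid`, `model` checkpoint path, `creation_date`), per-task scores under `eval_metrics.icl`, and aggregate summaries. As a minimal sketch of how such a record might be inspected (it assumes a copy of the file has already been downloaded locally under its repository filename; note that Python's `json` parser accepts the bare `NaN` tokens that appear in the humaneval-*.json files):

```python
import json
import math

# Assumes a local copy of one of the uploaded result files.
path = "heavy-OLMoE-7B-A1B-main.json"

with open(path) as f:
    # json.load accepts the bare NaN tokens used in the humaneval-*.json
    # files and maps them to float("nan").
    record = json.load(f)

print(record["model"], record["creation_date"])
for task, score in sorted(record["eval_metrics"]["icl"].items()):
    if not math.isnan(score):
        print(f"{task:40s} {score:.4f}")
print("aggregated_results:", record["aggregated_results"])
```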

heavy-OLMoE-7B-A1B-main.json ADDED
@@ -0,0 +1,82 @@
+ {
+   "name": "heavy",
+   "uuid": "83bb37f6-ddd0-43d8-894e-446d345ff7c1",
+   "model": "/net/nfs.cirrascale/allennlp/davidw/checkpoints/moe-release/OLMoE-7B-A1B/main",
+   "creation_date": "2024_08_06-17_10_22",
+   "eval_metrics": {
+     "icl": {
+       "mmlu_zeroshot": 0.4163723833728255,
+       "hellaswag_zeroshot": 0.7599083781242371,
+       "jeopardy": 0.4884525716304779,
+       "triviaqa_sm_sub": 0.5419999957084656,
+       "gsm8k_cot": 0.06444276124238968,
+       "agi_eval_sat_math_cot": 0.05454545468091965,
+       "aqua_cot": 0.02448979578912258,
+       "svamp_cot": 0.30000001192092896,
+       "bigbench_qa_wikidata": 0.6824467182159424,
+       "arc_easy": 0.7714646458625793,
+       "arc_challenge": 0.5332764387130737,
+       "mmlu_fewshot": 0.5218371202548345,
+       "bigbench_misconceptions": 0.5981734991073608,
+       "copa": 0.8100000023841858,
+       "siqa": 0.7154554724693298,
+       "commonsense_qa": 0.6723996996879578,
+       "piqa": 0.8133841156959534,
+       "openbook_qa": 0.414000004529953,
+       "bigbench_novel_concepts": 0.625,
+       "bigbench_strange_stories": 0.6666666865348816,
+       "bigbench_strategy_qa": 0.5622542500495911,
+       "lambada_openai": 0.7269551753997803,
+       "hellaswag": 0.776239812374115,
+       "winograd": 0.8278388381004333,
+       "winogrande": 0.6803472638130188,
+       "bigbench_conlang_translation": 0.03658536449074745,
+       "bigbench_language_identification": 0.3192000091075897,
+       "bigbench_conceptual_combinations": 0.5145630836486816,
+       "bigbench_elementary_math_qa": 0.26218554377555847,
+       "bigbench_dyck_languages": 0.19300000369548798,
+       "agi_eval_lsat_ar": 0.24347825348377228,
+       "bigbench_cs_algorithms": 0.47121211886405945,
+       "bigbench_logical_deduction": 0.26600000262260437,
+       "bigbench_operators": 0.3619047701358795,
+       "bigbench_repeat_copy_logic": 0.15625,
+       "simple_arithmetic_nospaces": 0.17599999904632568,
+       "simple_arithmetic_withspaces": 0.19499999284744263,
+       "math_qa": 0.26449882984161377,
+       "logi_qa": 0.3486943244934082,
+       "pubmed_qa_labeled": 0.5609999895095825,
+       "squad": 0.5288552641868591,
+       "agi_eval_lsat_rc": 0.4738805890083313,
+       "agi_eval_lsat_lr": 0.4019607901573181,
+       "coqa": 0.43692848086357117,
+       "bigbench_understanding_fables": 0.47089946269989014,
+       "boolq": 0.7333333492279053,
+       "agi_eval_sat_en": 0.553398072719574,
+       "winogender_mc_female": 0.5,
+       "winogender_mc_male": 0.550000011920929,
+       "enterprise_pii_classification": 0.523122251033783,
+       "bbq": 0.4975668625398116,
+       "gpqa_main": 0.2477678507566452,
+       "gpqa_diamond": 0.2222222238779068
+     }
+   },
+   "missing tasks": "[]",
+   "aggregated_task_categories_centered": {
+     "commonsense reasoning": 0.4523028646368181,
+     "language understanding": 0.4706549446387432,
+     "reading comprehension": 0.39537327579761805,
+     "safety": 0.03534456274726175,
+     "symbolic problem solving": 0.15998990932590068,
+     "world knowledge": 0.3526500500886761
+   },
+   "aggregated_centered_results": 0.31347879381791993,
+   "aggregated_results": 0.4633482752871252,
+   "rw_small": 0.7147216796875,
+   "rw_small_centered": 0.5053010230873064,
+   "95%_CI_above": 0.5612098809380036,
+   "95%_CI_above_centered": 0.41586115306825494,
+   "99%_CI_above": 0.5724260063275047,
+   "99%_CI_above_centered": 0.45794212339228574,
+   "low_variance_datasets": 0.5636761779134924,
+   "low_variance_datasets_centered": 0.462566793680692
+ }
heavy-OLMoE-7B-A1B-step1220000-tokens5117B.json ADDED
@@ -0,0 +1,82 @@
+ {
+   "name": "heavy",
+   "uuid": "70f3f719-28b5-46f5-b58e-bd89765d1e40",
+   "model": "/net/nfs.cirrascale/allennlp/davidw/checkpoints/moe-release/OLMoE-7B-A1B/step1220000-tokens5117B",
+   "creation_date": "2024_08_06-17_05_21",
+   "eval_metrics": {
+     "icl": {
+       "mmlu_zeroshot": 0.4273025756865217,
+       "hellaswag_zeroshot": 0.7597092390060425,
+       "jeopardy": 0.4871806979179382,
+       "triviaqa_sm_sub": 0.5303333401679993,
+       "gsm8k_cot": 0.07354056090116501,
+       "agi_eval_sat_math_cot": 0.040909089148044586,
+       "aqua_cot": 0.02857142873108387,
+       "svamp_cot": 0.2800000011920929,
+       "bigbench_qa_wikidata": 0.6884503960609436,
+       "arc_easy": 0.7845118045806885,
+       "arc_challenge": 0.5341296792030334,
+       "mmlu_fewshot": 0.5194499257363772,
+       "bigbench_misconceptions": 0.5525113940238953,
+       "copa": 0.800000011920929,
+       "siqa": 0.707267165184021,
+       "commonsense_qa": 0.6699426770210266,
+       "piqa": 0.8117519021034241,
+       "openbook_qa": 0.4399999976158142,
+       "bigbench_novel_concepts": 0.625,
+       "bigbench_strange_stories": 0.6839080452919006,
+       "bigbench_strategy_qa": 0.5810397267341614,
+       "lambada_openai": 0.7219095826148987,
+       "hellaswag": 0.7750447988510132,
+       "winograd": 0.831501841545105,
+       "winogrande": 0.6850828528404236,
+       "bigbench_conlang_translation": 0.060975611209869385,
+       "bigbench_language_identification": 0.34049999713897705,
+       "bigbench_conceptual_combinations": 0.5048543810844421,
+       "bigbench_elementary_math_qa": 0.27025681734085083,
+       "bigbench_dyck_languages": 0.1589999943971634,
+       "agi_eval_lsat_ar": 0.2652173936367035,
+       "bigbench_cs_algorithms": 0.5015151500701904,
+       "bigbench_logical_deduction": 0.25333333015441895,
+       "bigbench_operators": 0.34285715222358704,
+       "bigbench_repeat_copy_logic": 0.15625,
+       "simple_arithmetic_nospaces": 0.1809999942779541,
+       "simple_arithmetic_withspaces": 0.20600000023841858,
+       "math_qa": 0.2708682417869568,
+       "logi_qa": 0.3425499200820923,
+       "pubmed_qa_labeled": 0.4659999907016754,
+       "squad": 0.5244086980819702,
+       "agi_eval_lsat_rc": 0.43656715750694275,
+       "agi_eval_lsat_lr": 0.386274516582489,
+       "coqa": 0.4436928331851959,
+       "bigbench_understanding_fables": 0.4444444477558136,
+       "boolq": 0.7281345725059509,
+       "agi_eval_sat_en": 0.5485436916351318,
+       "winogender_mc_female": 0.46666666865348816,
+       "winogender_mc_male": 0.5833333134651184,
+       "enterprise_pii_classification": 0.5372606515884399,
+       "bbq": 0.48323566534302453,
+       "gpqa_main": 0.2232142835855484,
+       "gpqa_diamond": 0.21212121844291687
+     }
+   },
+   "missing tasks": "[]",
+   "aggregated_task_categories_centered": {
+     "commonsense reasoning": 0.46081640452671535,
+     "language understanding": 0.4762512398893946,
+     "reading comprehension": 0.36885401178478144,
+     "safety": 0.03524814952503552,
+     "symbolic problem solving": 0.15957477013304083,
+     "world knowledge": 0.34119598718414534
+   },
+   "aggregated_centered_results": 0.30931975984045923,
+   "aggregated_results": 0.4599646118255447,
+   "rw_small": 0.7152613600095113,
+   "rw_small_centered": 0.5043107818441781,
+   "95%_CI_above": 0.5589368432469957,
+   "95%_CI_above_centered": 0.41426754302993024,
+   "99%_CI_above": 0.5701544312031374,
+   "99%_CI_above_centered": 0.45654352726393493,
+   "low_variance_datasets": 0.5659450578418646,
+   "low_variance_datasets_centered": 0.46520260353587645
+ }
heavy-OLMoE-7B-A1B-step1223842-tokens5100B.json ADDED
@@ -0,0 +1,82 @@
+ {
+   "name": "heavy",
+   "uuid": "c260eedc-dbb5-4ff4-afa9-d163e8d7585b",
+   "model": "/net/nfs.cirrascale/allennlp/davidw/checkpoints/moe-release/OLMoE-7B-A1B/step1223842-tokens5100B",
+   "creation_date": "2024_08_06-16_39_52",
+   "eval_metrics": {
+     "icl": {
+       "mmlu_zeroshot": 0.43332746128241223,
+       "hellaswag_zeroshot": 0.7701653242111206,
+       "jeopardy": 0.5032301664352417,
+       "triviaqa_sm_sub": 0.5586666464805603,
+       "gsm8k_cot": 0.07429870963096619,
+       "agi_eval_sat_math_cot": 0.06363636255264282,
+       "aqua_cot": 0.020408162847161293,
+       "svamp_cot": 0.33000001311302185,
+       "bigbench_qa_wikidata": 0.6918458938598633,
+       "arc_easy": 0.7765151262283325,
+       "arc_challenge": 0.5375426411628723,
+       "mmlu_fewshot": 0.5333352494135237,
+       "bigbench_misconceptions": 0.6255707740783691,
+       "copa": 0.7799999713897705,
+       "siqa": 0.6934493184089661,
+       "commonsense_qa": 0.6928746700286865,
+       "piqa": 0.8204570412635803,
+       "openbook_qa": 0.4480000138282776,
+       "bigbench_novel_concepts": 0.65625,
+       "bigbench_strange_stories": 0.6954023241996765,
+       "bigbench_strategy_qa": 0.5696811079978943,
+       "lambada_openai": 0.7327769994735718,
+       "hellaswag": 0.7857996225357056,
+       "winograd": 0.8461538553237915,
+       "winogrande": 0.6898184418678284,
+       "bigbench_conlang_translation": 0.0731707289814949,
+       "bigbench_language_identification": 0.31049999594688416,
+       "bigbench_conceptual_combinations": 0.5631067752838135,
+       "bigbench_elementary_math_qa": 0.26944443583488464,
+       "bigbench_dyck_languages": 0.2150000035762787,
+       "agi_eval_lsat_ar": 0.2869565188884735,
+       "bigbench_cs_algorithms": 0.47196969389915466,
+       "bigbench_logical_deduction": 0.2460000067949295,
+       "bigbench_operators": 0.3380952477455139,
+       "bigbench_repeat_copy_logic": 0.1875,
+       "simple_arithmetic_nospaces": 0.20100000500679016,
+       "simple_arithmetic_withspaces": 0.22100000083446503,
+       "math_qa": 0.27522629499435425,
+       "logi_qa": 0.34562212228775024,
+       "pubmed_qa_labeled": 0.5789999961853027,
+       "squad": 0.5235572457313538,
+       "agi_eval_lsat_rc": 0.46641790866851807,
+       "agi_eval_lsat_lr": 0.37254902720451355,
+       "coqa": 0.4366779327392578,
+       "bigbench_understanding_fables": 0.4761904776096344,
+       "boolq": 0.7318042516708374,
+       "agi_eval_sat_en": 0.5291262269020081,
+       "winogender_mc_female": 0.5,
+       "winogender_mc_male": 0.6000000238418579,
+       "enterprise_pii_classification": 0.5216494798660278,
+       "bbq": 0.5055712298913435,
+       "gpqa_main": 0.2254464328289032,
+       "gpqa_diamond": 0.19696970283985138
+     }
+   },
+   "missing tasks": "[]",
+   "aggregated_task_categories_centered": {
+     "commonsense reasoning": 0.46523631517512387,
+     "language understanding": 0.4930994285627118,
+     "reading comprehension": 0.3874464948710642,
+     "safety": 0.06361036679961463,
+     "symbolic problem solving": 0.1715689478790787,
+     "world knowledge": 0.36090664066069306
+   },
+   "aggregated_centered_results": 0.3245905660540145,
+   "aggregated_results": 0.4716746728993969,
+   "rw_small": 0.7143076260884603,
+   "rw_small_centered": 0.5021371105958147,
+   "95%_CI_above": 0.5717007633347017,
+   "95%_CI_above_centered": 0.4290739257829261,
+   "99%_CI_above": 0.581190875561341,
+   "99%_CI_above_centered": 0.4683558738011154,
+   "low_variance_datasets": 0.571692757173018,
+   "low_variance_datasets_centered": 0.4723345583847347
+ }
heavy-jetmoe-8b-main.json ADDED
@@ -0,0 +1,82 @@
+ {
+   "name": "heavy",
+   "uuid": "72557004-cff2-408c-b3e2-6a004215f09d",
+   "model": "/net/nfs.cirrascale/allennlp/davidw/checkpoints/moe-release/jetmoe-8b/main",
+   "creation_date": "2024_08_06-16_55_05",
+   "eval_metrics": {
+     "icl": {
+       "mmlu_zeroshot": 0.4326617845840621,
+       "hellaswag_zeroshot": 0.7868950366973877,
+       "jeopardy": 0.45192375779151917,
+       "triviaqa_sm_sub": 0.4986666738986969,
+       "gsm8k_cot": 0.3025018870830536,
+       "agi_eval_sat_math_cot": 0.11363636702299118,
+       "aqua_cot": 0.05306122452020645,
+       "svamp_cot": 0.4466666579246521,
+       "bigbench_qa_wikidata": 0.7317061424255371,
+       "arc_easy": 0.7356902360916138,
+       "arc_challenge": 0.4530716836452484,
+       "mmlu_fewshot": 0.4966564126182021,
+       "bigbench_misconceptions": 0.5707762837409973,
+       "copa": 0.8399999737739563,
+       "siqa": 0.8162742853164673,
+       "commonsense_qa": 0.7911548018455505,
+       "piqa": 0.8084874749183655,
+       "openbook_qa": 0.3959999978542328,
+       "bigbench_novel_concepts": 0.53125,
+       "bigbench_strange_stories": 0.6781609058380127,
+       "bigbench_strategy_qa": 0.5801659822463989,
+       "lambada_openai": 0.6976518630981445,
+       "hellaswag": 0.793367862701416,
+       "winograd": 0.8388278484344482,
+       "winogrande": 0.6835043430328369,
+       "bigbench_conlang_translation": 0.09146341681480408,
+       "bigbench_language_identification": 0.336899995803833,
+       "bigbench_conceptual_combinations": 0.553398072719574,
+       "bigbench_elementary_math_qa": 0.2795073390007019,
+       "bigbench_dyck_languages": 0.3190000057220459,
+       "agi_eval_lsat_ar": 0.2869565188884735,
+       "bigbench_cs_algorithms": 0.5098484754562378,
+       "bigbench_logical_deduction": 0.3033333420753479,
+       "bigbench_operators": 0.538095235824585,
+       "bigbench_repeat_copy_logic": 0.21875,
+       "simple_arithmetic_nospaces": 0.3070000112056732,
+       "simple_arithmetic_withspaces": 0.3100000023841858,
+       "math_qa": 0.28226616978645325,
+       "logi_qa": 0.33794161677360535,
+       "pubmed_qa_labeled": 0.6430000066757202,
+       "squad": 0.2401135265827179,
+       "agi_eval_lsat_rc": 0.46268656849861145,
+       "agi_eval_lsat_lr": 0.3450980484485626,
+       "coqa": 0.43893274664878845,
+       "bigbench_understanding_fables": 0.41798943281173706,
+       "boolq": 0.8223241567611694,
+       "agi_eval_sat_en": 0.6213592290878296,
+       "winogender_mc_female": 0.5,
+       "winogender_mc_male": 0.5833333134651184,
+       "enterprise_pii_classification": 0.6017673015594482,
+       "bbq": 0.5354551727121527,
+       "gpqa_main": 0.2566964328289032,
+       "gpqa_diamond": 0.2222222238779068
+     }
+   },
+   "missing tasks": "[]",
+   "aggregated_task_categories_centered": {
+     "commonsense reasoning": 0.4857313116330063,
+     "language understanding": 0.4936472507617851,
+     "reading comprehension": 0.3914801840458,
+     "safety": 0.11027789386835968,
+     "symbolic problem solving": 0.24344731914942666,
+     "world knowledge": 0.3286514173125663
+   },
+   "aggregated_centered_results": 0.3461548208936837,
+   "aggregated_results": 0.4885697707456261,
+   "rw_small": 0.7202475716670355,
+   "rw_small_centered": 0.5356083167226692,
+   "95%_CI_above": 0.5773314228886133,
+   "95%_CI_above_centered": 0.44323355322011243,
+   "99%_CI_above": 0.5924812607143236,
+   "99%_CI_above_centered": 0.48989709603090853,
+   "low_variance_datasets": 0.5781455310908231,
+   "low_variance_datasets_centered": 0.4859825293638729
+ }
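All four heavy-*.json records above share the same schema, so checkpoint-to-checkpoint and model-to-model comparisons reduce to reading the aggregate fields. A sketch under the same assumption of locally downloaded copies:

```python
import json

# Local copies of the four "heavy" records added in this commit (assumed paths).
files = [
    "heavy-OLMoE-7B-A1B-main.json",
    "heavy-OLMoE-7B-A1B-step1220000-tokens5117B.json",
    "heavy-OLMoE-7B-A1B-step1223842-tokens5100B.json",
    "heavy-jetmoe-8b-main.json",
]

for path in files:
    with open(path) as f:
        record = json.load(f)
    # The last two path components identify the model and the checkpoint revision.
    label = "/".join(record["model"].rsplit("/", 2)[-2:])
    print(f"{label:45s} "
          f"aggregated={record['aggregated_results']:.4f} "
          f"centered={record['aggregated_centered_results']:.4f}")
```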
humaneval-OLMoE-7B-A1B-main.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "name": "/net/nfs.cirrascale/allennlp/davidw/proj/OLMoE/script/humaneval",
+   "uuid": "dc03f2af-e3c1-479d-98bd-6efc73e684e4",
+   "model": "/net/nfs.cirrascale/allennlp/davidw/checkpoints/moe-release/OLMoE-7B-A1B/main",
+   "creation_date": "2024_08_06-02_09_56",
+   "eval_metrics": {
+     "icl": {
+       "human_eval": 0.024390242993831635,
+       "human_eval_cpp": 0.1304347813129425,
+       "human_eval_js": 0.0,
+       "human_eval_return_simple": 0.6486486196517944,
+       "human_eval_return_complex": 0.14960630238056183,
+       "human_eval_25": 0.03658536449074745,
+       "human_eval_50": 0.08536585420370102,
+       "human_eval_75": 0.16463415324687958
+     }
+   },
+   "missing tasks": "['mmlu_zeroshot', 'hellaswag_zeroshot', 'jeopardy', 'triviaqa_sm_sub', 'gsm8k_cot', 'agi_eval_sat_math_cot', 'aqua_cot', 'svamp_cot', 'bigbench_qa_wikidata', 'arc_easy', 'arc_challenge', 'mmlu_fewshot', 'bigbench_misconceptions', 'copa', 'siqa', 'commonsense_qa', 'piqa', 'openbook_qa', 'bigbench_novel_concepts', 'bigbench_strange_stories', 'bigbench_strategy_qa', 'lambada_openai', 'hellaswag', 'winograd', 'winogrande', 'bigbench_conlang_translation', 'bigbench_language_identification', 'bigbench_conceptual_combinations', 'bigbench_elementary_math_qa', 'bigbench_dyck_languages', 'agi_eval_lsat_ar', 'bigbench_cs_algorithms', 'bigbench_logical_deduction', 'bigbench_operators', 'bigbench_repeat_copy_logic', 'simple_arithmetic_nospaces', 'simple_arithmetic_withspaces', 'math_qa', 'logi_qa', 'pubmed_qa_labeled', 'squad', 'agi_eval_lsat_rc', 'agi_eval_lsat_lr', 'coqa', 'bigbench_understanding_fables', 'boolq', 'agi_eval_sat_en', 'winogender_mc_female', 'winogender_mc_male', 'enterprise_pii_classification', 'bbq', 'gpqa_main', 'gpqa_diamond']",
+   "aggregated_task_categories_centered": {
+     "commonsense reasoning": NaN,
+     "language understanding": NaN,
+     "reading comprehension": NaN,
+     "safety": NaN,
+     "symbolic problem solving": NaN,
+     "world knowledge": NaN
+   },
+   "aggregated_centered_results": NaN,
+   "aggregated_results": NaN,
+   "rw_small": NaN,
+   "rw_small_centered": NaN,
+   "95%_CI_above": NaN,
+   "95%_CI_above_centered": NaN,
+   "99%_CI_above": NaN,
+   "99%_CI_above_centered": NaN,
+   "low_variance_datasets": NaN,
+   "low_variance_datasets_centered": NaN
+ }
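In the humaneval-*.json records only the HumanEval variants were run, so every aggregate field is NaN and the skipped ICL tasks are stored in "missing tasks" as the repr of a Python list rather than a JSON array. A sketch of recovering that list (again assuming a local copy of the file shown above):

```python
import ast
import json

# Assumes a local copy of the file shown above.
with open("humaneval-OLMoE-7B-A1B-main.json") as f:
    record = json.load(f)

# "missing tasks" holds the repr of a Python list, not a JSON array,
# so parse it with ast.literal_eval rather than json.loads.
missing = ast.literal_eval(record["missing tasks"])
print(len(missing), "tasks were skipped, e.g.", missing[:3])
```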
humaneval-OLMoE-7B-A1B-step1220000-tokens5117B.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "name": "/net/nfs.cirrascale/allennlp/davidw/proj/OLMoE/script/humaneval",
+   "uuid": "d31d6b3f-15ee-4d96-ac2e-b2d780bf206e",
+   "model": "/net/nfs.cirrascale/allennlp/davidw/checkpoints/moe-release/OLMoE-7B-A1B/step1220000-tokens5117B",
+   "creation_date": "2024_08_06-02_14_00",
+   "eval_metrics": {
+     "icl": {
+       "human_eval": 0.006097560748457909,
+       "human_eval_cpp": 0.12422360479831696,
+       "human_eval_js": 0.0,
+       "human_eval_return_simple": 0.7567567825317383,
+       "human_eval_return_complex": 0.14173229038715363,
+       "human_eval_25": 0.060975611209869385,
+       "human_eval_50": 0.1036585345864296,
+       "human_eval_75": 0.12195122241973877
+     }
+   },
+   "missing tasks": "['mmlu_zeroshot', 'hellaswag_zeroshot', 'jeopardy', 'triviaqa_sm_sub', 'gsm8k_cot', 'agi_eval_sat_math_cot', 'aqua_cot', 'svamp_cot', 'bigbench_qa_wikidata', 'arc_easy', 'arc_challenge', 'mmlu_fewshot', 'bigbench_misconceptions', 'copa', 'siqa', 'commonsense_qa', 'piqa', 'openbook_qa', 'bigbench_novel_concepts', 'bigbench_strange_stories', 'bigbench_strategy_qa', 'lambada_openai', 'hellaswag', 'winograd', 'winogrande', 'bigbench_conlang_translation', 'bigbench_language_identification', 'bigbench_conceptual_combinations', 'bigbench_elementary_math_qa', 'bigbench_dyck_languages', 'agi_eval_lsat_ar', 'bigbench_cs_algorithms', 'bigbench_logical_deduction', 'bigbench_operators', 'bigbench_repeat_copy_logic', 'simple_arithmetic_nospaces', 'simple_arithmetic_withspaces', 'math_qa', 'logi_qa', 'pubmed_qa_labeled', 'squad', 'agi_eval_lsat_rc', 'agi_eval_lsat_lr', 'coqa', 'bigbench_understanding_fables', 'boolq', 'agi_eval_sat_en', 'winogender_mc_female', 'winogender_mc_male', 'enterprise_pii_classification', 'bbq', 'gpqa_main', 'gpqa_diamond']",
+   "aggregated_task_categories_centered": {
+     "commonsense reasoning": NaN,
+     "language understanding": NaN,
+     "reading comprehension": NaN,
+     "safety": NaN,
+     "symbolic problem solving": NaN,
+     "world knowledge": NaN
+   },
+   "aggregated_centered_results": NaN,
+   "aggregated_results": NaN,
+   "rw_small": NaN,
+   "rw_small_centered": NaN,
+   "95%_CI_above": NaN,
+   "95%_CI_above_centered": NaN,
+   "99%_CI_above": NaN,
+   "99%_CI_above_centered": NaN,
+   "low_variance_datasets": NaN,
+   "low_variance_datasets_centered": NaN
+ }
humaneval-OLMoE-7B-A1B-step1223842-tokens5100B.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "name": "/net/nfs.cirrascale/allennlp/davidw/proj/OLMoE/script/humaneval",
+   "uuid": "3a11e770-7075-42f3-81f6-c14a6db4953d",
+   "model": "/net/nfs.cirrascale/allennlp/davidw/checkpoints/moe-release/OLMoE-7B-A1B/step1223842-tokens5100B",
+   "creation_date": "2024_08_06-02_21_03",
+   "eval_metrics": {
+     "icl": {
+       "human_eval": 0.012195121496915817,
+       "human_eval_cpp": 0.1304347813129425,
+       "human_eval_js": 0.0,
+       "human_eval_return_simple": 0.8648648858070374,
+       "human_eval_return_complex": 0.14960630238056183,
+       "human_eval_25": 0.04268292710185051,
+       "human_eval_50": 0.09146341681480408,
+       "human_eval_75": 0.16463415324687958
+     }
+   },
+   "missing tasks": "['mmlu_zeroshot', 'hellaswag_zeroshot', 'jeopardy', 'triviaqa_sm_sub', 'gsm8k_cot', 'agi_eval_sat_math_cot', 'aqua_cot', 'svamp_cot', 'bigbench_qa_wikidata', 'arc_easy', 'arc_challenge', 'mmlu_fewshot', 'bigbench_misconceptions', 'copa', 'siqa', 'commonsense_qa', 'piqa', 'openbook_qa', 'bigbench_novel_concepts', 'bigbench_strange_stories', 'bigbench_strategy_qa', 'lambada_openai', 'hellaswag', 'winograd', 'winogrande', 'bigbench_conlang_translation', 'bigbench_language_identification', 'bigbench_conceptual_combinations', 'bigbench_elementary_math_qa', 'bigbench_dyck_languages', 'agi_eval_lsat_ar', 'bigbench_cs_algorithms', 'bigbench_logical_deduction', 'bigbench_operators', 'bigbench_repeat_copy_logic', 'simple_arithmetic_nospaces', 'simple_arithmetic_withspaces', 'math_qa', 'logi_qa', 'pubmed_qa_labeled', 'squad', 'agi_eval_lsat_rc', 'agi_eval_lsat_lr', 'coqa', 'bigbench_understanding_fables', 'boolq', 'agi_eval_sat_en', 'winogender_mc_female', 'winogender_mc_male', 'enterprise_pii_classification', 'bbq', 'gpqa_main', 'gpqa_diamond']",
+   "aggregated_task_categories_centered": {
+     "commonsense reasoning": NaN,
+     "language understanding": NaN,
+     "reading comprehension": NaN,
+     "safety": NaN,
+     "symbolic problem solving": NaN,
+     "world knowledge": NaN
+   },
+   "aggregated_centered_results": NaN,
+   "aggregated_results": NaN,
+   "rw_small": NaN,
+   "rw_small_centered": NaN,
+   "95%_CI_above": NaN,
+   "95%_CI_above_centered": NaN,
+   "99%_CI_above": NaN,
+   "99%_CI_above_centered": NaN,
+   "low_variance_datasets": NaN,
+   "low_variance_datasets_centered": NaN
+ }
humaneval-jetmoe-8b-main.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "name": "/net/nfs.cirrascale/allennlp/davidw/proj/OLMoE/script/humaneval",
+   "uuid": "9a26c983-2b2b-46bc-bc2f-9e1d9c153710",
+   "model": "/net/nfs.cirrascale/allennlp/davidw/checkpoints/moe-release/jetmoe-8b/main",
+   "creation_date": "2024_08_05-22_40_40",
+   "eval_metrics": {
+     "icl": {
+       "human_eval": 0.32926830649375916,
+       "human_eval_cpp": 0.2732919156551361,
+       "human_eval_js": 0.0,
+       "human_eval_return_simple": 0.7837837934494019,
+       "human_eval_return_complex": 0.4488188922405243,
+       "human_eval_25": 0.4390243887901306,
+       "human_eval_50": 0.5243902206420898,
+       "human_eval_75": 0.6463414430618286
+     }
+   },
+   "missing tasks": "['mmlu_zeroshot', 'hellaswag_zeroshot', 'jeopardy', 'triviaqa_sm_sub', 'gsm8k_cot', 'agi_eval_sat_math_cot', 'aqua_cot', 'svamp_cot', 'bigbench_qa_wikidata', 'arc_easy', 'arc_challenge', 'mmlu_fewshot', 'bigbench_misconceptions', 'copa', 'siqa', 'commonsense_qa', 'piqa', 'openbook_qa', 'bigbench_novel_concepts', 'bigbench_strange_stories', 'bigbench_strategy_qa', 'lambada_openai', 'hellaswag', 'winograd', 'winogrande', 'bigbench_conlang_translation', 'bigbench_language_identification', 'bigbench_conceptual_combinations', 'bigbench_elementary_math_qa', 'bigbench_dyck_languages', 'agi_eval_lsat_ar', 'bigbench_cs_algorithms', 'bigbench_logical_deduction', 'bigbench_operators', 'bigbench_repeat_copy_logic', 'simple_arithmetic_nospaces', 'simple_arithmetic_withspaces', 'math_qa', 'logi_qa', 'pubmed_qa_labeled', 'squad', 'agi_eval_lsat_rc', 'agi_eval_lsat_lr', 'coqa', 'bigbench_understanding_fables', 'boolq', 'agi_eval_sat_en', 'winogender_mc_female', 'winogender_mc_male', 'enterprise_pii_classification', 'bbq', 'gpqa_main', 'gpqa_diamond']",
+   "aggregated_task_categories_centered": {
+     "commonsense reasoning": NaN,
+     "language understanding": NaN,
+     "reading comprehension": NaN,
+     "safety": NaN,
+     "symbolic problem solving": NaN,
+     "world knowledge": NaN
+   },
+   "aggregated_centered_results": NaN,
+   "aggregated_results": NaN,
+   "rw_small": NaN,
+   "rw_small_centered": NaN,
+   "95%_CI_above": NaN,
+   "95%_CI_above_centered": NaN,
+   "99%_CI_above": NaN,
+   "99%_CI_above_centered": NaN,
+   "low_variance_datasets": NaN,
+   "low_variance_datasets_centered": NaN
+ }