{
"results": {
"bigbench_analogies": {
"multiple_choice_grade": 0.27692307692307694,
"multiple_choice_grade_stderr": 0.039398253452664705
},
"bigbench_arithmetic_1_digit_addition": {
"multiple_choice_grade": 0.28,
"multiple_choice_grade_stderr": 0.04512608598542127
},
"bigbench_arithmetic_1_digit_division": {
"multiple_choice_grade": 0.5217391304347826,
"multiple_choice_grade_stderr": 0.10649955403405122
},
"bigbench_arithmetic_1_digit_multiplication": {
"multiple_choice_grade": 0.16,
"multiple_choice_grade_stderr": 0.03684529491774709
},
"bigbench_arithmetic_1_digit_subtraction": {
"multiple_choice_grade": 0.38,
"multiple_choice_grade_stderr": 0.04878317312145632
},
"bigbench_arithmetic_2_digit_addition": {
"multiple_choice_grade": 0.09,
"multiple_choice_grade_stderr": 0.028762349126466153
},
"bigbench_arithmetic_2_digit_division": {
"multiple_choice_grade": 0.37,
"multiple_choice_grade_stderr": 0.04852365870939098
},
"bigbench_arithmetic_2_digit_multiplication": {
"multiple_choice_grade": 0.02,
"multiple_choice_grade_stderr": 0.01407052941362896
},
"bigbench_arithmetic_2_digit_subtraction": {
"multiple_choice_grade": 0.22,
"multiple_choice_grade_stderr": 0.0416333199893227
},
"bigbench_arithmetic_3_digit_addition": {
"multiple_choice_grade": 0.04,
"multiple_choice_grade_stderr": 0.01969463855669323
},
"bigbench_arithmetic_3_digit_division": {
"multiple_choice_grade": 0.2,
"multiple_choice_grade_stderr": 0.04020151261036846
},
"bigbench_arithmetic_3_digit_multiplication": {
"multiple_choice_grade": 0.05,
"multiple_choice_grade_stderr": 0.02190429135575908
},
"bigbench_arithmetic_3_digit_subtraction": {
"multiple_choice_grade": 0.23,
"multiple_choice_grade_stderr": 0.04229525846816506
},
"bigbench_arithmetic_4_digit_addition": {
"multiple_choice_grade": 0.15,
"multiple_choice_grade_stderr": 0.035887028128263714
},
"bigbench_arithmetic_4_digit_division": {
"multiple_choice_grade": 0.15,
"multiple_choice_grade_stderr": 0.03588702812826371
},
"bigbench_arithmetic_4_digit_multiplication": {
"multiple_choice_grade": 0.07,
"multiple_choice_grade_stderr": 0.025643239997624294
},
"bigbench_arithmetic_4_digit_subtraction": {
"multiple_choice_grade": 0.13,
"multiple_choice_grade_stderr": 0.03379976689896308
},
"bigbench_arithmetic_5_digit_addition": {
"multiple_choice_grade": 0.06,
"multiple_choice_grade_stderr": 0.023868325657594197
},
"bigbench_arithmetic_5_digit_division": {
"multiple_choice_grade": 0.08,
"multiple_choice_grade_stderr": 0.027265992434429086
},
"bigbench_arithmetic_5_digit_multiplication": {
"multiple_choice_grade": 0.06,
"multiple_choice_grade_stderr": 0.023868325657594183
},
"bigbench_arithmetic_5_digit_subtraction": {
"multiple_choice_grade": 0.16,
"multiple_choice_grade_stderr": 0.03684529491774708
},
"bigbench_cause_and_effect_one_sentence": {
"multiple_choice_grade": 0.5098039215686274,
"multiple_choice_grade_stderr": 0.07069708383262727
},
"bigbench_cause_and_effect_one_sentence_no_prompt": {
"multiple_choice_grade": 0.6862745098039216,
"multiple_choice_grade_stderr": 0.06562039423796669
},
"bigbench_cause_and_effect_two_sentences": {
"multiple_choice_grade": 0.43137254901960786,
"multiple_choice_grade_stderr": 0.07004145529212454
},
"bigbench_emotions": {
"multiple_choice_grade": 0.2125,
"multiple_choice_grade_stderr": 0.03244189290245472
},
"bigbench_empirical_judgments": {
"multiple_choice_grade": 0.30303030303030304,
"multiple_choice_grade_stderr": 0.04642339954443119
},
"bigbench_general_knowledge": {
"multiple_choice_grade": 0.2571428571428571,
"multiple_choice_grade_stderr": 0.052615698346701524
},
"bigbench_hhh_alignment_harmless": {
"multiple_choice_grade": 0.41379310344827586,
"multiple_choice_grade_stderr": 0.06523484847771846
},
"bigbench_hhh_alignment_helpful": {
"multiple_choice_grade": 0.288135593220339,
"multiple_choice_grade_stderr": 0.059467967781548406
},
"bigbench_hhh_alignment_honest": {
"multiple_choice_grade": 0.3220338983050847,
"multiple_choice_grade_stderr": 0.06135370413564329
},
"bigbench_hhh_alignment_other": {
"multiple_choice_grade": 0.5813953488372093,
"multiple_choice_grade_stderr": 0.07612251984976479
},
"bigbench_intent_recognition": {
"multiple_choice_grade": 0.1416184971098266,
"multiple_choice_grade_stderr": 0.013263591635637853
},
"bigbench_misconceptions": {
"multiple_choice_grade": 0.4925373134328358,
"multiple_choice_grade_stderr": 0.04335066912520505
},
"bigbench_paraphrase": {
"multiple_choice_grade": 0.46,
"multiple_choice_grade_stderr": 0.03533045720097816
},
"bigbench_sentence_ambiguity": {
"multiple_choice_grade": 0.5,
"multiple_choice_grade_stderr": 0.06509445549041193
},
"bigbench_similarities_abstraction": {
"multiple_choice_grade": 0.5526315789473685,
"multiple_choice_grade_stderr": 0.05741427428755636
}
},
"versions": {
"bigbench_analogies": 0,
"bigbench_arithmetic_1_digit_addition": 0,
"bigbench_arithmetic_1_digit_division": 0,
"bigbench_arithmetic_1_digit_multiplication": 0,
"bigbench_arithmetic_1_digit_subtraction": 0,
"bigbench_arithmetic_2_digit_addition": 0,
"bigbench_arithmetic_2_digit_division": 0,
"bigbench_arithmetic_2_digit_multiplication": 0,
"bigbench_arithmetic_2_digit_subtraction": 0,
"bigbench_arithmetic_3_digit_addition": 0,
"bigbench_arithmetic_3_digit_division": 0,
"bigbench_arithmetic_3_digit_multiplication": 0,
"bigbench_arithmetic_3_digit_subtraction": 0,
"bigbench_arithmetic_4_digit_addition": 0,
"bigbench_arithmetic_4_digit_division": 0,
"bigbench_arithmetic_4_digit_multiplication": 0,
"bigbench_arithmetic_4_digit_subtraction": 0,
"bigbench_arithmetic_5_digit_addition": 0,
"bigbench_arithmetic_5_digit_division": 0,
"bigbench_arithmetic_5_digit_multiplication": 0,
"bigbench_arithmetic_5_digit_subtraction": 0,
"bigbench_cause_and_effect_one_sentence": 0,
"bigbench_cause_and_effect_one_sentence_no_prompt": 0,
"bigbench_cause_and_effect_two_sentences": 0,
"bigbench_emotions": 0,
"bigbench_empirical_judgments": 0,
"bigbench_general_knowledge": 0,
"bigbench_hhh_alignment_harmless": 0,
"bigbench_hhh_alignment_helpful": 0,
"bigbench_hhh_alignment_honest": 0,
"bigbench_hhh_alignment_other": 0,
"bigbench_intent_recognition": 0,
"bigbench_misconceptions": 0,
"bigbench_paraphrase": 0,
"bigbench_sentence_ambiguity": 0,
"bigbench_similarities_abstraction": 0
},
"config": {
"model": "hf-causal",
"model_args": "pretrained=/pfs/lustrep2/scratch/project_462000185/muennighoff/bigcode-evaluation-harness/gpt2-large-finnish",
"num_fewshot": 1,
"batch_size": null,
"device": "cuda:0",
"no_cache": false,
"limit": null,
"bootstrap_iters": 100000,
"description_dict": {}
}
}
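The `config` block above records an lm-evaluation-harness-style run: an `hf-causal` model (here a local `gpt2-large-finnish` checkpoint), `num_fewshot: 1`, and 100000 bootstrap iterations, which is what produces the `*_stderr` fields. A minimal sketch for inspecting this report, assuming it is saved as `gpt2_finnish_large_bigbench_1shot.json` (the filename and the unweighted macro-average are my conventions, not part of the harness output):

```python
import json
from statistics import mean

# Load the evaluation report described by the "config" block above.
with open("gpt2_finnish_large_bigbench_1shot.json") as f:
    report = json.load(f)

results = report["results"]

# Each task maps a multiple-choice accuracy to its bootstrap standard error.
# Print tasks from strongest to weakest.
ranked = sorted(results.items(),
                key=lambda kv: kv[1]["multiple_choice_grade"],
                reverse=True)
for task, metrics in ranked:
    grade = metrics["multiple_choice_grade"]
    stderr = metrics["multiple_choice_grade_stderr"]
    print(f"{task:55s} {grade:6.3f} ± {stderr:.3f}")

# Unweighted mean over all 36 tasks. Tasks differ in size, so this is a
# rough macro-average, not a sample-weighted accuracy.
macro = mean(m["multiple_choice_grade"] for m in results.values())
print(f"\nmacro-average multiple_choice_grade: {macro:.4f}")
```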