evaluation / gpt2_finnish_medium_bigbench_2shot.json
{
"results": {
"bigbench_analogies": {
"multiple_choice_grade": 0.36923076923076925,
"multiple_choice_grade_stderr": 0.04249025499621757
},
"bigbench_arithmetic_1_digit_addition": {
"multiple_choice_grade": 0.38,
"multiple_choice_grade_stderr": 0.04878317312145633
},
"bigbench_arithmetic_1_digit_division": {
"multiple_choice_grade": 0.5652173913043478,
"multiple_choice_grade_stderr": 0.10568965974008646
},
"bigbench_arithmetic_1_digit_multiplication": {
"multiple_choice_grade": 0.28,
"multiple_choice_grade_stderr": 0.04512608598542128
},
"bigbench_arithmetic_1_digit_subtraction": {
"multiple_choice_grade": 0.5,
"multiple_choice_grade_stderr": 0.050251890762960605
},
"bigbench_arithmetic_2_digit_addition": {
"multiple_choice_grade": 0.31,
"multiple_choice_grade_stderr": 0.04648231987117316
},
"bigbench_arithmetic_2_digit_division": {
"multiple_choice_grade": 0.32,
"multiple_choice_grade_stderr": 0.04688261722621505
},
"bigbench_arithmetic_2_digit_multiplication": {
"multiple_choice_grade": 0.05,
"multiple_choice_grade_stderr": 0.021904291355759022
},
"bigbench_arithmetic_2_digit_subtraction": {
"multiple_choice_grade": 0.38,
"multiple_choice_grade_stderr": 0.048783173121456344
},
"bigbench_arithmetic_3_digit_addition": {
"multiple_choice_grade": 0.07,
"multiple_choice_grade_stderr": 0.02564323999762429
},
"bigbench_arithmetic_3_digit_division": {
"multiple_choice_grade": 0.16,
"multiple_choice_grade_stderr": 0.03684529491774708
},
"bigbench_arithmetic_3_digit_multiplication": {
"multiple_choice_grade": 0.08,
"multiple_choice_grade_stderr": 0.0272659924344291
},
"bigbench_arithmetic_3_digit_subtraction": {
"multiple_choice_grade": 0.26,
"multiple_choice_grade_stderr": 0.04408440022768078
},
"bigbench_arithmetic_4_digit_addition": {
"multiple_choice_grade": 0.09,
"multiple_choice_grade_stderr": 0.028762349126466125
},
"bigbench_arithmetic_4_digit_division": {
"multiple_choice_grade": 0.15,
"multiple_choice_grade_stderr": 0.03588702812826371
},
"bigbench_arithmetic_4_digit_multiplication": {
"multiple_choice_grade": 0.06,
"multiple_choice_grade_stderr": 0.023868325657594176
},
"bigbench_arithmetic_4_digit_subtraction": {
"multiple_choice_grade": 0.18,
"multiple_choice_grade_stderr": 0.038612291966536955
},
"bigbench_arithmetic_5_digit_addition": {
"multiple_choice_grade": 0.06,
"multiple_choice_grade_stderr": 0.023868325657594197
},
"bigbench_arithmetic_5_digit_division": {
"multiple_choice_grade": 0.16,
"multiple_choice_grade_stderr": 0.036845294917747066
},
"bigbench_arithmetic_5_digit_multiplication": {
"multiple_choice_grade": 0.02,
"multiple_choice_grade_stderr": 0.014070529413628954
},
"bigbench_arithmetic_5_digit_subtraction": {
"multiple_choice_grade": 0.18,
"multiple_choice_grade_stderr": 0.03861229196653695
},
"bigbench_cause_and_effect_one_sentence": {
"multiple_choice_grade": 0.6274509803921569,
"multiple_choice_grade_stderr": 0.0683748853888733
},
"bigbench_cause_and_effect_one_sentence_no_prompt": {
"multiple_choice_grade": 0.5686274509803921,
"multiple_choice_grade_stderr": 0.07004145529212452
},
"bigbench_cause_and_effect_two_sentences": {
"multiple_choice_grade": 0.49019607843137253,
"multiple_choice_grade_stderr": 0.07069708383262727
},
"bigbench_emotions": {
"multiple_choice_grade": 0.11875,
"multiple_choice_grade_stderr": 0.025654751481643822
},
"bigbench_empirical_judgments": {
"multiple_choice_grade": 0.26262626262626265,
"multiple_choice_grade_stderr": 0.044452876769839444
},
"bigbench_general_knowledge": {
"multiple_choice_grade": 0.21428571428571427,
"multiple_choice_grade_stderr": 0.04939743391486606
},
"bigbench_hhh_alignment_harmless": {
"multiple_choice_grade": 0.4482758620689655,
"multiple_choice_grade_stderr": 0.06587130109529918
},
"bigbench_hhh_alignment_helpful": {
"multiple_choice_grade": 0.2711864406779661,
"multiple_choice_grade_stderr": 0.058375177038848765
},
"bigbench_hhh_alignment_honest": {
"multiple_choice_grade": 0.3389830508474576,
"multiple_choice_grade_stderr": 0.062155747381159164
},
"bigbench_hhh_alignment_other": {
"multiple_choice_grade": 0.5813953488372093,
"multiple_choice_grade_stderr": 0.07612251984976479
},
"bigbench_intent_recognition": {
"multiple_choice_grade": 0.18641618497109827,
"multiple_choice_grade_stderr": 0.014815077730150087
},
"bigbench_misconceptions": {
"multiple_choice_grade": 0.5149253731343284,
"multiple_choice_grade_stderr": 0.04333617784312701
},
"bigbench_paraphrase": {
"multiple_choice_grade": 0.475,
"multiple_choice_grade_stderr": 0.03539972744976421
},
"bigbench_sentence_ambiguity": {
"multiple_choice_grade": 0.5333333333333333,
"multiple_choice_grade_stderr": 0.06494964005966065
},
"bigbench_similarities_abstraction": {
"multiple_choice_grade": 0.5657894736842105,
"multiple_choice_grade_stderr": 0.05723306097613564
}
},
"versions": {
"bigbench_analogies": 0,
"bigbench_arithmetic_1_digit_addition": 0,
"bigbench_arithmetic_1_digit_division": 0,
"bigbench_arithmetic_1_digit_multiplication": 0,
"bigbench_arithmetic_1_digit_subtraction": 0,
"bigbench_arithmetic_2_digit_addition": 0,
"bigbench_arithmetic_2_digit_division": 0,
"bigbench_arithmetic_2_digit_multiplication": 0,
"bigbench_arithmetic_2_digit_subtraction": 0,
"bigbench_arithmetic_3_digit_addition": 0,
"bigbench_arithmetic_3_digit_division": 0,
"bigbench_arithmetic_3_digit_multiplication": 0,
"bigbench_arithmetic_3_digit_subtraction": 0,
"bigbench_arithmetic_4_digit_addition": 0,
"bigbench_arithmetic_4_digit_division": 0,
"bigbench_arithmetic_4_digit_multiplication": 0,
"bigbench_arithmetic_4_digit_subtraction": 0,
"bigbench_arithmetic_5_digit_addition": 0,
"bigbench_arithmetic_5_digit_division": 0,
"bigbench_arithmetic_5_digit_multiplication": 0,
"bigbench_arithmetic_5_digit_subtraction": 0,
"bigbench_cause_and_effect_one_sentence": 0,
"bigbench_cause_and_effect_one_sentence_no_prompt": 0,
"bigbench_cause_and_effect_two_sentences": 0,
"bigbench_emotions": 0,
"bigbench_empirical_judgments": 0,
"bigbench_general_knowledge": 0,
"bigbench_hhh_alignment_harmless": 0,
"bigbench_hhh_alignment_helpful": 0,
"bigbench_hhh_alignment_honest": 0,
"bigbench_hhh_alignment_other": 0,
"bigbench_intent_recognition": 0,
"bigbench_misconceptions": 0,
"bigbench_paraphrase": 0,
"bigbench_sentence_ambiguity": 0,
"bigbench_similarities_abstraction": 0
},
"config": {
"model": "hf-causal",
"model_args": "pretrained=/pfs/lustrep2/scratch/project_462000185/muennighoff/bigcode-evaluation-harness/gpt2-medium-finnish",
"num_fewshot": 2,
"batch_size": null,
"device": "cuda:0",
"no_cache": false,
"limit": null,
"bootstrap_iters": 100000,
"description_dict": {}
}
}
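The JSON above is a standard lm-evaluation-harness output: per-task "multiple_choice_grade" accuracies with bootstrap standard errors for a 2-shot BIG-bench run of the gpt2-medium-finnish model ("model": "hf-causal", "num_fewshot": 2). As a minimal sketch (not part of the original file), the following Python shows one way to load this file and summarize the scores; the local file name "gpt2_finnish_medium_bigbench_2shot.json" is assumed from the repository path above and should be adjusted to wherever the file is stored.

import json

# Load the results file produced by lm-evaluation-harness.
with open("gpt2_finnish_medium_bigbench_2shot.json") as f:
    data = json.load(f)

results = data["results"]

# Print each BIG-bench subtask's accuracy with its bootstrap standard error.
for task, metrics in sorted(results.items()):
    grade = metrics["multiple_choice_grade"]
    stderr = metrics["multiple_choice_grade_stderr"]
    print(f"{task:55s} {grade:.3f} +/- {stderr:.3f}")

# Unweighted mean accuracy across all evaluated subtasks (a rough summary;
# subtasks have different sizes, so this is not a sample-weighted average).
mean_grade = sum(m["multiple_choice_grade"] for m in results.values()) / len(results)
print(f"mean multiple_choice_grade over {len(results)} tasks: {mean_grade:.3f}")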