{
"results": {
"bigbench_analogies": {
"multiple_choice_grade": 0.3153846153846154,
"multiple_choice_grade_stderr": 0.04091181286955817
},
"bigbench_arithmetic_1_digit_addition": {
"multiple_choice_grade": 0.25,
"multiple_choice_grade_stderr": 0.04351941398892446
},
"bigbench_arithmetic_1_digit_division": {
"multiple_choice_grade": 0.6521739130434783,
"multiple_choice_grade_stderr": 0.10154334054280736
},
"bigbench_arithmetic_1_digit_multiplication": {
"multiple_choice_grade": 0.34,
"multiple_choice_grade_stderr": 0.04760952285695235
},
"bigbench_arithmetic_1_digit_subtraction": {
"multiple_choice_grade": 0.28,
"multiple_choice_grade_stderr": 0.045126085985421276
},
"bigbench_arithmetic_2_digit_addition": {
"multiple_choice_grade": 0.0,
"multiple_choice_grade_stderr": 0.0
},
"bigbench_arithmetic_2_digit_division": {
"multiple_choice_grade": 0.4,
"multiple_choice_grade_stderr": 0.049236596391733084
},
"bigbench_arithmetic_2_digit_multiplication": {
"multiple_choice_grade": 0.0,
"multiple_choice_grade_stderr": 0.0
},
"bigbench_arithmetic_2_digit_subtraction": {
"multiple_choice_grade": 0.06,
"multiple_choice_grade_stderr": 0.02386832565759416
},
"bigbench_arithmetic_3_digit_addition": {
"multiple_choice_grade": 0.0,
"multiple_choice_grade_stderr": 0.0
},
"bigbench_arithmetic_3_digit_division": {
"multiple_choice_grade": 0.17,
"multiple_choice_grade_stderr": 0.0377525168068637
},
"bigbench_arithmetic_3_digit_multiplication": {
"multiple_choice_grade": 0.0,
"multiple_choice_grade_stderr": 0.0
},
"bigbench_arithmetic_3_digit_subtraction": {
"multiple_choice_grade": 0.01,
"multiple_choice_grade_stderr": 0.009999999999999997
},
"bigbench_arithmetic_4_digit_addition": {
"multiple_choice_grade": 0.01,
"multiple_choice_grade_stderr": 0.009999999999999993
},
"bigbench_arithmetic_4_digit_division": {
"multiple_choice_grade": 0.17,
"multiple_choice_grade_stderr": 0.0377525168068637
},
"bigbench_arithmetic_4_digit_multiplication": {
"multiple_choice_grade": 0.0,
"multiple_choice_grade_stderr": 0.0
},
"bigbench_arithmetic_4_digit_subtraction": {
"multiple_choice_grade": 0.02,
"multiple_choice_grade_stderr": 0.014070529413628957
},
"bigbench_arithmetic_5_digit_addition": {
"multiple_choice_grade": 0.0,
"multiple_choice_grade_stderr": 0.0
},
"bigbench_arithmetic_5_digit_division": {
"multiple_choice_grade": 0.09,
"multiple_choice_grade_stderr": 0.028762349126466143
},
"bigbench_arithmetic_5_digit_multiplication": {
"multiple_choice_grade": 0.0,
"multiple_choice_grade_stderr": 0.0
},
"bigbench_arithmetic_5_digit_subtraction": {
"multiple_choice_grade": 0.0,
"multiple_choice_grade_stderr": 0.0
},
"bigbench_cause_and_effect_one_sentence": {
"multiple_choice_grade": 0.43137254901960786,
"multiple_choice_grade_stderr": 0.07004145529212452
},
"bigbench_cause_and_effect_one_sentence_no_prompt": {
"multiple_choice_grade": 0.6470588235294118,
"multiple_choice_grade_stderr": 0.06758308995927091
},
"bigbench_cause_and_effect_two_sentences": {
"multiple_choice_grade": 0.6078431372549019,
"multiple_choice_grade_stderr": 0.0690463406339569
},
"bigbench_emotions": {
"multiple_choice_grade": 0.24375,
"multiple_choice_grade_stderr": 0.03404916326237584
},
"bigbench_empirical_judgments": {
"multiple_choice_grade": 0.3333333333333333,
"multiple_choice_grade_stderr": 0.04761904761904759
},
"bigbench_general_knowledge": {
"multiple_choice_grade": 0.2571428571428571,
"multiple_choice_grade_stderr": 0.052615698346701524
},
"bigbench_hhh_alignment_harmless": {
"multiple_choice_grade": 0.43103448275862066,
"multiple_choice_grade_stderr": 0.06559361295281742
},
"bigbench_hhh_alignment_helpful": {
"multiple_choice_grade": 0.2542372881355932,
"multiple_choice_grade_stderr": 0.05717497183586486
},
"bigbench_hhh_alignment_honest": {
"multiple_choice_grade": 0.3728813559322034,
"multiple_choice_grade_stderr": 0.0634959746661109
},
"bigbench_hhh_alignment_other": {
"multiple_choice_grade": 0.6046511627906976,
"multiple_choice_grade_stderr": 0.07544284088704808
},
"bigbench_intent_recognition": {
"multiple_choice_grade": 0.17630057803468208,
"multiple_choice_grade_stderr": 0.014496802592691349
},
"bigbench_misconceptions": {
"multiple_choice_grade": 0.44029850746268656,
"multiple_choice_grade_stderr": 0.0430453277257087
},
"bigbench_paraphrase": {
"multiple_choice_grade": 0.515,
"multiple_choice_grade_stderr": 0.03542810683297719
},
"bigbench_sentence_ambiguity": {
"multiple_choice_grade": 0.5333333333333333,
"multiple_choice_grade_stderr": 0.06494964005966064
},
"bigbench_similarities_abstraction": {
"multiple_choice_grade": 0.5394736842105263,
"multiple_choice_grade_stderr": 0.0575548235169272
}
},
"versions": {
"bigbench_analogies": 0,
"bigbench_arithmetic_1_digit_addition": 0,
"bigbench_arithmetic_1_digit_division": 0,
"bigbench_arithmetic_1_digit_multiplication": 0,
"bigbench_arithmetic_1_digit_subtraction": 0,
"bigbench_arithmetic_2_digit_addition": 0,
"bigbench_arithmetic_2_digit_division": 0,
"bigbench_arithmetic_2_digit_multiplication": 0,
"bigbench_arithmetic_2_digit_subtraction": 0,
"bigbench_arithmetic_3_digit_addition": 0,
"bigbench_arithmetic_3_digit_division": 0,
"bigbench_arithmetic_3_digit_multiplication": 0,
"bigbench_arithmetic_3_digit_subtraction": 0,
"bigbench_arithmetic_4_digit_addition": 0,
"bigbench_arithmetic_4_digit_division": 0,
"bigbench_arithmetic_4_digit_multiplication": 0,
"bigbench_arithmetic_4_digit_subtraction": 0,
"bigbench_arithmetic_5_digit_addition": 0,
"bigbench_arithmetic_5_digit_division": 0,
"bigbench_arithmetic_5_digit_multiplication": 0,
"bigbench_arithmetic_5_digit_subtraction": 0,
"bigbench_cause_and_effect_one_sentence": 0,
"bigbench_cause_and_effect_one_sentence_no_prompt": 0,
"bigbench_cause_and_effect_two_sentences": 0,
"bigbench_emotions": 0,
"bigbench_empirical_judgments": 0,
"bigbench_general_knowledge": 0,
"bigbench_hhh_alignment_harmless": 0,
"bigbench_hhh_alignment_helpful": 0,
"bigbench_hhh_alignment_honest": 0,
"bigbench_hhh_alignment_other": 0,
"bigbench_intent_recognition": 0,
"bigbench_misconceptions": 0,
"bigbench_paraphrase": 0,
"bigbench_sentence_ambiguity": 0,
"bigbench_similarities_abstraction": 0
},
"config": {
"model": "hf-causal",
"model_args": "pretrained=/pfs/lustrep2/scratch/project_462000185/muennighoff/bigcode-evaluation-harness/gpt2-large-finnish",
"num_fewshot": 0,
"batch_size": null,
"device": "cuda:0",
"no_cache": false,
"limit": null,
"bootstrap_iters": 100000,
"description_dict": {}
}
}