evaluation/gpt2_finnish_large_bigbench_3shot.json
{
"results": {
"bigbench_analogies": {
"multiple_choice_grade": 0.23076923076923078,
"multiple_choice_grade_stderr": 0.03709560170541631
},
"bigbench_arithmetic_1_digit_addition": {
"multiple_choice_grade": 0.35,
"multiple_choice_grade_stderr": 0.047937248544110196
},
"bigbench_arithmetic_1_digit_division": {
"multiple_choice_grade": 0.5652173913043478,
"multiple_choice_grade_stderr": 0.10568965974008646
},
"bigbench_arithmetic_1_digit_multiplication": {
"multiple_choice_grade": 0.19,
"multiple_choice_grade_stderr": 0.039427724440366234
},
"bigbench_arithmetic_1_digit_subtraction": {
"multiple_choice_grade": 0.48,
"multiple_choice_grade_stderr": 0.050211673156867795
},
"bigbench_arithmetic_2_digit_addition": {
"multiple_choice_grade": 0.1,
"multiple_choice_grade_stderr": 0.030151134457776358
},
"bigbench_arithmetic_2_digit_division": {
"multiple_choice_grade": 0.36,
"multiple_choice_grade_stderr": 0.04824181513244218
},
"bigbench_arithmetic_2_digit_multiplication": {
"multiple_choice_grade": 0.12,
"multiple_choice_grade_stderr": 0.03265986323710906
},
"bigbench_arithmetic_2_digit_subtraction": {
"multiple_choice_grade": 0.2,
"multiple_choice_grade_stderr": 0.040201512610368445
},
"bigbench_arithmetic_3_digit_addition": {
"multiple_choice_grade": 0.09,
"multiple_choice_grade_stderr": 0.02876234912646613
},
"bigbench_arithmetic_3_digit_division": {
"multiple_choice_grade": 0.12,
"multiple_choice_grade_stderr": 0.03265986323710906
},
"bigbench_arithmetic_3_digit_multiplication": {
"multiple_choice_grade": 0.18,
"multiple_choice_grade_stderr": 0.03861229196653697
},
"bigbench_arithmetic_3_digit_subtraction": {
"multiple_choice_grade": 0.27,
"multiple_choice_grade_stderr": 0.04461960433384741
},
"bigbench_arithmetic_4_digit_addition": {
"multiple_choice_grade": 0.15,
"multiple_choice_grade_stderr": 0.035887028128263714
},
"bigbench_arithmetic_4_digit_division": {
"multiple_choice_grade": 0.17,
"multiple_choice_grade_stderr": 0.0377525168068637
},
"bigbench_arithmetic_4_digit_multiplication": {
"multiple_choice_grade": 0.09,
"multiple_choice_grade_stderr": 0.028762349126466125
},
"bigbench_arithmetic_4_digit_subtraction": {
"multiple_choice_grade": 0.22,
"multiple_choice_grade_stderr": 0.0416333199893227
},
"bigbench_arithmetic_5_digit_addition": {
"multiple_choice_grade": 0.12,
"multiple_choice_grade_stderr": 0.03265986323710906
},
"bigbench_arithmetic_5_digit_division": {
"multiple_choice_grade": 0.11,
"multiple_choice_grade_stderr": 0.031446603773522035
},
"bigbench_arithmetic_5_digit_multiplication": {
"multiple_choice_grade": 0.16,
"multiple_choice_grade_stderr": 0.03684529491774708
},
"bigbench_arithmetic_5_digit_subtraction": {
"multiple_choice_grade": 0.29,
"multiple_choice_grade_stderr": 0.045604802157206845
},
"bigbench_cause_and_effect_one_sentence": {
"multiple_choice_grade": 0.5882352941176471,
"multiple_choice_grade_stderr": 0.06960093862470136
},
"bigbench_cause_and_effect_one_sentence_no_prompt": {
"multiple_choice_grade": 0.6078431372549019,
"multiple_choice_grade_stderr": 0.0690463406339569
},
"bigbench_cause_and_effect_two_sentences": {
"multiple_choice_grade": 0.47058823529411764,
"multiple_choice_grade_stderr": 0.07058823529411762
},
"bigbench_emotions": {
"multiple_choice_grade": 0.18125,
"multiple_choice_grade_stderr": 0.030550343799854465
},
"bigbench_empirical_judgments": {
"multiple_choice_grade": 0.35353535353535354,
"multiple_choice_grade_stderr": 0.04829206502361188
},
"bigbench_general_knowledge": {
"multiple_choice_grade": 0.2571428571428571,
"multiple_choice_grade_stderr": 0.052615698346701524
},
"bigbench_hhh_alignment_harmless": {
"multiple_choice_grade": 0.43103448275862066,
"multiple_choice_grade_stderr": 0.06559361295281742
},
"bigbench_hhh_alignment_helpful": {
"multiple_choice_grade": 0.288135593220339,
"multiple_choice_grade_stderr": 0.059467967781548406
},
"bigbench_hhh_alignment_honest": {
"multiple_choice_grade": 0.3389830508474576,
"multiple_choice_grade_stderr": 0.062155747381159164
},
"bigbench_hhh_alignment_other": {
"multiple_choice_grade": 0.5813953488372093,
"multiple_choice_grade_stderr": 0.07612251984976479
},
"bigbench_intent_recognition": {
"multiple_choice_grade": 0.17341040462427745,
"multiple_choice_grade_stderr": 0.014402686887147381
},
"bigbench_misconceptions": {
"multiple_choice_grade": 0.5074626865671642,
"multiple_choice_grade_stderr": 0.04335066912520504
},
"bigbench_paraphrase": {
"multiple_choice_grade": 0.47,
"multiple_choice_grade_stderr": 0.03538020341900045
},
"bigbench_sentence_ambiguity": {
"multiple_choice_grade": 0.5166666666666667,
"multiple_choice_grade_stderr": 0.06505828185300304
},
"bigbench_similarities_abstraction": {
"multiple_choice_grade": 0.5526315789473685,
"multiple_choice_grade_stderr": 0.05741427428755636
}
},
"versions": {
"bigbench_analogies": 0,
"bigbench_arithmetic_1_digit_addition": 0,
"bigbench_arithmetic_1_digit_division": 0,
"bigbench_arithmetic_1_digit_multiplication": 0,
"bigbench_arithmetic_1_digit_subtraction": 0,
"bigbench_arithmetic_2_digit_addition": 0,
"bigbench_arithmetic_2_digit_division": 0,
"bigbench_arithmetic_2_digit_multiplication": 0,
"bigbench_arithmetic_2_digit_subtraction": 0,
"bigbench_arithmetic_3_digit_addition": 0,
"bigbench_arithmetic_3_digit_division": 0,
"bigbench_arithmetic_3_digit_multiplication": 0,
"bigbench_arithmetic_3_digit_subtraction": 0,
"bigbench_arithmetic_4_digit_addition": 0,
"bigbench_arithmetic_4_digit_division": 0,
"bigbench_arithmetic_4_digit_multiplication": 0,
"bigbench_arithmetic_4_digit_subtraction": 0,
"bigbench_arithmetic_5_digit_addition": 0,
"bigbench_arithmetic_5_digit_division": 0,
"bigbench_arithmetic_5_digit_multiplication": 0,
"bigbench_arithmetic_5_digit_subtraction": 0,
"bigbench_cause_and_effect_one_sentence": 0,
"bigbench_cause_and_effect_one_sentence_no_prompt": 0,
"bigbench_cause_and_effect_two_sentences": 0,
"bigbench_emotions": 0,
"bigbench_empirical_judgments": 0,
"bigbench_general_knowledge": 0,
"bigbench_hhh_alignment_harmless": 0,
"bigbench_hhh_alignment_helpful": 0,
"bigbench_hhh_alignment_honest": 0,
"bigbench_hhh_alignment_other": 0,
"bigbench_intent_recognition": 0,
"bigbench_misconceptions": 0,
"bigbench_paraphrase": 0,
"bigbench_sentence_ambiguity": 0,
"bigbench_similarities_abstraction": 0
},
"config": {
"model": "hf-causal",
"model_args": "pretrained=/pfs/lustrep2/scratch/project_462000185/muennighoff/bigcode-evaluation-harness/gpt2-large-finnish",
"num_fewshot": 3,
"batch_size": null,
"device": "cuda:0",
"no_cache": false,
"limit": null,
"bootstrap_iters": 100000,
"description_dict": {}
}
}
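
Below is a minimal sketch, not part of the original file, of how a results file like this one (which appears to be lm-evaluation-harness output, given fields such as "hf-causal", "num_fewshot", and "bootstrap_iters") might be loaded and summarized. The file name passed to open() is an assumption based on the repository path above.

```python
# Hypothetical helper: load this results JSON and print per-task scores.
# Assumes the file sits in the current directory under the name shown
# in the repository path; adjust the path as needed.
import json

with open("gpt2_finnish_large_bigbench_3shot.json") as f:
    data = json.load(f)

# Each task maps to a dict holding "multiple_choice_grade" and its stderr.
for task, metrics in sorted(data["results"].items()):
    grade = metrics["multiple_choice_grade"]
    stderr = metrics["multiple_choice_grade_stderr"]
    print(f"{task:55s} {grade:.3f} +/- {stderr:.3f}")

# Unweighted mean accuracy across the 36 BIG-bench subtasks in this file.
grades = [m["multiple_choice_grade"] for m in data["results"].values()]
print(f"mean multiple_choice_grade: {sum(grades) / len(grades):.3f}")
```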