{
"results": {
"bigbench_analogies": {
"multiple_choice_grade": 0.3769230769230769,
"multiple_choice_grade_stderr": 0.04266800546039561
},
"bigbench_arithmetic_1_digit_addition": {
"multiple_choice_grade": 0.35,
"multiple_choice_grade_stderr": 0.047937248544110196
},
"bigbench_arithmetic_1_digit_division": {
"multiple_choice_grade": 0.6521739130434783,
"multiple_choice_grade_stderr": 0.10154334054280735
},
"bigbench_arithmetic_1_digit_multiplication": {
"multiple_choice_grade": 0.29,
"multiple_choice_grade_stderr": 0.045604802157206845
},
"bigbench_arithmetic_1_digit_subtraction": {
"multiple_choice_grade": 0.25,
"multiple_choice_grade_stderr": 0.04351941398892446
},
"bigbench_arithmetic_2_digit_addition": {
"multiple_choice_grade": 0.0,
"multiple_choice_grade_stderr": 0.0
},
"bigbench_arithmetic_2_digit_division": {
"multiple_choice_grade": 0.55,
"multiple_choice_grade_stderr": 0.049999999999999996
},
"bigbench_arithmetic_2_digit_multiplication": {
"multiple_choice_grade": 0.11,
"multiple_choice_grade_stderr": 0.031446603773522035
},
"bigbench_arithmetic_2_digit_subtraction": {
"multiple_choice_grade": 0.23,
"multiple_choice_grade_stderr": 0.04229525846816505
},
"bigbench_arithmetic_3_digit_addition": {
"multiple_choice_grade": 0.24,
"multiple_choice_grade_stderr": 0.042923469599092816
},
"bigbench_arithmetic_3_digit_division": {
"multiple_choice_grade": 0.22,
"multiple_choice_grade_stderr": 0.041633319989322695
},
"bigbench_arithmetic_3_digit_multiplication": {
"multiple_choice_grade": 0.07,
"multiple_choice_grade_stderr": 0.02564323999762428
},
"bigbench_arithmetic_3_digit_subtraction": {
"multiple_choice_grade": 0.2,
"multiple_choice_grade_stderr": 0.04020151261036845
},
"bigbench_arithmetic_4_digit_addition": {
"multiple_choice_grade": 0.1,
"multiple_choice_grade_stderr": 0.030151134457776348
},
"bigbench_arithmetic_4_digit_division": {
"multiple_choice_grade": 0.2,
"multiple_choice_grade_stderr": 0.04020151261036845
},
"bigbench_arithmetic_4_digit_multiplication": {
"multiple_choice_grade": 0.05,
"multiple_choice_grade_stderr": 0.021904291355759026
},
"bigbench_arithmetic_4_digit_subtraction": {
"multiple_choice_grade": 0.11,
"multiple_choice_grade_stderr": 0.031446603773522035
},
"bigbench_arithmetic_5_digit_addition": {
"multiple_choice_grade": 0.07,
"multiple_choice_grade_stderr": 0.02564323999762429
},
"bigbench_arithmetic_5_digit_division": {
"multiple_choice_grade": 0.13,
"multiple_choice_grade_stderr": 0.03379976689896309
},
"bigbench_arithmetic_5_digit_multiplication": {
"multiple_choice_grade": 0.0,
"multiple_choice_grade_stderr": 0.0
},
"bigbench_arithmetic_5_digit_subtraction": {
"multiple_choice_grade": 0.12,
"multiple_choice_grade_stderr": 0.03265986323710906
},
"bigbench_cause_and_effect_one_sentence": {
"multiple_choice_grade": 0.49019607843137253,
"multiple_choice_grade_stderr": 0.07069708383262727
},
"bigbench_cause_and_effect_one_sentence_no_prompt": {
"multiple_choice_grade": 0.7058823529411765,
"multiple_choice_grade_stderr": 0.06443794794178427
},
"bigbench_cause_and_effect_two_sentences": {
"multiple_choice_grade": 0.43137254901960786,
"multiple_choice_grade_stderr": 0.07004145529212454
},
"bigbench_emotions": {
"multiple_choice_grade": 0.18125,
"multiple_choice_grade_stderr": 0.03055034379985447
},
"bigbench_empirical_judgments": {
"multiple_choice_grade": 0.31313131313131315,
"multiple_choice_grade_stderr": 0.0468475702186087
},
"bigbench_general_knowledge": {
"multiple_choice_grade": 0.34285714285714286,
"multiple_choice_grade_stderr": 0.05714285714285712
},
"bigbench_hhh_alignment_harmless": {
"multiple_choice_grade": 0.3793103448275862,
"multiple_choice_grade_stderr": 0.06426835284800642
},
"bigbench_hhh_alignment_helpful": {
"multiple_choice_grade": 0.3050847457627119,
"multiple_choice_grade_stderr": 0.060459168847106955
},
"bigbench_hhh_alignment_honest": {
"multiple_choice_grade": 0.3728813559322034,
"multiple_choice_grade_stderr": 0.0634959746661109
},
"bigbench_hhh_alignment_other": {
"multiple_choice_grade": 0.6046511627906976,
"multiple_choice_grade_stderr": 0.07544284088704808
},
"bigbench_intent_recognition": {
"multiple_choice_grade": 0.1936416184971098,
"multiple_choice_grade_stderr": 0.015032263521320763
},
"bigbench_misconceptions": {
"multiple_choice_grade": 0.4626865671641791,
"multiple_choice_grade_stderr": 0.04323460286839717
},
"bigbench_paraphrase": {
"multiple_choice_grade": 0.525,
"multiple_choice_grade_stderr": 0.0353997274497642
},
"bigbench_sentence_ambiguity": {
"multiple_choice_grade": 0.5333333333333333,
"multiple_choice_grade_stderr": 0.06494964005966064
},
"bigbench_similarities_abstraction": {
"multiple_choice_grade": 0.5263157894736842,
"multiple_choice_grade_stderr": 0.05765500605317536
}
},
"versions": {
"bigbench_analogies": 0,
"bigbench_arithmetic_1_digit_addition": 0,
"bigbench_arithmetic_1_digit_division": 0,
"bigbench_arithmetic_1_digit_multiplication": 0,
"bigbench_arithmetic_1_digit_subtraction": 0,
"bigbench_arithmetic_2_digit_addition": 0,
"bigbench_arithmetic_2_digit_division": 0,
"bigbench_arithmetic_2_digit_multiplication": 0,
"bigbench_arithmetic_2_digit_subtraction": 0,
"bigbench_arithmetic_3_digit_addition": 0,
"bigbench_arithmetic_3_digit_division": 0,
"bigbench_arithmetic_3_digit_multiplication": 0,
"bigbench_arithmetic_3_digit_subtraction": 0,
"bigbench_arithmetic_4_digit_addition": 0,
"bigbench_arithmetic_4_digit_division": 0,
"bigbench_arithmetic_4_digit_multiplication": 0,
"bigbench_arithmetic_4_digit_subtraction": 0,
"bigbench_arithmetic_5_digit_addition": 0,
"bigbench_arithmetic_5_digit_division": 0,
"bigbench_arithmetic_5_digit_multiplication": 0,
"bigbench_arithmetic_5_digit_subtraction": 0,
"bigbench_cause_and_effect_one_sentence": 0,
"bigbench_cause_and_effect_one_sentence_no_prompt": 0,
"bigbench_cause_and_effect_two_sentences": 0,
"bigbench_emotions": 0,
"bigbench_empirical_judgments": 0,
"bigbench_general_knowledge": 0,
"bigbench_hhh_alignment_harmless": 0,
"bigbench_hhh_alignment_helpful": 0,
"bigbench_hhh_alignment_honest": 0,
"bigbench_hhh_alignment_other": 0,
"bigbench_intent_recognition": 0,
"bigbench_misconceptions": 0,
"bigbench_paraphrase": 0,
"bigbench_sentence_ambiguity": 0,
"bigbench_similarities_abstraction": 0
},
"config": {
"model": "hf-causal",
"model_args": "pretrained=TurkuNLP/gpt3-finnish-medium",
"num_fewshot": 0,
"batch_size": null,
"device": "cuda:0",
"no_cache": false,
"limit": null,
"bootstrap_iters": 100000,
"description_dict": {}
}
}