{
"results": {
"bigbench_analogies": {
"multiple_choice_grade": 0.36153846153846153,
"multiple_choice_grade_stderr": 0.042300915595389274
},
"bigbench_arithmetic_1_digit_addition": {
"multiple_choice_grade": 0.35,
"multiple_choice_grade_stderr": 0.0479372485441102
},
"bigbench_arithmetic_1_digit_division": {
"multiple_choice_grade": 0.5217391304347826,
"multiple_choice_grade_stderr": 0.10649955403405122
},
"bigbench_arithmetic_1_digit_multiplication": {
"multiple_choice_grade": 0.27,
"multiple_choice_grade_stderr": 0.044619604333847394
},
"bigbench_arithmetic_1_digit_subtraction": {
"multiple_choice_grade": 0.43,
"multiple_choice_grade_stderr": 0.049756985195624284
},
"bigbench_arithmetic_2_digit_addition": {
"multiple_choice_grade": 0.23,
"multiple_choice_grade_stderr": 0.04229525846816506
},
"bigbench_arithmetic_2_digit_division": {
"multiple_choice_grade": 0.26,
"multiple_choice_grade_stderr": 0.0440844002276808
},
"bigbench_arithmetic_2_digit_multiplication": {
"multiple_choice_grade": 0.04,
"multiple_choice_grade_stderr": 0.019694638556693244
},
"bigbench_arithmetic_2_digit_subtraction": {
"multiple_choice_grade": 0.34,
"multiple_choice_grade_stderr": 0.04760952285695236
},
"bigbench_arithmetic_3_digit_addition": {
"multiple_choice_grade": 0.03,
"multiple_choice_grade_stderr": 0.01714466079977653
},
"bigbench_arithmetic_3_digit_division": {
"multiple_choice_grade": 0.17,
"multiple_choice_grade_stderr": 0.03775251680686371
},
"bigbench_arithmetic_3_digit_multiplication": {
"multiple_choice_grade": 0.06,
"multiple_choice_grade_stderr": 0.02386832565759416
},
"bigbench_arithmetic_3_digit_subtraction": {
"multiple_choice_grade": 0.32,
"multiple_choice_grade_stderr": 0.04688261722621504
},
"bigbench_arithmetic_4_digit_addition": {
"multiple_choice_grade": 0.16,
"multiple_choice_grade_stderr": 0.0368452949177471
},
"bigbench_arithmetic_4_digit_division": {
"multiple_choice_grade": 0.17,
"multiple_choice_grade_stderr": 0.0377525168068637
},
"bigbench_arithmetic_4_digit_multiplication": {
"multiple_choice_grade": 0.01,
"multiple_choice_grade_stderr": 0.009999999999999993
},
"bigbench_arithmetic_4_digit_subtraction": {
"multiple_choice_grade": 0.16,
"multiple_choice_grade_stderr": 0.036845294917747094
},
"bigbench_arithmetic_5_digit_addition": {
"multiple_choice_grade": 0.05,
"multiple_choice_grade_stderr": 0.021904291355759088
},
"bigbench_arithmetic_5_digit_division": {
"multiple_choice_grade": 0.16,
"multiple_choice_grade_stderr": 0.036845294917747066
},
"bigbench_arithmetic_5_digit_multiplication": {
"multiple_choice_grade": 0.03,
"multiple_choice_grade_stderr": 0.017144660799776553
},
"bigbench_arithmetic_5_digit_subtraction": {
"multiple_choice_grade": 0.2,
"multiple_choice_grade_stderr": 0.04020151261036846
},
"bigbench_cause_and_effect_one_sentence": {
"multiple_choice_grade": 0.6666666666666666,
"multiple_choice_grade_stderr": 0.06666666666666664
},
"bigbench_cause_and_effect_one_sentence_no_prompt": {
"multiple_choice_grade": 0.7058823529411765,
"multiple_choice_grade_stderr": 0.06443794794178427
},
"bigbench_cause_and_effect_two_sentences": {
"multiple_choice_grade": 0.49019607843137253,
"multiple_choice_grade_stderr": 0.07069708383262727
},
"bigbench_emotions": {
"multiple_choice_grade": 0.2,
"multiple_choice_grade_stderr": 0.03172206342872573
},
"bigbench_empirical_judgments": {
"multiple_choice_grade": 0.24242424242424243,
"multiple_choice_grade_stderr": 0.04329004329004327
},
"bigbench_general_knowledge": {
"multiple_choice_grade": 0.22857142857142856,
"multiple_choice_grade_stderr": 0.05055152782453619
},
"bigbench_hhh_alignment_harmless": {
"multiple_choice_grade": 0.43103448275862066,
"multiple_choice_grade_stderr": 0.06559361295281742
},
"bigbench_hhh_alignment_helpful": {
"multiple_choice_grade": 0.288135593220339,
"multiple_choice_grade_stderr": 0.059467967781548406
},
"bigbench_hhh_alignment_honest": {
"multiple_choice_grade": 0.3389830508474576,
"multiple_choice_grade_stderr": 0.062155747381159164
},
"bigbench_hhh_alignment_other": {
"multiple_choice_grade": 0.5813953488372093,
"multiple_choice_grade_stderr": 0.07612251984976479
},
"bigbench_intent_recognition": {
"multiple_choice_grade": 0.19653179190751446,
"multiple_choice_grade_stderr": 0.015116864901597936
},
"bigbench_misconceptions": {
"multiple_choice_grade": 0.4925373134328358,
"multiple_choice_grade_stderr": 0.04335066912520505
},
"bigbench_paraphrase": {
"multiple_choice_grade": 0.5,
"multiple_choice_grade_stderr": 0.0354440602504168
},
"bigbench_sentence_ambiguity": {
"multiple_choice_grade": 0.5166666666666667,
"multiple_choice_grade_stderr": 0.06505828185300304
},
"bigbench_similarities_abstraction": {
"multiple_choice_grade": 0.5263157894736842,
"multiple_choice_grade_stderr": 0.05765500605317536
}
},
"versions": {
"bigbench_analogies": 0,
"bigbench_arithmetic_1_digit_addition": 0,
"bigbench_arithmetic_1_digit_division": 0,
"bigbench_arithmetic_1_digit_multiplication": 0,
"bigbench_arithmetic_1_digit_subtraction": 0,
"bigbench_arithmetic_2_digit_addition": 0,
"bigbench_arithmetic_2_digit_division": 0,
"bigbench_arithmetic_2_digit_multiplication": 0,
"bigbench_arithmetic_2_digit_subtraction": 0,
"bigbench_arithmetic_3_digit_addition": 0,
"bigbench_arithmetic_3_digit_division": 0,
"bigbench_arithmetic_3_digit_multiplication": 0,
"bigbench_arithmetic_3_digit_subtraction": 0,
"bigbench_arithmetic_4_digit_addition": 0,
"bigbench_arithmetic_4_digit_division": 0,
"bigbench_arithmetic_4_digit_multiplication": 0,
"bigbench_arithmetic_4_digit_subtraction": 0,
"bigbench_arithmetic_5_digit_addition": 0,
"bigbench_arithmetic_5_digit_division": 0,
"bigbench_arithmetic_5_digit_multiplication": 0,
"bigbench_arithmetic_5_digit_subtraction": 0,
"bigbench_cause_and_effect_one_sentence": 0,
"bigbench_cause_and_effect_one_sentence_no_prompt": 0,
"bigbench_cause_and_effect_two_sentences": 0,
"bigbench_emotions": 0,
"bigbench_empirical_judgments": 0,
"bigbench_general_knowledge": 0,
"bigbench_hhh_alignment_harmless": 0,
"bigbench_hhh_alignment_helpful": 0,
"bigbench_hhh_alignment_honest": 0,
"bigbench_hhh_alignment_other": 0,
"bigbench_intent_recognition": 0,
"bigbench_misconceptions": 0,
"bigbench_paraphrase": 0,
"bigbench_sentence_ambiguity": 0,
"bigbench_similarities_abstraction": 0
},
"config": {
"model": "hf-causal",
"model_args": "pretrained=/pfs/lustrep2/scratch/project_462000185/muennighoff/bigcode-evaluation-harness/gpt2-medium-finnish",
"num_fewshot": 1,
"batch_size": null,
"device": "cuda:0",
"no_cache": false,
"limit": null,
"bootstrap_iters": 100000,
"description_dict": {}
}
}
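
The structure above matches the output schema of the EleutherAI lm-evaluation-harness (per-task `results`, task `versions`, and the run `config`). As a minimal sketch, assuming Python 3 and this file saved locally as `gpt2_finnish_medium_bigbench_1shot.json` (the filename is an assumption for illustration), the per-task grades can be loaded and summarized like so:

```python
import json

# Load the harness output (the path is an assumption for illustration).
with open("gpt2_finnish_medium_bigbench_1shot.json") as f:
    data = json.load(f)

results = data["results"]

# Print tasks sorted by multiple-choice accuracy, best first,
# alongside the bootstrap standard error reported by the harness.
for task, metrics in sorted(
    results.items(),
    key=lambda kv: kv[1]["multiple_choice_grade"],
    reverse=True,
):
    grade = metrics["multiple_choice_grade"]
    stderr = metrics["multiple_choice_grade_stderr"]
    print(f"{task:55s} {grade:.3f} +/- {stderr:.3f}")

# Unweighted mean across all 36 BIG-bench subtasks in this file.
mean = sum(m["multiple_choice_grade"] for m in results.values()) / len(results)
print(f"\nmean multiple_choice_grade: {mean:.3f}")
```

Note that the mean is a simple unweighted average over subtasks; the subtasks have different numbers of examples, so this is a coarse summary rather than an example-weighted accuracy.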