{
"results": {
"bigbench_analogies": {
"multiple_choice_grade": 0.25384615384615383,
"multiple_choice_grade_stderr": 0.038318158508745
},
"bigbench_arithmetic_1_digit_addition": {
"multiple_choice_grade": 0.35,
"multiple_choice_grade_stderr": 0.047937248544110196
},
"bigbench_arithmetic_1_digit_division": {
"multiple_choice_grade": 0.43478260869565216,
"multiple_choice_grade_stderr": 0.10568965974008647
},
"bigbench_arithmetic_1_digit_multiplication": {
"multiple_choice_grade": 0.14,
"multiple_choice_grade_stderr": 0.03487350880197769
},
"bigbench_arithmetic_1_digit_subtraction": {
"multiple_choice_grade": 0.4,
"multiple_choice_grade_stderr": 0.04923659639173309
},
"bigbench_arithmetic_2_digit_addition": {
"multiple_choice_grade": 0.21,
"multiple_choice_grade_stderr": 0.040936018074033256
},
"bigbench_arithmetic_2_digit_division": {
"multiple_choice_grade": 0.32,
"multiple_choice_grade_stderr": 0.046882617226215034
},
"bigbench_arithmetic_2_digit_multiplication": {
"multiple_choice_grade": 0.07,
"multiple_choice_grade_stderr": 0.025643239997624294
},
"bigbench_arithmetic_2_digit_subtraction": {
"multiple_choice_grade": 0.25,
"multiple_choice_grade_stderr": 0.04351941398892446
},
"bigbench_arithmetic_3_digit_addition": {
"multiple_choice_grade": 0.1,
"multiple_choice_grade_stderr": 0.030151134457776334
},
"bigbench_arithmetic_3_digit_division": {
"multiple_choice_grade": 0.19,
"multiple_choice_grade_stderr": 0.03942772444036623
},
"bigbench_arithmetic_3_digit_multiplication": {
"multiple_choice_grade": 0.09,
"multiple_choice_grade_stderr": 0.028762349126466125
},
"bigbench_arithmetic_3_digit_subtraction": {
"multiple_choice_grade": 0.28,
"multiple_choice_grade_stderr": 0.045126085985421276
},
"bigbench_arithmetic_4_digit_addition": {
"multiple_choice_grade": 0.13,
"multiple_choice_grade_stderr": 0.03379976689896308
},
"bigbench_arithmetic_4_digit_division": {
"multiple_choice_grade": 0.17,
"multiple_choice_grade_stderr": 0.0377525168068637
},
"bigbench_arithmetic_4_digit_multiplication": {
"multiple_choice_grade": 0.13,
"multiple_choice_grade_stderr": 0.03379976689896308
},
"bigbench_arithmetic_4_digit_subtraction": {
"multiple_choice_grade": 0.29,
"multiple_choice_grade_stderr": 0.04560480215720684
},
"bigbench_arithmetic_5_digit_addition": {
"multiple_choice_grade": 0.08,
"multiple_choice_grade_stderr": 0.027265992434429093
},
"bigbench_arithmetic_5_digit_division": {
"multiple_choice_grade": 0.12,
"multiple_choice_grade_stderr": 0.03265986323710906
},
"bigbench_arithmetic_5_digit_multiplication": {
"multiple_choice_grade": 0.07,
"multiple_choice_grade_stderr": 0.025643239997624294
},
"bigbench_arithmetic_5_digit_subtraction": {
"multiple_choice_grade": 0.31,
"multiple_choice_grade_stderr": 0.04648231987117316
},
"bigbench_cause_and_effect_one_sentence": {
"multiple_choice_grade": 0.5686274509803921,
"multiple_choice_grade_stderr": 0.07004145529212454
},
"bigbench_cause_and_effect_one_sentence_no_prompt": {
"multiple_choice_grade": 0.6274509803921569,
"multiple_choice_grade_stderr": 0.0683748853888733
},
"bigbench_cause_and_effect_two_sentences": {
"multiple_choice_grade": 0.5882352941176471,
"multiple_choice_grade_stderr": 0.06960093862470136
},
"bigbench_emotions": {
"multiple_choice_grade": 0.1,
"multiple_choice_grade_stderr": 0.023791547571544308
},
"bigbench_empirical_judgments": {
"multiple_choice_grade": 0.24242424242424243,
"multiple_choice_grade_stderr": 0.04329004329004327
},
"bigbench_general_knowledge": {
"multiple_choice_grade": 0.05714285714285714,
"multiple_choice_grade_stderr": 0.02794340839553291
},
"bigbench_hhh_alignment_harmless": {
"multiple_choice_grade": 0.46551724137931033,
"multiple_choice_grade_stderr": 0.06606893520605227
},
"bigbench_hhh_alignment_helpful": {
"multiple_choice_grade": 0.2711864406779661,
"multiple_choice_grade_stderr": 0.05837517703884876
},
"bigbench_hhh_alignment_honest": {
"multiple_choice_grade": 0.3728813559322034,
"multiple_choice_grade_stderr": 0.0634959746661109
},
"bigbench_hhh_alignment_other": {
"multiple_choice_grade": 0.5813953488372093,
"multiple_choice_grade_stderr": 0.07612251984976479
},
"bigbench_intent_recognition": {
"multiple_choice_grade": 0.13583815028901733,
"multiple_choice_grade_stderr": 0.013033750423196561
},
"bigbench_misconceptions": {
"multiple_choice_grade": 0.4925373134328358,
"multiple_choice_grade_stderr": 0.04335066912520505
},
"bigbench_paraphrase": {
"multiple_choice_grade": 0.49,
"multiple_choice_grade_stderr": 0.035436970729343674
},
"bigbench_sentence_ambiguity": {
"multiple_choice_grade": 0.5333333333333333,
"multiple_choice_grade_stderr": 0.06494964005966064
},
"bigbench_similarities_abstraction": {
"multiple_choice_grade": 0.4605263157894737,
"multiple_choice_grade_stderr": 0.057554823516927214
}
},
"versions": {
"bigbench_analogies": 0,
"bigbench_arithmetic_1_digit_addition": 0,
"bigbench_arithmetic_1_digit_division": 0,
"bigbench_arithmetic_1_digit_multiplication": 0,
"bigbench_arithmetic_1_digit_subtraction": 0,
"bigbench_arithmetic_2_digit_addition": 0,
"bigbench_arithmetic_2_digit_division": 0,
"bigbench_arithmetic_2_digit_multiplication": 0,
"bigbench_arithmetic_2_digit_subtraction": 0,
"bigbench_arithmetic_3_digit_addition": 0,
"bigbench_arithmetic_3_digit_division": 0,
"bigbench_arithmetic_3_digit_multiplication": 0,
"bigbench_arithmetic_3_digit_subtraction": 0,
"bigbench_arithmetic_4_digit_addition": 0,
"bigbench_arithmetic_4_digit_division": 0,
"bigbench_arithmetic_4_digit_multiplication": 0,
"bigbench_arithmetic_4_digit_subtraction": 0,
"bigbench_arithmetic_5_digit_addition": 0,
"bigbench_arithmetic_5_digit_division": 0,
"bigbench_arithmetic_5_digit_multiplication": 0,
"bigbench_arithmetic_5_digit_subtraction": 0,
"bigbench_cause_and_effect_one_sentence": 0,
"bigbench_cause_and_effect_one_sentence_no_prompt": 0,
"bigbench_cause_and_effect_two_sentences": 0,
"bigbench_emotions": 0,
"bigbench_empirical_judgments": 0,
"bigbench_general_knowledge": 0,
"bigbench_hhh_alignment_harmless": 0,
"bigbench_hhh_alignment_helpful": 0,
"bigbench_hhh_alignment_honest": 0,
"bigbench_hhh_alignment_other": 0,
"bigbench_intent_recognition": 0,
"bigbench_misconceptions": 0,
"bigbench_paraphrase": 0,
"bigbench_sentence_ambiguity": 0,
"bigbench_similarities_abstraction": 0
},
"config": {
"model": "hf-causal",
"model_args": "pretrained=/pfs/lustrep2/scratch/project_462000185/muennighoff/bigcode-evaluation-harness/gpt2-finnish",
"num_fewshot": 1,
"batch_size": null,
"device": "cuda:0",
"no_cache": false,
"limit": null,
"bootstrap_iters": 100000,
"description_dict": {}
}
}