{
"results": {
"bigbench_analogies": {
"multiple_choice_grade": 0.2923076923076923,
"multiple_choice_grade_stderr": 0.0400449268361614
},
"bigbench_arithmetic_1_digit_addition": {
"multiple_choice_grade": 0.27,
"multiple_choice_grade_stderr": 0.0446196043338474
},
"bigbench_arithmetic_1_digit_division": {
"multiple_choice_grade": 0.6521739130434783,
"multiple_choice_grade_stderr": 0.10154334054280735
},
"bigbench_arithmetic_1_digit_multiplication": {
"multiple_choice_grade": 0.14,
"multiple_choice_grade_stderr": 0.034873508801977704
},
"bigbench_arithmetic_1_digit_subtraction": {
"multiple_choice_grade": 0.35,
"multiple_choice_grade_stderr": 0.047937248544110196
},
"bigbench_arithmetic_2_digit_addition": {
"multiple_choice_grade": 0.1,
"multiple_choice_grade_stderr": 0.030151134457776348
},
"bigbench_arithmetic_2_digit_division": {
"multiple_choice_grade": 0.31,
"multiple_choice_grade_stderr": 0.04648231987117316
},
"bigbench_arithmetic_2_digit_multiplication": {
"multiple_choice_grade": 0.03,
"multiple_choice_grade_stderr": 0.017144660799776522
},
"bigbench_arithmetic_2_digit_subtraction": {
"multiple_choice_grade": 0.05,
"multiple_choice_grade_stderr": 0.021904291355759026
},
"bigbench_arithmetic_3_digit_addition": {
"multiple_choice_grade": 0.05,
"multiple_choice_grade_stderr": 0.021904291355759026
},
"bigbench_arithmetic_3_digit_division": {
"multiple_choice_grade": 0.15,
"multiple_choice_grade_stderr": 0.03588702812826372
},
"bigbench_arithmetic_3_digit_multiplication": {
"multiple_choice_grade": 0.06,
"multiple_choice_grade_stderr": 0.023868325657594197
},
"bigbench_arithmetic_3_digit_subtraction": {
"multiple_choice_grade": 0.07,
"multiple_choice_grade_stderr": 0.025643239997624294
},
"bigbench_arithmetic_4_digit_addition": {
"multiple_choice_grade": 0.12,
"multiple_choice_grade_stderr": 0.032659863237109066
},
"bigbench_arithmetic_4_digit_division": {
"multiple_choice_grade": 0.11,
"multiple_choice_grade_stderr": 0.03144660377352203
},
"bigbench_arithmetic_4_digit_multiplication": {
"multiple_choice_grade": 0.08,
"multiple_choice_grade_stderr": 0.027265992434429103
},
"bigbench_arithmetic_4_digit_subtraction": {
"multiple_choice_grade": 0.07,
"multiple_choice_grade_stderr": 0.02564323999762429
},
"bigbench_arithmetic_5_digit_addition": {
"multiple_choice_grade": 0.13,
"multiple_choice_grade_stderr": 0.033799766898963086
},
"bigbench_arithmetic_5_digit_division": {
"multiple_choice_grade": 0.1,
"multiple_choice_grade_stderr": 0.030151134457776334
},
"bigbench_arithmetic_5_digit_multiplication": {
"multiple_choice_grade": 0.01,
"multiple_choice_grade_stderr": 0.009999999999999995
},
"bigbench_arithmetic_5_digit_subtraction": {
"multiple_choice_grade": 0.1,
"multiple_choice_grade_stderr": 0.030151134457776348
},
"bigbench_cause_and_effect_one_sentence": {
"multiple_choice_grade": 0.5490196078431373,
"multiple_choice_grade_stderr": 0.07037003311735827
},
"bigbench_cause_and_effect_one_sentence_no_prompt": {
"multiple_choice_grade": 0.6470588235294118,
"multiple_choice_grade_stderr": 0.0675830899592709
},
"bigbench_cause_and_effect_two_sentences": {
"multiple_choice_grade": 0.6274509803921569,
"multiple_choice_grade_stderr": 0.06837488538887332
},
"bigbench_emotions": {
"multiple_choice_grade": 0.10625,
"multiple_choice_grade_stderr": 0.02443846559481445
},
"bigbench_empirical_judgments": {
"multiple_choice_grade": 0.3434343434343434,
"multiple_choice_grade_stderr": 0.04796759058757481
},
"bigbench_general_knowledge": {
"multiple_choice_grade": 0.08571428571428572,
"multiple_choice_grade_stderr": 0.03370101854969077
},
"bigbench_hhh_alignment_harmless": {
"multiple_choice_grade": 0.5,
"multiple_choice_grade_stderr": 0.06622661785325219
},
"bigbench_hhh_alignment_helpful": {
"multiple_choice_grade": 0.2542372881355932,
"multiple_choice_grade_stderr": 0.05717497183586486
},
"bigbench_hhh_alignment_honest": {
"multiple_choice_grade": 0.3220338983050847,
"multiple_choice_grade_stderr": 0.06135370413564329
},
"bigbench_hhh_alignment_other": {
"multiple_choice_grade": 0.5581395348837209,
"multiple_choice_grade_stderr": 0.07662832288817804
},
"bigbench_intent_recognition": {
"multiple_choice_grade": 0.12572254335260116,
"multiple_choice_grade_stderr": 0.012612239102334798
},
"bigbench_misconceptions": {
"multiple_choice_grade": 0.5373134328358209,
"multiple_choice_grade_stderr": 0.04323460286839717
},
"bigbench_paraphrase": {
"multiple_choice_grade": 0.535,
"multiple_choice_grade_stderr": 0.035357115664894224
},
"bigbench_sentence_ambiguity": {
"multiple_choice_grade": 0.5333333333333333,
"multiple_choice_grade_stderr": 0.06494964005966064
},
"bigbench_similarities_abstraction": {
"multiple_choice_grade": 0.4605263157894737,
"multiple_choice_grade_stderr": 0.0575548235169272
}
},
"versions": {
"bigbench_analogies": 0,
"bigbench_arithmetic_1_digit_addition": 0,
"bigbench_arithmetic_1_digit_division": 0,
"bigbench_arithmetic_1_digit_multiplication": 0,
"bigbench_arithmetic_1_digit_subtraction": 0,
"bigbench_arithmetic_2_digit_addition": 0,
"bigbench_arithmetic_2_digit_division": 0,
"bigbench_arithmetic_2_digit_multiplication": 0,
"bigbench_arithmetic_2_digit_subtraction": 0,
"bigbench_arithmetic_3_digit_addition": 0,
"bigbench_arithmetic_3_digit_division": 0,
"bigbench_arithmetic_3_digit_multiplication": 0,
"bigbench_arithmetic_3_digit_subtraction": 0,
"bigbench_arithmetic_4_digit_addition": 0,
"bigbench_arithmetic_4_digit_division": 0,
"bigbench_arithmetic_4_digit_multiplication": 0,
"bigbench_arithmetic_4_digit_subtraction": 0,
"bigbench_arithmetic_5_digit_addition": 0,
"bigbench_arithmetic_5_digit_division": 0,
"bigbench_arithmetic_5_digit_multiplication": 0,
"bigbench_arithmetic_5_digit_subtraction": 0,
"bigbench_cause_and_effect_one_sentence": 0,
"bigbench_cause_and_effect_one_sentence_no_prompt": 0,
"bigbench_cause_and_effect_two_sentences": 0,
"bigbench_emotions": 0,
"bigbench_empirical_judgments": 0,
"bigbench_general_knowledge": 0,
"bigbench_hhh_alignment_harmless": 0,
"bigbench_hhh_alignment_helpful": 0,
"bigbench_hhh_alignment_honest": 0,
"bigbench_hhh_alignment_other": 0,
"bigbench_intent_recognition": 0,
"bigbench_misconceptions": 0,
"bigbench_paraphrase": 0,
"bigbench_sentence_ambiguity": 0,
"bigbench_similarities_abstraction": 0
},
"config": {
"model": "hf-causal",
"model_args": "pretrained=/pfs/lustrep2/scratch/project_462000185/muennighoff/bigcode-evaluation-harness/gpt2-finnish",
"num_fewshot": 3,
"batch_size": null,
"device": "cuda:0",
"no_cache": false,
"limit": null,
"bootstrap_iters": 100000,
"description_dict": {}
}
}