evaluation/gpt2_finnish_bigbench_0shot.json
{
"results": {
"bigbench_analogies": {
"multiple_choice_grade": 0.23846153846153847,
"multiple_choice_grade_stderr": 0.03751977598816766
},
"bigbench_arithmetic_1_digit_addition": {
"multiple_choice_grade": 0.35,
"multiple_choice_grade_stderr": 0.04793724854411019
},
"bigbench_arithmetic_1_digit_division": {
"multiple_choice_grade": 0.6521739130434783,
"multiple_choice_grade_stderr": 0.10154334054280736
},
"bigbench_arithmetic_1_digit_multiplication": {
"multiple_choice_grade": 0.16,
"multiple_choice_grade_stderr": 0.03684529491774707
},
"bigbench_arithmetic_1_digit_subtraction": {
"multiple_choice_grade": 0.29,
"multiple_choice_grade_stderr": 0.04560480215720683
},
"bigbench_arithmetic_2_digit_addition": {
"multiple_choice_grade": 0.09,
"multiple_choice_grade_stderr": 0.028762349126466146
},
"bigbench_arithmetic_2_digit_division": {
"multiple_choice_grade": 0.46,
"multiple_choice_grade_stderr": 0.05009082659620333
},
"bigbench_arithmetic_2_digit_multiplication": {
"multiple_choice_grade": 0.06,
"multiple_choice_grade_stderr": 0.023868325657594166
},
"bigbench_arithmetic_2_digit_subtraction": {
"multiple_choice_grade": 0.14,
"multiple_choice_grade_stderr": 0.034873508801977725
},
"bigbench_arithmetic_3_digit_addition": {
"multiple_choice_grade": 0.03,
"multiple_choice_grade_stderr": 0.017144660799776557
},
"bigbench_arithmetic_3_digit_division": {
"multiple_choice_grade": 0.17,
"multiple_choice_grade_stderr": 0.0377525168068637
},
"bigbench_arithmetic_3_digit_multiplication": {
"multiple_choice_grade": 0.04,
"multiple_choice_grade_stderr": 0.019694638556693216
},
"bigbench_arithmetic_3_digit_subtraction": {
"multiple_choice_grade": 0.02,
"multiple_choice_grade_stderr": 0.01407052941362896
},
"bigbench_arithmetic_4_digit_addition": {
"multiple_choice_grade": 0.16,
"multiple_choice_grade_stderr": 0.0368452949177471
},
"bigbench_arithmetic_4_digit_division": {
"multiple_choice_grade": 0.18,
"multiple_choice_grade_stderr": 0.03861229196653694
},
"bigbench_arithmetic_4_digit_multiplication": {
"multiple_choice_grade": 0.1,
"multiple_choice_grade_stderr": 0.030151134457776334
},
"bigbench_arithmetic_4_digit_subtraction": {
"multiple_choice_grade": 0.08,
"multiple_choice_grade_stderr": 0.027265992434429086
},
"bigbench_arithmetic_5_digit_addition": {
"multiple_choice_grade": 0.13,
"multiple_choice_grade_stderr": 0.033799766898963086
},
"bigbench_arithmetic_5_digit_division": {
"multiple_choice_grade": 0.1,
"multiple_choice_grade_stderr": 0.030151134457776334
},
"bigbench_arithmetic_5_digit_multiplication": {
"multiple_choice_grade": 0.08,
"multiple_choice_grade_stderr": 0.0272659924344291
},
"bigbench_arithmetic_5_digit_subtraction": {
"multiple_choice_grade": 0.03,
"multiple_choice_grade_stderr": 0.017144660799776525
},
"bigbench_cause_and_effect_one_sentence": {
"multiple_choice_grade": 0.45098039215686275,
"multiple_choice_grade_stderr": 0.0703700331173583
},
"bigbench_cause_and_effect_one_sentence_no_prompt": {
"multiple_choice_grade": 0.6862745098039216,
"multiple_choice_grade_stderr": 0.06562039423796669
},
"bigbench_cause_and_effect_two_sentences": {
"multiple_choice_grade": 0.6666666666666666,
"multiple_choice_grade_stderr": 0.06666666666666664
},
"bigbench_emotions": {
"multiple_choice_grade": 0.2,
"multiple_choice_grade_stderr": 0.031722063428725716
},
"bigbench_empirical_judgments": {
"multiple_choice_grade": 0.32323232323232326,
"multiple_choice_grade_stderr": 0.047245903445151234
},
"bigbench_general_knowledge": {
"multiple_choice_grade": 0.21428571428571427,
"multiple_choice_grade_stderr": 0.049397433914866055
},
"bigbench_hhh_alignment_harmless": {
"multiple_choice_grade": 0.4482758620689655,
"multiple_choice_grade_stderr": 0.06587130109529918
},
"bigbench_hhh_alignment_helpful": {
"multiple_choice_grade": 0.2711864406779661,
"multiple_choice_grade_stderr": 0.05837517703884876
},
"bigbench_hhh_alignment_honest": {
"multiple_choice_grade": 0.3728813559322034,
"multiple_choice_grade_stderr": 0.0634959746661109
},
"bigbench_hhh_alignment_other": {
"multiple_choice_grade": 0.6046511627906976,
"multiple_choice_grade_stderr": 0.07544284088704808
},
"bigbench_intent_recognition": {
"multiple_choice_grade": 0.18063583815028902,
"multiple_choice_grade_stderr": 0.014635292876381762
},
"bigbench_misconceptions": {
"multiple_choice_grade": 0.48507462686567165,
"multiple_choice_grade_stderr": 0.04333617784312701
},
"bigbench_paraphrase": {
"multiple_choice_grade": 0.525,
"multiple_choice_grade_stderr": 0.035399727449764204
},
"bigbench_sentence_ambiguity": {
"multiple_choice_grade": 0.5333333333333333,
"multiple_choice_grade_stderr": 0.06494964005966064
},
"bigbench_similarities_abstraction": {
"multiple_choice_grade": 0.47368421052631576,
"multiple_choice_grade_stderr": 0.057655006053175376
}
},
"versions": {
"bigbench_analogies": 0,
"bigbench_arithmetic_1_digit_addition": 0,
"bigbench_arithmetic_1_digit_division": 0,
"bigbench_arithmetic_1_digit_multiplication": 0,
"bigbench_arithmetic_1_digit_subtraction": 0,
"bigbench_arithmetic_2_digit_addition": 0,
"bigbench_arithmetic_2_digit_division": 0,
"bigbench_arithmetic_2_digit_multiplication": 0,
"bigbench_arithmetic_2_digit_subtraction": 0,
"bigbench_arithmetic_3_digit_addition": 0,
"bigbench_arithmetic_3_digit_division": 0,
"bigbench_arithmetic_3_digit_multiplication": 0,
"bigbench_arithmetic_3_digit_subtraction": 0,
"bigbench_arithmetic_4_digit_addition": 0,
"bigbench_arithmetic_4_digit_division": 0,
"bigbench_arithmetic_4_digit_multiplication": 0,
"bigbench_arithmetic_4_digit_subtraction": 0,
"bigbench_arithmetic_5_digit_addition": 0,
"bigbench_arithmetic_5_digit_division": 0,
"bigbench_arithmetic_5_digit_multiplication": 0,
"bigbench_arithmetic_5_digit_subtraction": 0,
"bigbench_cause_and_effect_one_sentence": 0,
"bigbench_cause_and_effect_one_sentence_no_prompt": 0,
"bigbench_cause_and_effect_two_sentences": 0,
"bigbench_emotions": 0,
"bigbench_empirical_judgments": 0,
"bigbench_general_knowledge": 0,
"bigbench_hhh_alignment_harmless": 0,
"bigbench_hhh_alignment_helpful": 0,
"bigbench_hhh_alignment_honest": 0,
"bigbench_hhh_alignment_other": 0,
"bigbench_intent_recognition": 0,
"bigbench_misconceptions": 0,
"bigbench_paraphrase": 0,
"bigbench_sentence_ambiguity": 0,
"bigbench_similarities_abstraction": 0
},
"config": {
"model": "hf-causal",
"model_args": "pretrained=/pfs/lustrep2/scratch/project_462000185/muennighoff/bigcode-evaluation-harness/gpt2-finnish",
"num_fewshot": 0,
"batch_size": null,
"device": "cuda:0",
"no_cache": false,
"limit": null,
"bootstrap_iters": 100000,
"description_dict": {}
}
}