bootstrapping for stddev: perplexity

```json
{
  "results": {
    "arc_challenge": {
      "acc,none": 0.2431740614334471,
      "acc_stderr,none": 0.012536554144587082,
      "acc_norm,none": 0.27303754266211605,
      "acc_norm_stderr,none": 0.013019332762635751
    },
    "arc_easy": {
      "acc,none": 0.5441919191919192,
      "acc_stderr,none": 0.010219631763437853,
      "acc_norm,none": 0.47853535353535354,
      "acc_norm_stderr,none": 0.010250325159456656
    },
    "boolq": {
      "acc,none": 0.618960244648318,
      "acc_stderr,none": 0.008493937524439329
    },
    "hellaswag": {
      "acc,none": 0.3488348934475204,
      "acc_stderr,none": 0.004756275875018267,
      "acc_norm,none": 0.41545508862776337,
      "acc_norm_stderr,none": 0.004917931778593191
    },
    "lambada_openai": {
      "perplexity,none": 9.320713747126444,
      "perplexity_stderr,none": 0.32204382695401407,
      "acc,none": 0.5148457209392587,
      "acc_stderr,none": 0.006962906440875391
    },
    "openbookqa": {
      "acc,none": 0.204,
      "acc_stderr,none": 0.018039369104138645,
      "acc_norm,none": 0.3,
      "acc_norm_stderr,none": 0.02051442622562805
    },
    "piqa": {
      "acc,none": 0.6789989118607181,
      "acc_stderr,none": 0.010892641574707904,
      "acc_norm,none": 0.6811751904243744,
      "acc_norm_stderr,none": 0.010873037534333416
    },
    "sciq": {
      "acc,none": 0.809,
      "acc_stderr,none": 0.012436787112179501,
      "acc_norm,none": 0.717,
      "acc_norm_stderr,none": 0.01425181090648174
    },
    "wikitext": {
      "word_perplexity,none": 38.90357956698245,
      "byte_perplexity,none": 1.8285957273924667,
      "bits_per_byte,none": 0.8707361540620543
    },
    "winogrande": {
      "acc,none": 0.5272296764009471,
      "acc_stderr,none": 0.014031631629827694
    }
  },
  "configs": {
    "arc_challenge": {
      "task": "arc_challenge",
      "group": ["ai2_arc", "multiple_choice"],
      "dataset_path": "ai2_arc",
      "dataset_name": "ARC-Challenge",
      "training_split": "train",
      "validation_split": "validation",
      "test_split": "test",
      "doc_to_text": "Question: {{question}}\nAnswer:",
      "doc_to_target": "{{choices.label.index(answerKey)}}",
      "doc_to_choice": "{{choices.text}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        { "metric": "acc", "aggregation": "mean", "higher_is_better": true },
        { "metric": "acc_norm", "aggregation": "mean", "higher_is_better": true }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": true,
      "doc_to_decontamination_query": "Question: {{question}}\nAnswer:"
    },
    "arc_easy": {
      "task": "arc_easy",
      "group": ["ai2_arc", "multiple_choice"],
      "dataset_path": "ai2_arc",
      "dataset_name": "ARC-Easy",
      "training_split": "train",
      "validation_split": "validation",
      "test_split": "test",
      "doc_to_text": "Question: {{question}}\nAnswer:",
      "doc_to_target": "{{choices.label.index(answerKey)}}",
      "doc_to_choice": "{{choices.text}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        { "metric": "acc", "aggregation": "mean", "higher_is_better": true },
        { "metric": "acc_norm", "aggregation": "mean", "higher_is_better": true }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": true,
      "doc_to_decontamination_query": "Question: {{question}}\nAnswer:"
    },
    "boolq": {
      "task": "boolq",
      "group": ["super-glue-lm-eval-v1"],
      "dataset_path": "super_glue",
      "dataset_name": "boolq",
      "training_split": "train",
      "validation_split": "validation",
      "doc_to_text": "{{passage}}\nQuestion: {{question}}?\nAnswer:",
      "doc_to_target": "label",
      "doc_to_choice": ["no", "yes"],
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        { "metric": "acc" }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": true,
      "doc_to_decontamination_query": "passage"
    },
    "hellaswag": {
      "task": "hellaswag",
      "group": ["multiple_choice"],
      "dataset_path": "hellaswag",
      "training_split": "train",
      "validation_split": "validation",
      "doc_to_text": "{% set text = activity_label ~ ': ' ~ ctx_a ~ ' ' ~ ctx_b.capitalize() %}{{text|trim|replace(' [title]', '. ')|regex_replace('\\[.*?\\]', '')|replace('  ', ' ')}}",
      "doc_to_target": "{{label}}",
      "doc_to_choice": "{{endings|map('trim')|map('replace', ' [title]', '. ')|map('regex_replace', '\\[.*?\\]', '')|map('replace', '  ', ' ')|list}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        { "metric": "acc", "aggregation": "mean", "higher_is_better": true },
        { "metric": "acc_norm", "aggregation": "mean", "higher_is_better": true }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false
    },
    "lambada_openai": {
      "task": "lambada_openai",
      "group": ["lambada", "loglikelihood", "perplexity"],
      "dataset_path": "EleutherAI/lambada_openai",
      "dataset_name": "default",
      "test_split": "test",
      "template_aliases": "",
      "doc_to_text": "{{text.split(' ')[:-1]|join(' ')}}",
      "doc_to_target": "{{' '+text.split(' ')[-1]}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        { "metric": "perplexity", "aggregation": "perplexity", "higher_is_better": false },
        { "metric": "acc", "aggregation": "mean", "higher_is_better": true }
      ],
      "output_type": "loglikelihood",
      "repeats": 1,
      "should_decontaminate": true,
      "doc_to_decontamination_query": "{{text}}"
    },
    "openbookqa": {
      "task": "openbookqa",
      "group": ["multiple_choice"],
      "dataset_path": "openbookqa",
      "dataset_name": "main",
      "training_split": "train",
      "validation_split": "validation",
      "test_split": "test",
      "doc_to_text": "question_stem",
      "doc_to_target": "{{choices.label.index(answerKey.lstrip())}}",
      "doc_to_choice": "{{choices.text}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        { "metric": "acc", "aggregation": "mean", "higher_is_better": true },
        { "metric": "acc_norm", "aggregation": "mean", "higher_is_better": true }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": true,
      "doc_to_decontamination_query": "question_stem"
    },
    "piqa": {
      "task": "piqa",
      "group": ["multiple_choice"],
      "dataset_path": "piqa",
      "training_split": "train",
      "validation_split": "validation",
      "doc_to_text": "Question: {{goal}}\nAnswer:",
      "doc_to_target": "label",
      "doc_to_choice": "{{[sol1, sol2]}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        { "metric": "acc", "aggregation": "mean", "higher_is_better": true },
        { "metric": "acc_norm", "aggregation": "mean", "higher_is_better": true }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": true,
      "doc_to_decontamination_query": "goal"
    },
    "sciq": {
      "task": "sciq",
      "group": ["multiple_choice"],
      "dataset_path": "sciq",
      "training_split": "train",
      "validation_split": "validation",
      "test_split": "test",
      "doc_to_text": "{{support.lstrip()}}\nQuestion: {{question}}\nAnswer:",
      "doc_to_target": 3,
      "doc_to_choice": "{{[distractor1, distractor2, distractor3, correct_answer]}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        { "metric": "acc", "aggregation": "mean", "higher_is_better": true },
        { "metric": "acc_norm", "aggregation": "mean", "higher_is_better": true }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": true,
      "doc_to_decontamination_query": "{{support}} {{question}}"
    },
    "wikitext": {
      "task": "wikitext",
      "group": ["perplexity", "loglikelihood_rolling"],
      "dataset_path": "EleutherAI/wikitext_document_level",
      "dataset_name": "wikitext-2-raw-v1",
      "training_split": "train",
      "validation_split": "validation",
      "test_split": "test",
      "template_aliases": "",
      "doc_to_text": "",
      "doc_to_target": "",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        { "metric": "word_perplexity" },
        { "metric": "byte_perplexity" },
        { "metric": "bits_per_byte" }
      ],
      "output_type": "loglikelihood_rolling",
      "repeats": 1,
      "should_decontaminate": true,
      "doc_to_decontamination_query": "{{page}}"
    },
    "winogrande": {
      "task": "winogrande",
      "dataset_path": "winogrande",
      "dataset_name": "winogrande_xl",
      "training_split": "train",
      "validation_split": "validation",
      "doc_to_text": "",
      "doc_to_target": "",
      "doc_to_choice": "",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        { "metric": "acc", "aggregation": "mean", "higher_is_better": true }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false
    }
  },
  "versions": {
    "arc_challenge": "Yaml",
    "arc_easy": "Yaml",
    "boolq": "Yaml",
    "hellaswag": "Yaml",
    "lambada_openai": "Yaml",
    "openbookqa": "Yaml",
    "piqa": "Yaml",
    "sciq": "Yaml",
    "wikitext": "Yaml",
    "winogrande": "Yaml"
  },
  "config": {
    "model": "hf",
    "model_args": "pretrained=lomahony/eleuther-pythia410m-hh-dpo",
    "num_fewshot": 0,
    "batch_size": 16,
    "batch_sizes": [],
    "device": "cuda:0",
    "use_cache": null,
    "limit": null,
    "bootstrap_iters": 100000
  },
  "git_hash": "4e44f0a"
}
```

hf (pretrained=lomahony/eleuther-pythia410m-hh-dpo), limit: None, num_fewshot: 0, batch_size: 16

|     Task     |Version|Filter|    Metric     | Value |   |Stderr|
|--------------|-------|------|---------------|------:|---|-----:|
|arc_challenge |Yaml   |none  |acc            | 0.2432|±  |0.0125|
|              |       |none  |acc_norm       | 0.2730|±  |0.0130|
|arc_easy      |Yaml   |none  |acc            | 0.5442|±  |0.0102|
|              |       |none  |acc_norm       | 0.4785|±  |0.0103|
|boolq         |Yaml   |none  |acc            | 0.6190|±  |0.0085|
|hellaswag     |Yaml   |none  |acc            | 0.3488|±  |0.0048|
|              |       |none  |acc_norm       | 0.4155|±  |0.0049|
|lambada_openai|Yaml   |none  |perplexity     | 9.3207|±  |0.3220|
|              |       |none  |acc            | 0.5148|±  |0.0070|
|openbookqa    |Yaml   |none  |acc            | 0.2040|±  |0.0180|
|              |       |none  |acc_norm       | 0.3000|±  |0.0205|
|piqa          |Yaml   |none  |acc            | 0.6790|±  |0.0109|
|              |       |none  |acc_norm       | 0.6812|±  |0.0109|
|sciq          |Yaml   |none  |acc            | 0.8090|±  |0.0124|
|              |       |none  |acc_norm       | 0.7170|±  |0.0143|
|wikitext      |Yaml   |none  |word_perplexity|38.9036|   |      |
|              |       |none  |byte_perplexity| 1.8286|   |      |
|              |       |none  |bits_per_byte  | 0.8707|   |      |
|winogrande    |Yaml   |none  |acc            | 0.5272|±  |0.0140|