Downloading and preparing dataset super_glue/boolq to /home/laura/.cache/huggingface/datasets/super_glue/boolq/1.0.3/bb9675f958ebfee0d5d6dc5476fafe38c79123727a7258d515c450873dbdbbed...
Dataset super_glue downloaded and prepared to /home/laura/.cache/huggingface/datasets/super_glue/boolq/1.0.3/bb9675f958ebfee0d5d6dc5476fafe38c79123727a7258d515c450873dbdbbed. Subsequent calls will reuse this data.
Downloading and preparing dataset openbookqa/main to /home/laura/.cache/huggingface/datasets/openbookqa/main/1.0.1/f338ccacfbc86fb8c2de3aa1c06d2ce686933de3bca284dba97d32592c52b33f...
Dataset openbookqa downloaded and prepared to /home/laura/.cache/huggingface/datasets/openbookqa/main/1.0.1/f338ccacfbc86fb8c2de3aa1c06d2ce686933de3bca284dba97d32592c52b33f. Subsequent calls will reuse this data.
Downloading and preparing dataset piqa/plain_text to /home/laura/.cache/huggingface/datasets/piqa/plain_text/1.1.0/6c611c1a9bf220943c4174e117d3b660859665baf1d43156230116185312d011...
Dataset piqa downloaded and prepared to /home/laura/.cache/huggingface/datasets/piqa/plain_text/1.1.0/6c611c1a9bf220943c4174e117d3b660859665baf1d43156230116185312d011. Subsequent calls will reuse this data.
Downloading and preparing dataset sciq/default to /home/laura/.cache/huggingface/datasets/sciq/default/0.1.0/50e5c6e3795b55463819d399ec417bfd4c3c621105e00295ddb5f3633d708493...
Dataset sciq downloaded and prepared to /home/laura/.cache/huggingface/datasets/sciq/default/0.1.0/50e5c6e3795b55463819d399ec417bfd4c3c621105e00295ddb5f3633d708493. Subsequent calls will reuse this data.
Downloading and preparing dataset winogrande/winogrande_xl to /home/laura/.cache/huggingface/datasets/winogrande/winogrande_xl/1.1.0/a826c3d3506aefe0e9e9390dcb53271070536586bab95849876b2c1743df56e2...
Dataset winogrande downloaded and prepared to /home/laura/.cache/huggingface/datasets/winogrande/winogrande_xl/1.1.0/a826c3d3506aefe0e9e9390dcb53271070536586bab95849876b2c1743df56e2. Subsequent calls will reuse this data.
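These messages come from the Hugging Face `datasets` library, which the harness uses to fetch each task's data; once a builder has been prepared under ~/.cache/huggingface/datasets, later calls load it from disk rather than re-downloading. A minimal sketch of that caching behavior, using the same dataset/config names as the log above (newer `datasets` releases may require extra arguments for these script-based datasets):

```python
# Minimal sketch: the `datasets` library caches prepared datasets on disk,
# so a second load_dataset() call for the same builder/config reuses the cache.
from datasets import load_dataset

boolq = load_dataset("super_glue", "boolq")        # downloads and prepares on the first call
boolq_again = load_dataset("super_glue", "boolq")  # reuses ~/.cache/huggingface/datasets

print(boolq)  # DatasetDict with the train/validation splits used by the boolq task
```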
bootstrapping for stddev: perplexity
{
  "results": {
    "arc_challenge": {"acc,none": 0.21843003412969283, "acc_stderr,none": 0.012074291605700959, "acc_norm,none": 0.2645051194539249, "acc_norm_stderr,none": 0.012889272949313368},
    "arc_easy": {"acc,none": 0.54503367003367, "acc_stderr,none": 0.010218084454602589, "acc_norm,none": 0.5370370370370371, "acc_norm_stderr,none": 0.010231597249131058},
    "boolq": {"acc,none": 0.4871559633027523, "acc_stderr,none": 0.008742169169427067},
    "hellaswag": {"acc,none": 0.33827922724556864, "acc_stderr,none": 0.004721571443354456, "acc_norm,none": 0.40818562039434375, "acc_norm_stderr,none": 0.004904933500255884},
    "lambada_openai": {"perplexity,none": 14.485555582236119, "perplexity_stderr,none": 0.4358013409476018, "acc,none": 0.4422666407917718, "acc_stderr,none": 0.006919384666875831},
    "openbookqa": {"acc,none": 0.188, "acc_stderr,none": 0.01749067888034625, "acc_norm,none": 0.28, "acc_norm_stderr,none": 0.020099950647503237},
    "piqa": {"acc,none": 0.6806311207834603, "acc_stderr,none": 0.010877964076613737, "acc_norm,none": 0.6692056583242655, "acc_norm_stderr,none": 0.010977520584714429},
    "sciq": {"acc,none": 0.892, "acc_stderr,none": 0.009820001651345682, "acc_norm,none": 0.887, "acc_norm_stderr,none": 0.01001655286669685},
    "wikitext": {"word_perplexity,none": 34.50450469911897, "byte_perplexity,none": 1.7927778872125213, "bits_per_byte,none": 0.842196759334895},
    "winogrande": {"acc,none": 0.5335438042620363, "acc_stderr,none": 0.014020826677598103}
  },
  "configs": {
    "arc_challenge": {"task": "arc_challenge", "group": ["ai2_arc", "multiple_choice"], "dataset_path": "ai2_arc", "dataset_name": "ARC-Challenge", "training_split": "train", "validation_split": "validation", "test_split": "test", "doc_to_text": "Question: {{question}}\nAnswer:", "doc_to_target": "{{choices.label.index(answerKey)}}", "doc_to_choice": "{{choices.text}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 5, "metric_list": [{"metric": "acc", "aggregation": "mean", "higher_is_better": true}, {"metric": "acc_norm", "aggregation": "mean", "higher_is_better": true}], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "Question: {{question}}\nAnswer:"},
    "arc_easy": {"task": "arc_easy", "group": ["ai2_arc", "multiple_choice"], "dataset_path": "ai2_arc", "dataset_name": "ARC-Easy", "training_split": "train", "validation_split": "validation", "test_split": "test", "doc_to_text": "Question: {{question}}\nAnswer:", "doc_to_target": "{{choices.label.index(answerKey)}}", "doc_to_choice": "{{choices.text}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 5, "metric_list": [{"metric": "acc", "aggregation": "mean", "higher_is_better": true}, {"metric": "acc_norm", "aggregation": "mean", "higher_is_better": true}], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "Question: {{question}}\nAnswer:"},
    "boolq": {"task": "boolq", "group": ["super-glue-lm-eval-v1"], "dataset_path": "super_glue", "dataset_name": "boolq", "training_split": "train", "validation_split": "validation", "doc_to_text": "{{passage}}\nQuestion: {{question}}?\nAnswer:", "doc_to_target": "label", "doc_to_choice": ["no", "yes"], "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 5, "metric_list": [{"metric": "acc"}], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "passage"},
    "hellaswag": {"task": "hellaswag", "group": ["multiple_choice"], "dataset_path": "hellaswag", "training_split": "train", "validation_split": "validation", "doc_to_text": "{% set text = activity_label ~ ': ' ~ ctx_a ~ ' ' ~ ctx_b.capitalize() %}{{text|trim|replace(' [title]', '. ')|regex_replace('\\[.*?\\]', '')|replace(' ', ' ')}}", "doc_to_target": "{{label}}", "doc_to_choice": "{{endings|map('trim')|map('replace', ' [title]', '. ')|map('regex_replace', '\\[.*?\\]', '')|map('replace', ' ', ' ')|list}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 5, "metric_list": [{"metric": "acc", "aggregation": "mean", "higher_is_better": true}, {"metric": "acc_norm", "aggregation": "mean", "higher_is_better": true}], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false},
    "lambada_openai": {"task": "lambada_openai", "group": ["lambada", "loglikelihood", "perplexity"], "dataset_path": "EleutherAI/lambada_openai", "dataset_name": "default", "test_split": "test", "template_aliases": "", "doc_to_text": "{{text.split(' ')[:-1]|join(' ')}}", "doc_to_target": "{{' '+text.split(' ')[-1]}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 5, "metric_list": [{"metric": "perplexity", "aggregation": "perplexity", "higher_is_better": false}, {"metric": "acc", "aggregation": "mean", "higher_is_better": true}], "output_type": "loglikelihood", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "{{text}}"},
    "openbookqa": {"task": "openbookqa", "group": ["multiple_choice"], "dataset_path": "openbookqa", "dataset_name": "main", "training_split": "train", "validation_split": "validation", "test_split": "test", "doc_to_text": "question_stem", "doc_to_target": "{{choices.label.index(answerKey.lstrip())}}", "doc_to_choice": "{{choices.text}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 5, "metric_list": [{"metric": "acc", "aggregation": "mean", "higher_is_better": true}, {"metric": "acc_norm", "aggregation": "mean", "higher_is_better": true}], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "question_stem"},
    "piqa": {"task": "piqa", "group": ["multiple_choice"], "dataset_path": "piqa", "training_split": "train", "validation_split": "validation", "doc_to_text": "Question: {{goal}}\nAnswer:", "doc_to_target": "label", "doc_to_choice": "{{[sol1, sol2]}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 5, "metric_list": [{"metric": "acc", "aggregation": "mean", "higher_is_better": true}, {"metric": "acc_norm", "aggregation": "mean", "higher_is_better": true}], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "goal"},
    "sciq": {"task": "sciq", "group": ["multiple_choice"], "dataset_path": "sciq", "training_split": "train", "validation_split": "validation", "test_split": "test", "doc_to_text": "{{support.lstrip()}}\nQuestion: {{question}}\nAnswer:", "doc_to_target": 3, "doc_to_choice": "{{[distractor1, distractor2, distractor3, correct_answer]}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 5, "metric_list": [{"metric": "acc", "aggregation": "mean", "higher_is_better": true}, {"metric": "acc_norm", "aggregation": "mean", "higher_is_better": true}], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "{{support}} {{question}}"},
    "wikitext": {"task": "wikitext", "group": ["perplexity", "loglikelihood_rolling"], "dataset_path": "EleutherAI/wikitext_document_level", "dataset_name": "wikitext-2-raw-v1", "training_split": "train", "validation_split": "validation", "test_split": "test", "template_aliases": "", "doc_to_text": "", "doc_to_target": "", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 5, "metric_list": [{"metric": "word_perplexity"}, {"metric": "byte_perplexity"}, {"metric": "bits_per_byte"}], "output_type": "loglikelihood_rolling", "repeats": 1, "should_decontaminate": true, "doc_to_decontamination_query": "{{page}}"},
    "winogrande": {"task": "winogrande", "dataset_path": "winogrande", "dataset_name": "winogrande_xl", "training_split": "train", "validation_split": "validation", "doc_to_text": "", "doc_to_target": "", "doc_to_choice": "", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 5, "metric_list": [{"metric": "acc", "aggregation": "mean", "higher_is_better": true}], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false}
  },
  "versions": {"arc_challenge": "Yaml", "arc_easy": "Yaml", "boolq": "Yaml", "hellaswag": "Yaml", "lambada_openai": "Yaml", "openbookqa": "Yaml", "piqa": "Yaml", "sciq": "Yaml", "wikitext": "Yaml", "winogrande": "Yaml"},
  "config": {"model": "hf", "model_args": "pretrained=EleutherAI/pythia-410m", "num_fewshot": 5, "batch_size": 16, "batch_sizes": [], "device": "cuda:0", "use_cache": null, "limit": null, "bootstrap_iters": 100000},
  "git_hash": "4e44f0a"
}
hf (pretrained=EleutherAI/pythia-410m), limit: None, num_fewshot: 5, batch_size: 16
|     Task     |Version|Filter|    Metric     | Value |   |Stderr|
|--------------|-------|------|---------------|------:|---|-----:|
|arc_challenge |Yaml   |none  |acc            | 0.2184|±  |0.0121|
|              |       |none  |acc_norm       | 0.2645|±  |0.0129|
|arc_easy      |Yaml   |none  |acc            | 0.5450|±  |0.0102|
|              |       |none  |acc_norm       | 0.5370|±  |0.0102|
|boolq         |Yaml   |none  |acc            | 0.4872|±  |0.0087|
|hellaswag     |Yaml   |none  |acc            | 0.3383|±  |0.0047|
|              |       |none  |acc_norm       | 0.4082|±  |0.0049|
|lambada_openai|Yaml   |none  |perplexity     |14.4856|±  |0.4358|
|              |       |none  |acc            | 0.4423|±  |0.0069|
|openbookqa    |Yaml   |none  |acc            | 0.1880|±  |0.0175|
|              |       |none  |acc_norm       | 0.2800|±  |0.0201|
|piqa          |Yaml   |none  |acc            | 0.6806|±  |0.0109|
|              |       |none  |acc_norm       | 0.6692|±  |0.0110|
|sciq          |Yaml   |none  |acc            | 0.8920|±  |0.0098|
|              |       |none  |acc_norm       | 0.8870|±  |0.0100|
|wikitext      |Yaml   |none  |word_perplexity|34.5045|   |      |
|              |       |none  |byte_perplexity| 1.7928|   |      |
|              |       |none  |bits_per_byte  | 0.8422|   |      |
|winogrande    |Yaml   |none  |acc            | 0.5335|±  |0.0140|