diff --git "a/dataset_info_old.json" "b/dataset_info_old.json" deleted file mode 100644--- "a/dataset_info_old.json" +++ /dev/null @@ -1 +0,0 @@ -{"super_glue_cb_GPT_3_style_score_eval": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_cb_GPT_3_style_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 608800, "num_examples": 750, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 150982, "num_examples": 168, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 646339, "num_examples": 750, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1406121, "size_in_bytes": 1406121}, "super_glue_cb_based_on_the_previous_passage_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_cb_based_on_the_previous_passage_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 654916, "num_examples": 750, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 161146, "num_examples": 168, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 692059, "num_examples": 750, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1508121, "size_in_bytes": 1508121}, "duorc_SelfRC_title_generation": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "duorc_SelfRC_title_generation", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 245154956, "num_examples": 60721, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 52322049, "num_examples": 12961, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 50193716, "num_examples": 12559, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 347670721, "size_in_bytes": 347670721}, "super_glue_rte_must_be_true_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_rte_must_be_true_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3907143, "num_examples": 4980, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 423207, "num_examples": 554, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 4517714, "num_examples": 6000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 8848064, "size_in_bytes": 8848064}, "anli_guaranteed_possible_impossible_r2_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_guaranteed_possible_impossible_r2_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 134233358, "num_examples": 136380, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 2960731, "num_examples": 3000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2978188, "num_examples": 3000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 140172277, "size_in_bytes": 140172277}, "quartz_use_info_from_question_paragraph": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "quartz_use_info_from_question_paragraph", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1752163, "num_examples": 2696, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 257340, "num_examples": 384, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 513185, "num_examples": 784, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 2522688, "size_in_bytes": 2522688}, "anli_does_it_follow_that_r2_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_does_it_follow_that_r2_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 125519890, "num_examples": 136380, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 2768463, "num_examples": 3000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2786136, "num_examples": 3000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 131074489, "size_in_bytes": 131074489}, "anli_can_we_infer_r1": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_can_we_infer_r1", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 16276477, "num_examples": 16946, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 961754, "num_examples": 1000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 960053, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 18198284, "size_in_bytes": 18198284}, "openbookqa_main_choose_an_answer_with_options": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "openbookqa_main_choose_an_answer_with_options", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2351525, "num_examples": 4957, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 256670, "num_examples": 500, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 245012, "num_examples": 500, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 2853207, "size_in_bytes": 2853207}, "super_glue_copa_exercise_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_copa_exercise_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 317479, "num_examples": 800, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 80437, "num_examples": 200, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 390014, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 787930, "size_in_bytes": 787930}, "yelp_review_full_on_a_scale": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "yelp_review_full_on_a_scale", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1035020418, "num_examples": 650000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 79679036, "num_examples": 50000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1114699454, "size_in_bytes": 1114699454}, "amazon_polarity_Is_this_review": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "amazon_polarity_Is_this_review", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3691733865, "num_examples": 3600000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 409971845, "num_examples": 400000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 4101705710, "size_in_bytes": 4101705710}, "anli_justified_in_saying_r2_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_justified_in_saying_r2_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 127778038, "num_examples": 136380, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 2818731, "num_examples": 3000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2836188, "num_examples": 3000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 133432957, "size_in_bytes": 133432957}, "ropes_plain_background_situation": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "ropes_plain_background_situation", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 22357363, "num_examples": 10924, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3179476, "num_examples": 1688, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 25536839, "size_in_bytes": 25536839}, "glue_mrpc_generate_paraphrase": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "glue_mrpc_generate_paraphrase", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1412387, "num_examples": 2474, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 159972, "num_examples": 279, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 655059, "num_examples": 1147, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 2227418, "size_in_bytes": 2227418}, "samsum_To_sum_up_this_dialog": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "samsum_To_sum_up_this_dialog", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 20450207, "num_examples": 14732, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1110338, "num_examples": 818, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1156278, "num_examples": 819, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 22716823, "size_in_bytes": 22716823}, "super_glue_wsc.fixed_I_think_they_mean_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_wsc.fixed_I_think_they_mean_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 490727, "num_examples": 1108, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 115462, "num_examples": 208, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 163973, "num_examples": 292, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 770162, "size_in_bytes": 770162}, "ag_news_which_section_choices": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "ag_news_which_section_choices", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 100099811, "num_examples": 120000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 6314306, "num_examples": 7600, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 106414117, "size_in_bytes": 106414117}, "super_glue_boolq_based_on_the_following_passage": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_boolq_based_on_the_following_passage", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 12674748, "num_examples": 9427, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 4344881, "num_examples": 3270, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 4430670, "num_examples": 3245, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 21450299, "size_in_bytes": 21450299}, "anli_consider_always_sometimes_never_r1": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_consider_always_sometimes_never_r1", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 17445255, "num_examples": 16946, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1030603, "num_examples": 1000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1028750, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 19504608, "size_in_bytes": 19504608}, "wiqa_what_might_be_the_last_step_of_the_process": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "wiqa_what_might_be_the_last_step_of_the_process", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 22415568, "num_examples": 29808, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 4932496, "num_examples": 6894, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2006933, "num_examples": 3003, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 29354997, "size_in_bytes": 29354997}, "ai2_arc_ARC_Easy_heres_a_problem": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "ai2_arc_ARC_Easy_heres_a_problem", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1585458, "num_examples": 2251, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 402857, "num_examples": 570, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1680764, "num_examples": 2376, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 3669079, "size_in_bytes": 3669079}, "super_glue_multirc_decide_valid": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_multirc_decide_valid", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 89151124, "num_examples": 27243, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 15594652, "num_examples": 4848, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 29967010, "num_examples": 9693, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 134712786, "size_in_bytes": 134712786}, "duorc_ParaphraseRC_decide_worth_it": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "duorc_ParaphraseRC_decide_worth_it", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 314845901, "num_examples": 69524, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 70331303, "num_examples": 15591, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 72204147, "num_examples": 15857, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 457381351, "size_in_bytes": 457381351}, "winogrande_winogrande_debiased_underscore_refer_to_score_eval": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "winogrande_winogrande_debiased_underscore_refer_to_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 7070812, "num_examples": 18496, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 964290, "num_examples": 2534, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1348987, "num_examples": 3534, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 9384089, "size_in_bytes": 9384089}, "anli_does_this_imply_r1": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_does_this_imply_r1", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 16378153, "num_examples": 16946, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 967754, "num_examples": 1000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 966053, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 18311960, "size_in_bytes": 18311960}, "anli_should_assume_r3": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_should_assume_r3", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 92114106, "num_examples": 100459, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1114269, "num_examples": 1200, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1110422, "num_examples": 1200, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 94338797, "size_in_bytes": 94338797}, "super_glue_cb_claim_true_false_inconclusive_score_eval": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_cb_claim_true_false_inconclusive_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 672666, "num_examples": 750, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 165122, "num_examples": 168, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 709809, "num_examples": 750, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1547597, "size_in_bytes": 1547597}, "super_glue_boolq_I_wonder_": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_boolq_I_wonder_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 12684175, "num_examples": 9427, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 4348151, "num_examples": 3270, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 4433915, "num_examples": 3245, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 21466241, "size_in_bytes": 21466241}, "super_glue_cb_must_be_true_score_eval": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_cb_must_be_true_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 648916, "num_examples": 750, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 159802, "num_examples": 168, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 686059, "num_examples": 750, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1494777, "size_in_bytes": 1494777}, "anli_based_on_the_previous_passage_r1_score_eval": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_based_on_the_previous_passage_r1_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 49891563, "num_examples": 50838, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 2948586, "num_examples": 3000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2943483, "num_examples": 3000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 55783632, "size_in_bytes": 55783632}, "trec_which_category_best_describes": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "trec_which_category_best_describes", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2689198, "num_examples": 5452, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 231935, "num_examples": 500, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 2921133, "size_in_bytes": 2921133}, "race_middle_Write_a_multi_choice_question_options_given_": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "race_middle_Write_a_multi_choice_question_options_given_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 67842678, "num_examples": 25421, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3847401, "num_examples": 1436, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3900574, "num_examples": 1436, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 75590653, "size_in_bytes": 75590653}, "cosmos_qa_only_question_answer": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "cosmos_qa_only_question_answer", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 9307123, "num_examples": 25262, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1265535, "num_examples": 2985, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2916845, "num_examples": 6963, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 13489503, "size_in_bytes": 13489503}, "adversarial_qa_dbidaf_answer_the_following_q": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "adversarial_qa_dbidaf_answer_the_following_q", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 18273233, "num_examples": 10000, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1797805, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 20071038, "size_in_bytes": 20071038}, "trec_fine_grained_ENTY": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "trec_fine_grained_ENTY", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1190205, "num_examples": 1250, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 87290, "num_examples": 94, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1277495, "size_in_bytes": 1277495}, "hellaswag_Open_ended_completion": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "hellaswag_Open_ended_completion", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 53208867, "num_examples": 39905, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 13804129, "num_examples": 10042, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 13323237, "num_examples": 10003, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 80336233, "size_in_bytes": 80336233}, "piqa_Correct_the_solution_if_false_from_sol_1": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "piqa_Correct_the_solution_if_false_from_sol_1", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 12887951, "num_examples": 16113, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1464103, "num_examples": 1838, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2420408, "num_examples": 3084, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 16772462, "size_in_bytes": 16772462}, "super_glue_cb_guaranteed_possible_impossible_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_cb_guaranteed_possible_impossible_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 667166, "num_examples": 750, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 163890, "num_examples": 168, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 704309, "num_examples": 750, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1535365, "size_in_bytes": 1535365}, "duorc_ParaphraseRC_extract_answer": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "duorc_ParaphraseRC_extract_answer", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 308637022, "num_examples": 69524, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 68940401, "num_examples": 15591, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 70789860, "num_examples": 15857, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 448367283, "size_in_bytes": 448367283}, "race_high_Select_the_best_answer_generate_span_": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "race_high_Select_the_best_answer_generate_span_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 253586151, "num_examples": 62445, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 13907823, "num_examples": 3451, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 14065936, "num_examples": 3498, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 281559910, "size_in_bytes": 281559910}, "paws_labeled_final_Rewrite_no_label": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "paws_labeled_final_Rewrite_no_label", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 34861938, "num_examples": 49401, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 5643181, "num_examples": 8000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 5647650, "num_examples": 8000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 46152769, "size_in_bytes": 46152769}, "openbookqa_main_which_correct_inverse": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "openbookqa_main_which_correct_inverse", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2311869, "num_examples": 4957, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 252670, "num_examples": 500, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 241012, "num_examples": 500, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 2805551, "size_in_bytes": 2805551}, "gigaword_TLDR": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "gigaword_TLDR", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2050910582, "num_examples": 3803957, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 102512266, "num_examples": 189651, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1022032, "num_examples": 1951, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 2154444880, "size_in_bytes": 2154444880}, "qasc_is_correct_1": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "qasc_is_correct_1", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3401127, "num_examples": 8134, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 386156, "num_examples": 926, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 292647, "num_examples": 920, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 4079930, "size_in_bytes": 4079930}, "super_glue_wic_question_context": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_wic_question_context", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1994487, "num_examples": 5428, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 243238, "num_examples": 638, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 577583, "num_examples": 1400, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 2815308, "size_in_bytes": 2815308}, "squad_v2_Topic_Prediction_Question_and_Answer_Pair": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "squad_v2_Topic_Prediction_Question_and_Answer_Pair", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 29250974, "num_examples": 86821, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 2015115, "num_examples": 5928, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 31266089, "size_in_bytes": 31266089}, "trivia_qa_unfiltered_guess_question": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "trivia_qa_unfiltered_guess_question", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 26388647, "num_examples": 87622, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3405389, "num_examples": 11313, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 29794036, "size_in_bytes": 29794036}, "adversarial_qa_dbidaf_generate_question": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "adversarial_qa_dbidaf_generate_question", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 18508983, "num_examples": 10000, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1830601, "num_examples": 1000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1925739, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 22265323, "size_in_bytes": 22265323}, "anli_MNLI_crowdsource_r1_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_MNLI_crowdsource_r1_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 55009255, "num_examples": 50838, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3250586, "num_examples": 3000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3245483, "num_examples": 3000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 61505324, "size_in_bytes": 61505324}, "super_glue_rte_can_we_infer_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_rte_can_we_infer_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3782643, "num_examples": 4980, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 409357, "num_examples": 554, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 4367714, "num_examples": 6000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 8559714, "size_in_bytes": 8559714}, "anli_should_assume_r2_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_should_assume_r2_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 128869078, "num_examples": 136380, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 2842731, "num_examples": 3000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2860188, "num_examples": 3000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 134571997, "size_in_bytes": 134571997}, "paws_labeled_final_Meaning": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "paws_labeled_final_Meaning", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 36887379, "num_examples": 49401, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 5971181, "num_examples": 8000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 5975650, "num_examples": 8000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 48834210, "size_in_bytes": 48834210}, "social_i_qa_Check_if_a_random_answer_is_valid_or_not": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "social_i_qa_Check_if_a_random_answer_is_valid_or_not", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 13459244, "num_examples": 33410, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 789762, "num_examples": 1954, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 14249006, "size_in_bytes": 14249006}, "super_glue_cb_take_the_following_as_truth": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_cb_take_the_following_as_truth", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 237413, "num_examples": 250, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 58055, "num_examples": 56, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 255839, "num_examples": 250, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 551307, "size_in_bytes": 551307}, "super_glue_boolq_after_reading": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_boolq_after_reading", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 13662405, "num_examples": 9427, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 4687521, "num_examples": 3270, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 4755170, "num_examples": 3245, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 23105096, "size_in_bytes": 23105096}, "xsum_college_roommate_asked_DOC_so_I_recap": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "xsum_college_roommate_asked_DOC_so_I_recap", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 693890392, "num_examples": 204045, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 38529754, "num_examples": 11332, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 38633229, "num_examples": 11334, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 771053375, "size_in_bytes": 771053375}, "super_glue_wic_polysemous_score_eval": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_wic_polysemous_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 5119347, "num_examples": 10856, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 619312, "num_examples": 1276, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1358488, "num_examples": 2800, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 7097147, "size_in_bytes": 7097147}, "anli_must_be_true_r2_score_eval": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_must_be_true_r2_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 130914778, "num_examples": 136380, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 2887731, "num_examples": 3000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2905188, "num_examples": 3000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 136707697, "size_in_bytes": 136707697}, "super_glue_boolq_exercise": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_boolq_exercise", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 13766102, "num_examples": 9427, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 4723491, "num_examples": 3270, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 4790865, "num_examples": 3245, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 23280458, "size_in_bytes": 23280458}, "duorc_SelfRC_decide_worth_it": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "duorc_SelfRC_decide_worth_it", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 270002072, "num_examples": 60721, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 57619780, "num_examples": 12961, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 55323506, "num_examples": 12559, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 382945358, "size_in_bytes": 382945358}, "piqa_choose_the_most_appropriate_solution": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "piqa_choose_the_most_appropriate_solution", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 13494873, "num_examples": 16113, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1532379, "num_examples": 1838, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2536737, "num_examples": 3084, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 17563989, "size_in_bytes": 17563989}, "super_glue_wsc.fixed_GPT_3_Style_score_eval": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_wsc.fixed_GPT_3_Style_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 528587, "num_examples": 1108, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 117440, "num_examples": 208, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 171575, "num_examples": 292, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 817602, "size_in_bytes": 817602}, "yelp_review_full_format_score": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "yelp_review_full_format_score", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1020720422, "num_examples": 650000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 78579036, "num_examples": 50000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1099299458, "size_in_bytes": 1099299458}, "quail_no_prompt_id": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "quail_no_prompt_id", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 41168581, "num_examples": 10246, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 8758113, "num_examples": 2164, "dataset_name": "p3"}, "challenge": {"name": "challenge", "num_bytes": 2251655, "num_examples": 556, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 52178349, "size_in_bytes": 52178349}, "super_glue_rte_based_on_the_previous_passage": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_rte_based_on_the_previous_passage", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1975688, "num_examples": 2490, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 214083, "num_examples": 277, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2379996, "num_examples": 3000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 4569767, "size_in_bytes": 4569767}, "super_glue_wic_question_context_meaning_with_label_score_eval": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_wic_question_context_meaning_with_label_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3827483, "num_examples": 10856, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 467468, "num_examples": 1276, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1025288, "num_examples": 2800, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 5320239, "size_in_bytes": 5320239}, "imdb_Sentiment_with_choices_": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "imdb_Sentiment_with_choices_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 62082778, "num_examples": 25000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 61206582, "num_examples": 25000, "dataset_name": "p3"}, "unsupervised": {"name": "unsupervised", "num_bytes": 124506277, "num_examples": 50000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 247795637, "size_in_bytes": 247795637}, "quail_description_context_question_text": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "quail_description_context_question_text", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 41681436, "num_examples": 10246, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 8866979, "num_examples": 2164, "dataset_name": "p3"}, "challenge": {"name": "challenge", "num_bytes": 2277073, "num_examples": 556, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 52825488, "size_in_bytes": 52825488}, "anli_take_the_following_as_truth_r3_score_eval": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_take_the_following_as_truth_r3_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 297916462, "num_examples": 301377, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3601181, "num_examples": 3600, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3589640, "num_examples": 3600, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 305107283, "size_in_bytes": 305107283}, "super_glue_multirc_confirm": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_multirc_confirm", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 88851451, "num_examples": 27243, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 15541324, "num_examples": 4848, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 29860387, "num_examples": 9693, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 134253162, "size_in_bytes": 134253162}, "anli_must_be_true_r3_score_eval": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_must_be_true_r3_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 277221908, "num_examples": 301377, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3353981, "num_examples": 3600, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3342440, "num_examples": 3600, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 283918329, "size_in_bytes": 283918329}, "anli_claim_true_false_inconclusive_r3": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_claim_true_false_inconclusive_r3", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 97906226, "num_examples": 100459, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1182273, "num_examples": 1200, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1178426, "num_examples": 1200, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 100266925, "size_in_bytes": 100266925}, "super_glue_cb_does_this_imply_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_cb_does_this_imply_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 635416, "num_examples": 750, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 156778, "num_examples": 168, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 672559, "num_examples": 750, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1464753, "size_in_bytes": 1464753}, "ai2_arc_ARC_Challenge_i_am_hesitating": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "ai2_arc_ARC_Challenge_i_am_hesitating", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1063104, "num_examples": 1119, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 290337, "num_examples": 299, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1135818, "num_examples": 1172, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 2489259, "size_in_bytes": 2489259}, "hellaswag_complete_first_then_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "hellaswag_complete_first_then_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 322593161, "num_examples": 159620, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 83761931, "num_examples": 40168, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 81095769, "num_examples": 40012, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 487450861, "size_in_bytes": 487450861}, "glue_qqp_meaning": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "glue_qqp_meaning", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 153364674, "num_examples": 363846, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 17037044, "num_examples": 40430, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 166404750, "num_examples": 390965, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 336806468, "size_in_bytes": 336806468}, "ai2_arc_ARC_Easy_multiple_choice": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "ai2_arc_ARC_Easy_multiple_choice", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1927350, "num_examples": 2251, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 487729, "num_examples": 570, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2039257, "num_examples": 2376, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 4454336, "size_in_bytes": 4454336}, "anli_can_we_infer_r2_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_can_we_infer_r2_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 127505278, "num_examples": 136380, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 2812731, "num_examples": 3000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2830188, "num_examples": 3000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 133148197, "size_in_bytes": 133148197}, "anli_always_sometimes_never_r2_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_always_sometimes_never_r2_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 132869558, "num_examples": 136380, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 2930731, "num_examples": 3000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2948188, "num_examples": 3000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 138748477, "size_in_bytes": 138748477}, "ropes_new_situation_background_answer": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "ropes_new_situation_background_answer", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 24312759, "num_examples": 10924, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3481628, "num_examples": 1688, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 27794387, "size_in_bytes": 27794387}, "cnn_dailymail_3.0.0_news_summary": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "cnn_dailymail_3.0.0_news_summary", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1315405372, "num_examples": 287113, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 61613186, "num_examples": 13368, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 52731850, "num_examples": 11490, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1429750408, "size_in_bytes": 1429750408}, "ropes_prompt_mix": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "ropes_prompt_mix", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 23919495, "num_examples": 10924, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3420860, "num_examples": 1688, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 27340355, "size_in_bytes": 27340355}, "super_glue_wic_affirmation_true_or_false": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_wic_affirmation_true_or_false", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2293027, "num_examples": 5428, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 278328, "num_examples": 638, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 646183, "num_examples": 1400, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 3217538, "size_in_bytes": 3217538}, "super_glue_rte_does_it_follow_that_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_rte_does_it_follow_that_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3714987, "num_examples": 4980, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 401825, "num_examples": 554, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 4287490, "num_examples": 6000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 8404302, "size_in_bytes": 8404302}, "glue_mrpc_paraphrase": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "glue_mrpc_paraphrase", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2468433, "num_examples": 3668, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 275398, "num_examples": 408, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1156829, "num_examples": 1725, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 3900660, "size_in_bytes": 3900660}, "anli_guaranteed_possible_impossible_r2": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_guaranteed_possible_impossible_r2", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 45981500, "num_examples": 45460, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1015802, "num_examples": 1000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1021621, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 48018923, "size_in_bytes": 48018923}, "kilt_tasks_hotpotqa_formulate": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "kilt_tasks_hotpotqa_formulate", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 30938841, "num_examples": 88869, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1816077, "num_examples": 5600, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 32754918, "size_in_bytes": 32754918}, "super_glue_cb_can_we_infer_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_cb_can_we_infer_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 630166, "num_examples": 750, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 155602, "num_examples": 168, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 667309, "num_examples": 750, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1453077, "size_in_bytes": 1453077}, "wiki_qa_Topic_Prediction_Question_Only": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "wiki_qa_Topic_Prediction_Question_Only", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 242938, "num_examples": 1040, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 32796, "num_examples": 140, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 68582, "num_examples": 293, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 344316, "size_in_bytes": 344316}, "super_glue_copa_plausible_alternatives": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_copa_plausible_alternatives", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 184653, "num_examples": 400, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 46843, "num_examples": 100, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 218107, "num_examples": 500, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 449603, "size_in_bytes": 449603}, "social_i_qa_Show_choices_and_generate_answer": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and to collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "social_i_qa_Show_choices_and_generate_answer", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 17811027, "num_examples": 33410, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1051021, "num_examples": 1954, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 18862048, "size_in_bytes": 18862048}, "race_middle_Select_the_best_answer": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and to collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "race_middle_Select_the_best_answer", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 64964791, "num_examples": 25421, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3683969, "num_examples": 1436, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3736498, "num_examples": 1436, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 72385258, "size_in_bytes": 72385258}, "anli_justified_in_saying_r1_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and to collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_justified_in_saying_r1_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 48315585, "num_examples": 50838, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 2855586, "num_examples": 3000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2850483, "num_examples": 3000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 54021654, "size_in_bytes": 54021654}, "anli_consider_always_sometimes_never_r3_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and to collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_consider_always_sometimes_never_r3_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 287786454, "num_examples": 301377, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3481265, "num_examples": 3600, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3468848, "num_examples": 3600, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 294736567, "size_in_bytes": 294736567}, "hellaswag_how_ends": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and to collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "hellaswag_how_ends", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 71330909, "num_examples": 39905, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 18491345, "num_examples": 10042, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 17929265, "num_examples": 10003, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 107751519, "size_in_bytes": 107751519}, "cos_e_v1.11_explain_why_human": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and to collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "cos_e_v1.11_explain_why_human", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 5427413, "num_examples": 9741, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 661538, "num_examples": 1221, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 6088951, "size_in_bytes": 6088951}, "super_glue_copa_exercise": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and to collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_copa_exercise", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 179045, "num_examples": 400, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 45451, "num_examples": 100, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 211107, "num_examples": 500, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 435603, "size_in_bytes": 435603}, "glue_mrpc_same_thing": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and to collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "glue_mrpc_same_thing", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2255689, "num_examples": 3668, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 251734, "num_examples": 408, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1056779, "num_examples": 1725, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 3564202, "size_in_bytes": 3564202}, "multi_news_summary_scenario": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and to collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "multi_news_summary_scenario", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 527516767, "num_examples": 44972, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 64955531, "num_examples": 5622, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 65366677, "num_examples": 5622, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 657838975, "size_in_bytes": 657838975}, "piqa_Does_this_solution_make_sense_sol1": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and to collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "piqa_Does_this_solution_make_sense_sol1", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 6636349, "num_examples": 16113, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 753997, "num_examples": 1838, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1247826, "num_examples": 3084, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 8638172, "size_in_bytes": 8638172}, "anli_should_assume_r2": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and to collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_should_assume_r2", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 43512935, "num_examples": 45460, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 959469, "num_examples": 1000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 965288, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 45437692, "size_in_bytes": 45437692}, "super_glue_copa__What_could_happen_next_C1_or_C2__score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and to collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_copa__What_could_happen_next_C1_or_C2__score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 141188, "num_examples": 404, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 33109, "num_examples": 96, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 171095, "num_examples": 500, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 345392, "size_in_bytes": 345392}, "anli_GPT_3_style_r1_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and to collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_GPT_3_style_r1_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 46818639, "num_examples": 50838, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 2767134, "num_examples": 3000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2761575, "num_examples": 3000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 52347348, "size_in_bytes": 52347348}, "wiki_hop_original_choose_best_object_affirmative_3": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and to collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "wiki_hop_original_choose_best_object_affirmative_3", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 666212259, "num_examples": 43738, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 83400938, "num_examples": 5129, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 749613197, "size_in_bytes": 749613197}, "super_glue_wsc.fixed_Who_or_what_is_are_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and to collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_wsc.fixed_Who_or_what_is_are_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 456225, "num_examples": 1108, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 103554, "num_examples": 208, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 152571, "num_examples": 292, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 712350, "size_in_bytes": 712350}, "super_glue_multirc_grading": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and to collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_multirc_grading", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 88933180, "num_examples": 27243, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 15555868, "num_examples": 4848, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 29889466, "num_examples": 9693, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 134378514, "size_in_bytes": 134378514}, "common_gen_Put_together": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and to collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "common_gen_Put_together", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 18114361, "num_examples": 67389, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1121592, "num_examples": 4018, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 263645, "num_examples": 1497, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 19499598, "size_in_bytes": 19499598}, "anli_does_it_follow_that_r2": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and to collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_does_it_follow_that_r2", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 42351079, "num_examples": 45460, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 933713, "num_examples": 1000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 939604, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 44224396, "size_in_bytes": 44224396}, "gigaword_write_a_title_for_this_sentence": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "gigaword_write_a_title_for_this_sentence", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2256324244, "num_examples": 3803957, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 112753420, "num_examples": 189651, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1127386, "num_examples": 1951, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 2370205050, "size_in_bytes": 2370205050}, "cnn_dailymail_3.0.0_2_or_3_sentences": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "cnn_dailymail_3.0.0_2_or_3_sentences", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1353304288, "num_examples": 287113, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 63377762, "num_examples": 13368, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 54248530, "num_examples": 11490, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1470930580, "size_in_bytes": 1470930580}, "trivia_qa_unfiltered_first_person_context": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "trivia_qa_unfiltered_first_person_context", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 23222623, "num_examples": 87622, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 2998624, "num_examples": 11313, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2891891, "num_examples": 10832, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 29113138, "size_in_bytes": 29113138}, "super_glue_record_GPT_3_style_without_hyphens_continuation_choices_": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_record_GPT_3_style_without_hyphens_continuation_choices_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 382247856, "num_examples": 100730, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 37700113, "num_examples": 10000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 35302555, "num_examples": 10000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 455250524, "size_in_bytes": 455250524}, "super_glue_cb_guaranteed_true": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_cb_guaranteed_true", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 214121, "num_examples": 250, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 52793, "num_examples": 56, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 234339, "num_examples": 250, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 501253, "size_in_bytes": 501253}, "super_glue_wsc.fixed_Who_or_what_is_are": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_wsc.fixed_Who_or_what_is_are", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 228593, "num_examples": 554, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 51868, "num_examples": 104, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 81026, "num_examples": 146, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 361487, "size_in_bytes": 361487}, "gigaword_reverse_writing": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "gigaword_reverse_writing", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2005263098, "num_examples": 3803957, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 100236454, "num_examples": 189651, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 998620, "num_examples": 1951, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 2106498172, "size_in_bytes": 2106498172}, "anli_does_this_imply_r3_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_does_this_imply_r3_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 271797122, "num_examples": 301377, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3289181, "num_examples": 3600, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3277640, "num_examples": 3600, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 278363943, "size_in_bytes": 278363943}, "xsum_summarize_DOC": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "xsum_summarize_DOC", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 660835102, "num_examples": 204045, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 36693970, "num_examples": 11332, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 36797121, "num_examples": 11334, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 734326193, "size_in_bytes": 734326193}, "winogrande_winogrande_debiased_fill_in_the_blank_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "winogrande_winogrande_debiased_fill_in_the_blank_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 7588700, "num_examples": 18496, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1035242, "num_examples": 2534, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1447939, "num_examples": 3534, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 10071881, "size_in_bytes": 10071881}, "sciq_Multiple_Choice_Question_First": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "sciq_Multiple_Choice_Question_First", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 15943432, "num_examples": 11679, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1355775, "num_examples": 1000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1375599, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 18674806, "size_in_bytes": 18674806}, "multi_news_synthesize": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "multi_news_synthesize", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 525154905, "num_examples": 44972, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 64662443, "num_examples": 5622, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 65072630, "num_examples": 5622, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 654889978, "size_in_bytes": 654889978}, "xsum_DOC_how_would_you_rephrase_few_words": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "xsum_DOC_how_would_you_rephrase_few_words", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 675118252, "num_examples": 204045, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 37487210, "num_examples": 11332, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 37590501, "num_examples": 11334, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 750195963, "size_in_bytes": 750195963}, "super_glue_wic_question_context_meaning_with_label": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_wic_question_context_meaning_with_label", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1918495, "num_examples": 5428, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 234306, "num_examples": 638, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 557983, "num_examples": 1400, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 2710784, "size_in_bytes": 2710784}, "qasc_qa_with_combined_facts_1": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "qasc_qa_with_combined_facts_1", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 5454204, "num_examples": 8134, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 634990, "num_examples": 926, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 504869, "num_examples": 920, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 6594063, "size_in_bytes": 6594063}, "anli_GPT_3_style_r2_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_GPT_3_style_r2_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 123746950, "num_examples": 136380, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 2729463, "num_examples": 3000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2747136, "num_examples": 3000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 129223549, "size_in_bytes": 129223549}, "anli_does_it_follow_that_r3": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_does_it_follow_that_r3", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 89574595, "num_examples": 100459, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1084297, "num_examples": 1200, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1080158, "num_examples": 1200, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 91739050, "size_in_bytes": 91739050}, "squad_v2_Topic_Prediction_Context_with_randomized_prompt_options_placed_in_the_end": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "squad_v2_Topic_Prediction_Context_with_randomized_prompt_options_placed_in_the_end", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 201426821, "num_examples": 130319, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 19292401, "num_examples": 11873, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 220719222, "size_in_bytes": 220719222}, "amazon_polarity_convey_negative_or_positive_sentiment": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "amazon_polarity_convey_negative_or_positive_sentiment", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3853733865, "num_examples": 3600000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 427971845, "num_examples": 400000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 4281705710, "size_in_bytes": 4281705710}, "squad_v2_Jeopardy_without_Context": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "squad_v2_Jeopardy_without_Context", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 27943970, "num_examples": 86821, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1932726, "num_examples": 5928, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 29876696, "size_in_bytes": 29876696}, "super_glue_wic_same_sense": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_wic_same_sense", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2390731, "num_examples": 5428, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 289812, "num_examples": 638, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 679783, "num_examples": 1400, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 3360326, "size_in_bytes": 3360326}, "quarel_choose_between": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "quarel_choose_between", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1121872, "num_examples": 1941, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 162487, "num_examples": 278, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 322429, "num_examples": 552, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1606788, "size_in_bytes": 1606788}, "super_glue_copa_more_likely_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_copa_more_likely_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 350691, "num_examples": 800, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 88725, "num_examples": 200, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 431514, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 870930, "size_in_bytes": 870930}, "super_glue_wsc.fixed_replaced_with_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_wsc.fixed_replaced_with_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 525139, "num_examples": 1108, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 116960, "num_examples": 208, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 170735, "num_examples": 292, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 812834, "size_in_bytes": 812834}, "paws_labeled_final_paraphrase_task": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "paws_labeled_final_paraphrase_task", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 11968892, "num_examples": 21829, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1934167, "num_examples": 3539, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1926815, "num_examples": 3536, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 15829874, "size_in_bytes": 15829874}, "cosmos_qa_context_description_question_text": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "cosmos_qa_context_description_question_text", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 29196375, "num_examples": 25262, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3705299, "num_examples": 2985, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 8646104, "num_examples": 6963, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 41547778, "size_in_bytes": 41547778}, "piqa_pick_correct_choice_with_choice_given_before_goal": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "piqa_pick_correct_choice_with_choice_given_before_goal", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 18033662, "num_examples": 16113, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 2041025, "num_examples": 1838, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3356005, "num_examples": 3084, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 23430692, "size_in_bytes": 23430692}, "gigaword_in_a_nutshell": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "gigaword_in_a_nutshell", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2107969937, "num_examples": 3803957, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 105357031, "num_examples": 189651, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1051297, "num_examples": 1951, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 2214378265, "size_in_bytes": 2214378265}, "multi_news_what_are_the_key_points": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "multi_news_what_are_the_key_points", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 526122635, "num_examples": 44972, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 64781249, "num_examples": 5622, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 65192395, "num_examples": 5622, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 656096279, "size_in_bytes": 656096279}, "social_i_qa_Generate_the_question_from_the_answer": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "social_i_qa_Generate_the_question_from_the_answer", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 13497003, "num_examples": 33410, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 790883, "num_examples": 1954, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 14287886, "size_in_bytes": 14287886}, "duorc_ParaphraseRC_question_answering": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "duorc_ParaphraseRC_question_answering", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 303335115, "num_examples": 69524, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 67754855, "num_examples": 15591, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 69577670, "num_examples": 15857, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 440667640, "size_in_bytes": 440667640}, "imdb_Reviewer_Opinion_bad_good_choices": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "imdb_Reviewer_Opinion_bad_good_choices", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 62220278, "num_examples": 25000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 61344082, "num_examples": 25000, "dataset_name": "p3"}, "unsupervised": {"name": "unsupervised", "num_bytes": 124806277, "num_examples": 50000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 248370637, "size_in_bytes": 248370637}, "samsum_Summarize_": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "samsum_Summarize_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 20155567, "num_examples": 14732, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1093978, "num_examples": 818, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1139898, "num_examples": 819, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 22389443, "size_in_bytes": 22389443}, "common_gen_random_task_template_prompt": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "common_gen_random_task_template_prompt", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 18000106, "num_examples": 67389, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1113838, "num_examples": 4018, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 261716, "num_examples": 1497, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 19375660, "size_in_bytes": 19375660}, "common_gen_sentence_to_concepts": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "common_gen_sentence_to_concepts", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 18929213, "num_examples": 67389, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1169884, "num_examples": 4018, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 287597, "num_examples": 1497, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 20386694, "size_in_bytes": 20386694}, "anli_guaranteed_true_r1": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_guaranteed_true_r1", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 16395099, "num_examples": 16946, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 968754, "num_examples": 1000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 967053, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 18330906, "size_in_bytes": 18330906}, "race_middle_Taking_a_test": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "race_middle_Taking_a_test", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 67278102, "num_examples": 25421, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3814645, "num_examples": 1436, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3867174, "num_examples": 1436, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 74959921, "size_in_bytes": 74959921}, "ropes_prompt_beginning": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "ropes_prompt_beginning", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 23963191, "num_examples": 10924, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3427612, "num_examples": 1688, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 27390803, "size_in_bytes": 27390803}, "super_glue_rte_should_assume": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_rte_should_assume", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1918418, "num_examples": 2490, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 207712, "num_examples": 277, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2310996, "num_examples": 3000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 4437126, "size_in_bytes": 4437126}, "wiki_bio_key_content": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "wiki_bio_key_content", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1427895650, "num_examples": 582639, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 178164996, "num_examples": 72829, "dataset_name": "p3"}, "val": {"name": "val", "num_bytes": 178545508, "num_examples": 72831, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1784606154, "size_in_bytes": 1784606154}, "anli_claim_true_false_inconclusive_r1_score_eval": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_claim_true_false_inconclusive_r1_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 51094729, "num_examples": 50838, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3019586, "num_examples": 3000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3014483, "num_examples": 3000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 57128798, "size_in_bytes": 57128798}, "ag_news_classify_question_first": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "ag_news_classify_question_first", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 79339811, "num_examples": 120000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 4999506, "num_examples": 7600, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 84339317, "size_in_bytes": 84339317}, "wiki_qa_Decide_good_answer": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "wiki_qa_Decide_good_answer", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 11928399, "num_examples": 20360, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1588537, "num_examples": 2733, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3601330, "num_examples": 6165, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 17118266, "size_in_bytes": 17118266}, "squad_v2_Jeopardy_with_Context": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "squad_v2_Jeopardy_with_Context", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 162658871, "num_examples": 86821, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 11632776, "num_examples": 5928, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 174291647, "size_in_bytes": 174291647}, "wiki_qa_exercise": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "wiki_qa_exercise", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 14832159, "num_examples": 20360, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1976964, "num_examples": 2733, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 4488223, "num_examples": 6165, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 21297346, "size_in_bytes": 21297346}, "yelp_review_full_so_i_would": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "yelp_review_full_so_i_would", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1020590418, "num_examples": 650000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 78569036, "num_examples": 50000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1099159454, "size_in_bytes": 1099159454}, "super_glue_record_In_the_question_above_the_placeholder_stands_for": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_record_In_the_question_above_the_placeholder_stands_for", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 263170641, "num_examples": 100730, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 25668756, "num_examples": 10000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 25793143, "num_examples": 10000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 314632540, "size_in_bytes": 314632540}, "super_glue_wic_similar_sense_score_eval": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_wic_similar_sense_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2624347, "num_examples": 10856, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 324760, "num_examples": 1276, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 712704, "num_examples": 2800, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 3661811, "size_in_bytes": 3661811}, "adversarial_qa_droberta_based_on": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "adversarial_qa_droberta_based_on", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 17352089, "num_examples": 10000, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1725167, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 19077256, "size_in_bytes": 19077256}, "hellaswag_Randomized_prompts_template": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "hellaswag_Randomized_prompts_template", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 101708025, "num_examples": 39905, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 26424198, "num_examples": 10042, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 25517552, "num_examples": 10003, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 153649775, "size_in_bytes": 153649775}, "super_glue_wsc.fixed_does_p_stand_for_score_eval": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_wsc.fixed_does_p_stand_for_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 433291, "num_examples": 1108, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 99552, "num_examples": 208, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 146535, "num_examples": 292, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 679378, "size_in_bytes": 679378}, "super_glue_wic_question_context_meaning": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_wic_question_context_meaning", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1782795, "num_examples": 5428, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 218356, "num_examples": 638, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 522983, "num_examples": 1400, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 2524134, "size_in_bytes": 2524134}, "dream_baseline": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "dream_baseline", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 10027171, "num_examples": 6116, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3280124, "num_examples": 2040, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3289553, "num_examples": 2041, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 16596848, "size_in_bytes": 16596848}, "hellaswag_Randomized_prompts_template_score_eval": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "hellaswag_Randomized_prompts_template_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 318750017, "num_examples": 159620, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 82815427, "num_examples": 40168, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 80149517, "num_examples": 40012, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 481714961, "size_in_bytes": 481714961}, "anli_always_sometimes_never_r1_score_eval": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_always_sometimes_never_r1_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 50213537, "num_examples": 50838, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 2967586, "num_examples": 3000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2962483, "num_examples": 3000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 56143606, "size_in_bytes": 56143606}, "amazon_polarity_flattering_or_not": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "amazon_polarity_flattering_or_not", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 4156133865, "num_examples": 3600000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 461571845, "num_examples": 400000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 4617705710, "size_in_bytes": 4617705710}, "winogrande_winogrande_xl_underscore_refer_to": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "winogrande_winogrande_xl_underscore_refer_to", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 15703993, "num_examples": 40398, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 495664, "num_examples": 1267, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 693702, "num_examples": 1767, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 16893359, "size_in_bytes": 16893359}, "super_glue_cb_based_on_the_previous_passage": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_cb_based_on_the_previous_passage", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 220621, "num_examples": 250, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 54249, "num_examples": 56, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 240839, "num_examples": 250, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 515709, "size_in_bytes": 515709}, "wiki_hop_original_generate_subject": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "wiki_hop_original_generate_subject", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 623714545, "num_examples": 43738, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 78260746, "num_examples": 5129, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 701975291, "size_in_bytes": 701975291}, "gigaword_generate_summary_for_this": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "gigaword_generate_summary_for_this", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2282951959, "num_examples": 3803957, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 114080977, "num_examples": 189651, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1141043, "num_examples": 1951, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 2398173979, "size_in_bytes": 2398173979}, "wiki_qa_Topic_Prediction_Answer_Only": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "wiki_qa_Topic_Prediction_Answer_Only", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 476986, "num_examples": 1040, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 63674, "num_examples": 140, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 131065, "num_examples": 293, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 671725, "size_in_bytes": 671725}, "adversarial_qa_dbidaf_tell_what_it_is": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "adversarial_qa_dbidaf_tell_what_it_is", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 17755177, "num_examples": 10000, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1745733, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 19500910, "size_in_bytes": 19500910}, "super_glue_cb_claim_true_false_inconclusive": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_cb_claim_true_false_inconclusive", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 228163, "num_examples": 250, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 55983, "num_examples": 56, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 246589, "num_examples": 250, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 530735, "size_in_bytes": 530735}, "anli_guaranteed_true_r3_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_guaranteed_true_r3_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 271797122, "num_examples": 301377, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3289181, "num_examples": 3600, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3277640, "num_examples": 3600, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 278363943, "size_in_bytes": 278363943}, "anli_guaranteed_true_r1_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
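Several configs in this file are `*_score_eval` variants, whose feature schema (`idx`, `is_correct`, `weight`) supports rank-classification evaluation: each original example is materialized once per candidate answer, and a model is judged on how often its highest-scoring candidate is the correct one. A minimal sketch, assuming P3 is loaded from the `bigscience/P3` Hub repository, that the first component of `idx` identifies the original example, and with a placeholder scoring function standing in for a real model log-likelihood:

```python
from collections import defaultdict
from datasets import load_dataset

# Assumption: the config name matches one listed in this file; the download
# can be sizeable.
ds = load_dataset("bigscience/P3", "anli_guaranteed_true_r1_score_eval",
                  split="validation")

def score(input_ids, target_ids):
    # Placeholder: replace with a real model's log-likelihood of the target
    # token ids given the input token ids. The length penalty below only
    # keeps the sketch runnable end to end.
    return -len(target_ids)

# Assumption: rows sharing the first component of `idx` are the candidate
# answers of one original example.
groups = defaultdict(list)
for row in ds:
    groups[row["idx"][0]].append(row)

correct = sum(
    bool(max(rows, key=lambda r: score(r["inputs"], r["targets"]))["is_correct"])
    for rows in groups.values()
)
print("rank-classification accuracy:", correct / len(groups))
```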
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_guaranteed_true_r1_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 48569775, "num_examples": 50838, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 2870586, "num_examples": 3000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2865483, "num_examples": 3000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 54305844, "size_in_bytes": 54305844}, "super_glue_record_GPT_3_style_summary_only_continuation_choices_": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_record_GPT_3_style_summary_only_continuation_choices_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 391489105, "num_examples": 100730, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 38568867, "num_examples": 10000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 36068959, "num_examples": 10000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 466126931, "size_in_bytes": 466126931}, "glue_mrpc_equivalent": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "glue_mrpc_equivalent", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2501187, "num_examples": 3668, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 279007, "num_examples": 408, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1172381, "num_examples": 1725, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 3952575, "size_in_bytes": 3952575}, "adversarial_qa_dbidaf_based_on": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "adversarial_qa_dbidaf_based_on", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 17539793, "num_examples": 10000, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1724593, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 19264386, "size_in_bytes": 19264386}, "anli_take_the_following_as_truth_r1_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_take_the_following_as_truth_r1_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 52975735, "num_examples": 50838, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3130586, "num_examples": 3000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3125483, "num_examples": 3000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 59231804, "size_in_bytes": 59231804}, "imdb_Text_Expressed_Sentiment": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "imdb_Text_Expressed_Sentiment", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 62357778, "num_examples": 25000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 61481582, "num_examples": 25000, "dataset_name": "p3"}, "unsupervised": {"name": "unsupervised", "num_bytes": 125056277, "num_examples": 50000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 248895637, "size_in_bytes": 248895637}, "imdb_Reviewer_Expressed_Sentiment": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "imdb_Reviewer_Expressed_Sentiment", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 63182778, "num_examples": 25000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 62306582, "num_examples": 25000, "dataset_name": "p3"}, "unsupervised": {"name": "unsupervised", "num_bytes": 126706277, "num_examples": 50000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 252195637, "size_in_bytes": 252195637}, "rotten_tomatoes_Reviewer_Enjoyment_Yes_No": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "rotten_tomatoes_Reviewer_Enjoyment_Yes_No", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3001441, "num_examples": 8530, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 375350, "num_examples": 1066, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 378127, "num_examples": 1066, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 3754918, "size_in_bytes": 3754918}, "super_glue_wic_same_sense_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_wic_same_sense_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 4771955, "num_examples": 10856, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 578480, "num_examples": 1276, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1268888, "num_examples": 2800, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 6619323, "size_in_bytes": 6619323}, "duorc_SelfRC_generate_question_by_answer": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "duorc_SelfRC_generate_question_by_answer", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 250482962, "num_examples": 60094, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 53541384, "num_examples": 12845, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 51271161, "num_examples": 12415, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 355295507, "size_in_bytes": 355295507}, "anli_claim_true_false_inconclusive_r2_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_claim_true_false_inconclusive_r2_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 135233478, "num_examples": 136380, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 2982731, "num_examples": 3000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3000188, "num_examples": 3000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 141216397, "size_in_bytes": 141216397}, "common_gen_topic_to_sentence": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "common_gen_topic_to_sentence", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 15085978, "num_examples": 67389, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 914294, "num_examples": 4018, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 169793, "num_examples": 1497, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 16170065, "size_in_bytes": 16170065}, "super_glue_boolq_exam": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_boolq_exam", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 13146098, "num_examples": 9427, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 4508381, "num_examples": 3270, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 4592920, "num_examples": 3245, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 22247399, "size_in_bytes": 22247399}, "trec_what_category_best_describe": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "trec_what_category_best_describe", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2372982, "num_examples": 5452, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 202935, "num_examples": 500, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 2575917, "size_in_bytes": 2575917}, "super_glue_record_trying_to_decide": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_record_trying_to_decide", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 309721578, "num_examples": 100730, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 30091918, "num_examples": 10000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 30187405, "num_examples": 10000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 370000901, "size_in_bytes": 370000901}, "super_glue_record_exercise": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_record_exercise", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 269411680, "num_examples": 100730, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 26288756, "num_examples": 10000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 26413143, "num_examples": 10000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 322113579, "size_in_bytes": 322113579}, "super_glue_rte_should_assume_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_rte_should_assume_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3832443, "num_examples": 4980, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 414897, "num_examples": 554, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 4427714, "num_examples": 6000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 8675054, "size_in_bytes": 8675054}, "web_questions_get_the_answer": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "web_questions_get_the_answer", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 804353, "num_examples": 3778, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 436898, "num_examples": 2032, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1241251, "size_in_bytes": 1241251}, "glue_qqp_duplicate": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "glue_qqp_duplicate", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 143210252, "num_examples": 363846, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 15908937, "num_examples": 40430, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 155773201, "num_examples": 390965, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 314892390, "size_in_bytes": 314892390}, "trivia_qa_unfiltered_question_answer": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "trivia_qa_unfiltered_question_answer", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 23047349, "num_examples": 87622, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 2974305, "num_examples": 11313, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2870227, "num_examples": 10832, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 28891881, "size_in_bytes": 28891881}, "qasc_qa_with_separated_facts_5": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "qasc_qa_with_separated_facts_5", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 6924341, "num_examples": 8134, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 788080, "num_examples": 926, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 563775, "num_examples": 920, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 8276196, "size_in_bytes": 8276196}, "paws_labeled_final_Concatenation": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "paws_labeled_final_Concatenation", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 35504151, "num_examples": 49401, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 5747181, "num_examples": 8000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 5751650, "num_examples": 8000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 47002982, "size_in_bytes": 47002982}, "quarel_logic_test": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "quarel_logic_test", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1226686, "num_examples": 1941, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 177499, "num_examples": 278, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 352237, "num_examples": 552, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1756422, "size_in_bytes": 1756422}, "super_glue_wsc.fixed_by_p_they_mean_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_wsc.fixed_by_p_they_mean_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 440931, "num_examples": 1108, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 101152, "num_examples": 208, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 148543, "num_examples": 292, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 690626, "size_in_bytes": 690626}, "sciq_Direct_Question": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "sciq_Direct_Question", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 13620318, "num_examples": 11679, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1155460, "num_examples": 1000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1179523, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 15955301, "size_in_bytes": 15955301}, "imdb_Reviewer_Enjoyment": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "imdb_Reviewer_Enjoyment", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 63445278, "num_examples": 25000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 62569082, "num_examples": 25000, "dataset_name": "p3"}, "unsupervised": {"name": "unsupervised", "num_bytes": 126656277, "num_examples": 50000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 252670637, "size_in_bytes": 252670637}, "cnn_dailymail_3.0.0_news_card_view": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "cnn_dailymail_3.0.0_news_card_view", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1358759435, "num_examples": 287113, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 63631754, "num_examples": 13368, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 54466840, "num_examples": 11490, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1476858029, "size_in_bytes": 1476858029}, "ropes_plain_no_background": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "ropes_plain_no_background", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 7337263, "num_examples": 10924, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1455216, "num_examples": 1688, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 8792479, "size_in_bytes": 8792479}, "wiki_bio_comprehension": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "wiki_bio_comprehension", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1630511446, "num_examples": 582639, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 203505917, "num_examples": 72829, "dataset_name": "p3"}, "val": {"name": "val", "num_bytes": 203916518, "num_examples": 72831, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 2037933881, "size_in_bytes": 2037933881}, "super_glue_boolq_could_you_tell_me_": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_boolq_could_you_tell_me_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 12844434, "num_examples": 9427, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 4403741, "num_examples": 3270, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 4489080, "num_examples": 3245, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 21737255, "size_in_bytes": 21737255}, "super_glue_multirc_I_was_going_to_say_": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_multirc_I_was_going_to_say_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 87327439, "num_examples": 27243, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 15270196, "num_examples": 4848, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 29317971, "num_examples": 9693, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 131915606, "size_in_bytes": 131915606}, "anli_can_we_infer_r3": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_can_we_infer_r3", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 91109516, "num_examples": 100459, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1102269, "num_examples": 1200, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1098422, "num_examples": 1200, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 93310207, "size_in_bytes": 93310207}, "hellaswag_Predict_ending_with_hint": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "hellaswag_Predict_ending_with_hint", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 103772221, "num_examples": 39905, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 26953632, "num_examples": 10042, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 26056337, "num_examples": 10003, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 156782190, "size_in_bytes": 156782190}, "super_glue_record_Add_sentence_after_continuation_choices_": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_record_Add_sentence_after_continuation_choices_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 397869483, "num_examples": 100730, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 39209985, "num_examples": 10000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 36813565, "num_examples": 10000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 473893033, "size_in_bytes": 473893033}, "duorc_ParaphraseRC_build_story_around_qa": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "duorc_ParaphraseRC_build_story_around_qa", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 249445065, "num_examples": 58752, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 55541457, "num_examples": 13111, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 57135083, "num_examples": 13449, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 362121605, "size_in_bytes": 362121605}, "samsum_Summarize_this_dialogue_": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "samsum_Summarize_this_dialogue_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 20494403, "num_examples": 14732, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1112792, "num_examples": 818, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1158735, "num_examples": 819, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 22765930, "size_in_bytes": 22765930}, "super_glue_wsc.fixed_I_think_they_mean": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_wsc.fixed_I_think_they_mean", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 245844, "num_examples": 554, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 57822, "num_examples": 104, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 86727, "num_examples": 146, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 390393, "size_in_bytes": 390393}, "yelp_review_full_based_on_that": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "yelp_review_full_based_on_that", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1031640418, "num_examples": 650000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 79419036, "num_examples": 50000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1111059454, "size_in_bytes": 1111059454}, "super_glue_rte_guaranteed_true": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_rte_guaranteed_true", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1910948, "num_examples": 2490, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 206881, "num_examples": 277, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2301996, "num_examples": 3000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 4419825, "size_in_bytes": 4419825}, "duorc_ParaphraseRC_title_generation": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "duorc_ParaphraseRC_title_generation", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 286267374, "num_examples": 69524, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 63924078, "num_examples": 15591, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 65673482, "num_examples": 15857, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 415864934, "size_in_bytes": 415864934}, "race_middle_Select_the_best_answer_generate_span_": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "race_middle_Select_the_best_answer_generate_span_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 68147445, "num_examples": 25421, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3865635, "num_examples": 1436, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3920560, "num_examples": 1436, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 75933640, "size_in_bytes": 75933640}, "ropes_background_new_situation_answer": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "ropes_background_new_situation_answer", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 24148899, "num_examples": 10924, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3456308, "num_examples": 1688, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 27605207, "size_in_bytes": 27605207}, "multi_news_summarize": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "multi_news_summarize", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 525663397, "num_examples": 44972, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 64723529, "num_examples": 5622, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 65134812, "num_examples": 5622, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 655521738, "size_in_bytes": 655521738}, "trec_fine_grained_HUM_context_first": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "trec_fine_grained_HUM_context_first", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 405437, "num_examples": 1223, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 19687, "num_examples": 65, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 425124, "size_in_bytes": 425124}, "glue_mrpc_generate_sentence": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "glue_mrpc_generate_sentence", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1550931, "num_examples": 2474, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 175596, "num_examples": 279, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 719291, "num_examples": 1147, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 2445818, "size_in_bytes": 2445818}, "social_i_qa_I_was_wondering": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "social_i_qa_I_was_wondering", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 13607428, "num_examples": 33410, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 799781, "num_examples": 1954, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 14407209, "size_in_bytes": 14407209}, "adversarial_qa_dbert_generate_question": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and to collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "adversarial_qa_dbert_generate_question", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 18552826, "num_examples": 10000, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1824247, "num_examples": 1000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1954968, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 22332041, "size_in_bytes": 22332041}, "cos_e_v1.11_question_option_description_text": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and to collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "cos_e_v1.11_question_option_description_text", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 4120285, "num_examples": 9741, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 512005, "num_examples": 1221, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 4632290, "size_in_bytes": 4632290}, "rotten_tomatoes_Movie_Expressed_Sentiment_2": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and to collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "rotten_tomatoes_Movie_Expressed_Sentiment_2", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3372496, "num_examples": 8530, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 421721, "num_examples": 1066, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 424498, "num_examples": 1066, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 4218715, "size_in_bytes": 4218715}, "super_glue_multirc_is_the_correct_answer_": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and to collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_multirc_is_the_correct_answer_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 86487327, "num_examples": 27243, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 15121664, "num_examples": 4848, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 29019739, "num_examples": 9693, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 130628730, "size_in_bytes": 130628730}, "super_glue_cb_justified_in_saying_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and to collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_cb_justified_in_saying_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 631666, "num_examples": 750, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 155938, "num_examples": 168, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 668809, "num_examples": 750, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1456413, "size_in_bytes": 1456413}, "paws_labeled_final_PAWS_ANLI_GPT3": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and to collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "paws_labeled_final_PAWS_ANLI_GPT3", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 29160137, "num_examples": 49401, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 4719791, "num_examples": 8000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 4724290, "num_examples": 8000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 38604218, "size_in_bytes": 38604218}, "super_glue_rte_must_be_true": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and to collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_rte_must_be_true", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1955768, "num_examples": 2490, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 211867, "num_examples": 277, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2355996, "num_examples": 3000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 4523631, "size_in_bytes": 4523631}, "piqa_Correct_the_solution": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and to collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "piqa_Correct_the_solution", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 11641862, "num_examples": 16113, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1321001, "num_examples": 1838, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1592878, "num_examples": 3084, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 14555741, "size_in_bytes": 14555741}, "wiki_bio_what_content": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and to collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "wiki_bio_what_content", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1005722302, "num_examples": 582639, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 125491892, "num_examples": 72829, "dataset_name": "p3"}, "val": {"name": "val", "num_bytes": 125718797, "num_examples": 72831, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1256932991, "size_in_bytes": 1256932991}, "super_glue_rte_GPT_3_style_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and to collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_rte_GPT_3_style_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3620367, "num_examples": 4980, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 391299, "num_examples": 554, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 4173490, "num_examples": 6000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 8185156, "size_in_bytes": 8185156}, "rotten_tomatoes_Text_Expressed_Sentiment": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and to collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "rotten_tomatoes_Text_Expressed_Sentiment", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3278666, "num_examples": 8530, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 409995, "num_examples": 1066, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 412772, "num_examples": 1066, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 4101433, "size_in_bytes": 4101433}, "wiki_bio_who": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and to collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "wiki_bio_who", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1439608063, "num_examples": 582639, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 179628653, "num_examples": 72829, "dataset_name": "p3"}, "val": {"name": "val", "num_bytes": 180006533, "num_examples": 72831, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1799243249, "size_in_bytes": 1799243249}, "glue_qqp_answer": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and to collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "glue_qqp_answer", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 138151512, "num_examples": 363846, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 15346729, "num_examples": 40430, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 150347231, "num_examples": 390965, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 303845472, "size_in_bytes": 303845472}, "super_glue_wsc.fixed_in_other_words_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and to collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_wsc.fixed_in_other_words_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 491695, "num_examples": 1108, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 115454, "num_examples": 208, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 164165, "num_examples": 292, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 771314, "size_in_bytes": 771314}, "rotten_tomatoes_Reviewer_Sentiment_Feeling": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and to collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "rotten_tomatoes_Reviewer_Sentiment_Feeling", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3244546, "num_examples": 8530, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 405731, "num_examples": 1066, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 408508, "num_examples": 1066, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 4058785, "size_in_bytes": 4058785}, "winogrande_winogrande_debiased_fill_in_the_blank": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and to collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "winogrande_winogrande_debiased_fill_in_the_blank", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3894323, "num_examples": 9248, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 531140, "num_examples": 1267, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 743178, "num_examples": 1767, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 5168641, "size_in_bytes": 5168641}, "super_glue_rte_MNLI_crowdsource": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and to collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_rte_MNLI_crowdsource", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2152478, "num_examples": 2490, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 233750, "num_examples": 277, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2592996, "num_examples": 3000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 4979224, "size_in_bytes": 4979224}, "super_glue_cb_does_this_imply": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and to collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset of datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_cb_does_this_imply", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 214121, "num_examples": 250, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 52793, "num_examples": 56, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 234339, "num_examples": 250, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 501253, "size_in_bytes": 501253}, "super_glue_copa_best_option_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset of datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_copa_best_option_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 325091, "num_examples": 800, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 82325, "num_examples": 200, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 399514, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 806930, "size_in_bytes": 806930}, "amazon_polarity_Is_this_review_negative": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset of datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "amazon_polarity_Is_this_review_negative", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3596333865, "num_examples": 3600000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 399371845, "num_examples": 400000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 3995705710, "size_in_bytes": 3995705710}, "cos_e_v1.11_description_question_option_text": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset of datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "cos_e_v1.11_description_question_option_text", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 5269723, "num_examples": 9741, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 656083, "num_examples": 1221, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 5925806, "size_in_bytes": 5925806}, "ag_news_classify_with_choices": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset of datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "ag_news_classify_with_choices", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 91699811, "num_examples": 120000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 5782306, "num_examples": 7600, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 97482117, "size_in_bytes": 97482117}, "super_glue_wsc.fixed_in_other_words": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset of datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_wsc.fixed_in_other_words", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 248724, "num_examples": 554, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 58374, "num_examples": 104, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 86531, "num_examples": 146, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 393629, "size_in_bytes": 393629}, "amazon_polarity_Is_this_product_review_positive": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset of datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "amazon_polarity_Is_this_product_review_positive", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3657533861, "num_examples": 3600000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 406171845, "num_examples": 400000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 4063705706, "size_in_bytes": 4063705706}, "multi_news_expand_reverse_task_": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset of datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "multi_news_expand_reverse_task_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 267362189, "num_examples": 44972, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 33300278, "num_examples": 5622, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 33227761, "num_examples": 5622, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 333890228, "size_in_bytes": 333890228}, "super_glue_cb_guaranteed_possible_impossible": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset of datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_cb_guaranteed_possible_impossible", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 230064, "num_examples": 250, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 56365, "num_examples": 56, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 246589, "num_examples": 250, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 533018, "size_in_bytes": 533018}, "hellaswag_Predict_ending_with_hint_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset of datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "hellaswag_Predict_ending_with_hint_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 327006801, "num_examples": 159620, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 84933163, "num_examples": 40168, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 82304657, "num_examples": 40012, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 494244621, "size_in_bytes": 494244621}, "cosmos_qa_description_context_question_text": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset of datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "cosmos_qa_description_context_question_text", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 30105807, "num_examples": 25262, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3812759, "num_examples": 2985, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 8896772, "num_examples": 6963, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 42815338, "size_in_bytes": 42815338}, "anli_guaranteed_possible_impossible_r1": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset of datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_guaranteed_possible_impossible_r1", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 17379204, "num_examples": 16946, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1028087, "num_examples": 1000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1026386, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 19433677, "size_in_bytes": 19433677}, "anli_take_the_following_as_truth_r2_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset of datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_take_the_following_as_truth_r2_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 140279538, "num_examples": 136380, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3093731, "num_examples": 3000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3111188, "num_examples": 3000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 146484457, "size_in_bytes": 146484457}, "quartz_answer_question_based_on": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset of datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "quartz_answer_question_based_on", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1684763, "num_examples": 2696, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 247740, "num_examples": 384, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 493585, "num_examples": 784, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 2426088, "size_in_bytes": 2426088}, "anli_always_sometimes_never_r1": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset of datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_always_sometimes_never_r1", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 17096937, "num_examples": 16946, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1010087, "num_examples": 1000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1008386, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 19115410, "size_in_bytes": 19115410}, "super_glue_wic_question_context_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset of datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_wic_question_context_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3979467, "num_examples": 10856, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 485332, "num_examples": 1276, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1064488, "num_examples": 2800, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 5529287, "size_in_bytes": 5529287}, "winogrande_winogrande_xl_Replace": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset of datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "winogrande_winogrande_xl_Replace", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 16754341, "num_examples": 40398, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 528606, "num_examples": 1267, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 739644, "num_examples": 1767, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 18022591, "size_in_bytes": 18022591}, "openbookqa_main_which_correct": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset of datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "openbookqa_main_which_correct", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2311869, "num_examples": 4957, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 252670, "num_examples": 500, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 241012, "num_examples": 500, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 2805551, "size_in_bytes": 2805551}, "ai2_arc_ARC_Challenge_heres_a_problem": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "ai2_arc_ARC_Challenge_heres_a_problem", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 870719, "num_examples": 1119, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 237550, "num_examples": 299, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 929168, "num_examples": 1172, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 2037437, "size_in_bytes": 2037437}, "anli_guaranteed_possible_impossible_r3": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface for interactively writing prompts on datasets and collecting prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_guaranteed_possible_impossible_r3", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 97660087, "num_examples": 100459, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1181817, "num_examples": 1200, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1177970, "num_examples": 1200, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 100019874, "size_in_bytes": 100019874}, "paws_labeled_final_PAWS_ANLI_GPT3_no_label": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface for interactively writing prompts on datasets and collecting prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "paws_labeled_final_PAWS_ANLI_GPT3_no_label", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 28588011, "num_examples": 49401, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 4627181, "num_examples": 8000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 4631650, "num_examples": 8000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 37846842, "size_in_bytes": 37846842}, "anli_GPT_3_style_r3_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface for interactively writing prompts on datasets and collecting prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_GPT_3_style_r3_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 261466196, "num_examples": 301377, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3166865, "num_examples": 3600, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3154448, "num_examples": 3600, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 267787509, "size_in_bytes": 267787509}, "anli_MNLI_crowdsource_r3": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface for interactively writing prompts on datasets and collecting prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_MNLI_crowdsource_r3", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 106341199, "num_examples": 100459, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1283079, "num_examples": 1200, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1279232, "num_examples": 1200, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 108903510, "size_in_bytes": 108903510}, "ropes_prompt_bottom_no_hint": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface for interactively writing prompts on datasets and collecting prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "ropes_prompt_bottom_no_hint", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 8691839, "num_examples": 10924, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1664528, "num_examples": 1688, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 10356367, "size_in_bytes": 10356367}, "paws_labeled_final_context_question_no_label": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface for interactively writing prompts on datasets and collecting prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "paws_labeled_final_context_question_no_label", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 30761579, "num_examples": 49401, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 4979181, "num_examples": 8000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 4983650, "num_examples": 8000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 40724410, "size_in_bytes": 40724410}, "super_glue_cb_MNLI_crowdsource_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface for interactively writing prompts on datasets and collecting prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_cb_MNLI_crowdsource_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 730416, "num_examples": 750, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 178058, "num_examples": 168, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 767559, "num_examples": 750, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1676033, "size_in_bytes": 1676033}, "piqa_Does_this_solution_make_sense_sol2": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface for interactively writing prompts on datasets and collecting prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "piqa_Does_this_solution_make_sense_sol2", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 5965542, "num_examples": 16113, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 678174, "num_examples": 1838, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1117950, "num_examples": 3084, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 7761666, "size_in_bytes": 7761666}, "quoref_Guess_Answer": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface for interactively writing prompts on datasets and collecting prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "quoref_Guess_Answer", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 76701191, "num_examples": 19399, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 9438316, "num_examples": 2418, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 86139507, "size_in_bytes": 86139507}, "cos_e_v1.11_rationale": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface for interactively writing prompts on datasets and collecting prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "cos_e_v1.11_rationale", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 5252075, "num_examples": 9741, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 639560, "num_examples": 1221, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 5891635, "size_in_bytes": 5891635}, "adversarial_qa_dbert_based_on": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface for interactively writing prompts on datasets and collecting prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "adversarial_qa_dbert_based_on", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 17580569, "num_examples": 10000, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1717582, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 19298151, "size_in_bytes": 19298151}, "super_glue_copa__As_a_result_C1_or_C2__score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface for interactively writing prompts on datasets and collecting prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_copa__As_a_result_C1_or_C2__score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 136744, "num_examples": 404, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 32053, "num_examples": 96, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 165595, "num_examples": 500, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 334392, "size_in_bytes": 334392}, "cnn_dailymail_3.0.0_spice_up_story": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface for interactively writing prompts on datasets and collecting prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "cnn_dailymail_3.0.0_spice_up_story", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1346700689, "num_examples": 287113, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 63070298, "num_examples": 13368, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 53984260, "num_examples": 11490, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1463755247, "size_in_bytes": 1463755247}, "super_glue_copa_i_am_hesitating": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface for interactively writing prompts on datasets and collecting prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_copa_i_am_hesitating", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 201057, "num_examples": 400, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 50939, "num_examples": 100, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 238607, "num_examples": 500, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 490603, "size_in_bytes": 490603}, "anli_does_this_imply_r2_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface for interactively writing prompts on datasets and collecting prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_does_this_imply_r2_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 128459938, "num_examples": 136380, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 2833731, "num_examples": 3000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2851188, "num_examples": 3000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 134144857, "size_in_bytes": 134144857}, "winogrande_winogrande_debiased_Replace_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface for interactively writing prompts on datasets and collecting prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "winogrande_winogrande_debiased_Replace_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 7551708, "num_examples": 18496, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1030174, "num_examples": 2534, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1440871, "num_examples": 3534, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 10022753, "size_in_bytes": 10022753}, "super_glue_multirc_paragraph_question_is_it_": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface for interactively writing prompts on datasets and collecting prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_multirc_paragraph_question_is_it_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 85833495, "num_examples": 27243, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 15005312, "num_examples": 4848, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 28787107, "num_examples": 9693, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 129625914, "size_in_bytes": 129625914}, "wiki_qa_Direct_Answer_to_Question": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface for interactively writing prompts on datasets and collecting prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "wiki_qa_Direct_Answer_to_Question", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 464796, "num_examples": 1040, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 62298, "num_examples": 140, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 128404, "num_examples": 293, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 655498, "size_in_bytes": 655498}, "super_glue_cb_guaranteed_true_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_cb_guaranteed_true_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 635416, "num_examples": 750, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 156778, "num_examples": 168, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 672559, "num_examples": 750, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1464753, "size_in_bytes": 1464753}, "winogrande_winogrande_debiased_does_underscore_refer_to": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "winogrande_winogrande_debiased_does_underscore_refer_to", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3515155, "num_examples": 9248, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 479193, "num_examples": 1267, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 670731, "num_examples": 1767, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 4665079, "size_in_bytes": 4665079}, "super_glue_cb_consider_always_sometimes_never": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_cb_consider_always_sometimes_never", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 229515, "num_examples": 250, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 56298, "num_examples": 56, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 249099, "num_examples": 250, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 534912, "size_in_bytes": 534912}, "qasc_qa_with_separated_facts_1": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "qasc_qa_with_separated_facts_1", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 6720901, "num_examples": 8134, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 775802, "num_examples": 926, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 552758, "num_examples": 920, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 8049461, "size_in_bytes": 8049461}, "quartz_given_the_fact_answer_the_q": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "quartz_given_the_fact_answer_the_q", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1568835, "num_examples": 2696, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 231228, "num_examples": 384, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 459873, "num_examples": 784, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 2259936, "size_in_bytes": 2259936}, "social_i_qa_Show_choices_and_generate_index": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "social_i_qa_Show_choices_and_generate_index", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 19481163, "num_examples": 33410, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1144405, "num_examples": 1954, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 20625568, "size_in_bytes": 20625568}, "super_glue_cb_does_it_follow_that_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_cb_does_it_follow_that_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 618550, "num_examples": 750, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 153166, "num_examples": 168, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 656089, "num_examples": 750, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1427805, "size_in_bytes": 1427805}, "imdb_Writer_Expressed_Sentiment": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "imdb_Writer_Expressed_Sentiment", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 62657778, "num_examples": 25000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 61781582, "num_examples": 25000, "dataset_name": "p3"}, "unsupervised": {"name": "unsupervised", "num_bytes": 125656277, "num_examples": 50000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 250095637, "size_in_bytes": 250095637}, "gigaword_first_sentence_title": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "gigaword_first_sentence_title", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2214480717, "num_examples": 3803957, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 110667259, "num_examples": 189651, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1105925, "num_examples": 1951, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 2326253901, "size_in_bytes": 2326253901}, "xsum_DOC_given_above_write_one_sentence": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "xsum_DOC_given_above_write_one_sentence", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 680219377, "num_examples": 204045, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 37770510, "num_examples": 11332, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 37873851, "num_examples": 11334, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 755863738, "size_in_bytes": 755863738}, "anli_does_this_imply_r3": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_does_this_imply_r3", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 91712270, "num_examples": 100459, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1109469, "num_examples": 1200, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1105622, "num_examples": 1200, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 93927361, "size_in_bytes": 93927361}, "winogrande_winogrande_xl_does_underscore_refer_to": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "winogrande_winogrande_xl_does_underscore_refer_to", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 15178819, "num_examples": 40398, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 479193, "num_examples": 1267, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 670731, "num_examples": 1767, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 16328743, "size_in_bytes": 16328743}, "super_glue_wic_grammar_homework_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_wic_grammar_homework_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 4739387, "num_examples": 10856, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 574652, "num_examples": 1276, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1260488, "num_examples": 2800, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 6574527, "size_in_bytes": 6574527}, "super_glue_cb_MNLI_crowdsource": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_cb_MNLI_crowdsource", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 249258, "num_examples": 250, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 60700, "num_examples": 56, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 267339, "num_examples": 250, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 577297, "size_in_bytes": 577297}, "rotten_tomatoes_Reviewer_Expressed_Sentiment": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "rotten_tomatoes_Reviewer_Expressed_Sentiment", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3560156, "num_examples": 8530, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 445173, "num_examples": 1066, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 447950, "num_examples": 1066, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 4453279, "size_in_bytes": 4453279}, "ropes_read_background_situation": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "ropes_read_background_situation", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 26606799, "num_examples": 10924, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3836108, "num_examples": 1688, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 30442907, "size_in_bytes": 30442907}, "anli_must_be_true_r1": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_must_be_true_r1", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 16700127, "num_examples": 16946, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 986754, "num_examples": 1000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 985053, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 18671934, "size_in_bytes": 18671934}, "anli_guaranteed_true_r2_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_guaranteed_true_r2_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 128459938, "num_examples": 136380, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 2833731, "num_examples": 3000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2851188, "num_examples": 3000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 134144857, "size_in_bytes": 134144857}, "super_glue_copa__which_may_be_caused_by": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_copa__which_may_be_caused_by", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 77349, "num_examples": 198, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 21260, "num_examples": 52, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 91698, "num_examples": 250, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 190307, "size_in_bytes": 190307}, "super_glue_copa_choose_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_copa_choose_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 274279, "num_examples": 800, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 69637, "num_examples": 200, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 336014, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 679930, "size_in_bytes": 679930}, "super_glue_cb_always_sometimes_never": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_cb_always_sometimes_never", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 224637, "num_examples": 250, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 55150, "num_examples": 56, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 244089, "num_examples": 250, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 523876, "size_in_bytes": 523876}, "super_glue_cb_consider_always_sometimes_never_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_cb_consider_always_sometimes_never_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 674300, "num_examples": 750, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 165654, "num_examples": 168, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 711839, "num_examples": 750, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1551793, "size_in_bytes": 1551793}, "anli_does_it_follow_that_r1": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_does_it_follow_that_r1", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 16014739, "num_examples": 16946, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 946270, "num_examples": 1000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 944417, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 17905426, "size_in_bytes": 17905426}, "qasc_qa_with_separated_facts_4": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "qasc_qa_with_separated_facts_4", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 7652910, "num_examples": 8134, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 883000, "num_examples": 926, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 655622, "num_examples": 920, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 9191532, "size_in_bytes": 9191532}, "wiqa_which_of_the_following_is_the_supposed_perturbation": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "wiqa_which_of_the_following_is_the_supposed_perturbation", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 38964564, "num_examples": 29808, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 8703267, "num_examples": 6894, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3649334, "num_examples": 3003, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 51317165, "size_in_bytes": 51317165}, "super_glue_rte_guaranteed_true_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_rte_guaranteed_true_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3817503, "num_examples": 4980, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 413235, "num_examples": 554, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 4409714, "num_examples": 6000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 8640452, "size_in_bytes": 8640452}, "duorc_SelfRC_movie_director": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "duorc_SelfRC_movie_director", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 268967131, "num_examples": 60721, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 57398923, "num_examples": 12961, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 55109467, "num_examples": 12559, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 381475521, "size_in_bytes": 381475521}, "anli_always_sometimes_never_r3": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_always_sometimes_never_r3", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 95972326, "num_examples": 100459, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1160271, "num_examples": 1200, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1156424, "num_examples": 1200, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 98289021, "size_in_bytes": 98289021}, "cos_e_v1.11_question_description_option_text": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "cos_e_v1.11_question_description_option_text", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 4967752, "num_examples": 9741, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 618232, "num_examples": 1221, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 5585984, "size_in_bytes": 5585984}, "super_glue_copa_best_option": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_copa_best_option", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 182851, "num_examples": 400, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 46395, "num_examples": 100, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 215857, "num_examples": 500, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 445103, "size_in_bytes": 445103}, "ag_news_which_section": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "ag_news_which_section", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 83899811, "num_examples": 120000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 5288306, "num_examples": 7600, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 89188117, "size_in_bytes": 89188117}, "cosmos_qa_context_description_question_answer_id": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "cosmos_qa_context_description_question_answer_id", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 34592731, "num_examples": 25262, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 4377859, "num_examples": 2985, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 10239734, "num_examples": 6963, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 49210324, "size_in_bytes": 49210324}, "wiki_qa_Jeopardy_style": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "wiki_qa_Jeopardy_style", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 564004, "num_examples": 1040, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 75586, "num_examples": 140, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 155933, "num_examples": 293, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 795523, "size_in_bytes": 795523}, "anli_claim_true_false_inconclusive_r1": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_claim_true_false_inconclusive_r1", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 17425827, "num_examples": 16946, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1028410, "num_examples": 1000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1026709, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 19480946, "size_in_bytes": 19480946}, "race_high_Write_a_multi_choice_question_for_the_following_article": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "race_high_Write_a_multi_choice_question_for_the_following_article", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 241477048, "num_examples": 62445, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 13243746, "num_examples": 3451, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 13381588, "num_examples": 3498, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 268102382, "size_in_bytes": 268102382}, "super_glue_record_GPT_3_style_with_labels_without_hyphens_continuation_choices_": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_record_GPT_3_style_with_labels_without_hyphens_continuation_choices_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 386704513, "num_examples": 100730, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 38142139, "num_examples": 10000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 35743784, "num_examples": 10000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 460590436, "size_in_bytes": 460590436}, "quoref_Answer_Test": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "quoref_Answer_Test", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 77478105, "num_examples": 19399, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 9535389, "num_examples": 2418, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 87013494, "size_in_bytes": 87013494}, "super_glue_copa_choose": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_copa_choose", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 157445, "num_examples": 400, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 40051, "num_examples": 100, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 184107, "num_examples": 500, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 381603, "size_in_bytes": 381603}, "anli_guaranteed_possible_impossible_r1_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_guaranteed_possible_impossible_r1_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 50721917, "num_examples": 50838, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 2997586, "num_examples": 3000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2992483, "num_examples": 3000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 56711986, "size_in_bytes": 56711986}, "duorc_ParaphraseRC_generate_question": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "duorc_ParaphraseRC_generate_question", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 289153756, "num_examples": 69524, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 64571791, "num_examples": 15591, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 66337535, "num_examples": 15857, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 420063082, "size_in_bytes": 420063082}, "web_questions_potential_correct_answer": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "web_questions_potential_correct_answer", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 872732, "num_examples": 3778, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 472864, "num_examples": 2032, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1345596, "size_in_bytes": 1345596}, "web_questions_short_general_knowledge_q": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "web_questions_short_general_knowledge_q", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 713681, "num_examples": 3778, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 387516, "num_examples": 2032, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1101197, "size_in_bytes": 1101197}, "imdb_Reviewer_Sentiment_Feeling": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "imdb_Reviewer_Sentiment_Feeling", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 62257778, "num_examples": 25000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 61381582, "num_examples": 25000, "dataset_name": "p3"}, "unsupervised": {"name": "unsupervised", "num_bytes": 124856277, "num_examples": 50000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 248495637, "size_in_bytes": 248495637}, "rotten_tomatoes_Writer_Expressed_Sentiment": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "rotten_tomatoes_Writer_Expressed_Sentiment", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3381026, "num_examples": 8530, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 422787, "num_examples": 1066, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 425564, "num_examples": 1066, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 4229377, "size_in_bytes": 4229377}, "anli_take_the_following_as_truth_r2": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_take_the_following_as_truth_r2", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 47847743, "num_examples": 45460, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1053125, "num_examples": 1000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1058944, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 49959812, "size_in_bytes": 49959812}, "rotten_tomatoes_Reviewer_Opinion_bad_good_choices": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "rotten_tomatoes_Reviewer_Opinion_bad_good_choices", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3231751, "num_examples": 8530, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 404132, "num_examples": 1066, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 406909, "num_examples": 1066, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 4042792, "size_in_bytes": 4042792}, "anli_must_be_true_r2": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_must_be_true_r2", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 44194835, "num_examples": 45460, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 974469, "num_examples": 1000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 980288, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 46149592, "size_in_bytes": 46149592}, "super_glue_copa_C1_or_C2_premise_so_because__score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_copa_C1_or_C2_premise_so_because__score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 249461, "num_examples": 800, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 63445, "num_examples": 200, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 305098, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 618004, "size_in_bytes": 618004}, "super_glue_cb_take_the_following_as_truth_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_cb_take_the_following_as_truth_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 700416, "num_examples": 750, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 171338, "num_examples": 168, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 737559, "num_examples": 750, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1609313, "size_in_bytes": 1609313}, "wiki_hop_original_choose_best_object_affirmative_2": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "wiki_hop_original_choose_best_object_affirmative_2", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 663019385, "num_examples": 43738, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 83026521, "num_examples": 5129, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 746045906, "size_in_bytes": 746045906}, "dream_answer_to_dialogue": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "dream_answer_to_dialogue", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 9167509, "num_examples": 6116, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3008458, "num_examples": 2040, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3008258, "num_examples": 2041, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 15184225, "size_in_bytes": 15184225}, "quoref_Find_Answer": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "quoref_Find_Answer", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 76972874, "num_examples": 19399, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 9472352, "num_examples": 2418, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 86445226, "size_in_bytes": 86445226}, "super_glue_wic_GPT_3_prompt_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_wic_GPT_3_prompt_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3957755, "num_examples": 10856, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 482780, "num_examples": 1276, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1058888, "num_examples": 2800, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 5499423, "size_in_bytes": 5499423}, "super_glue_copa__What_could_happen_next_C1_or_C2_": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_copa__What_could_happen_next_C1_or_C2_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 80923, "num_examples": 202, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 19007, "num_examples": 48, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 93475, "num_examples": 250, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 193405, "size_in_bytes": 193405}, "quoref_Found_Context_Online": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "quoref_Found_Context_Online", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 76216668, "num_examples": 19399, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 9378050, "num_examples": 2418, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 85594718, "size_in_bytes": 85594718}, "super_glue_wsc.fixed_p_is_are_r": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_wsc.fixed_p_is_are_r", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 239545, "num_examples": 554, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 54190, "num_examples": 104, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 82956, "num_examples": 146, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 376691, "size_in_bytes": 376691}, "winogrande_winogrande_debiased_stand_for": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "winogrande_winogrande_debiased_stand_for", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3533651, "num_examples": 9248, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 481727, "num_examples": 1267, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 674265, "num_examples": 1767, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 4689643, "size_in_bytes": 4689643}, "yelp_review_full_format_rating": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "yelp_review_full_format_rating", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1019290422, "num_examples": 650000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 78469036, "num_examples": 50000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1097759458, "size_in_bytes": 1097759458}, "anli_GPT_3_style_r2": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_GPT_3_style_r2", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 42010884, "num_examples": 45460, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 926708, "num_examples": 1000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 932599, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 43870191, "size_in_bytes": 43870191}, "cos_e_v1.11_aligned_with_common_sense": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "cos_e_v1.11_aligned_with_common_sense", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 5953395, "num_examples": 9741, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 727468, "num_examples": 1221, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 6680863, "size_in_bytes": 6680863}, "quail_context_question_answer_description_id": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "quail_context_question_answer_description_id", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 42080475, "num_examples": 10246, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 8950709, "num_examples": 2164, "dataset_name": "p3"}, "challenge": {"name": "challenge", "num_bytes": 2301139, "num_examples": 556, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 53332323, "size_in_bytes": 53332323}, "duorc_SelfRC_build_story_around_qa": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "duorc_SelfRC_build_story_around_qa", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 245194760, "num_examples": 60094, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 52411126, "num_examples": 12845, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 50178368, "num_examples": 12415, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 347784254, "size_in_bytes": 347784254}, "anli_should_assume_r1_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_should_assume_r1_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 48722289, "num_examples": 50838, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 2879586, "num_examples": 3000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2874483, "num_examples": 3000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 54476358, "size_in_bytes": 54476358}, "ropes_plain_bottom_hint": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "ropes_plain_bottom_hint", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 22553995, "num_examples": 10924, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3209860, "num_examples": 1688, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 25763855, "size_in_bytes": 25763855}, "squad_v2_Topic_Prediction_Context": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "squad_v2_Topic_Prediction_Context", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 204107475, "num_examples": 130319, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 19537215, "num_examples": 11873, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 223644690, "size_in_bytes": 223644690}, "ai2_arc_ARC_Easy_i_am_hesitating": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "ai2_arc_ARC_Easy_i_am_hesitating", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1893585, "num_examples": 2251, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 479179, "num_examples": 570, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2003617, "num_examples": 2376, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 4376381, "size_in_bytes": 4376381}, "super_glue_record_Can_you_figure_out_": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_record_Can_you_figure_out_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 265384581, "num_examples": 100730, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 25888836, "num_examples": 10000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 26013143, "num_examples": 10000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 317286560, "size_in_bytes": 317286560}, "yelp_review_full_this_place": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "yelp_review_full_this_place", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1018640418, "num_examples": 650000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 78419036, "num_examples": 50000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1097059454, "size_in_bytes": 1097059454}, "super_glue_rte_GPT_3_style": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_rte_GPT_3_style", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1822300, "num_examples": 2490, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 196946, "num_examples": 277, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2177884, "num_examples": 3000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 4197130, "size_in_bytes": 4197130}, "anli_MNLI_crowdsource_r1": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_MNLI_crowdsource_r1", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 18848458, "num_examples": 16946, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1112412, "num_examples": 1000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1110711, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 21071581, "size_in_bytes": 21071581}, "web_questions_whats_the_answer": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "web_questions_whats_the_answer", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 782052, "num_examples": 3778, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 424640, "num_examples": 2032, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1206692, "size_in_bytes": 1206692}, "anli_justified_in_saying_r3_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_justified_in_saying_r3_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 270290237, "num_examples": 301377, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3271181, "num_examples": 3600, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3259640, "num_examples": 3600, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 276821058, "size_in_bytes": 276821058}, "cos_e_v1.11_question_description_option_id": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "cos_e_v1.11_question_description_option_id", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 4570166, "num_examples": 9741, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 569078, "num_examples": 1221, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 5139244, "size_in_bytes": 5139244}, "duorc_SelfRC_extract_answer": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "duorc_SelfRC_extract_answer", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 264596370, "num_examples": 60721, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 56466046, "num_examples": 12961, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 54205467, "num_examples": 12559, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 375267883, "size_in_bytes": 375267883}, "super_glue_multirc_Would_it_be_good_to_answer_": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_multirc_Would_it_be_good_to_answer_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 86590282, "num_examples": 27243, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 15138940, "num_examples": 4848, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 29055868, "num_examples": 9693, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 130785090, "size_in_bytes": 130785090}, "glue_qqp_quora": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "glue_qqp_quora", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 246542516, "num_examples": 363846, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 27391057, "num_examples": 40430, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 266807261, "num_examples": 390965, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 540740834, "size_in_bytes": 540740834}, "squad_v2_Questions_with_Context_Without_Prompt_Keywords_unanswerable": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "squad_v2_Questions_with_Context_Without_Prompt_Keywords_unanswerable", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 231512392, "num_examples": 130319, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 22043203, "num_examples": 11873, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 253555595, "size_in_bytes": 253555595}, "ai2_arc_ARC_Easy_qa_options": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "ai2_arc_ARC_Easy_qa_options", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1396114, "num_examples": 2251, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 353209, "num_examples": 570, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1478521, "num_examples": 2376, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 3227844, "size_in_bytes": 3227844}, "imdb_Movie_Expressed_Sentiment": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "imdb_Movie_Expressed_Sentiment", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 62032778, "num_examples": 25000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 61156582, "num_examples": 25000, "dataset_name": "p3"}, "unsupervised": {"name": "unsupervised", "num_bytes": 124406277, "num_examples": 50000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 247595637, "size_in_bytes": 247595637}, "xsum_DOC_tldr": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "xsum_DOC_tldr", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 661243192, "num_examples": 204045, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 36716634, "num_examples": 11332, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 36819789, "num_examples": 11334, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 734779615, "size_in_bytes": 734779615}, "imdb_Movie_Expressed_Sentiment_2": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "imdb_Movie_Expressed_Sentiment_2", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 62632778, "num_examples": 25000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 61756582, "num_examples": 25000, "dataset_name": "p3"}, "unsupervised": {"name": "unsupervised", "num_bytes": 125606277, "num_examples": 50000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 249995637, "size_in_bytes": 249995637}, "duorc_ParaphraseRC_generate_question_by_answer": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "duorc_ParaphraseRC_generate_question_by_answer", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 254613827, "num_examples": 58752, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 56696014, "num_examples": 13111, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 58319369, "num_examples": 13449, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 369629210, "size_in_bytes": 369629210}, "common_gen_topics_from_the_sentence": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "common_gen_topics_from_the_sentence", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 16631803, "num_examples": 67389, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1033196, "num_examples": 4018, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 230711, "num_examples": 1497, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 17895710, "size_in_bytes": 17895710}, "rotten_tomatoes_Sentiment_with_choices_": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "rotten_tomatoes_Sentiment_with_choices_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3184836, "num_examples": 8530, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 398269, "num_examples": 1066, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 401046, "num_examples": 1066, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 3984151, "size_in_bytes": 3984151}, "openbookqa_main_pick_answer_with_options": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "openbookqa_main_pick_answer_with_options", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2391181, "num_examples": 4957, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 260670, "num_examples": 500, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 249012, "num_examples": 500, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 2900863, "size_in_bytes": 2900863}, "ai2_arc_ARC_Challenge_multiple_choice": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "ai2_arc_ARC_Challenge_multiple_choice", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1079889, "num_examples": 1119, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 294822, "num_examples": 299, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1153398, "num_examples": 1172, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 2528109, "size_in_bytes": 2528109}, "ropes_background_situation_middle": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "ropes_background_situation_middle", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 24028735, "num_examples": 10924, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3437740, "num_examples": 1688, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 27466475, "size_in_bytes": 27466475}, "adversarial_qa_dbert_question_context_answer": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "adversarial_qa_dbert_question_context_answer", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 16859701, "num_examples": 10000, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1646134, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 18505835, "size_in_bytes": 18505835}, "anli_must_be_true_r1_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_must_be_true_r1_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 49484859, "num_examples": 50838, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 2924586, "num_examples": 3000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2919483, "num_examples": 3000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 55328928, "size_in_bytes": 55328928}, "super_glue_cb_does_it_follow_that": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_cb_does_it_follow_that", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 208499, "num_examples": 250, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 51589, "num_examples": 56, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 228849, "num_examples": 250, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 488937, "size_in_bytes": 488937}, "quail_context_description_question_answer_text": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "quail_context_description_question_answer_text", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 44439997, "num_examples": 10246, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 9451157, "num_examples": 2164, "dataset_name": "p3"}, "challenge": {"name": "challenge", "num_bytes": 2421666, "num_examples": 556, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 56312820, "size_in_bytes": 56312820}, "duorc_ParaphraseRC_movie_director": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "duorc_ParaphraseRC_movie_director", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 313618959, "num_examples": 69524, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 70059793, "num_examples": 15591, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 71923513, "num_examples": 15857, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 455602265, "size_in_bytes": 455602265}, "super_glue_record_pick_one_option": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_record_pick_one_option", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 298946413, "num_examples": 100730, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 29021197, "num_examples": 10000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 29117405, "num_examples": 10000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 357085015, "size_in_bytes": 357085015}, "super_glue_wic_GPT_3_prompt_with_label": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_wic_GPT_3_prompt_with_label", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2119331, "num_examples": 5428, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 257912, "num_examples": 638, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 609783, "num_examples": 1400, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 2987026, "size_in_bytes": 2987026}, "trec_fine_grained_open": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "trec_fine_grained_open", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 4097097, "num_examples": 5452, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 361398, "num_examples": 500, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 4458495, "size_in_bytes": 4458495}, "trec_trec2": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "trec_trec2", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2291202, "num_examples": 5452, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 195435, "num_examples": 500, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 2486637, "size_in_bytes": 2486637}, "winogrande_winogrande_xl_fill_in_the_blank_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "winogrande_winogrande_xl_fill_in_the_blank_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 32788834, "num_examples": 80796, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1035242, "num_examples": 2534, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1447939, "num_examples": 3534, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 35272015, "size_in_bytes": 35272015}, "hellaswag_complete_first_then": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "hellaswag_complete_first_then", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 102668811, "num_examples": 39905, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 26660824, "num_examples": 10042, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 25754115, "num_examples": 10003, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 155083750, "size_in_bytes": 155083750}, "anli_claim_true_false_inconclusive_r2": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_claim_true_false_inconclusive_r2", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 46165723, "num_examples": 45460, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1016125, "num_examples": 1000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1021944, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 48203792, "size_in_bytes": 48203792}, "paws_labeled_final_context_question": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "paws_labeled_final_context_question", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 32095406, "num_examples": 49401, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 5195181, "num_examples": 8000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 5199650, "num_examples": 8000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 42490237, "size_in_bytes": 42490237}, "multi_news_distill": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "multi_news_distill", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 526482411, "num_examples": 44972, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 64826225, "num_examples": 5622, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 65237371, "num_examples": 5622, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 656546007, "size_in_bytes": 656546007}, "super_glue_cb_must_be_true": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_cb_must_be_true", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 218621, "num_examples": 250, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 53801, "num_examples": 56, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 238839, "num_examples": 250, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 511261, "size_in_bytes": 511261}, "app_reviews_convert_to_star_rating": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "app_reviews_convert_to_star_rating", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 131910501, "num_examples": 288065, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 131910501, "size_in_bytes": 131910501}, "imdb_Negation_template_for_positive_and_negative": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "imdb_Negation_template_for_positive_and_negative", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 61932778, "num_examples": 25000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 61056582, "num_examples": 25000, "dataset_name": "p3"}, "unsupervised": {"name": "unsupervised", "num_bytes": 123606277, "num_examples": 50000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 246595637, "size_in_bytes": 246595637}, "race_middle_Select_the_best_answer_no_instructions_": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "race_middle_Select_the_best_answer_no_instructions_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 61583798, "num_examples": 25421, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3492981, "num_examples": 1436, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3545510, "num_examples": 1436, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 68622289, "size_in_bytes": 68622289}, "gigaword_write_its_sentence": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "gigaword_write_its_sentence", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2313383615, "num_examples": 3803957, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 115598185, "num_examples": 189651, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1156651, "num_examples": 1951, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 2430138451, "size_in_bytes": 2430138451}, "cosmos_qa_no_prompt_text": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "cosmos_qa_no_prompt_text", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 35221450, "num_examples": 25262, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 4600625, "num_examples": 2985, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 10721065, "num_examples": 6963, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 50543140, "size_in_bytes": 50543140}, "common_gen_Given_concepts_type_1": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "common_gen_Given_concepts_type_1", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 21820756, "num_examples": 67389, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1342582, "num_examples": 4018, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 345980, "num_examples": 1497, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 23509318, "size_in_bytes": 23509318}, "winogrande_winogrande_debiased_underscore_refer_to": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "winogrande_winogrande_debiased_underscore_refer_to", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3635379, "num_examples": 9248, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 495664, "num_examples": 1267, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 693702, "num_examples": 1767, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 4824745, "size_in_bytes": 4824745}, "duorc_SelfRC_answer_question": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "duorc_SelfRC_answer_question", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 263617916, "num_examples": 60721, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 56257314, "num_examples": 12961, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 54003024, "num_examples": 12559, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 373878254, "size_in_bytes": 373878254}, "anli_always_sometimes_never_r2": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_always_sometimes_never_r2", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 45261374, "num_examples": 45460, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 997802, "num_examples": 1000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1003621, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 47262797, "size_in_bytes": 47262797}, "cosmos_qa_context_question_description_answer_text": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "cosmos_qa_context_question_description_answer_text", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 37368720, "num_examples": 25262, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 4854350, "num_examples": 2985, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 11312920, "num_examples": 6963, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 53535990, "size_in_bytes": 53535990}, "cos_e_v1.11_i_think": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "cos_e_v1.11_i_think", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 6041096, "num_examples": 9741, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 738461, "num_examples": 1221, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 6779557, "size_in_bytes": 6779557}, "piqa_finish_sentence_with_correct_choice": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "piqa_finish_sentence_with_correct_choice", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 16905752, "num_examples": 16113, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1912365, "num_examples": 1838, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3140125, "num_examples": 3084, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 21958242, "size_in_bytes": 21958242}, "anli_does_this_imply_r2": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_does_this_imply_r2", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 43331095, "num_examples": 45460, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 955469, "num_examples": 1000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 961288, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 45247852, "size_in_bytes": 45247852}, "qasc_qa_with_separated_facts_2": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "qasc_qa_with_separated_facts_2", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 7495398, "num_examples": 8134, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 863324, "num_examples": 926, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 639062, "num_examples": 920, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 8997784, "size_in_bytes": 8997784}, "anli_justified_in_saying_r1": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_justified_in_saying_r1", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 16310369, "num_examples": 16946, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 963754, "num_examples": 1000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 962053, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 18236176, "size_in_bytes": 18236176}, "trec_fine_grained_NUM_context_first": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "trec_fine_grained_NUM_context_first", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 518592, "num_examples": 896, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 62852, "num_examples": 113, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 581444, "size_in_bytes": 581444}, "super_glue_copa__why_C1_or_C2": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_copa__why_C1_or_C2", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 71409, "num_examples": 198, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 19700, "num_examples": 52, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 84198, "num_examples": 250, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 175307, "size_in_bytes": 175307}, "super_glue_multirc_correct": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_multirc_correct", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 89540458, "num_examples": 27243, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 15663463, "num_examples": 4848, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 30104472, "num_examples": 9693, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 135308393, "size_in_bytes": 135308393}, "race_middle_Read_the_article_and_answer_the_question_no_option_": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "race_middle_Read_the_article_and_answer_the_question_no_option_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 62603334, "num_examples": 25421, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3549861, "num_examples": 1436, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3602930, "num_examples": 1436, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 69756125, "size_in_bytes": 69756125}, "quail_no_prompt_text": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "quail_no_prompt_text", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 42483011, "num_examples": 10246, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 9037833, "num_examples": 2164, "dataset_name": "p3"}, "challenge": {"name": "challenge", "num_bytes": 2315470, "num_examples": 556, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 53836314, "size_in_bytes": 53836314}, "quail_context_question_description_text": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "quail_context_question_description_text", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 41220366, "num_examples": 10246, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 8769599, "num_examples": 2164, "dataset_name": "p3"}, "challenge": {"name": "challenge", "num_bytes": 2252053, "num_examples": 556, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 52242018, "size_in_bytes": 52242018}, "qasc_is_correct_2": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "qasc_is_correct_2", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3224150, "num_examples": 8134, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 366401, "num_examples": 926, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 273918, "num_examples": 920, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 3864469, "size_in_bytes": 3864469}, "anli_MNLI_crowdsource_r2": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_MNLI_crowdsource_r2", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 49982247, "num_examples": 45460, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1100127, "num_examples": 1000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1105946, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 52188320, "size_in_bytes": 52188320}, "super_glue_rte_can_we_infer": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_rte_can_we_infer", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1893518, "num_examples": 2490, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 204942, "num_examples": 277, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2280996, "num_examples": 3000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 4379456, "size_in_bytes": 4379456}, "anli_should_assume_r3_score_eval": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_should_assume_r3_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 272701253, "num_examples": 301377, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3299981, "num_examples": 3600, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3288440, "num_examples": 3600, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 279289674, "size_in_bytes": 279289674}, "common_gen_choice_in_concept_centric_sentence_generation": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "common_gen_choice_in_concept_centric_sentence_generation", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 23307812, "num_examples": 67389, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1427507, "num_examples": 4018, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 378028, "num_examples": 1497, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 25113347, "size_in_bytes": 25113347}, "piqa_pick_correct_choice_index": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "piqa_pick_correct_choice_index", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 11722443, "num_examples": 16113, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1330199, "num_examples": 1838, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2197497, "num_examples": 3084, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 15250139, "size_in_bytes": 15250139}, "paws_labeled_final_Concatenation_no_label": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "paws_labeled_final_Concatenation_no_label", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 34170324, "num_examples": 49401, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 5531181, "num_examples": 8000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 5535650, "num_examples": 8000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 45237155, "size_in_bytes": 45237155}, "trec_fine_grained_LOC_context_first": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "trec_fine_grained_LOC_context_first", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 294513, "num_examples": 835, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 26999, "num_examples": 81, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 321512, "size_in_bytes": 321512}, "cosmos_qa_context_description_question_answer_text": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "cosmos_qa_context_description_question_answer_text", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 39970706, "num_examples": 25262, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 5161805, "num_examples": 2985, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 12030109, "num_examples": 6963, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 57162620, "size_in_bytes": 57162620}, "quoref_Read_And_Extract_": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "quoref_Read_And_Extract_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 76216664, "num_examples": 19399, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 9378219, "num_examples": 2418, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 85594883, "size_in_bytes": 85594883}, "anli_should_assume_r1": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_should_assume_r1", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 16445937, "num_examples": 16946, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 971754, "num_examples": 1000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 970053, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 18387744, "size_in_bytes": 18387744}, "piqa_what_is_the_correct_ending": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "piqa_what_is_the_correct_ending", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 16212893, "num_examples": 16113, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1833331, "num_examples": 1838, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3007513, "num_examples": 3084, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 21053737, "size_in_bytes": 21053737}, "squad_v2_Questions_with_Context": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "squad_v2_Questions_with_Context", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 228499348, "num_examples": 130319, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 21788345, "num_examples": 11873, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 250287693, "size_in_bytes": 250287693}, "super_glue_wic_grammar_homework": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_wic_grammar_homework", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2374447, "num_examples": 5428, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 287898, "num_examples": 638, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 675583, "num_examples": 1400, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 3337928, "size_in_bytes": 3337928}, "quail_context_description_question_text": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "quail_context_description_question_text", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 41312580, "num_examples": 10246, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 8789075, "num_examples": 2164, "dataset_name": "p3"}, "challenge": {"name": "challenge", "num_bytes": 2257057, "num_examples": 556, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 52358712, "size_in_bytes": 52358712}, "squad_v2_Unanwerable_question": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "squad_v2_Unanwerable_question", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 223883796, "num_examples": 130319, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 21366189, "num_examples": 11873, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 245249985, "size_in_bytes": 245249985}, "super_glue_copa_i_am_hesitating_score_eval": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_copa_i_am_hesitating_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 361503, "num_examples": 800, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 91413, "num_examples": 200, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 445014, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 897930, "size_in_bytes": 897930}, "piqa_no_prompt_needed": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "piqa_no_prompt_needed", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 4712855, "num_examples": 16113, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 534592, "num_examples": 1838, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 876542, "num_examples": 3084, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 6123989, "size_in_bytes": 6123989}, "paws_labeled_final_Meaning_no_label": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "paws_labeled_final_Meaning_no_label", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 35553552, "num_examples": 49401, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 5755181, "num_examples": 8000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 5759650, "num_examples": 8000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 47068383, "size_in_bytes": 47068383}, "super_glue_boolq_yes_no_question": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_boolq_yes_no_question", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 13240368, "num_examples": 9427, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 4541081, "num_examples": 3270, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 4625370, "num_examples": 3245, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 22406819, "size_in_bytes": 22406819}, "duorc_ParaphraseRC_answer_question": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "duorc_ParaphraseRC_answer_question", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 307403904, "num_examples": 69524, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 68663732, "num_examples": 15591, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 70505652, "num_examples": 15857, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 446573288, "size_in_bytes": 446573288}, "common_gen_Example_prompt": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "common_gen_Example_prompt", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 29031379, "num_examples": 67389, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1772508, "num_examples": 4018, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 506159, "num_examples": 1497, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 31310046, "size_in_bytes": 31310046}, "sciq_Multiple_Choice": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "sciq_Multiple_Choice", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 15429556, "num_examples": 11679, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1311775, "num_examples": 1000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1331599, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 18072930, "size_in_bytes": 18072930}, "ai2_arc_ARC_Challenge_pick_false_options": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "ai2_arc_ARC_Challenge_pick_false_options", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 965418, "num_examples": 1119, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 263187, "num_examples": 299, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1032972, "num_examples": 1172, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 2261577, "size_in_bytes": 2261577}, "xsum_read_below_DOC_write_abstract": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "xsum_read_below_DOC_write_abstract", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 692870167, "num_examples": 204045, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 38473094, "num_examples": 11332, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 38576559, "num_examples": 11334, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 769919820, "size_in_bytes": 769919820}, "sciq_Multiple_Choice_Closed_Book_": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "sciq_Multiple_Choice_Closed_Book_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 5013047, "num_examples": 11679, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 435227, "num_examples": 1000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 424232, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 5872506, "size_in_bytes": 5872506}, "super_glue_record_News_article_continuation_choices_": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_record_News_article_continuation_choices_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 400385073, "num_examples": 100730, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 39459985, "num_examples": 10000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 37063565, "num_examples": 10000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 476908623, "size_in_bytes": 476908623}, "winogrande_winogrande_xl_underscore_refer_to_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "winogrande_winogrande_xl_underscore_refer_to_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 30526546, "num_examples": 80796, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 964290, "num_examples": 2534, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1348987, "num_examples": 3534, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 32839823, "size_in_bytes": 32839823}, "super_glue_wsc.fixed_replaced_with": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_wsc.fixed_replaced_with", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 263050, "num_examples": 554, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 58571, "num_examples": 104, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 90108, "num_examples": 146, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 411729, "size_in_bytes": 411729}, "wiqa_effect_with_label_answer": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "wiqa_effect_with_label_answer", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 29887730, "num_examples": 29808, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 6603907, "num_examples": 6894, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2736765, "num_examples": 3003, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 39228402, "size_in_bytes": 39228402}, "race_high_Read_the_article_and_answer_the_question_no_option_": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "race_high_Read_the_article_and_answer_the_question_no_option_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 234697881, "num_examples": 62445, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 12871890, "num_examples": 3451, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 13001530, "num_examples": 3498, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 260571301, "size_in_bytes": 260571301}, "dbpedia_14_given_a_list_of_category_what_does_the_title_belong_to": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "dbpedia_14_given_a_list_of_category_what_does_the_title_belong_to", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 409925208, "num_examples": 560000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 51249265, "num_examples": 70000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 461174473, "size_in_bytes": 461174473}, "squad_v2_Topic_Prediction_Context_with_randomized_prompt_options": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "squad_v2_Topic_Prediction_Context_with_randomized_prompt_options", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 202172668, "num_examples": 130319, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 19361094, "num_examples": 11873, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 221533762, "size_in_bytes": 221533762}, "wiqa_what_is_the_final_step_of_the_following_process": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "wiqa_what_is_the_final_step_of_the_following_process", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 22534800, "num_examples": 29808, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 4960072, "num_examples": 6894, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2018945, "num_examples": 3003, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 29513817, "size_in_bytes": 29513817}, "anli_based_on_the_previous_passage_r2": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_based_on_the_previous_passage_r2", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 44513055, "num_examples": 45460, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 981469, "num_examples": 1000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 987288, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 46481812, "size_in_bytes": 46481812}, "paws_labeled_final_task_description_no_label": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "paws_labeled_final_task_description_no_label", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 34417329, "num_examples": 49401, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 5571181, "num_examples": 8000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 5575650, "num_examples": 8000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 45564160, "size_in_bytes": 45564160}, "super_glue_copa_cause_effect_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_copa_cause_effect_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 285503, "num_examples": 800, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 72413, "num_examples": 200, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 350014, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 707930, "size_in_bytes": 707930}, "quartz_answer_question_below": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "quartz_answer_question_below", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1576923, "num_examples": 2696, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 232380, "num_examples": 384, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 462225, "num_examples": 784, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 2271528, "size_in_bytes": 2271528}, "hellaswag_Topic_without_the_ending_answer": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "hellaswag_Topic_without_the_ending_answer", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 22237306, "num_examples": 39905, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 5743926, "num_examples": 10042, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 5617256, "num_examples": 10003, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 33598488, "size_in_bytes": 33598488}, "super_glue_wsc.fixed_does_the_pronoun_refer_to_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_wsc.fixed_does_the_pronoun_refer_to_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 498663, "num_examples": 1108, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 111824, "num_examples": 208, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 163763, "num_examples": 292, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 774250, "size_in_bytes": 774250}, "super_glue_rte_does_this_imply_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_rte_does_this_imply_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3817503, "num_examples": 4980, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 413235, "num_examples": 554, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 4409714, "num_examples": 6000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 8640452, "size_in_bytes": 8640452}, "cnn_dailymail_3.0.0_sum_in_brief": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "cnn_dailymail_3.0.0_sum_in_brief", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1318276502, "num_examples": 287113, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 61746866, "num_examples": 13368, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 52846750, "num_examples": 11490, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1432870118, "size_in_bytes": 1432870118}, "super_glue_wic_affirmation_true_or_false_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_wic_affirmation_true_or_false_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 4533123, "num_examples": 10856, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 550408, "num_examples": 1276, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1207288, "num_examples": 2800, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 6290819, "size_in_bytes": 6290819}, "wiki_hop_original_explain_relation": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "wiki_hop_original_explain_relation", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 620991153, "num_examples": 43738, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 77941974, "num_examples": 5129, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 698933127, "size_in_bytes": 698933127}, "amazon_polarity_would_you_buy": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "amazon_polarity_would_you_buy", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 4541333861, "num_examples": 3600000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 504371845, "num_examples": 400000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 5045705706, "size_in_bytes": 5045705706}, "ai2_arc_ARC_Easy_pick_false_options": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "ai2_arc_ARC_Easy_pick_false_options", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1702845, "num_examples": 2251, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 431965, "num_examples": 570, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1803239, "num_examples": 2376, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 3938049, "size_in_bytes": 3938049}, "anli_can_we_infer_r2": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_can_we_infer_r2", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 43058335, "num_examples": 45460, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 949469, "num_examples": 1000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 955288, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 44963092, "size_in_bytes": 44963092}, "trec_fine_grained_HUM": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "trec_fine_grained_HUM", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 405437, "num_examples": 1223, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 19687, "num_examples": 65, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 425124, "size_in_bytes": 425124}, "wiki_qa_Topic_Prediction_Question_and_Answer_Pair": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "wiki_qa_Topic_Prediction_Question_and_Answer_Pair", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 637120, "num_examples": 1040, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 85426, "num_examples": 140, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 176583, "num_examples": 293, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 899129, "size_in_bytes": 899129}, "qasc_qa_with_separated_facts_3": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "qasc_qa_with_separated_facts_3", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 4698932, "num_examples": 8134, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 533970, "num_examples": 926, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 321119, "num_examples": 920, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 5554021, "size_in_bytes": 5554021}, "wiki_qa_Generate_Question_from_Topic": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "wiki_qa_Generate_Question_from_Topic", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 600360, "num_examples": 1040, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 80510, "num_examples": 140, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 166307, "num_examples": 293, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 847177, "size_in_bytes": 847177}, "anli_always_sometimes_never_r3_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_always_sometimes_never_r3_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 281541645, "num_examples": 301377, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3405581, "num_examples": 3600, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3394040, "num_examples": 3600, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 288341266, "size_in_bytes": 288341266}, "kilt_tasks_hotpotqa_combining_facts": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "kilt_tasks_hotpotqa_combining_facts", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 28006164, "num_examples": 88869, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1631277, "num_examples": 5600, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 29637441, "size_in_bytes": 29637441}, "super_glue_cb_should_assume_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_cb_should_assume_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 637666, "num_examples": 750, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 157282, "num_examples": 168, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 674809, "num_examples": 750, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1469757, "size_in_bytes": 1469757}, "wiki_hop_original_choose_best_object_affirmative_1": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "wiki_hop_original_choose_best_object_affirmative_1", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 663150599, "num_examples": 43738, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 83041908, "num_examples": 5129, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 746192507, "size_in_bytes": 746192507}, "quail_context_question_answer_description_text": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "quail_context_question_answer_description_text", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 43456381, "num_examples": 10246, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 9243413, "num_examples": 2164, "dataset_name": "p3"}, "challenge": {"name": "challenge", "num_bytes": 2368290, "num_examples": 556, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 55068084, "size_in_bytes": 55068084}, "xsum_DOC_boils_down_to_simple_idea_that": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "xsum_DOC_boils_down_to_simple_idea_that", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 671037352, "num_examples": 204045, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 37260570, "num_examples": 11332, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 37363821, "num_examples": 11334, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 745661743, "size_in_bytes": 745661743}, "gigaword_make_a_title": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "gigaword_make_a_title", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2187853018, "num_examples": 3803957, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 109339702, "num_examples": 189651, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1092268, "num_examples": 1951, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 2298284988, "size_in_bytes": 2298284988}, "imdb_Reviewer_Enjoyment_Yes_No": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "imdb_Reviewer_Enjoyment_Yes_No", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 61545278, "num_examples": 25000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 60669082, "num_examples": 25000, "dataset_name": "p3"}, "unsupervised": {"name": "unsupervised", "num_bytes": 123456277, "num_examples": 50000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 245670637, "size_in_bytes": 245670637}, "quartz_paragraph_question_plain_concat": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "quartz_paragraph_question_plain_concat", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1350459, "num_examples": 2696, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 200124, "num_examples": 384, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 396369, "num_examples": 784, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1946952, "size_in_bytes": 1946952}, "quoref_Guess_Title_For_Context": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "quoref_Guess_Title_For_Context", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 73151061, "num_examples": 19399, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 9007532, "num_examples": 2418, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 82158593, "size_in_bytes": 82158593}, "amazon_polarity_negative_or_positive_tone": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "amazon_polarity_negative_or_positive_tone", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3983333861, "num_examples": 3600000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 442371845, "num_examples": 400000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 4425705706, "size_in_bytes": 4425705706}, "super_glue_multirc_is_a_correct_answer_": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_multirc_is_a_correct_answer_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 87897946, "num_examples": 27243, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 15371644, "num_examples": 4848, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 29521132, "num_examples": 9693, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 132790722, "size_in_bytes": 132790722}, "openbookqa_main_pick_using_id": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "openbookqa_main_pick_using_id", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2231328, "num_examples": 4957, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 235199, "num_examples": 500, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 228651, "num_examples": 500, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 2695178, "size_in_bytes": 2695178}, "trivia_qa_unfiltered_question_with_instruction": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "trivia_qa_unfiltered_question_with_instruction", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 23660719, "num_examples": 87622, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3054769, "num_examples": 11313, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2946051, "num_examples": 10832, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 29661539, "size_in_bytes": 29661539}, "wiqa_effect_with_string_answer": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "wiqa_effect_with_string_answer", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 32719490, "num_examples": 29808, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 7258837, "num_examples": 6894, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3024336, "num_examples": 3003, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 43002663, "size_in_bytes": 43002663}, "quail_description_context_question_answer_id": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "quail_description_context_question_answer_id", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 43146059, "num_examples": 10246, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 9175765, "num_examples": 2164, "dataset_name": "p3"}, "challenge": {"name": "challenge", "num_bytes": 2358963, "num_examples": 556, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 54680787, "size_in_bytes": 54680787}, "samsum_Generate_a_summary_for_this_dialogue": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "samsum_Generate_a_summary_for_this_dialogue", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 20847971, "num_examples": 14732, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1132424, "num_examples": 818, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1178391, "num_examples": 819, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 23158786, "size_in_bytes": 23158786}, "yelp_review_full_format_star": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "yelp_review_full_format_star", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1014090422, "num_examples": 650000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 78069036, "num_examples": 50000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1092159458, "size_in_bytes": 1092159458}, "super_glue_copa_plausible_alternatives_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_copa_plausible_alternatives_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 328695, "num_examples": 800, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 83221, "num_examples": 200, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 404014, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 815930, "size_in_bytes": 815930}, "race_middle_Write_a_multi_choice_question_for_the_following_article": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "race_middle_Write_a_multi_choice_question_for_the_following_article", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 64990188, "num_examples": 25421, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3685397, "num_examples": 1436, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3737926, "num_examples": 1436, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 72413511, "size_in_bytes": 72413511}, "hellaswag_if_begins_how_continues": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "hellaswag_if_begins_how_continues", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 74842549, "num_examples": 39905, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 19375041, "num_examples": 10042, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 18809529, "num_examples": 10003, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 113027119, "size_in_bytes": 113027119}, "glue_mrpc_want_to_know": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "glue_mrpc_want_to_know", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2464765, "num_examples": 3668, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 274990, "num_examples": 408, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1155104, "num_examples": 1725, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 3894859, "size_in_bytes": 3894859}, "squad_v2_Trivia": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "squad_v2_Trivia", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 15357501, "num_examples": 86821, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1073362, "num_examples": 5928, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 16430863, "size_in_bytes": 16430863}, "trec_fine_grained_LOC": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "trec_fine_grained_LOC", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 293678, "num_examples": 835, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 26918, "num_examples": 81, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 320596, "size_in_bytes": 320596}, "adversarial_qa_droberta_tell_what_it_is": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "adversarial_qa_droberta_tell_what_it_is", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 17571853, "num_examples": 10000, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1747059, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 19318912, "size_in_bytes": 19318912}, "cnn_dailymail_3.0.0_write_an_outline": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "cnn_dailymail_3.0.0_write_an_outline", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1341819768, "num_examples": 287113, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 62843042, "num_examples": 13368, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 53788930, "num_examples": 11490, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1458451740, "size_in_bytes": 1458451740}, "adversarial_qa_dbidaf_question_context_answer": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "adversarial_qa_dbidaf_question_context_answer", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 16821521, "num_examples": 10000, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1652441, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 18473962, "size_in_bytes": 18473962}, "race_middle_Is_this_the_right_answer": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "race_middle_Is_this_the_right_answer", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 59522574, "num_examples": 25421, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3374975, "num_examples": 1436, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3426289, "num_examples": 1436, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 66323838, "size_in_bytes": 66323838}, "quoref_Answer_Question_Given_Context": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "quoref_Answer_Question_Given_Context", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 75906514, "num_examples": 19399, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 9339531, "num_examples": 2418, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 85246045, "size_in_bytes": 85246045}, "ropes_prompt_bottom_hint_beginning": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "ropes_prompt_bottom_hint_beginning", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 24170747, "num_examples": 10924, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3459684, "num_examples": 1688, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 27630431, "size_in_bytes": 27630431}, "super_glue_record_GPT_3_style_with_labels_continuation_choices_": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_record_GPT_3_style_with_labels_continuation_choices_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 394006387, "num_examples": 100730, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 38818779, "num_examples": 10000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 36318959, "num_examples": 10000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 469144125, "size_in_bytes": 469144125}, "wiqa_what_might_be_the_first_step_of_the_process": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "wiqa_what_might_be_the_first_step_of_the_process", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 22471241, "num_examples": 29808, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 4941673, "num_examples": 6894, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2012356, "num_examples": 3003, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 29425270, "size_in_bytes": 29425270}, "super_glue_wic_similar_sense": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_wic_similar_sense", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1316927, "num_examples": 5428, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 162952, "num_examples": 638, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 401691, "num_examples": 1400, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1881570, "size_in_bytes": 1881570}, "duorc_SelfRC_question_answering": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "duorc_SelfRC_question_answering", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 259527231, "num_examples": 60721, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 55383000, "num_examples": 12961, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 53157711, "num_examples": 12559, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 368067942, "size_in_bytes": 368067942}, "anli_justified_in_saying_r2": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_justified_in_saying_r2", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 43149255, "num_examples": 45460, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 951469, "num_examples": 1000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 957288, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 45058012, "size_in_bytes": 45058012}, "super_glue_record_New_highlight_continuation_choices_": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_record_New_highlight_continuation_choices_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 398639617, "num_examples": 100730, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 39278867, "num_examples": 10000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 36778959, "num_examples": 10000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 474697443, "size_in_bytes": 474697443}, "super_glue_wic_question_context_meaning_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_wic_question_context_meaning_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3556083, "num_examples": 10856, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 435568, "num_examples": 1276, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 955288, "num_examples": 2800, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 4946939, "size_in_bytes": 4946939}, "super_glue_copa_C1_or_C2_premise_so_because_": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_copa_C1_or_C2_premise_so_because_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 145036, "num_examples": 400, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 36955, "num_examples": 100, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 168649, "num_examples": 500, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 350640, "size_in_bytes": 350640}, "super_glue_cb_justified_in_saying": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_cb_justified_in_saying", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 212871, "num_examples": 250, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 52513, "num_examples": 56, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 233089, "num_examples": 250, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 498473, "size_in_bytes": 498473}, "wiki_bio_guess_person": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "wiki_bio_guess_person", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 709583568, "num_examples": 582639, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 88627917, "num_examples": 72829, "dataset_name": "p3"}, "val": {"name": "val", "num_bytes": 88793275, "num_examples": 72831, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 887004760, "size_in_bytes": 887004760}, "super_glue_wic_GPT_3_prompt_with_label_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_wic_GPT_3_prompt_with_label_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 4229155, "num_examples": 10856, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 514680, "num_examples": 1276, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1128888, "num_examples": 2800, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 5872723, "size_in_bytes": 5872723}, "winogrande_winogrande_debiased_does_underscore_refer_to_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "winogrande_winogrande_debiased_does_underscore_refer_to_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 6830364, "num_examples": 18496, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 931348, "num_examples": 2534, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1303045, "num_examples": 3534, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 9064757, "size_in_bytes": 9064757}, "anli_guaranteed_true_r2": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_guaranteed_true_r2", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 43376555, "num_examples": 45460, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 956469, "num_examples": 1000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 962288, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 45295312, "size_in_bytes": 45295312}, "anli_consider_always_sometimes_never_r1_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_consider_always_sometimes_never_r1_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 51258491, "num_examples": 50838, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3029134, "num_examples": 3000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3023575, "num_examples": 3000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 57311200, "size_in_bytes": 57311200}, "super_glue_cb_always_sometimes_never_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_cb_always_sometimes_never_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 659666, "num_examples": 750, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 162210, "num_examples": 168, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 696809, "num_examples": 750, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1518685, "size_in_bytes": 1518685}, "super_glue_record_corrupted": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_record_corrupted", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 272131390, "num_examples": 100730, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 26559269, "num_examples": 10000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 26683143, "num_examples": 10000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 325373802, "size_in_bytes": 325373802}, "anli_justified_in_saying_r3": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_justified_in_saying_r3", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 91310434, "num_examples": 100459, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1104669, "num_examples": 1200, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1100822, "num_examples": 1200, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 93515925, "size_in_bytes": 93515925}, "sciq_Direct_Question_Closed_Book_": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "sciq_Direct_Question_Closed_Book_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3203809, "num_examples": 11679, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 278912, "num_examples": 1000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 272156, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 3754877, "size_in_bytes": 3754877}, "ai2_arc_ARC_Challenge_pick_the_most_correct_option": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "ai2_arc_ARC_Challenge_pick_the_most_correct_option", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 812532, "num_examples": 1119, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 222005, "num_examples": 299, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 868228, "num_examples": 1172, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1902765, "size_in_bytes": 1902765}, "quoref_Context_Contains_Answer": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "quoref_Context_Contains_Answer", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 76410241, "num_examples": 19399, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 9402229, "num_examples": 2418, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 85812470, "size_in_bytes": 85812470}, "glue_qqp_same_thing": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "glue_qqp_same_thing", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 138151512, "num_examples": 363846, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 15346729, "num_examples": 40430, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 150347231, "num_examples": 390965, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 303845472, "size_in_bytes": 303845472}, "glue_qqp_duplicate_or_not": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "glue_qqp_duplicate_or_not", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 166116094, "num_examples": 363846, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 18454344, "num_examples": 40430, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 178134020, "num_examples": 390965, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 362704458, "size_in_bytes": 362704458}, "dbpedia_14_given_a_choice_of_categories_": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "dbpedia_14_given_a_choice_of_categories_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 719437863, "num_examples": 560000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 89954836, "num_examples": 70000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 809392699, "size_in_bytes": 809392699}, "rotten_tomatoes_Reviewer_Enjoyment": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "rotten_tomatoes_Reviewer_Enjoyment", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3619866, "num_examples": 8530, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 452635, "num_examples": 1066, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 455412, "num_examples": 1066, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 4527913, "size_in_bytes": 4527913}, "winogrande_winogrande_xl_fill_in_the_blank": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "winogrande_winogrande_xl_fill_in_the_blank", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 16835137, "num_examples": 40398, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 531140, "num_examples": 1267, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 743178, "num_examples": 1767, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 18109455, "size_in_bytes": 18109455}, "super_glue_wsc.fixed_GPT_3_Style": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_wsc.fixed_GPT_3_Style", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 264774, "num_examples": 554, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 58811, "num_examples": 104, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 90528, "num_examples": 146, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 414113, "size_in_bytes": 414113}, "squad_v2_Questions_with_Context_unanswerable": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "squad_v2_Questions_with_Context_unanswerable", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 244112502, "num_examples": 130319, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 23192990, "num_examples": 11873, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 267305492, "size_in_bytes": 267305492}, "common_gen_Given_concepts_type_2": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "common_gen_Given_concepts_type_2", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 23168536, "num_examples": 67389, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1422942, "num_examples": 4018, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 375920, "num_examples": 1497, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 24967398, "size_in_bytes": 24967398}, "cnn_dailymail_3.0.0_generate_story": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "cnn_dailymail_3.0.0_generate_story", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1323444536, "num_examples": 287113, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 61987490, "num_examples": 13368, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 53053570, "num_examples": 11490, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1438485596, "size_in_bytes": 1438485596}, "openbookqa_main_only_options": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "openbookqa_main_only_options", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2044191, "num_examples": 4957, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 225670, "num_examples": 500, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 214012, "num_examples": 500, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 2483873, "size_in_bytes": 2483873}, "wiki_hop_original_choose_best_object_interrogative_2": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "wiki_hop_original_choose_best_object_interrogative_2", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 658601847, "num_examples": 43738, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 82508492, "num_examples": 5129, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 741110339, "size_in_bytes": 741110339}, "anli_based_on_the_previous_passage_r2_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_based_on_the_previous_passage_r2_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 132005818, "num_examples": 136380, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 2911731, "num_examples": 3000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2929188, "num_examples": 3000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 137846737, "size_in_bytes": 137846737}, "anli_claim_true_false_inconclusive_r3_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_claim_true_false_inconclusive_r3_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 286765513, "num_examples": 301377, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3467981, "num_examples": 3600, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3456440, "num_examples": 3600, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 293689934, "size_in_bytes": 293689934}, "winogrande_winogrande_debiased_stand_for_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "winogrande_winogrande_debiased_stand_for_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 6904348, "num_examples": 18496, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 941484, "num_examples": 2534, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1317181, "num_examples": 3534, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 9163013, "size_in_bytes": 9163013}, "dbpedia_14_pick_one_category_for_the_following_text": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "dbpedia_14_pick_one_category_for_the_following_text", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 717757851, "num_examples": 560000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 89744836, "num_examples": 70000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 807502687, "size_in_bytes": 807502687}, "super_glue_rte_does_this_imply": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_rte_does_this_imply", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1910948, "num_examples": 2490, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 206881, "num_examples": 277, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2301996, "num_examples": 3000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 4419825, "size_in_bytes": 4419825}, "anli_consider_always_sometimes_never_r2": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_consider_always_sometimes_never_r2", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 46190678, "num_examples": 45460, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1018046, "num_examples": 1000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1023937, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 48232661, "size_in_bytes": 48232661}, "super_glue_record_the_placeholder_refers_to_": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_record_the_placeholder_refers_to_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 258634203, "num_examples": 100730, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 25218836, "num_examples": 10000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 25343143, "num_examples": 10000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 309196182, "size_in_bytes": 309196182}, "quoref_Given_Context_Answer_Question": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "quoref_Given_Context_Answer_Question", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 75847738, "num_examples": 19399, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 9331940, "num_examples": 2418, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 85179678, "size_in_bytes": 85179678}, "cos_e_v1.11_generate_explanation_given_text": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "cos_e_v1.11_generate_explanation_given_text", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 4677356, "num_examples": 9741, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 567521, "num_examples": 1221, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 5244877, "size_in_bytes": 5244877}, "dream_generate_last_utterance": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "dream_generate_last_utterance", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 8125896, "num_examples": 6116, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 2659736, "num_examples": 2040, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2660185, "num_examples": 2041, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 13445817, "size_in_bytes": 13445817}, "wiki_qa_found_on_google": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "wiki_qa_found_on_google", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 11401719, "num_examples": 20360, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1516487, "num_examples": 2733, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3449268, "num_examples": 6165, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 16367474, "size_in_bytes": 16367474}, "quartz_use_info_from_paragraph_question": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "quartz_use_info_from_paragraph_question", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1752163, "num_examples": 2696, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 257340, "num_examples": 384, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 513185, "num_examples": 784, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 2522688, "size_in_bytes": 2522688}, "paws_labeled_final_Rewrite": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "paws_labeled_final_Rewrite", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 36195765, "num_examples": 49401, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 5859181, "num_examples": 8000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 5863650, "num_examples": 8000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 47918596, "size_in_bytes": 47918596}, "samsum_Sum_up_the_following_dialogue": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "samsum_Sum_up_the_following_dialogue", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 20582795, "num_examples": 14732, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1117700, "num_examples": 818, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1163649, "num_examples": 819, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 22864144, "size_in_bytes": 22864144}, "super_glue_wsc.fixed_the_pronoun_refers_to": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_wsc.fixed_the_pronoun_refers_to", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 253874, "num_examples": 554, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 56871, "num_examples": 104, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 86732, "num_examples": 146, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 397477, "size_in_bytes": 397477}, "app_reviews_convert_to_rating": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "app_reviews_convert_to_rating", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 109715170, "num_examples": 288065, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 109715170, "size_in_bytes": 109715170}, "super_glue_copa__why_C1_or_C2_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_copa__why_C1_or_C2_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 122838, "num_examples": 396, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 33812, "num_examples": 104, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 152024, "num_examples": 500, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 308674, "size_in_bytes": 308674}, "quail_context_question_description_answer_id": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "quail_context_question_description_answer_id", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 42070229, "num_examples": 10246, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 8948545, "num_examples": 2164, "dataset_name": "p3"}, "challenge": {"name": "challenge", "num_bytes": 2300583, "num_examples": 556, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 53319357, "size_in_bytes": 53319357}, "hellaswag_Appropriate_continuation_Yes_or_No": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "hellaswag_Appropriate_continuation_Yes_or_No", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 36636491, "num_examples": 39905, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 9457760, "num_examples": 10042, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 9208016, "num_examples": 10003, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 55302267, "size_in_bytes": 55302267}, "super_glue_record_Summary_first_continuation_choices_": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_record_Summary_first_continuation_choices_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 389936771, "num_examples": 100730, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 38422446, "num_examples": 10000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 36024859, "num_examples": 10000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 464384076, "size_in_bytes": 464384076}, "trec_fine_grained_ABBR_context_first": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "trec_fine_grained_ABBR_context_first", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 29171, "num_examples": 86, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2905, "num_examples": 9, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 32076, "size_in_bytes": 32076}, "amazon_polarity_user_satisfied": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "amazon_polarity_user_satisfied", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 4269533861, "num_examples": 3600000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 474171845, "num_examples": 400000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 4743705706, "size_in_bytes": 4743705706}, "winogrande_winogrande_xl_Replace_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "winogrande_winogrande_xl_Replace_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 32627242, "num_examples": 80796, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1030174, "num_examples": 2534, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1440871, "num_examples": 3534, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 35098287, "size_in_bytes": 35098287}, "winogrande_winogrande_xl_stand_for_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "winogrande_winogrande_xl_stand_for_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 29799382, "num_examples": 80796, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 941484, "num_examples": 2534, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1317181, "num_examples": 3534, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 32058047, "size_in_bytes": 32058047}, "super_glue_rte_MNLI_crowdsource_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_rte_MNLI_crowdsource_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 4300563, "num_examples": 4980, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 466973, "num_examples": 554, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 4991714, "num_examples": 6000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 9759250, "size_in_bytes": 9759250}, "anli_consider_always_sometimes_never_r2_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_consider_always_sometimes_never_r2_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 135657470, "num_examples": 136380, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 2991463, "num_examples": 3000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3009136, "num_examples": 3000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 141658069, "size_in_bytes": 141658069}, "super_glue_wsc.fixed_does_the_pronoun_refer_to": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_wsc.fixed_does_the_pronoun_refer_to", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 249812, "num_examples": 554, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 56003, "num_examples": 104, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 86622, "num_examples": 146, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 392437, "size_in_bytes": 392437}, "adversarial_qa_dbert_answer_the_following_q": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "adversarial_qa_dbert_answer_the_following_q", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 18313769, "num_examples": 10000, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1791050, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 20104819, "size_in_bytes": 20104819}, "race_high_Select_the_best_answer_no_instructions_": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "race_high_Select_the_best_answer_no_instructions_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 233109474, "num_examples": 62445, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 12781320, "num_examples": 3451, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 12912864, "num_examples": 3498, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 258803658, "size_in_bytes": 258803658}, "trec_fine_grained_NUM": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "trec_fine_grained_NUM", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 517696, "num_examples": 896, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 62739, "num_examples": 113, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 580435, "size_in_bytes": 580435}, "adversarial_qa_droberta_answer_the_following_q": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "adversarial_qa_droberta_answer_the_following_q", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 18084409, "num_examples": 10000, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1798391, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 19882800, "size_in_bytes": 19882800}, "race_high_Is_this_the_right_answer": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "race_high_Is_this_the_right_answer", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 224067418, "num_examples": 62445, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 12288447, "num_examples": 3451, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 12402621, "num_examples": 3498, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 248758486, "size_in_bytes": 248758486}, "quarel_do_not_use": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "quarel_do_not_use", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1331500, "num_examples": 1941, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 192511, "num_examples": 278, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 382045, "num_examples": 552, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1906056, "size_in_bytes": 1906056}, "race_high_Taking_a_test": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "race_high_Taking_a_test", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 247097154, "num_examples": 62445, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 13554344, "num_examples": 3451, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 13696416, "num_examples": 3498, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 274347914, "size_in_bytes": 274347914}, "amazon_polarity_User_recommend_this_product": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "amazon_polarity_User_recommend_this_product", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3647240562, "num_examples": 3600000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 405020024, "num_examples": 400000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 4052260586, "size_in_bytes": 4052260586}, "quoref_What_Is_The_Answer": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "quoref_What_Is_The_Answer", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 76274516, "num_examples": 19399, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 9385089, "num_examples": 2418, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 85659605, "size_in_bytes": 85659605}, "super_glue_cb_GPT_3_style": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_cb_GPT_3_style", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 206769, "num_examples": 250, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 51222, "num_examples": 56, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 225599, "num_examples": 250, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 483590, "size_in_bytes": 483590}, "quarel_testing_students": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "quarel_testing_students", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1380025, "num_examples": 1941, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 199453, "num_examples": 278, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 395833, "num_examples": 552, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1975311, "size_in_bytes": 1975311}, "super_glue_record_choose_between": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_record_choose_between", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 303576652, "num_examples": 100730, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 29481868, "num_examples": 10000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 29577405, "num_examples": 10000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 362635925, "size_in_bytes": 362635925}, "kilt_tasks_hotpotqa_straighforward_qa": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "kilt_tasks_hotpotqa_straighforward_qa", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 23118369, "num_examples": 88869, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1323277, "num_examples": 5600, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 24441646, "size_in_bytes": 24441646}, "race_high_Write_a_multi_choice_question_options_given_": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "race_high_Write_a_multi_choice_question_options_given_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 249781061, "num_examples": 62445, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 13701402, "num_examples": 3451, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 13849598, "num_examples": 3498, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 277332061, "size_in_bytes": 277332061}, "duorc_SelfRC_generate_question": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "duorc_SelfRC_generate_question", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 247616070, "num_examples": 60721, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 52851327, "num_examples": 12961, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 50703157, "num_examples": 12559, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 351170554, "size_in_bytes": 351170554}, "super_glue_wsc.fixed_does_p_stand_for": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_wsc.fixed_does_p_stand_for", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 217126, "num_examples": 554, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 49867, "num_examples": 104, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 78008, "num_examples": 146, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 345001, "size_in_bytes": 345001}, "anli_take_the_following_as_truth_r1": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_take_the_following_as_truth_r1", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 18052829, "num_examples": 16946, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1065410, "num_examples": 1000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1063709, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 20181948, "size_in_bytes": 20181948}, "super_glue_boolq_valid_binary": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_boolq_valid_binary", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 12710278, "num_examples": 9427, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 4357251, "num_examples": 3270, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 4427425, "num_examples": 3245, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 21494954, "size_in_bytes": 21494954}, "super_glue_copa_more_likely": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_copa_more_likely", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 195651, "num_examples": 400, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 49595, "num_examples": 100, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 231857, "num_examples": 500, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 477103, "size_in_bytes": 477103}, "super_glue_rte_justified_in_saying": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_rte_justified_in_saying", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1898498, "num_examples": 2490, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 205496, "num_examples": 277, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2286996, "num_examples": 3000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 4390990, "size_in_bytes": 4390990}, "winogrande_winogrande_xl_stand_for": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "winogrande_winogrande_xl_stand_for", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 15259615, "num_examples": 40398, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 481727, "num_examples": 1267, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 674265, "num_examples": 1767, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 16415607, "size_in_bytes": 16415607}, "wiqa_what_is_the_missing_first_step": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "wiqa_what_is_the_missing_first_step", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 22948169, "num_examples": 29808, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 5051977, "num_examples": 6894, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2060404, "num_examples": 3003, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 30060550, "size_in_bytes": 30060550}, "trec_fine_grained_ABBR": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "trec_fine_grained_ABBR", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 29085, "num_examples": 86, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2896, "num_examples": 9, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 31981, "size_in_bytes": 31981}, "quail_description_context_question_answer_text": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "quail_description_context_question_answer_text", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 44460489, "num_examples": 10246, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 9455485, "num_examples": 2164, "dataset_name": "p3"}, "challenge": {"name": "challenge", "num_bytes": 2422778, "num_examples": 556, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 56338752, "size_in_bytes": 56338752}, "trec_trec1": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "trec_trec1", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2149450, "num_examples": 5452, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 182435, "num_examples": 500, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 2331885, "size_in_bytes": 2331885}, "app_reviews_categorize_rating_using_review": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "app_reviews_categorize_rating_using_review", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 129262239, "num_examples": 288065, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 129262239, "size_in_bytes": 129262239}, "wiki_hop_original_generate_object": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "wiki_hop_original_generate_object", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 621316801, "num_examples": 43738, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 77980644, "num_examples": 5129, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 699297445, "size_in_bytes": 699297445}, "cosmos_qa_description_context_question_answer_id": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "cosmos_qa_description_context_question_answer_id", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 34668517, "num_examples": 25262, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 4386814, "num_examples": 2985, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 10260623, "num_examples": 6963, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 49315954, "size_in_bytes": 49315954}, "anli_does_this_imply_r1_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_does_this_imply_r1_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 48569775, "num_examples": 50838, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 2870586, "num_examples": 3000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2865483, "num_examples": 3000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 54305844, "size_in_bytes": 54305844}, "adversarial_qa_dbert_tell_what_it_is": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "adversarial_qa_dbert_tell_what_it_is", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 17793293, "num_examples": 10000, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1739434, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 19532727, "size_in_bytes": 19532727}, "super_glue_record_Add_sentence_after_after_continuation_choices_": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_record_Add_sentence_after_after_continuation_choices_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 405852111, "num_examples": 100730, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 40002393, "num_examples": 10000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 37604859, "num_examples": 10000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 483459363, "size_in_bytes": 483459363}, "super_glue_wsc.fixed_p_is_are_r_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_wsc.fixed_p_is_are_r_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 473337, "num_examples": 1108, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 107086, "num_examples": 208, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 157015, "num_examples": 292, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 737438, "size_in_bytes": 737438}, "anli_based_on_the_previous_passage_r1": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_based_on_the_previous_passage_r1", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 16818749, "num_examples": 16946, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 993754, "num_examples": 1000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 992053, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 18804556, "size_in_bytes": 18804556}, "super_glue_record_GPT_3_style_continuation_choices_": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_record_GPT_3_style_continuation_choices_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 389547617, "num_examples": 100730, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 38377053, "num_examples": 10000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 35877665, "num_examples": 10000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 463802335, "size_in_bytes": 463802335}, "super_glue_wsc.fixed_by_p_they_mean": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_wsc.fixed_by_p_they_mean", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 220946, "num_examples": 554, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 50667, "num_examples": 104, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 79012, "num_examples": 146, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 350625, "size_in_bytes": 350625}, "quarel_heres_a_story": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "quarel_heres_a_story", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1308200, "num_examples": 1941, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 189167, "num_examples": 278, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 375409, "num_examples": 552, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1872776, "size_in_bytes": 1872776}, "anli_can_we_infer_r1_score_eval": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_can_we_infer_r1_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 48213909, "num_examples": 50838, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 2849586, "num_examples": 3000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2844483, "num_examples": 3000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 53907978, "size_in_bytes": 53907978}, "ai2_arc_ARC_Challenge_qa_options": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "ai2_arc_ARC_Challenge_qa_options", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 815805, "num_examples": 1119, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 224258, "num_examples": 299, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 876806, "num_examples": 1172, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1916869, "size_in_bytes": 1916869}, "trec_fine_grained_open_context_first": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "trec_fine_grained_open_context_first", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 4097097, "num_examples": 5452, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 361398, "num_examples": 500, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 4458495, "size_in_bytes": 4458495}, "adversarial_qa_droberta_question_context_answer": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "adversarial_qa_droberta_question_context_answer", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 16638409, "num_examples": 10000, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1653831, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 18292240, "size_in_bytes": 18292240}, "ag_news_classify": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "ag_news_classify", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 79459811, "num_examples": 120000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 5007106, "num_examples": 7600, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 84466917, "size_in_bytes": 84466917}, "quail_context_description_question_answer_id": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "quail_context_description_question_answer_id", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 43125567, "num_examples": 10246, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 9171437, "num_examples": 2164, "dataset_name": "p3"}, "challenge": {"name": "challenge", "num_bytes": 2357851, "num_examples": 556, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 54654855, "size_in_bytes": 54654855}, "dbpedia_14_given_list_what_category_does_the_paragraph_belong_to": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "dbpedia_14_given_list_what_category_does_the_paragraph_belong_to", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 698519835, "num_examples": 560000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 87332523, "num_examples": 70000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 785852358, "size_in_bytes": 785852358}, "cosmos_qa_no_prompt_id": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "cosmos_qa_no_prompt_id", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 29843475, "num_examples": 25262, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3816679, "num_examples": 2985, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 8930690, "num_examples": 6963, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 42590844, "size_in_bytes": 42590844}, "super_glue_wic_polysemous": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_wic_polysemous", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2564427, "num_examples": 5428, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 310228, "num_examples": 638, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 724583, "num_examples": 1400, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 3599238, "size_in_bytes": 3599238}, "trec_fine_grained_DESC_context_first": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "trec_fine_grained_DESC_context_first", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 395163, "num_examples": 1162, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 41580, "num_examples": 138, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 436743, "size_in_bytes": 436743}, "anli_does_it_follow_that_r1_score_eval": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_does_it_follow_that_r1_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 47479533, "num_examples": 50838, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 2806134, "num_examples": 3000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2800575, "num_examples": 3000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 53086242, "size_in_bytes": 53086242}, "samsum_Given_the_above_dialogue_write_a_summary": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "samsum_Given_the_above_dialogue_write_a_summary", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 20995291, "num_examples": 14732, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1140604, "num_examples": 818, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1186581, "num_examples": 819, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 23322476, "size_in_bytes": 23322476}, "anli_must_be_true_r3": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_must_be_true_r3", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 93620991, "num_examples": 100459, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1132269, "num_examples": 1200, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1128422, "num_examples": 1200, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 95881682, "size_in_bytes": 95881682}, "cosmos_qa_context_answer_to_question": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "cosmos_qa_context_answer_to_question", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 26180698, "num_examples": 25262, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3249022, "num_examples": 2985, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 6946240, "num_examples": 6963, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 36375960, "size_in_bytes": 36375960}, "adversarial_qa_droberta_generate_question": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "adversarial_qa_droberta_generate_question", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 18257430, "num_examples": 10000, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1828982, "num_examples": 1000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1997572, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 22083984, "size_in_bytes": 22083984}, "super_glue_boolq_GPT_3_Style": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_boolq_GPT_3_Style", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 12429642, "num_examples": 9427, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 4259861, "num_examples": 3270, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 4346300, "num_examples": 3245, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 21035803, "size_in_bytes": 21035803}, "super_glue_wsc.fixed_the_pronoun_refers_to_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_wsc.fixed_the_pronoun_refers_to_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 501995, "num_examples": 1108, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 112448, "num_examples": 208, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 164567, "num_examples": 292, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 779010, "size_in_bytes": 779010}, "quartz_having_read_above_passage": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "quartz_having_read_above_passage", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1971980, "num_examples": 2696, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 289592, "num_examples": 384, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 577004, "num_examples": 784, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 2838576, "size_in_bytes": 2838576}, "app_reviews_generate_review": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "app_reviews_generate_review", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 113485306, "num_examples": 288065, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 113485306, "size_in_bytes": 113485306}, "trec_fine_grained_DESC": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "trec_fine_grained_DESC", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 394001, "num_examples": 1162, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 41442, "num_examples": 138, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 435443, "size_in_bytes": 435443}, "kilt_tasks_hotpotqa_final_exam": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "kilt_tasks_hotpotqa_final_exam", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 28095033, "num_examples": 88869, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1636877, "num_examples": 5600, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 29731910, "size_in_bytes": 29731910}, "wiki_hop_original_generate_subject_and_object": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "wiki_hop_original_generate_subject_and_object", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 624675339, "num_examples": 43738, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 78374297, "num_examples": 5129, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 703049636, "size_in_bytes": 703049636}, "winogrande_winogrande_xl_does_underscore_refer_to_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "winogrande_winogrande_xl_does_underscore_refer_to_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 29476198, "num_examples": 80796, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 931348, "num_examples": 2534, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1303045, "num_examples": 3534, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 31710591, "size_in_bytes": 31710591}, "anli_based_on_the_previous_passage_r3_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_based_on_the_previous_passage_r3_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 279632924, "num_examples": 301377, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3382781, "num_examples": 3600, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3371240, "num_examples": 3600, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 286386945, "size_in_bytes": 286386945}, "cosmos_qa_description_context_question_answer_text": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "cosmos_qa_description_context_question_answer_text", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 40046492, "num_examples": 25262, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 5170760, "num_examples": 2985, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 12050998, "num_examples": 6963, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 57268250, "size_in_bytes": 57268250}, "wiki_qa_automatic_system": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "wiki_qa_automatic_system", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 12887999, "num_examples": 20360, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1715996, "num_examples": 2733, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3899313, "num_examples": 6165, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 18503308, "size_in_bytes": 18503308}, "hellaswag_if_begins_how_continues_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "hellaswag_if_begins_how_continues_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 293643765, "num_examples": 159620, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 76059045, "num_examples": 40168, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 73802594, "num_examples": 40012, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 443505404, "size_in_bytes": 443505404}, "winogrande_winogrande_debiased_Replace": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "winogrande_winogrande_debiased_Replace", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3875827, "num_examples": 9248, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 528606, "num_examples": 1267, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 739644, "num_examples": 1767, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 5144077, "size_in_bytes": 5144077}, "super_glue_copa_cause_effect": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_copa_cause_effect", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 163057, "num_examples": 400, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 41439, "num_examples": 100, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 191107, "num_examples": 500, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 395603, "size_in_bytes": 395603}, "ropes_given_background_situation": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "ropes_given_background_situation", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 23701015, "num_examples": 10924, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3387100, "num_examples": 1688, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 27088115, "size_in_bytes": 27088115}, "super_glue_rte_based_on_the_previous_passage_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized versions of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_rte_based_on_the_previous_passage_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3946983, "num_examples": 4980, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 427639, "num_examples": 554, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 4565714, "num_examples": 6000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 8940336, "size_in_bytes": 8940336}, "anli_guaranteed_true_r3": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_guaranteed_true_r3", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 91812729, "num_examples": 100459, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1110669, "num_examples": 1200, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1106822, "num_examples": 1200, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 94030220, "size_in_bytes": 94030220}, "anli_GPT_3_style_r3": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_GPT_3_style_r3", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 88846867, "num_examples": 100459, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1075867, "num_examples": 1200, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1071728, "num_examples": 1200, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 90994462, "size_in_bytes": 90994462}, "cos_e_v1.11_description_question_option_id": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "cos_e_v1.11_description_question_option_id", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 4842914, "num_examples": 9741, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 603266, "num_examples": 1221, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 5446180, "size_in_bytes": 5446180}, "anli_can_we_infer_r3_score_eval": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_can_we_infer_r3_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 269687483, "num_examples": 301377, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3263981, "num_examples": 3600, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3252440, "num_examples": 3600, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 276203904, "size_in_bytes": 276203904}, "anli_consider_always_sometimes_never_r3": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_consider_always_sometimes_never_r3", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 98053929, "num_examples": 100459, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1185499, "num_examples": 1200, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1181360, "num_examples": 1200, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 100420788, "size_in_bytes": 100420788}, "wiqa_does_the_supposed_perturbation_have_an_effect": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "wiqa_does_the_supposed_perturbation_have_an_effect", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 32441282, "num_examples": 29808, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 7194493, "num_examples": 6894, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2993768, "num_examples": 3003, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 42629543, "size_in_bytes": 42629543}, "anli_does_it_follow_that_r3_score_eval": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_does_it_follow_that_r3_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 265384097, "num_examples": 301377, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3213665, "num_examples": 3600, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3201248, "num_examples": 3600, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 271799010, "size_in_bytes": 271799010}, "super_glue_cb_can_we_infer": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_cb_can_we_infer", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 212371, "num_examples": 250, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 52401, "num_examples": 56, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 232589, "num_examples": 250, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 497361, "size_in_bytes": 497361}, "dream_read_the_following_conversation_and_answer_the_question": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "dream_read_the_following_conversation_and_answer_the_question", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 10461407, "num_examples": 6116, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3424964, "num_examples": 2040, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3434464, "num_examples": 2041, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 17320835, "size_in_bytes": 17320835}, "hellaswag_Open_ended_start": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "hellaswag_Open_ended_start", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 31586242, "num_examples": 39905, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 8175537, "num_examples": 10042, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 7918203, "num_examples": 10003, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 47679982, "size_in_bytes": 47679982}, "super_glue_cb_should_assume": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_cb_should_assume", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 214871, "num_examples": 250, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 52961, "num_examples": 56, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 235089, "num_examples": 250, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 502921, "size_in_bytes": 502921}, "gigaword_write_an_article": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "gigaword_write_an_article", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2340011314, "num_examples": 3803957, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 116925742, "num_examples": 189651, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1170308, "num_examples": 1951, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 2458107364, "size_in_bytes": 2458107364}, "quartz_read_passage_below_choose": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "quartz_read_passage_below_choose", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1939628, "num_examples": 2696, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 284984, "num_examples": 384, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 567596, "num_examples": 784, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 2792208, "size_in_bytes": 2792208}, "kilt_tasks_hotpotqa_complex_question": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "kilt_tasks_hotpotqa_complex_question", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 38937051, "num_examples": 88869, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 2320077, "num_examples": 5600, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 41257128, "size_in_bytes": 41257128}, "quoref_Answer_Friend_Question": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "quoref_Answer_Friend_Question", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 77399445, "num_examples": 19399, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 9525611, "num_examples": 2418, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 86925056, "size_in_bytes": 86925056}, "anli_MNLI_crowdsource_r3_score_eval": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_MNLI_crowdsource_r3_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 309971542, "num_examples": 301377, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3745181, "num_examples": 3600, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3733640, "num_examples": 3600, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 317450363, "size_in_bytes": 317450363}, "rotten_tomatoes_Movie_Expressed_Sentiment": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "rotten_tomatoes_Movie_Expressed_Sentiment", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3167776, "num_examples": 8530, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 396137, "num_examples": 1066, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 398914, "num_examples": 1066, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 3962827, "size_in_bytes": 3962827}, "trivia_qa_unfiltered_formal_description": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multi-task enables task zero-shot generalization](TODO) which represent only a subset datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "trivia_qa_unfiltered_formal_description", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 35314429, "num_examples": 87622, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 4560624, "num_examples": 11313, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 4386707, "num_examples": 10832, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 44261760, "size_in_bytes": 44261760}, "social_i_qa_Generate_answer": {"description": "P3 (Pubic Pool of Prompts)is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects uniformly at random an option in a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "social_i_qa_Generate_answer", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 12738768, "num_examples": 33410, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 748977, "num_examples": 1954, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 13487745, "size_in_bytes": 13487745}, "cnn_dailymail_3.0.0_tldr_summary": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, 2021, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "cnn_dailymail_3.0.0_tldr_summary", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1362779017, "num_examples": 287113, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 63818906, "num_examples": 13368, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 54627700, "num_examples": 11490, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1481225623, "size_in_bytes": 1481225623}, "piqa_Correct_the_solution_if_false_from_sol_2": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, 2021, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "piqa_Correct_the_solution_if_false_from_sol_2", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 13211899, "num_examples": 16113, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1501654, "num_examples": 1838, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2477808, "num_examples": 3084, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 17191361, "size_in_bytes": 17191361}, "quail_context_question_description_answer_text": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, 2021, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "quail_context_question_description_answer_text", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 43384659, "num_examples": 10246, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 9228265, "num_examples": 2164, "dataset_name": "p3"}, "challenge": {"name": "challenge", "num_bytes": 2364398, "num_examples": 556, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 54977322, "size_in_bytes": 54977322}, "ag_news_recommend": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, 2021, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "ag_news_recommend", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 94039811, "num_examples": 120000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 5930506, "num_examples": 7600, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 99970317, "size_in_bytes": 99970317}, "cosmos_qa_context_question_description_text": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, 2021, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "cosmos_qa_context_question_description_text", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 28514301, "num_examples": 25262, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3624704, "num_examples": 2985, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 8458103, "num_examples": 6963, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 40597108, "size_in_bytes": 40597108}, "anli_GPT_3_style_r1": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, 2021, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_GPT_3_style_r1", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 15891877, "num_examples": 16946, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 939265, "num_examples": 1000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 937412, "num_examples": 1000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 17768554, "size_in_bytes": 17768554}, "samsum_Write_a_dialogue_that_match_this_summary": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, 2021, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "samsum_Write_a_dialogue_that_match_this_summary", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 20951095, "num_examples": 14732, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1138150, "num_examples": 818, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1184124, "num_examples": 819, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 23273369, "size_in_bytes": 23273369}, "super_glue_copa__which_may_be_caused_by_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, 2021, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_copa__which_may_be_caused_by_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 134718, "num_examples": 396, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 36932, "num_examples": 104, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 167024, "num_examples": 500, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 338674, "size_in_bytes": 338674}, "anli_take_the_following_as_truth_r3": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, 2021, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_take_the_following_as_truth_r3", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 101623209, "num_examples": 100459, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1226673, "num_examples": 1200, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1222826, "num_examples": 1200, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 104072708, "size_in_bytes": 104072708}, "hellaswag_Topic_of_the_context": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, 2021, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "hellaswag_Topic_of_the_context", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 33608307, "num_examples": 39905, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 8699564, "num_examples": 10042, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 8451101, "num_examples": 10003, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 50758972, "size_in_bytes": 50758972}, "glue_mrpc_replace": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, 2021, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "glue_mrpc_replace", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2439089, "num_examples": 3668, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 272134, "num_examples": 408, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1143029, "num_examples": 1725, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 3854252, "size_in_bytes": 3854252}, "super_glue_boolq_based_on_the_previous_passage": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, 2021, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_boolq_based_on_the_previous_passage", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 12665321, "num_examples": 9427, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 4341611, "num_examples": 3270, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 4427425, "num_examples": 3245, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 21434357, "size_in_bytes": 21434357}, "super_glue_multirc_found_this_answer": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, 2021, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_multirc_found_this_answer", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 88308187, "num_examples": 27243, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 15444724, "num_examples": 4848, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 29666919, "num_examples": 9693, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 133419830, "size_in_bytes": 133419830}, "anli_based_on_the_previous_passage_r3": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, 2021, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_based_on_the_previous_passage_r3", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 94324204, "num_examples": 100459, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1140669, "num_examples": 1200, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1136822, "num_examples": 1200, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 96601695, "size_in_bytes": 96601695}, "super_glue_rte_does_it_follow_that": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, 2021, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_rte_does_it_follow_that", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1859690, "num_examples": 2490, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 201176, "num_examples": 277, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2240884, "num_examples": 3000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 4301750, "size_in_bytes": 4301750}, "wiki_qa_Is_This_True_": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, 2021, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "wiki_qa_Is_This_True_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 9652143, "num_examples": 20360, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 1282215, "num_examples": 2733, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2918036, "num_examples": 6165, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 13852394, "size_in_bytes": 13852394}, "trec_pick_the_best_descriptor": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, 2021, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "trec_pick_the_best_descriptor", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2383886, "num_examples": 5452, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 203935, "num_examples": 500, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 2587821, "size_in_bytes": 2587821}, "cnn_dailymail_3.0.0_news_stock": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, 2021, there are 2,000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "cnn_dailymail_3.0.0_news_stock", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1342393994, "num_examples": 287113, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 62869778, "num_examples": 13368, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 53811910, "num_examples": 11490, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 1459075682, "size_in_bytes": 1459075682}, "xsum_DOC_write_summary_of_above": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "xsum_DOC_write_summary_of_above", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 674710162, "num_examples": 204045, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 37464546, "num_examples": 11332, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 37567833, "num_examples": 11334, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 749742541, "size_in_bytes": 749742541}, "race_high_Select_the_best_answer": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "race_high_Select_the_best_answer", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 241414659, "num_examples": 62445, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 13240303, "num_examples": 3451, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 13378098, "num_examples": 3498, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 268033060, "size_in_bytes": 268033060}, "xsum_summarize_this_DOC_summary": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "xsum_summarize_this_DOC_summary", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 668996902, "num_examples": 204045, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 37147250, "num_examples": 11332, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 37250481, "num_examples": 11334, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 743394633, "size_in_bytes": 743394633}, "super_glue_record_What_could_the_placeholder_be_": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_record_What_could_the_placeholder_be_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 291018169, "num_examples": 100730, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 28253760, "num_examples": 10000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 28355895, "num_examples": 10000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 347627824, "size_in_bytes": 347627824}, "ag_news_classify_with_choices_question_first": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "ag_news_classify_with_choices_question_first", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 91699811, "num_examples": 120000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 5782306, "num_examples": 7600, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 97482117, "size_in_bytes": 97482117}, "hellaswag_Reversed_appropriate_continuation_Yes_or_No": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "hellaswag_Reversed_appropriate_continuation_Yes_or_No", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 37685953, "num_examples": 39905, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 9718988, "num_examples": 10042, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 9484346, "num_examples": 10003, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 56889287, "size_in_bytes": 56889287}, "xsum_article_DOC_summary": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "xsum_article_DOC_summary", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 662671507, "num_examples": 204045, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 36795958, "num_examples": 11332, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 36899127, "num_examples": 11334, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 736366592, "size_in_bytes": 736366592}, "cosmos_qa_context_question_description_answer_id": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "cosmos_qa_context_question_description_answer_id", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 31990745, "num_examples": 25262, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 4070404, "num_examples": 2985, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 9522545, "num_examples": 6963, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 45583694, "size_in_bytes": 45583694}, "super_glue_rte_justified_in_saying_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_rte_justified_in_saying_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3792603, "num_examples": 4980, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 410465, "num_examples": 554, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 4379714, "num_examples": 6000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 8582782, "size_in_bytes": 8582782}, "squad_v2_Questions_with_Context_Without_Prompt_Keywords": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "squad_v2_Questions_with_Context_Without_Prompt_Keywords", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 215624363, "num_examples": 130319, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 20614575, "num_examples": 11873, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 236238938, "size_in_bytes": 236238938}, "anli_MNLI_crowdsource_r2_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_MNLI_crowdsource_r2_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 145734738, "num_examples": 136380, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3213731, "num_examples": 3000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3231188, "num_examples": 3000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 152179657, "size_in_bytes": 152179657}, "dream_generate_first_utterance": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "dream_generate_first_utterance", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 7880078, "num_examples": 6116, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 2580551, "num_examples": 2040, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 2584973, "num_examples": 2041, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 13045602, "size_in_bytes": 13045602}, "cos_e_v1.11_question_option_description_id": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "cos_e_v1.11_question_option_description_id", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3693476, "num_examples": 9741, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 459188, "num_examples": 1221, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 4152664, "size_in_bytes": 4152664}, "wiki_hop_original_choose_best_object_interrogative_1": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "wiki_hop_original_choose_best_object_interrogative_1", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 658558109, "num_examples": 43738, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 82503363, "num_examples": 5129, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 741061472, "size_in_bytes": 741061472}, "super_glue_wic_GPT_3_prompt": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_wic_GPT_3_prompt", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1983631, "num_examples": 5428, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 241962, "num_examples": 638, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 574783, "num_examples": 1400, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 2800376, "size_in_bytes": 2800376}, "anli_guaranteed_possible_impossible_r3_score_eval": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"idx": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "is_correct": {"dtype": "bool", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "weight": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "anli_guaranteed_possible_impossible_r3_score_eval", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 284555415, "num_examples": 301377, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 3441581, "num_examples": 3600, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 3430040, "num_examples": 3600, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 291427036, "size_in_bytes": 291427036}, "super_glue_copa__As_a_result_C1_or_C2_": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO) which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_copa__As_a_result_C1_or_C2_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 78701, "num_examples": 202, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 18479, "num_examples": 48, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 90725, "num_examples": 250, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 187905, "size_in_bytes": 187905}, "ai2_arc_ARC_Easy_pick_the_most_correct_option": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1) or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets, and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "ai2_arc_ARC_Easy_pick_the_most_correct_option", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1468412, "num_examples": 2251, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 373218, "num_examples": 570, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 1557219, "num_examples": 2376, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 3398849, "size_in_bytes": 3398849}, "web_questions_question_answer": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1), or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "web_questions_question_answer", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 509616, "num_examples": 3778, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 277665, "num_examples": 2032, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 787281, "size_in_bytes": 787281}, "openbookqa_main_choices": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1), or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "openbookqa_main_choices", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2153245, "num_examples": 4957, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 236670, "num_examples": 500, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 225012, "num_examples": 500, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 2614927, "size_in_bytes": 2614927}, "super_glue_record_Which_one_is_the_placeholder_": {"description": "P3 (Public Pool of Prompts) is a collection of prompted English datasets covering a diverse set of NLP tasks. A prompt is the combination of an input template and a target template. The templates are functions mapping a data example into natural language for the input and target sequences. For example, in the case of an NLI dataset, the data example would include fields for *Premise, Hypothesis, Label*. An input template would be *If {Premise} is true, is it also true that {Hypothesis}?*, whereas a target template can be defined with the label choices *Choices[label]*. Here *Choices* is prompt-specific metadata that consists of the options *yes, maybe, no* corresponding to *label* being entailment (0), neutral (1), or contradiction (2).\n\nPrompts are collected using [Promptsource](https://github.com/bigscience-workshop/promptsource), an interface to interactively write prompts on datasets and collect prompt-specific metadata such as evaluation metrics. As of October 13th, there are 2'000 prompts collected for 270+ data(sub)sets. The collection of prompts is publicly available on [Promptsource](https://github.com/bigscience-workshop/promptsource).\n\nTo train [T0*](https://huggingface.co/bigscience/T0pp), we used a subset of the prompts available in Promptsource (see details [here](https://huggingface.co/bigscience/T0pp#training-data)). However, some of the prompts use `random.choice`, a method that selects an option uniformly at random from a list of valid possibilities. For reproducibility purposes, we release the collection of prompted examples used to train T0*. 
**The data available here are the materialized version of the prompted datasets used in [Multitask Prompted Training Enables Zero-Shot Task Generalization](TODO), which represent only a subset of the datasets for which there is at least one prompt on Promptsource.**\n", "citation": "TODO", "homepage": "https://github.com/bigscience-workshop/promptsource", "license": "Apache License 2.0", "features": {"answer_choices": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "inputs_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}, "targets": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "targets_pretokenized": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "p3", "config_name": "super_glue_record_Which_one_is_the_placeholder_", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 290920948, "num_examples": 100730, "dataset_name": "p3"}, "validation": {"name": "validation", "num_bytes": 28243988, "num_examples": 10000, "dataset_name": "p3"}, "test": {"name": "test", "num_bytes": 28345895, "num_examples": 10000, "dataset_name": "p3"}}, "download_checksums": {}, "download_size": 0, "post_processing_size": null, "dataset_size": 347510831, "size_in_bytes": 347510831}} \ No newline at end of file
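Every description string above explains the same template mechanism, so a single illustration after the JSON may help. Below is a minimal Python sketch (not part of the metadata file itself) of how the quoted NLI input template and the *Choices[label]* target template would materialize a raw example into the `inputs_pretokenized`/`targets_pretokenized` strings these configs record; the function name and example values are hypothetical.

```python
# Minimal sketch of the prompt mechanism described above (illustrative only):
# an input template and a target template map a raw NLI example into
# natural-language input/target strings.
def materialize(example: dict) -> tuple[str, str]:
    # Prompt-specific *Choices* metadata: entailment (0), neutral (1), contradiction (2).
    choices = ["yes", "maybe", "no"]
    # Input template: *If {Premise} is true, is it also true that {Hypothesis}?*
    inputs = f"If {example['premise']} is true, is it also true that {example['hypothesis']}?"
    # Target template: *Choices[label]*
    targets = choices[example["label"]]
    return inputs, targets

print(materialize({"premise": "it is raining", "hypothesis": "the ground is wet", "label": 0}))
# -> ('If it is raining is true, is it also true that the ground is wet?', 'yes')
```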
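For the schemas themselves, here is a minimal usage sketch, assuming the `datasets` library and the `bigscience/P3` Hub repository that this metadata file belongs to; the config name is taken verbatim from the entries above.

```python
# Minimal sketch (assumes `pip install datasets` and the bigscience/P3 Hub repo):
# load one config listed above and inspect the fields its "features" entry declares.
from datasets import load_dataset

ds = load_dataset("bigscience/P3", "super_glue_copa__As_a_result_C1_or_C2_", split="train")

ex = ds[0]
print(ex["inputs_pretokenized"])   # prompted input as plain text
print(ex["targets_pretokenized"])  # target as plain text
print(ex["answer_choices"])        # per-example options (sequence of strings)

# "inputs"/"targets" hold the tokenized (int32) versions of the same strings.
# The *_score_eval configs (e.g. anli_guaranteed_possible_impossible_r3_score_eval)
# additionally carry "idx", "is_correct", and "weight", so each answer choice
# appears as its own scorable row.
```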