from datasets import load_dataset

load_dataset("path/to/my_dataset")
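Pointing load_dataset at a local folder containing a loading script builds the dataset and returns a DatasetDict keyed by split; a minimal sketch (the path is a placeholder):

from datasets import load_dataset

dataset = load_dataset("path/to/my_dataset")
print(dataset)  # DatasetDict with one entry per split the script defines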
datasets.Features(
    {
        "id": datasets.Value("string"),
        "title": datasets.Value("string"),
        "context": datasets.Value("string"),
        "question": datasets.Value("string"),
        "answers": datasets.Sequence(
            {
                "text": datasets.Value("string"),
                "answer_start": datasets.Value("int32"),
            }
        ),
    }
)
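Every example the script yields must conform to this schema. One point that trips people up: a datasets.Sequence over a feature dict is stored as a dict of lists, so a conforming record (values here are illustrative SQuAD-style placeholders) looks like:

example = {
    "id": "5733be284776f41900661182",
    "title": "University_of_Notre_Dame",
    "context": "Architecturally, the school has a Catholic character. ...",
    "question": "To whom did the Virgin Mary allegedly appear in 1858 in Lourdes France?",
    "answers": {  # dict of parallel lists, not a list of dicts
        "text": ["Saint Bernadette Soubirous"],
        "answer_start": [515],
    },
}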
def _info(self):
    return datasets.DatasetInfo(
        description=_DESCRIPTION,
        features=datasets.Features(
            {
                "id": datasets.Value("string"),
                "title": datasets.Value("string"),
                "context": datasets.Value("string"),
                "question": datasets.Value("string"),
                "answers": datasets.features.Sequence(
                    {
                        "text": datasets.Value("string"),
                        "answer_start": datasets.Value("int32"),
                    }
                ),
            }
        ),
        supervised_keys=None,
        homepage="https://rajpurkar.github.io/SQuAD-explorer/",
        citation=_CITATION,
    )
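The DatasetInfo assembled here travels with the built dataset. As a sanity check against the canonical squad dataset that this _info mirrors (assuming network access):

from datasets import load_dataset

dataset = load_dataset("squad")
print(dataset["train"].info.description)     # the _DESCRIPTION string from above
print(dataset["train"].features["answers"])  # Sequence of text / answer_start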
class SuperGlueConfig(datasets.BuilderConfig):
    """BuilderConfig for SuperGLUE."""

    def __init__(self, features, data_url, citation, url, label_classes=("False", "True"), **kwargs):
        """BuilderConfig for SuperGLUE.

        Args:
            features: *list[string]*, list of the features that will appear in the
                feature dict. Should not include "label".
            data_url: *string*, url to download the zip file from.
            citation: *string*, citation for the data set.
            url: *string*, url for information about the data set.
            label_classes: *list[string]*, the list of classes for the label if the
                label is present as a string. Non-string labels will be cast to either
                'False' or 'True'.
            **kwargs: keyword arguments forwarded to super.
        """
        super(SuperGlueConfig, self).__init__(version=datasets.Version("1.0.2"), **kwargs)
        self.features = features
        self.label_classes = label_classes
        self.data_url = data_url
        self.citation = citation
        self.url = url
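Everything passed to the constructor ends up as an attribute on the config object, and the builder sees the config selected at load time as self.config. A sketch of how a downstream method might use it (illustrative, not the benchmark's actual code):

def _split_generators(self, dl_manager):
    # self.config is the SuperGlueConfig chosen by name in load_dataset(...),
    # so the attributes stored in __init__ above are directly available here.
    dl_dir = dl_manager.download_and_extract(self.config.data_url)
    ...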
class SuperGlue(datasets.GeneratorBasedBuilder):
    """The SuperGLUE benchmark."""

    BUILDER_CONFIGS = [
        SuperGlueConfig(
            name="boolq",
            description=_BOOLQ_DESCRIPTION,
            features=["question", "passage"],
            data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/BoolQ.zip",
            citation=_BOOLQ_CITATION,
            url="https://github.com/google-research-datasets/boolean-questions",
        ),
        ...
        ...
        SuperGlueConfig(
            name="axg",
            description=_AXG_DESCRIPTION,
            features=["premise", "hypothesis"],
            label_classes=["entailment", "not_entailment"],
            data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/AX-g.zip",
            citation=_AXG_CITATION,
            url="https://github.com/rudinger/winogender-schemas",
        ),
    ]
from datasets import load_dataset

dataset = load_dataset('super_glue', 'boolq')
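The second argument matches the name of one BUILDER_CONFIGS entry, so only the BoolQ subset is downloaded and built. A quick inspection of the result (split names are whatever the builder defines):

print(dataset)                    # DatasetDict of the builder's splits
print(dataset["train"].features)  # the declared "question"/"passage" plus the label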
class NewDataset(datasets.GeneratorBasedBuilder):

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="first_domain", version=VERSION, description="This part of my dataset covers a first domain"),
        datasets.BuilderConfig(name="second_domain", version=VERSION, description="This part of my dataset covers a second domain"),
    ]

    DEFAULT_CONFIG_NAME = "first_domain"
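Because DEFAULT_CONFIG_NAME is set, passing a config name becomes optional; a usage sketch against this hypothetical script:

from datasets import load_dataset

load_dataset("path/to/my_dataset")                   # falls back to "first_domain"
load_dataset("path/to/my_dataset", "second_domain")  # selects a config explicitly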
_URL = "https://rajpurkar.github.io/SQuAD-explorer/dataset/"
_URLS = {
    "train": _URL + "train-v1.1.json",
    "dev": _URL + "dev-v1.1.json",
}
# Note: the return annotation requires a module-level "from typing import List".
def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
    urls_to_download = self._URLS
    downloaded_files = dl_manager.download_and_extract(urls_to_download)

    return [
        datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
        datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
    ]
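The keys of each gen_kwargs dict are forwarded verbatim as keyword arguments to _generate_examples, which is why the method below takes a filepath parameter; conceptually:

# For each SplitGenerator above, the library calls (roughly):
#   self._generate_examples(filepath=downloaded_files["train"])   # TRAIN
#   self._generate_examples(filepath=downloaded_files["dev"])     # VALIDATION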
# Assumes module-level: import json, and
# logger = datasets.logging.get_logger(__name__)
def _generate_examples(self, filepath):
    """This function returns the examples in the raw (text) form."""
    logger.info("generating examples from = %s", filepath)
    with open(filepath) as f:
        squad = json.load(f)
        for article in squad["data"]:
            title = article.get("title", "").strip()
            for paragraph in article["paragraphs"]:
                context = paragraph["context"].strip()
                for qa in paragraph["qas"]:
                    question = qa["question"].strip()
                    id_ = qa["id"]

                    answer_starts = [answer["answer_start"] for answer in qa["answers"]]
                    answers = [answer["text"].strip() for answer in qa["answers"]]

                    yield id_, {
                        "title": title,
                        "context": context,
                        "question": question,
                        "id": id_,
                        "answers": {
                            "answer_start": answer_starts,
                            "text": answers,
                        },
                    }
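_generate_examples yields (key, example) pairs: the key (here the SQuAD qa["id"]) must be unique within a split, and the example dict must match the features declared in _info. A quick smoke test before running the CLI checks below (the path is a placeholder):

from datasets import load_dataset

dataset = load_dataset("path/to/my_dataset")
assert set(dataset["train"][0]) == {"id", "title", "context", "question", "answers"}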
datasets-cli test datasets/<your-dataset-folder> --save_infos --all_configs
datasets-cli dummy_data datasets/<your-dataset-folder> --auto_generate
datasets-cli dummy_data datasets/<your-dataset-folder>
==============================DUMMY DATA INSTRUCTIONS==============================
- In order to create the dummy data for my-dataset, please go into the folder './datasets/my-dataset/dummy/1.1.0' with *cd ./datasets/my-dataset/dummy/1.1.0* .
- Please create the following dummy data files 'dummy_data/TREC_10.label, dummy_data/train_5500.label' from the folder './datasets/my-dataset/dummy/1.1.0'
- For each of the splits 'train, test', make sure that one or more of the dummy data files provide at least one example
- If the method *_generate_examples(...)* includes multiple *open()* statements, you might have to create other files in addition to 'dummy_data/TREC_10.label, dummy_data/train_5500.label'. In this case please refer to the *_generate_examples(...)* method
- After all dummy data files are created, they should be zipped recursively to 'dummy_data.zip' with the command *zip -r dummy_data.zip dummy_data/*
- You can now delete the folder 'dummy_data' with the command *rm -r dummy_data*
- To get the folder 'dummy_data' back for further changes to the dummy data, simply unzip dummy_data.zip with the command *unzip dummy_data.zip*
- Make sure you have created the file 'dummy_data.zip' in './datasets/my-dataset/dummy/1.1.0'
===================================================================================
RUN_SLOW=1 pytest tests/test_dataset_common.py::LocalDatasetTest::test_load_real_dataset_<your_dataset_name>
RUN_SLOW=1 pytest tests/test_dataset_common.py::LocalDatasetTest::test_load_dataset_all_configs_<your_dataset_name>