|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""TODO: Add a description here.""" |
|
|
|
|
|
import csv |
|
import glob |
|
import os |
|
|
|
import datasets |
|
|
|
import numpy as np |
|
|
|
|
|
|
|
_CITATION = """\ |
|
@InProceedings{huggingface:dataset, |
|
title = {A great new dataset}, |
|
author={huggingface, Inc. |
|
}, |
|
year={2020} |
|
} |
|
""" |
|
|
|
|
|
|
|
_DESCRIPTION = """\ |
|
This new dataset is designed to solve this great NLP task and is crafted with a lot of care. |
|
""" |
|
|
|
|
|
_HOMEPAGE = "" |
|
|
|
|
|
_LICENSE = "" |
|
|
|
|
|
|
|
|
|
_DATA_URLs = { |
|
"tokenizer": "https://cdn-lfs.huggingface.co/datasets/semeru/completeformer-masked/30668967d62b849f48db64ff26c0d2b92bd08d940471b80b80ce2ff39bd8358e", |
|
"all": { |
|
"train": "https://cdn-lfs.huggingface.co/datasets/semeru/completeformer-masked/f39db3b2843aa34c2956fae91a379c30953cd06dde8b783e245f3325cca906ee", |
|
"valid": "https://cdn-lfs.huggingface.co/datasets/semeru/completeformer-masked/7591e528bd8e712d1b7d61ed328f9d371d90f7b70a192b0eb8b00196611f4ecd", |
|
"test": "https://cdn-lfs.huggingface.co/datasets/semeru/completeformer-masked/861606eea456617389625c6d2b1680875438e623e0582288d9814e8289c060d6", |
|
}, |
|
"mix": { |
|
"train": "https://huggingface.co/datasets/semeru/completeformer-masked/resolve/main/training_mix.csv", |
|
"valid": "https://huggingface.co/datasets/semeru/completeformer-masked/resolve/main/validation_mix.csv", |
|
"test": "https://huggingface.co/datasets/semeru/completeformer-masked/resolve/main/test_mix.csv", |
|
}, |
|
"length_short": { |
|
"train": "https://huggingface.co/datasets/semeru/completeformer-masked/resolve/main/training_short.csv", |
|
"train_long": "https://huggingface.co/datasets/semeru/completeformer-masked/resolve/main/training_long.csv", |
|
"valid": "https://huggingface.co/datasets/semeru/completeformer-masked/resolve/main/validation_length.csv", |
|
"test": "https://huggingface.co/datasets/semeru/completeformer-masked/resolve/main/test_length.csv", |
|
}, |
|
"length_medium": { |
|
"train": "https://huggingface.co/datasets/semeru/completeformer-masked/resolve/main/training_medium.csv", |
|
"valid": "https://huggingface.co/datasets/semeru/completeformer-masked/resolve/main/validation_length.csv", |
|
"test": "https://huggingface.co/datasets/semeru/completeformer-masked/resolve/main/test_length.csv", |
|
}, |
|
"length_long": { |
|
"train": "https://huggingface.co/datasets/semeru/completeformer-masked/resolve/main/training_long.csv", |
|
"valid": "https://huggingface.co/datasets/semeru/completeformer-masked/resolve/main/validation_length.csv", |
|
"test": "https://huggingface.co/datasets/semeru/completeformer-masked/resolve/main/test_length.csv", |
|
}, |
|
} |
|
|
|
|
|
|
|
class CSNCHumanJudgementDataset(datasets.GeneratorBasedBuilder):
    """Builder for the semeru/completeformer-masked dataset.

    Configurations:
      * ``tokenizer`` — raw function strings (train split only), intended
        for tokenizer training.
      * ``all`` — full records: original method/block plus the
        complex/medium/simple masked blocks with their inputs and targets.
      * ``mix`` — plain ``(input, target)`` pairs.
      * ``length_short`` / ``length_medium`` / ``length_long`` —
        ``(input, target, size)`` pairs bucketed by length; the validation
        and test files are shared across the three length configs.
    """

    VERSION = datasets.Version("1.1.0")

    # All configs share an empty description; generate them uniformly.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=config_name, version=VERSION, description="")
        for config_name in (
            "all",
            "mix",
            "length_short",
            "length_medium",
            "length_long",
            "tokenizer",
        )
    ]

    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        """Return the DatasetInfo whose feature schema depends on the config."""
        if self.config.name == "tokenizer":
            features = datasets.Features(
                {
                    "function": datasets.Value("string"),
                }
            )
        elif self.config.name == "all":
            features = datasets.Features(
                {
                    "method": datasets.Value("string"),
                    "block": datasets.Value("string"),
                    "complex_masked_block": datasets.Value("string"),
                    "complex_input": datasets.Value("string"),
                    "complex_target": datasets.Value("string"),
                    "medium_masked_block": datasets.Value("string"),
                    "medium_input": datasets.Value("string"),
                    "medium_target": datasets.Value("string"),
                    "simple_masked_block": datasets.Value("string"),
                    "simple_input": datasets.Value("string"),
                    "simple_target": datasets.Value("string"),
                }
            )
        elif self.config.name == "mix":
            features = datasets.Features(
                {
                    "input": datasets.Value("string"),
                    "target": datasets.Value("string"),
                }
            )
        elif self.config.name.startswith("length_"):
            features = datasets.Features(
                {
                    "input": datasets.Value("string"),
                    "target": datasets.Value("string"),
                    "size": datasets.Value("int64"),
                }
            )
        else:
            # Fail loudly instead of hitting an UnboundLocalError on `features`.
            raise ValueError(f"Unsupported config name: {self.config.name!r}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the data files and declare the dataset splits."""
        my_urls = _DATA_URLs[self.config.name]

        if self.config.name == "tokenizer":
            data_dir = dl_manager.download_and_extract(my_urls)
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    # BUG FIX: this keyword was "filepath", but
                    # _generate_examples declares the parameter "file_path",
                    # so loading the tokenizer config raised a TypeError.
                    gen_kwargs={"file_path": data_dir},
                ),
            ]

        # Download only the splits that are actually consumed below; the
        # "length_short" config also lists a "train_long" URL that no split
        # uses, and previously it was downloaded for nothing.
        data_dirs = {
            split: dl_manager.download_and_extract(my_urls[split])
            for split in ("train", "valid", "test")
        }
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"file_path": data_dirs["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"file_path": data_dirs["valid"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"file_path": data_dirs["test"]},
            ),
        ]

    def _generate_examples(self, file_path):
        """Yield (row_id, example) tuples, one per CSV data row.

        The first CSV column is an unnamed index and is always discarded;
        the header row is skipped.
        """
        with open(file_path, encoding="utf-8") as f:
            csv_reader = csv.reader(
                f,
                quotechar='"',
                delimiter=",",
                quoting=csv.QUOTE_ALL,
                skipinitialspace=True,
            )
            next(csv_reader, None)  # skip header; None guard handles empty files

            for row_id, row in enumerate(csv_reader):
                if self.config.name == "tokenizer":
                    yield row_id, {
                        "function": row[1],
                    }
                elif self.config.name == "all":
                    (
                        _,
                        method,
                        block,
                        complex_masked_block,
                        complex_input,
                        complex_target,
                        medium_masked_block,
                        medium_input,
                        medium_target,
                        simple_masked_block,
                        simple_input,
                        simple_target,
                    ) = row
                    yield row_id, {
                        "method": method,
                        "block": block,
                        "complex_masked_block": complex_masked_block,
                        "complex_input": complex_input,
                        "complex_target": complex_target,
                        "medium_masked_block": medium_masked_block,
                        "medium_input": medium_input,
                        "medium_target": medium_target,
                        "simple_masked_block": simple_masked_block,
                        "simple_input": simple_input,
                        "simple_target": simple_target,
                    }
                elif self.config.name == "mix":
                    # local renamed from `input`, which shadowed the builtin
                    _, model_input, target = row
                    yield row_id, {
                        "input": model_input,
                        "target": target,
                    }
                elif self.config.name.startswith("length_"):
                    _, model_input, target, size = row
                    yield row_id, {
                        "input": model_input,
                        "target": target,
                        "size": int(size),
                    }
|
|