# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the Semeru Lab and SEART research group.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TODO: Add a description here."""
import csv
import glob
import os
import datasets
import numpy as np
# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.
},
year={2020}
}
"""
_DESCRIPTION = """\
Methods with masked blocks for training and evaluating code completion models.
The `all` configuration exposes three masking levels per method (complex, medium,
and simple), `mix` provides mixed masked input/target pairs, the `length_*`
configurations split the data by method length, and `tokenizer` provides raw
functions for tokenizer training.
"""
_HOMEPAGE = "https://huggingface.co/datasets/semeru/completeformer-masked"
# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""
# The HuggingFace datasets library doesn't host the datasets; it only points to
# the original files. This can be an arbitrary nested dict/list of URLs (see the
# `_split_generators` method below).
_DATA_URLS = {
"tokenizer": "https://huggingface.co/datasets/semeru/completeformer-masked/resolve/main/data_for_tokenizer_training.csv",
"all": {
"train": "https://huggingface.co/datasets/semeru/completeformer-masked/resolve/main/training_clean.csv",
"valid": "https://huggingface.co/datasets/semeru/completeformer-masked/resolve/main/validation_clean.csv",
"test": "https://huggingface.co/datasets/semeru/completeformer-masked/resolve/main/test_clean.csv",
},
"mix": {
"train": "https://huggingface.co/datasets/semeru/completeformer-masked/resolve/main/training_mix.csv",
"valid": "https://huggingface.co/datasets/semeru/completeformer-masked/resolve/main/validation_mix.csv",
"test": "https://huggingface.co/datasets/semeru/completeformer-masked/resolve/main/test_mix.csv",
},
"length_short": {
"train": "https://huggingface.co/datasets/semeru/completeformer-masked/resolve/main/training_short.csv",
"train_long": "https://huggingface.co/datasets/semeru/completeformer-masked/resolve/main/training_long.csv",
"valid": "https://huggingface.co/datasets/semeru/completeformer-masked/resolve/main/validation_length.csv",
"test": "https://huggingface.co/datasets/semeru/completeformer-masked/resolve/main/test_short.csv",
},
"length_medium": {
"train": "https://huggingface.co/datasets/semeru/completeformer-masked/resolve/main/training_medium.csv",
"valid": "https://huggingface.co/datasets/semeru/completeformer-masked/resolve/main/validation_length.csv",
"test": "https://huggingface.co/datasets/semeru/completeformer-masked/resolve/main/test_medium.csv",
},
"length_long": {
"train": "https://huggingface.co/datasets/semeru/completeformer-masked/resolve/main/training_long.csv",
"valid": "https://huggingface.co/datasets/semeru/completeformer-masked/resolve/main/validation_length.csv",
"test": "https://huggingface.co/datasets/semeru/completeformer-masked/resolve/main/test_long.csv",
},
"length_mix": {
"train": "https://huggingface.co/datasets/semeru/completeformer-masked/resolve/main/training_length_mix.csv",
"valid": "https://huggingface.co/datasets/semeru/completeformer-masked/resolve/main/validation_length.csv",
"test": "https://huggingface.co/datasets/semeru/completeformer-masked/resolve/main/test_length.csv",
},
}
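
# The keys above mirror the BUILDER_CONFIGS names below. Every non-tokenizer
# configuration ships train/valid/test CSVs, and the length_* configurations
# share a single validation file (validation_length.csv).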
class CompleteformerMasked(datasets.GeneratorBasedBuilder):
    """Masked method blocks for code completion, with masking-level, length-based, and tokenizer-training configurations."""
VERSION = datasets.Version("1.1.0")
    # This dataset has multiple configurations, selected by name, e.g.:
    # data = datasets.load_dataset('semeru/completeformer-masked', 'all')
    # data = datasets.load_dataset('semeru/completeformer-masked', 'length_short')
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="all",
            version=VERSION,
            description="Each method with its complex, medium, and simple masked blocks.",
        ),
        datasets.BuilderConfig(
            name="mix",
            version=VERSION,
            description="Mixed masked input/target pairs.",
        ),
        datasets.BuilderConfig(
            name="length_short",
            version=VERSION,
            description="Masked input/target pairs for short methods.",
        ),
        datasets.BuilderConfig(
            name="length_medium",
            version=VERSION,
            description="Masked input/target pairs for medium-length methods.",
        ),
        datasets.BuilderConfig(
            name="length_long",
            version=VERSION,
            description="Masked input/target pairs for long methods.",
        ),
        datasets.BuilderConfig(
            name="length_mix",
            version=VERSION,
            description="Masked input/target pairs across method lengths.",
        ),
        datasets.BuilderConfig(
            name="tokenizer",
            version=VERSION,
            description="Raw functions for tokenizer training.",
        ),
    ]
DEFAULT_CONFIG_NAME = "all"
def _info(self):
if self.config.name == "tokenizer":
features = datasets.Features(
{
"function": datasets.Value("string"),
}
)
elif self.config.name == "all":
features = datasets.Features(
{
"method": datasets.Value("string"),
"block": datasets.Value("string"),
"complex_masked_block": datasets.Value("string"),
"complex_input": datasets.Value("string"),
"complex_target": datasets.Value("string"),
"medium_masked_block": datasets.Value("string"),
"medium_input": datasets.Value("string"),
"medium_target": datasets.Value("string"),
"simple_masked_block": datasets.Value("string"),
"simple_input": datasets.Value("string"),
"simple_target": datasets.Value("string"),
}
)
elif self.config.name == "mix":
features = datasets.Features(
{
"input": datasets.Value("string"),
"target": datasets.Value("string"),
}
)
elif self.config.name.startswith("length_"):
features = datasets.Features(
{
"input": datasets.Value("string"),
"target": datasets.Value("string"),
"size": datasets.Value("int64"),
}
)
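        else:
            # Guard (added for safety, not in the original template): fail loudly
            # on an unknown configuration instead of letting `features` go
            # unbound below.
            raise ValueError(f"Unsupported configuration: {self.config.name}")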
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
supervised_keys=None,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # This method downloads the data and defines the splits for the
        # configuration selected by the user (self.config.name).
        # dl_manager is a datasets.download.DownloadManager: it accepts any
        # nested list/dict of URLs and returns the same structure with each URL
        # replaced by a path to the local, cached (and, for archives, extracted) file.
        my_urls = _DATA_URLS[self.config.name]
if self.config.name == "tokenizer":
data_dir = dl_manager.download_and_extract(my_urls)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"filepath": data_dir},
),
]
        else:
            # download_and_extract accepts the nested dict of URLs directly and
            # returns the same structure with local file paths.
            data_dirs = dl_manager.download_and_extract(my_urls)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"file_path": data_dirs["train"],
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"file_path": data_dirs["valid"],
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"file_path": data_dirs["test"],
},
),
]
    def _generate_examples(self, file_path):
"""Yields examples as (key, example) tuples."""
        # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
        # The `key` is here for legacy reasons (tfds) and is not important in itself.
with open(file_path, encoding="utf-8") as f:
csv_reader = csv.reader(f, quotechar='"', delimiter=",", quoting=csv.QUOTE_ALL, skipinitialspace=True)
next(csv_reader, None) # skip header
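            # In every configuration the first CSV column is a row index, so it
            # is skipped below and only the remaining columns are used.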
for row_id, row in enumerate(csv_reader):
if self.config.name == "tokenizer":
yield row_id, {
"function": row[1],
}
elif self.config.name == "all":
                    (_, method, block,
                     complex_masked_block, complex_input, complex_target,
                     medium_masked_block, medium_input, medium_target,
                     simple_masked_block, simple_input, simple_target) = row
yield row_id, {
"method": method,
"block": block,
"complex_masked_block": complex_masked_block,
"complex_input": complex_input,
"complex_target": complex_target,
"medium_masked_block": medium_masked_block,
"medium_input": medium_input,
"medium_target": medium_target,
"simple_masked_block": simple_masked_block,
"simple_input": simple_input,
"simple_target": simple_target,
}
                elif self.config.name == "mix":
                    # `input_text` avoids shadowing the built-in `input`.
                    _, input_text, target = row
                    yield row_id, {
                        "input": input_text,
                        "target": target,
                    }
                elif self.config.name.startswith("length_"):
                    _, input_text, target, size = row
                    yield row_id, {
                        "input": input_text,
                        "target": target,
                        "size": int(size),
                    }
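

if __name__ == "__main__":
    # Minimal smoke test: a sketch assuming a `datasets` version that still
    # supports loading from a local script path (script-based loading was
    # removed in datasets 3.0). Downloads the default "all" configuration and
    # prints one training example.
    ds = datasets.load_dataset(__file__, "all")
    print(ds)
    print(ds["train"][0])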