Datasets:
Languages:
English
Multilinguality:
monolingual
Size Categories:
100K<n<1M
Language Creators:
found
Annotations Creators:
no-annotation
Tags:
License:
apache-2.0
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This loads the fewshot-pretraining dataset.""" | |
import json | |
import os | |
import pandas as pd | |
import datasets | |

# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.
},
year={2020}
}
"""
# You can copy an official description
_DESCRIPTION = """\
The Fewshot Table dataset consists of tables that naturally occur on the web and are formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. The dataset consists of approximately 413K tables that are extracted from the WDC Web Table Corpora 2015, which is released under the Apache-2.0 license. The WDC Web Table Corpora "contains vast amounts of HTML tables. [...] The Web Data Commons project extracts relational Web tables from the Common Crawl, the largest and most up-to-date Web corpus that is currently available to the public."
"""
# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = ""

_LICENSE = "Apache 2.0"
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in the `_split_generators` method).
_URLS = {
    "data_0": ["https://huggingface.co/datasets/JeremyAlain/123_test/raw/main/data/files_0.jsonl"],
    "data_1": ["https://huggingface.co/datasets/JeremyAlain/123_test/raw/main/data/files_1.jsonl"],
    "data_2": ["https://huggingface.co/datasets/JeremyAlain/123_test/raw/main/data/files_2.jsonl"],
}
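# Note (added for clarity): each files_N.jsonl listed above is an index file,
# not the data itself; every line of it holds the URL of one jsonl data file,
# which `_split_generators` downloads in a second step.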

logger = datasets.logging.get_logger(__name__)

class FewshotPretraining(datasets.GeneratorBasedBuilder):
    """The Fewshot Table dataset consists of tables that naturally occur on the web and are formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. The dataset consists of approximately 413K tables that are extracted from the WDC Web Table Corpora 2015, which is released under the Apache-2.0 license. The WDC Web Table Corpora "contains vast amounts of HTML tables. [...] The Web Data Commons project extracts relational Web tables from the Common Crawl, the largest and most up-to-date Web corpus that is currently available to the public."
    """

    VERSION = datasets.Version("1.1.0")

    # This is an example of a dataset with multiple configurations.
    # If you don't want/need to define several sub-sets in your dataset,
    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
    # You will be able to load one or the other configuration in the following list with
    # data = datasets.load_dataset('my_dataset', 'data_0')
    # data = datasets.load_dataset('my_dataset', 'data_1')
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="data_0", version=VERSION, description="This part of my dataset covers data_0"),
        datasets.BuilderConfig(name="data_1", version=VERSION, description="This part of my dataset covers data_1"),
        datasets.BuilderConfig(name="data_2", version=VERSION, description="This part of my dataset covers data_2"),
    ]

    DEFAULT_CONFIG_NAME = "data_0"  # It's not mandatory to have a default configuration. Just use one if it makes sense.

    def _info(self):
        features = datasets.Features(
            {
                "task": datasets.Value("string"),
                "input": datasets.Value("string"),
                "output": datasets.Value("string"),
                # A list of answer options for the example (one string per option).
                "options": datasets.Sequence(datasets.Value("string")),
                "pageTitle": datasets.Value("string"),
                "outputColName": datasets.Value("string"),
                "url": datasets.Value("string"),
                "wdcFile": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types.
            features=features,  # The features are identical for all three configurations.
            # If there's a common (input, target) tuple from the features, uncomment the supervised_keys line below and
            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
            # supervised_keys=("sentence", "label"),
            # Homepage of the dataset for documentation.
            # TODO: activate once we have a homepage.
            # homepage=_HOMEPAGE,
            # License for the dataset if available.
            license=_LICENSE,
            # Citation for the dataset.
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration.
        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name.
        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs.
        # It can accept any type or nested list/dict and will give back the same structure with the URLs replaced with paths to local files.
        # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive.
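        # For illustration (hypothetical values, added for clarity): passing
        #   {"data_0": ["https://example.com/files_0.jsonl"]}
        # to dl_manager.download_and_extract returns the same structure with
        # local cache paths, e.g. {"data_0": ["/cache/.../files_0.jsonl"]}.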
        # Download the index file for the selected configuration and read the
        # data file URLs it lists (one URL per line).
        urls = _URLS[self.config.name]
        local_extracted_path = dl_manager.download_and_extract(urls)[0]
        all_file_names_for_dataset_pd = pd.read_json(local_extracted_path, lines=True, orient="records")
        all_file_names_for_dataset = [file_name[0] for file_name in all_file_names_for_dataset_pd.values.tolist()]
        # Download every listed data file; the result holds the local paths.
        all_local_extracted_paths = dl_manager.download_and_extract(all_file_names_for_dataset)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples.
                gen_kwargs={
                    "file_paths": all_local_extracted_paths,
                },
            )
        ]

    # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
    def _generate_examples(self, file_paths):
        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
        for file_idx, file_path in enumerate(file_paths):
            data = pd.read_json(file_path, orient="records", lines=True)
            for i in range(data.shape[0]):
                row = data.iloc[i]
                # Yields examples as (key, example) tuples.
                key = "{}_{}_{}".format(row["task"], file_idx, i)
                yield key, {
                    "task": row["task"],
                    "input": row["input"],
                    "output": row["output"],
                    "options": row["options"],
                    "pageTitle": row["pageTitle"],
                    "outputColName": row["outputColName"],
                    "url": row["url"],
                    "wdcFile": row["wdcFile"],
                }
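
# Minimal usage sketch (added for illustration, not part of the original loader):
# load one configuration by pointing `datasets.load_dataset` at this script file.
# The configuration name must be one of "data_0", "data_1", or "data_2".
if __name__ == "__main__":
    example_split = datasets.load_dataset(__file__, "data_0", split="train")
    print(example_split[0])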