20ng_not_enough_data.py
# Lint as: python3
"""20ng classification dataset."""

import csv
import sys

import datasets
from datasets.tasks import TextClassification  # only needed if task_templates below is re-enabled

# Newsgroup posts can exceed csv's default field size limit, so raise it.
csv.field_size_limit(sys.maxsize)

_DESCRIPTION = """\
20 Newsgroups text classification dataset: English newsgroup posts, each labeled with one of 20 topic categories. This variant provides train/validation/test CSV splits and an additional "labeled_mask" column alongside the text and label of every example.
"""
_CITATION = """"""
_TRAIN_DOWNLOAD_URL = "https://huggingface.co/datasets/vmalperovich/20ng_not_enough_data/resolve/main/train.csv"
_TEST_DOWNLOAD_URL = "https://huggingface.co/datasets/vmalperovich/20ng_not_enough_data/resolve/main/test.csv"
_VALID_DOWNLOAD_URL = "https://huggingface.co/datasets/vmalperovich/20ng_not_enough_data/raw/main/validation.csv"
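
# Expected CSV layout (an assumption, inferred from _generate_examples below):
# a header row, then ';'-delimited rows with every field quoted, containing
# text;label;labeled_mask, e.g.
#   "From: ... post body ...";"sci.space";"1"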
CATEGORY_MAPPING = {
    'comp.sys.mac.hardware': 0,
    'comp.graphics': 1,
    'sci.space': 2,
    'talk.politics.guns': 3,
    'sci.med': 4,
    'comp.sys.ibm.pc.hardware': 5,
    'comp.os.ms-windows.misc': 6,
    'rec.motorcycles': 7,
    'misc.forsale': 8,
    'alt.atheism': 9,
    'rec.autos': 10,
    'sci.electronics': 11,
    'comp.windows.x': 12,
    'rec.sport.hockey': 13,
    'rec.sport.baseball': 14,
    'talk.politics.mideast': 15,
    'sci.crypt': 16,
    'soc.religion.christian': 17,
    'talk.politics.misc': 18,
    'talk.religion.misc': 19,
}
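
# Sanity check (an illustrative addition, not in the original script): the
# ClassLabel feature below is built from list(CATEGORY_MAPPING.keys()), which
# assumes the dict's insertion order matches its integer ids 0..19.
assert list(CATEGORY_MAPPING.values()) == list(range(len(CATEGORY_MAPPING)))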


class NG(datasets.GeneratorBasedBuilder):
    """20ng classification dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "label": datasets.features.ClassLabel(names=list(CATEGORY_MAPPING.keys())),
                    # Extra per-example column, kept as a raw string.
                    "labeled_mask": datasets.Value("string"),
                }
            ),
            homepage="",
            citation=_CITATION,
            # task_templates=[TextClassification(text_column="text", label_column="label")],
        )

    def _split_generators(self, dl_manager):
        # download_and_extract returns local cached paths to the three CSV files.
        train_path = dl_manager.download_and_extract(_TRAIN_DOWNLOAD_URL)
        test_path = dl_manager.download_and_extract(_TEST_DOWNLOAD_URL)
        valid_path = dl_manager.download_and_extract(_VALID_DOWNLOAD_URL)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": valid_path}),
        ]

    def _generate_examples(self, filepath):
        """Generate examples."""
        with open(filepath, encoding="utf-8") as csv_file:
            # Rows are ';'-delimited with every field quoted; the first row is a header.
            csv_reader = csv.reader(
                csv_file, quotechar='"', delimiter=";", quoting=csv.QUOTE_ALL, skipinitialspace=True
            )
            _ = next(csv_reader)  # skip header
            for id_, row in enumerate(csv_reader):
                text, label, label_mask = row
                # Map the category name to its integer id; values not found in
                # the mapping are passed through unchanged.
                label = CATEGORY_MAPPING.get(label, label)
                yield id_, {"text": text, "label": label, "labeled_mask": label_mask}
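

if __name__ == "__main__":
    # Usage sketch (an addition, not part of the original loading script): load
    # the dataset from the Hub repository that hosts this file. Recent versions
    # of `datasets` may also require trust_remote_code=True for script-based
    # datasets like this one.
    from datasets import load_dataset

    ds = load_dataset("vmalperovich/20ng_not_enough_data")
    example = ds["train"][0]
    # "label" is a ClassLabel id; int2str recovers the category name.
    print(example["text"][:200])
    print(ds["train"].features["label"].int2str(example["label"]), example["labeled_mask"])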