|
import datasets |
|
import textwrap |
|
from sklearn.datasets import fetch_20newsgroups |
|
from sklearn.model_selection import train_test_split |
|
import pandas as pd |
|
|
|
# The 20 newsgroup names in scikit-learn's canonical (alphabetical) order.
# These double as the ClassLabel names exposed by this dataset.
_NEWSGROUPS = [
    # alt / comp
    "alt.atheism",
    "comp.graphics",
    "comp.os.ms-windows.misc",
    "comp.sys.ibm.pc.hardware",
    "comp.sys.mac.hardware",
    "comp.windows.x",
    # misc / rec
    "misc.forsale",
    "rec.autos",
    "rec.motorcycles",
    "rec.sport.baseball",
    "rec.sport.hockey",
    # sci
    "sci.crypt",
    "sci.electronics",
    "sci.med",
    "sci.space",
    # soc / talk
    "soc.religion.christian",
    "talk.politics.guns",
    "talk.politics.mideast",
    "talk.politics.misc",
    "talk.religion.misc",
]
|
|
|
# Prose description followed by the numbered label list, built in one expression.
_DESCRIPTION = (
    textwrap.dedent(
        """\
        The bydate version of the 20-newsgroup dataset fetched from scikit_learn and
        split in stratified manner into train, validation and test sets. With and
        without metadata is made available as individual config names. The test set
        from the original 20 newsgroup dataset is retained while the original train
        set is split 80:20 into train and validation sets in stratified manner based
        on the newsgroup. The 20 different newsgroup are provided as the labels
        instead of config names as specified in the official huggingface dataset.
        Newsgroups are specified as labels to provide a simplified setup for text
        classification task. The 20 different newsgroup functioning as labels are:
        """
    )
    + "\n".join(f"({num + 1}) {group}" for num, group in enumerate(_NEWSGROUPS))
)
|
|
|
_HOMEPAGE = "http://qwone.com/~jason/20Newsgroups/"

# NOTE: the original entry was not valid BibTeX — fields must be separated by
# commas; commas added after `title`, `year`, and `booktitle`.
_CITATION = """
@inproceedings{Lang95,
author = {Ken Lang},
title = {Newsweeder: Learning to filter netnews},
year = {1995},
booktitle = {Proceedings of the Twelfth International Conference on Machine Learning},
pages = {331-339}
}
"""
|
|
|
# Use the public `datasets.Version` API rather than the internal
# `datasets.utils` path.
_VERSION = datasets.Version("2.0.0")
|
|
|
|
|
class NewsgroupsConfig(datasets.BuilderConfig):
    """BuilderConfig for the 20-Newsgroups dataset.

    All configs share the module-level ``_VERSION``; every other
    ``BuilderConfig`` keyword argument (``name``, ``description``, ...) is
    forwarded unchanged.
    """

    def __init__(self, **kwargs):
        # Python-3 zero-argument super() replaces the redundant Py2-style form.
        super().__init__(version=_VERSION, **kwargs)
|
|
|
|
|
class Newsgroups(datasets.GeneratorBasedBuilder):
    """Stratified bydate 20-Newsgroups with train/validation/test splits.

    Two configs are exposed: ``with_metadata`` (raw posts) and
    ``without_metadata`` (headers/footers/quotes stripped). In both cases runs
    of whitespace (including newlines) are collapsed to a single space and
    posts that are empty after stripping are dropped. The official test split
    is retained; the official train split is divided 80:20 into train and
    validation, stratified by newsgroup.
    """

    BUILDER_CONFIGS = [
        NewsgroupsConfig(
            name="with_metadata",
            description=textwrap.dedent(
                """\
                The original complete bydate 20-Newsgroups dataset with the headers,
                footers, and quotes metadata as intact and just the continuous
                whitespaces (including new-line) replaced by single whitespace
                characters."""
            ),
        ),
        NewsgroupsConfig(
            name="without_metadata",
            description=textwrap.dedent(
                """\
                The bydate 20-Newsgroups dataset without the headers, footers,
                and quotes metadata as well as the continuous whitespaces
                (including new-line) replaced by single whitespace characters."""
            ),
        ),
    ]

    def _info(self):
        """Return dataset metadata: a text feature plus a 20-way ClassLabel."""
        features = datasets.Features(
            {
                "text": datasets.Value("large_string"),
                "labels": datasets.features.ClassLabel(names=_NEWSGROUPS),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    @staticmethod
    def _drop_empty(bunch):
        """Remove, in place, every document that is empty after stripping.

        Keeps ``bunch.data`` and ``bunch.target`` aligned by filtering both
        with the same index set. (Stripping metadata can leave documents
        empty, so this matters mainly for the ``without_metadata`` config.)
        """
        empty_idcs = {i for i, doc in enumerate(bunch.data) if doc.strip() == ""}
        bunch.data = [
            doc for i, doc in enumerate(bunch.data) if i not in empty_idcs
        ]
        bunch.target = [
            label for i, label in enumerate(bunch.target) if i not in empty_idcs
        ]

    def _split_generators(self, dl_manager):
        """Fetch the data, clean it, and build the three split generators.

        ``dl_manager`` is unused: the data comes from scikit-learn's
        ``fetch_20newsgroups`` rather than a download URL.
        """
        # with_metadata keeps everything; remove=() is sklearn's default,
        # so the two configs differ only in this tuple.
        remove = (
            ()
            if self.config.name == "with_metadata"
            else ("headers", "footers", "quotes")
        )
        train_data = fetch_20newsgroups(
            subset="train", random_state=42, remove=remove
        )
        test_data = fetch_20newsgroups(
            subset="test", random_state=42, remove=remove
        )

        # Drop documents left empty by metadata removal (keeps labels aligned).
        self._drop_empty(train_data)
        self._drop_empty(test_data)

        # Map integer targets to newsgroup-name labels.
        train_labels = [train_data.target_names[i] for i in train_data.target]
        test_labels = [test_data.target_names[i] for i in test_data.target]

        train_df = pd.DataFrame({"text": train_data.data, "labels": train_labels})
        test_df = pd.DataFrame({"text": test_data.data, "labels": test_labels})

        # Collapse whitespace runs (incl. newlines) to single spaces. Raw
        # strings avoid the invalid "\s" escape (SyntaxWarning on Py3.12+).
        train_df["text"] = train_df["text"].str.replace(r"\s+", " ", regex=True)
        test_df["text"] = test_df["text"].str.replace(r"\s+", " ", regex=True)

        # 80:20 stratified train/validation split of the official train set.
        train_df, val_df = train_test_split(
            train_df, test_size=0.2, random_state=42, stratify=train_df["labels"]
        )
        train_df = train_df.reset_index(drop=True)
        val_df = val_df.reset_index(drop=True)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"df": train_df}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"df": val_df}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"df": test_df}
            ),
        ]

    def _generate_examples(self, df):
        """Yield ``(index, {"text": ..., "labels": ...})`` pairs from *df*."""
        for idx, row in df.iterrows():
            yield idx, row.to_dict()
|
|