# Lint as: python3
"""20ng  classification dataset."""


import csv
import sys

import datasets
from datasets.tasks import TextClassification  # only needed if the task template below is re-enabled

# Newsgroup posts can be very long; raise the CSV field size limit to avoid parse errors.
csv.field_size_limit(sys.maxsize)

_DESCRIPTION = """\
20 Newsgroups text classification dataset: English newsgroup posts labeled with one of 20
topical categories (computers, recreation, science, politics, religion, and for-sale ads).
Each example provides the post text, its category label, and a boolean `labeled_mask` flag.
"""

_CITATION = """"""

_TRAIN_DOWNLOAD_URL = "https://huggingface.co/datasets/vmalperovich/20ng_not_enough_data/resolve/main/train.csv"
_TEST_DOWNLOAD_URL = "https://huggingface.co/datasets/vmalperovich/20ng_not_enough_data/resolve/main/test.csv"
_VALID_DOWNLOAD_URL = "https://huggingface.co/datasets/vmalperovich/20ng_not_enough_data/raw/main/validation.csv"


CATEGORY_MAPPING = {
    'comp.sys.mac.hardware': 0,
    'comp.graphics': 1,
    'sci.space': 2,
    'talk.politics.guns': 3,
    'sci.med': 4,
    'comp.sys.ibm.pc.hardware': 5,
    'comp.os.ms-windows.misc': 6,
    'rec.motorcycles': 7,
    'misc.forsale': 8,
    'alt.atheism': 9,
    'rec.autos': 10,
    'sci.electronics': 11,
    'comp.windows.x': 12,
    'rec.sport.hockey': 13,
    'rec.sport.baseball': 14,
    'talk.politics.mideast': 15,
    'sci.crypt': 16,
    'soc.religion.christian': 17,
    'talk.politics.misc': 18,
    'talk.religion.misc': 19,
}

class NG(datasets.GeneratorBasedBuilder):
    """20ng  classification dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "label": datasets.features.ClassLabel(names=list(CATEGORY_MAPPING.keys())),
                    "labeled_mask": datasets.Value("bool"),
                }
            ),
            homepage="",
            citation=_CITATION,
            # task_templates=[TextClassification(text_column="text", label_column="label")],
        )

    def _split_generators(self, dl_manager):
        train_path = dl_manager.download_and_extract(_TRAIN_DOWNLOAD_URL)
        test_path = dl_manager.download_and_extract(_TEST_DOWNLOAD_URL)
        valid_path = dl_manager.download_and_extract(_VALID_DOWNLOAD_URL)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": valid_path}),
        ]

    def _generate_examples(self, filepath):
        """Generate examples."""
        with open(filepath, encoding="utf-8") as csv_file:
            csv_reader = csv.reader(
                csv_file, quotechar='"', delimiter=";", quoting=csv.QUOTE_ALL, skipinitialspace=True
            )
            _ = next(csv_reader)  # skip the header row
            for id_, row in enumerate(csv_reader):
                text, label, label_mask = row
                # Map the newsgroup name to its ClassLabel index; numeric labels pass through unchanged.
                label = CATEGORY_MAPPING.get(label, label)
                # The CSV stores the mask as text; coerce values like "True"/"1" to a real bool.
                label_mask = str(label_mask).strip().lower() in ("true", "1")
                yield id_, {"text": text, "label": label, "labeled_mask": label_mask}