# Loading script for the InToxiCat dataset.

import json

import datasets


logger = datasets.logging.get_logger(__name__)


_CITATION = """ """


_DESCRIPTION = """InToxiCat is a dataset for the detection of abusive language in Catalan."""


_HOMEPAGE = """https://huggingface.co/datasets/projecte-aina/InToxiCat"""



_URL = "https://huggingface.co/datasets/projecte-aina/InToxicat/resolve/main/"
_FILE_TRAIN = "train.json"
_FILE_DEV = "dev.json"
_FILE_TEST = "test.json"


class InToxiCatConfig(datasets.BuilderConfig):
    """ Builder config for the InToxiCat dataset """

    def __init__(self, **kwargs):
        """BuilderConfig for InToxiCat.
        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super(InToxiCatConfig, self).__init__(**kwargs)


class InToxiCat(datasets.GeneratorBasedBuilder):
    """ InToxiCat Dataset """


    BUILDER_CONFIGS = [
        InToxiCatConfig(
            name="intoxicat",
            version=datasets.Version("1.0.0"),
            description="InToxiCat dataset",
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                 "id": datasets.Value("string"),
                 "context": datasets.Value("string"),
                 "sentence": datasets.Value("string"),
                 "topic": datasets.Value("string"),
                 "keywords": datasets.Sequence(datasets.Value("string")),
                 "context_needed": datasets.Value("string"),
                 "is_abusive": datasets.features.ClassLabel(names=['abusive','not_abusive']),
                 "abusiveness_agreement": datasets.Value("string"),
                 "target_type": datasets.Sequence(datasets.features.ClassLabel(names=['INDIVIDUAL','GROUP','OTHERS'])),
                 "abusive_spans": datasets.Sequence(feature={'text': datasets.Value(dtype='string', id=None), 'index': datasets.Value(dtype='string', id=None)}, length=-1, id=None), #datasets.Sequence(feature=datasets.Sequence(datasets.Value(dtype='string', id=None))),
                 "target_spans": datasets.Sequence(feature={'text': datasets.Value(dtype='string', id=None), 'index': datasets.Value(dtype='string', id=None)}, length=-1, id=None),
                 "is_implicit": datasets.Value("string")
                }
            ),
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        urls_to_download = {
            "train": _FILE_TRAIN,
            "dev": _FILE_DEV,
            "test": _FILE_TEST
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]})
        ]

    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        logger.info("generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
        for id_, example in enumerate(data):
            yield id_, {
                "id": example["id"],
                "context": example["context"],
                "sentence": example["sentence"],
                "topic": example["topic"],
                "keywords": example["key_words"],
                "context_needed": example["annotation"]["context_needed"] if example["annotation"]["context_needed"] else None,
                "is_abusive": example["annotation"]["is_abusive"] if example["annotation"]["is_abusive"] else None,
                "abusiveness_agreement": example["annotation"]["abusiveness_agreement"],
                "target_type": example["annotation"]["target_type"] if example["annotation"]["target_type"] else None,
                "abusive_spans": {
                    "text": [text for text, _ in example["annotation"]["abusive_spans"]],
                    "index": [index for _, index in example["annotation"]["abusive_spans"]]
                    } if example["annotation"]["abusive_spans"] != [] else None,
                "target_spans": {
                    "text": [text for text, _ in example["annotation"]["target_spans"]],
                    "index": [index for _, index in example["annotation"]["target_spans"]]
                    } if example["annotation"]["target_spans"] != [] else None,
                "is_implicit": example["annotation"]["is_implicit"] if example["annotation"]["is_implicit"] != "" else None
            }
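

if __name__ == "__main__":
    # A minimal usage sketch, not part of the loading machinery itself. It assumes
    # this script is hosted in the projecte-aina/InToxiCat dataset repository (the
    # repo id is taken from _HOMEPAGE) and that the split files listed above are
    # present there. `datasets.load_dataset` builds the three splits and returns a
    # DatasetDict keyed by "train", "validation" and "test".
    dataset = datasets.load_dataset("projecte-aina/InToxiCat")
    print(dataset)
    print(dataset["train"][0])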