from pathlib import Path
from typing import List

import datasets
import pandas as pd

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import DEFAULT_SEACROWD_VIEW_NAME, DEFAULT_SOURCE_VIEW_NAME, Tasks

# Dataset identity and the source/unified view names used by the SeaCrowd loader.
_DATASETNAME = "id_abusive_news_comment"
_SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
_UNIFIED_VIEW_NAME = DEFAULT_SEACROWD_VIEW_NAME

_LANGUAGES = ["ind"]  # We follow ISO639-3 language code (https://iso639-3.sil.org/code_tables/639/data)
_LOCAL = False  # data is fetched from a remote URL, not bundled locally
# BibTeX citation for the originating paper (ISRITI 2019).
_CITATION = """\
@INPROCEEDINGS{9034620,  author={Kiasati Desrul, Dhamir Raniah and Romadhony, Ade},  booktitle={2019 International Seminar on Research of Information Technology and Intelligent Systems (ISRITI)},   title={Abusive Language Detection on Indonesian Online News Comments},   year={2019},  volume={},  number={},  pages={320-325},  doi={10.1109/ISRITI48646.2019.9034620}}
"""

_DESCRIPTION = """\
Abusive language is an expression used by a person with insulting delivery of any person's aspect.
In the modern era, the use of harsh words is often found on the internet, one of them is in the comment section of online news articles which contains harassment, insult, or a curse.
An abusive language detection system is important to prevent the negative effect of such comments.
This dataset contains 3184 samples of Indonesian online news comments with 3 labels.
"""

_HOMEPAGE = "https://github.com/dhamirdesrul/Indonesian-Online-News-Comments"

_LICENSE = "Creative Commons Attribution Share-Alike 4.0 International"

# Direct link to the single Excel file shipped in the upstream repository;
# the dataset provides only a train split.
_URLs = {
    "train": "https://github.com/dhamirdesrul/Indonesian-Online-News-Comments/raw/master/Dataset/Abusive%20Language%20Detection%20on%20Indonesian%20Online%20News%20Comments%20Dataset%20.xlsx",
}

# NOTE(review): labeled as sentiment analysis in the task registry, though the
# underlying task is abusive-language classification — confirm against the
# SeaCrowd task taxonomy.
_SUPPORTED_TASKS = [Tasks.SENTIMENT_ANALYSIS]

_SOURCE_VERSION = "1.0.0"
_SEACROWD_VERSION = "2024.06.20"


class IdAbusiveNewsComment(datasets.GeneratorBasedBuilder):
    """Loader for Indonesian abusive online news comments (3,184 samples, 3 labels).

    Exposes two schemas: ``source`` (raw index/text/label columns) and
    ``seacrowd_text`` (the unified SeaCrowd text-classification schema).
    """

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name="id_abusive_news_comment_source",
            version=datasets.Version(_SOURCE_VERSION),
            description="Abusive Online News Comment source schema",
            schema="source",
            subset_id="id_abusive_news_comment",
        ),
        SEACrowdConfig(
            name="id_abusive_news_comment_seacrowd_text",
            version=datasets.Version(_SEACROWD_VERSION),
            description="Abusive Online News Comment Nusantara schema",
            schema="seacrowd_text",
            subset_id="id_abusive_news_comment",
        ),
    ]

    # BUG FIX: the default config must name an entry in BUILDER_CONFIGS.
    # The bare subset id "id_abusive_news_comment" is not a config name, so
    # loading without an explicit config would fail; default to the source view.
    DEFAULT_CONFIG_NAME = "id_abusive_news_comment_source"

    def _info(self):
        """Return dataset metadata with features matching the selected schema.

        Raises:
            ValueError: if the config carries an unknown schema (previously this
                fell through with ``features`` unbound, raising a NameError).
        """
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "index": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "label": datasets.Value("string"),
                }
            )
        elif self.config.schema == "seacrowd_text":
            # Labels are kept as the raw string codes "1"/"2"/"3" from the sheet.
            features = schemas.text_features(["1", "2", "3"])
        else:
            raise ValueError(f"Invalid schema: {self.config.schema}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download the upstream Excel file; the dataset ships a train split only."""
        train_excel_path = Path(dl_manager.download(_URLs["train"]))
        data_files = {
            "train": train_excel_path,
        }

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": data_files["train"]},
            ),
        ]

    def _generate_examples(self, filepath: Path):
        """Yield (key, example) pairs from the Excel sheet for the active schema.

        ``reset_index()`` materialises the row number as an ``index`` column so
        it can serve as a stable example id in both schemas.
        """
        # assumes the sheet has columns "Kalimat" (text) and "label" — TODO confirm
        df = pd.read_excel(filepath).reset_index()

        if self.config.schema == "source":
            for row in df.itertuples():
                ex = {"index": str(row.index), "text": row.Kalimat, "label": str(row.label)}
                yield row.index, ex
        elif self.config.schema == "seacrowd_text":
            for row in df.itertuples():
                ex = {"id": str(row.index), "text": row.Kalimat, "label": str(row.label)}
                yield row.index, ex
        else:
            raise ValueError(f"Invalid config: {self.config.name}")