holylovenia committed on
Commit
d3e282d
1 Parent(s): 561886a

Upload uit_victsd.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. uit_victsd.py +132 -0
uit_victsd.py ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ from typing import Dict, List, Tuple
16
+
17
+ import datasets
18
+
19
+ from seacrowd.utils import schemas
20
+ from seacrowd.utils.configs import SEACrowdConfig
21
+ from seacrowd.utils.constants import Licenses, Tasks
22
+
23
+ _CITATION = """
24
+ @inproceedings{,
25
+ author = {Nguyen, Luan Thanh and Van Nguyen, Kiet and Nguyen, Ngan Luu-Thuy},
26
+ title = {Constructive and Toxic Speech Detection for Open-domain Social Media Comments in Vietnamese},
27
+ booktitle = {Advances and Trends in Artificial Intelligence. Artificial Intelligence Practices},
28
+ year = {2021},
29
+ publisher = {Springer International Publishing},
30
+ address = {Kuala Lumpur, Malaysia},
31
+ pages = {572--583},
32
+ }
33
+ """
34
+
35
+ _LOCAL = False
36
+ _LANGUAGES = ["vie"]
37
+ _DATASETNAME = "uit_victsd"
38
+ _DESCRIPTION = """
39
+ The UIT-ViCTSD (Vietnamese Constructive and Toxic Speech Detection dataset) is a compilation of 10,000 human-annotated
40
+ comments intended for constructive and toxic comments detection. The dataset spans 10 domains, reflecting the diverse topics
41
+ and expressions found in social media interactions among Vietnamese users.
42
+ """
43
+
44
+ _HOMEPAGE = "https://github.com/tarudesu/ViCTSD"
45
+ _LICENSE = Licenses.UNKNOWN.value
46
+ _URL = "https://huggingface.co/datasets/tarudesu/ViCTSD"
47
+
48
+
49
+ _SUPPORTED_TASKS = [Tasks.INTENT_CLASSIFICATION, Tasks.ABUSIVE_LANGUAGE_PREDICTION]
50
+ _SOURCE_VERSION = "1.0.0"
51
+ _SEACROWD_VERSION = "2024.06.20"
52
+
53
+
54
class UiTViCTSDDataset(datasets.GeneratorBasedBuilder):
    """
    Dataset of Vietnamese social media comments annotated
    for constructiveness and toxicity.

    Exposes two subsets ("constructiveness", "toxicity"), each in two
    schemas: the raw "source" schema mirroring the upstream HF dataset
    columns, and the SEACrowd "seacrowd_text" schema (id/text/label).
    """

    SUBSETS = ["constructiveness", "toxicity"]
    # Binary labels as used by the upstream dataset (0 = negative class,
    # 1 = positive class for the respective subset).
    CLASS_LABELS = [0, 1]

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_{subset}_source",
            version=datasets.Version(_SOURCE_VERSION),
            description=f"{_DATASETNAME} source schema for {subset} subset",
            schema="source",
            subset_id=f"{_DATASETNAME}_{subset}",
        )
        for subset in SUBSETS
    ] + [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_{subset}_seacrowd_text",
            version=datasets.Version(_SEACROWD_VERSION),
            description=f"{_DATASETNAME} SEACrowd schema for {subset} subset",
            schema="seacrowd_text",
            subset_id=f"{_DATASETNAME}_{subset}",
        )
        for subset in SUBSETS
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_constructiveness_source"

    def _info(self) -> datasets.DatasetInfo:
        """Return dataset metadata with features matching the active schema.

        Raises:
            ValueError: if the config schema is neither "source" nor
                "seacrowd_text" (previously this fell through and crashed
                with an UnboundLocalError on `features`).
        """
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "Unnamed: 0": datasets.Value("int64"),  # Column name missing in original dataset
                    "Comment": datasets.Value("string"),
                    "Constructiveness": datasets.ClassLabel(names=self.CLASS_LABELS),
                    "Toxicity": datasets.ClassLabel(names=self.CLASS_LABELS),
                    "Title": datasets.Value("string"),
                    "Topic": datasets.Value("string"),
                }
            )
        elif self.config.schema == "seacrowd_text":
            features = schemas.text_features(label_names=self.CLASS_LABELS)
        else:
            raise ValueError(f"Unsupported schema: {self.config.schema}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Declare train/validation/test splits.

        `dl_manager` is unused because data is fetched via `load_dataset`
        in `_generate_examples`. Uses `str(split)` (public API) rather than
        the private `split._name` attribute to obtain the split name.
        """
        return [
            datasets.SplitGenerator(name=split, gen_kwargs={"split": str(split)})
            for split in (datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST)
        ]

    def _load_hf_data_from_remote(self, split: str) -> datasets.DatasetDict:
        """Load one split of the dataset from the HuggingFace Hub.

        The repo id ("org/name") is derived from the last two path
        components of `_URL`.
        """
        HF_REMOTE_REF = "/".join(_URL.split("/")[-2:])
        _hf_dataset_source = datasets.load_dataset(HF_REMOTE_REF, split=split)
        return _hf_dataset_source

    def _generate_examples(self, split: str) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples.

        For the "source" schema the upstream row is passed through
        unchanged; for "seacrowd_text" the label column is selected once
        from the config name (hoisted out of the row loop) and each row is
        mapped to the id/text/label schema.

        Raises:
            ValueError: on an unrecognized schema or subset name
                (previously an unknown subset left `label` unbound).
        """
        # Resolve the label column once, instead of re-matching the config
        # name for every row.
        label_column = None
        if self.config.schema == "seacrowd_text":
            if "constructiveness" in self.config.name:
                label_column = "Constructiveness"
            elif "toxicity" in self.config.name:
                label_column = "Toxicity"
            else:
                raise ValueError(f"Unrecognized subset in config name: {self.config.name}")
        elif self.config.schema != "source":
            raise ValueError(f"Unsupported schema: {self.config.schema}")

        data = self._load_hf_data_from_remote(split=split)
        for index, row in enumerate(data):
            if self.config.schema == "source":
                example = row
            else:  # seacrowd_text
                example = {"id": str(index), "text": row["Comment"], "label": row[label_column]}
            yield index, example