holylovenia committed
Commit
fca402d
1 Parent(s): daf93fc

Upload tgl_profanity.py with huggingface_hub

Files changed (1)
  1. tgl_profanity.py +115 -0
tgl_profanity.py ADDED
@@ -0,0 +1,115 @@
+ import csv
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+ from datasets.download.download_manager import DownloadManager
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import Licenses, Tasks
+
+ _CITATION = """
+ @article{galinato-etal-2023-context,
+     title = "Context-Based Profanity Detection and Censorship Using Bidirectional Encoder Representations from Transformers",
+     author = "Galinato, Valfrid and Amores, Lawrence and Magsino, Gino Ben and Sumawang, David Rafael",
+     month = "jan",
+     year = "2023",
+     url = "https://papers.ssrn.com/sol3/papers.cfm?abstract_id=4341604",
+ }
+ """
+
+ _LOCAL = False  # data is hosted on the Hugging Face Hub, not stored locally
+ _LANGUAGES = ["tgl"]
+ _DATASETNAME = "tgl_profanity"
+ _DESCRIPTION = """\
+ This dataset contains 13.8k Tagalog sentences containing profane words, together
+ with binary labels denoting whether or not the sentence conveys profanity /
+ abuse / hate speech. The data was scraped from Twitter using the Python library
+ snscrape and annotated manually by a panel of native Filipino speakers.
+ """
+
+ _HOMEPAGE = "https://huggingface.co/datasets/mginoben/tagalog-profanity-dataset/"
+ _LICENSE = Licenses.UNKNOWN.value
+ _SUPPORTED_TASKS = [Tasks.ABUSIVE_LANGUAGE_PREDICTION]
+ _SOURCE_VERSION = "1.0.0"
+ _SEACROWD_VERSION = "2024.06.20"
+ _URLS = {
+     "train": "https://huggingface.co/datasets/mginoben/tagalog-profanity-dataset/resolve/main/train.csv",
+     "val": "https://huggingface.co/datasets/mginoben/tagalog-profanity-dataset/resolve/main/val.csv",
+ }
+
+
+ class TagalogProfanityDataset(datasets.GeneratorBasedBuilder):
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+
+     SEACROWD_SCHEMA_NAME = "text"  # uses the shared SEACrowd text-classification schema
+
+     BUILDER_CONFIGS = [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_source",
+             version=SOURCE_VERSION,
+             description=f"{_DATASETNAME} source schema",
+             schema="source",
+             subset_id=_DATASETNAME,
+         ),
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_seacrowd_{SEACROWD_SCHEMA_NAME}",
+             version=SEACROWD_VERSION,
+             description=f"{_DATASETNAME} SEACrowd schema",
+             schema=f"seacrowd_{SEACROWD_SCHEMA_NAME}",
+             subset_id=_DATASETNAME,
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
+     CLASS_LABELS = ["1", "0"]  # binary labels kept as strings for the ClassLabel feature
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "text": datasets.Value("string"),
+                     "label": datasets.Value("int64"),
+                 }
+             )
+         elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
+             features = schemas.text_features(label_names=self.CLASS_LABELS)
+         else:
+             raise ValueError(f"Invalid config schema: {self.config.schema}")
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         data_files = dl_manager.download_and_extract(_URLS)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"filepath": data_files["train"]},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"filepath": data_files["val"]},
+             ),
+         ]
+
+     def _generate_examples(self, filepath: Path) -> Tuple[int, Dict]:
+         """Yield examples as (key, example) tuples."""
+         with open(filepath, encoding="utf-8") as f:
+             csv_reader = csv.reader(f, delimiter=",")
+             next(csv_reader, None)  # skip the header row
+             for idx, row in enumerate(csv_reader):
+                 text, label = row
+                 if self.config.schema == "source":
+                     example = {"text": text, "label": int(label)}
+                 elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
+                     example = {"id": str(idx), "text": text, "label": label}  # string label so the ClassLabel matches by name, not index
+                 yield idx, example
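For reference, a minimal usage sketch of this loader. It assumes the seacrowd package is importable (so the schema imports above resolve), that the script is saved locally as tgl_profanity.py, and a recent datasets release, which requires trust_remote_code=True for script-based loaders. The config names follow the BUILDER_CONFIGS defined above:

    import datasets

    # Source schema: rows of {"text": str, "label": int}
    source = datasets.load_dataset(
        "tgl_profanity.py",
        name="tgl_profanity_source",
        trust_remote_code=True,
    )

    # SEACrowd text schema: rows of {"id": str, "text": str, "label": ClassLabel}
    seacrowd = datasets.load_dataset(
        "tgl_profanity.py",
        name="tgl_profanity_seacrowd_text",
        trust_remote_code=True,
    )

    print(source["train"][0])
    print(seacrowd["validation"][0])

Both configs expose train and validation splits, mirroring the train.csv and val.csv files listed in _URLS.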