albertvillanova HF staff committed on
Commit
dcd3eae
1 Parent(s): d5f0059

Delete loading script

Files changed (1)
  1. roman_urdu_hate_speech.py +0 -210
roman_urdu_hate_speech.py DELETED
@@ -1,210 +0,0 @@
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """roman_urdu_hate_speech dataset."""
-
-
- import csv
-
- import datasets
- from datasets.tasks import TextClassification
-
-
- # Citation for the dataset paper (Rizwan et al., EMNLP 2020).
- _CITATION = """\
- @inproceedings{rizwan2020hate,
-     title={Hate-speech and offensive language detection in roman Urdu},
-     author={Rizwan, Hammad and Shakeel, Muhammad Haroon and Karim, Asim},
-     booktitle={Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)},
-     pages={2512--2522},
-     year={2020}
- }
- """
- # Description of the dataset, shown on the dataset page.
- _DESCRIPTION = """\
- The Roman Urdu Hate-Speech and Offensive Language Detection (RUHSOLD) dataset is a \
- Roman Urdu dataset of tweets annotated by experts in the relevant language. \
- The authors develop gold standards for two sub-tasks. \
- The first sub-task is based on binary labels of Hate-Offensive content and Normal content (i.e., inoffensive language). \
- These labels are self-explanatory. \
- The authors refer to this sub-task as coarse-grained classification. \
- The second sub-task defines Hate-Offensive content with four labels at a granular level. \
- These labels are the most relevant for the demographic of users who converse in Roman Urdu and \
- are defined in the related literature. The authors refer to this sub-task as fine-grained classification. \
- The objective behind creating two gold standards is to enable researchers to evaluate hate speech detection \
- approaches in both easier (coarse-grained) and more challenging (fine-grained) scenarios. \
- """
-
- _HOMEPAGE = "https://github.com/haroonshakeel/roman_urdu_hate_speech"
-
- _LICENSE = "MIT License"
-
- _DOWNLOAD_URL = "https://raw.githubusercontent.com/haroonshakeel/roman_urdu_hate_speech/main/"
-
- # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
- _URLS = {
-     "Coarse_Grained_train": _DOWNLOAD_URL + "task_1_train.tsv",
-     "Coarse_Grained_validation": _DOWNLOAD_URL + "task_1_validation.tsv",
-     "Coarse_Grained_test": _DOWNLOAD_URL + "task_1_test.tsv",
-     "Fine_Grained_train": _DOWNLOAD_URL + "task_2_train.tsv",
-     "Fine_Grained_validation": _DOWNLOAD_URL + "task_2_validation.tsv",
-     "Fine_Grained_test": _DOWNLOAD_URL + "task_2_test.tsv",
- }
-
-
- class RomanUrduHateSpeechConfig(datasets.BuilderConfig):
-     """BuilderConfig for RomanUrduHateSpeech."""
-
-     def __init__(self, **kwargs):
-         """BuilderConfig for RomanUrduHateSpeech.
-
-         Args:
-             **kwargs: keyword arguments forwarded to super.
-         """
-         super().__init__(**kwargs)
-
-
- class RomanUrduHateSpeech(datasets.GeneratorBasedBuilder):
-     """Roman Urdu Hate Speech dataset."""
-
-     VERSION = datasets.Version("1.1.0")
-
-     # This dataset has two configurations, one per sub-task. Load either one with:
-     # data = datasets.load_dataset("roman_urdu_hate_speech", "Coarse_Grained")
-     # data = datasets.load_dataset("roman_urdu_hate_speech", "Fine_Grained")
-     BUILDER_CONFIGS = [
-         RomanUrduHateSpeechConfig(
-             name="Coarse_Grained",
-             version=VERSION,
-             description="Binary (coarse-grained) classification of Hate-Offensive vs. Normal tweets",
-         ),
-         RomanUrduHateSpeechConfig(
-             name="Fine_Grained",
-             version=VERSION,
-             description="Five-class (fine-grained) classification of hate-offensive content",
-         ),
-     ]
-
-     # A default configuration is not mandatory; Coarse_Grained is the simpler sub-task.
-     DEFAULT_CONFIG_NAME = "Coarse_Grained"
-
-     def _info(self):
-         if self.config.name == "Coarse_Grained":
-             features = datasets.Features(
-                 {
-                     "tweet": datasets.Value("string"),
-                     "label": datasets.features.ClassLabel(names=["Abusive/Offensive", "Normal"]),
-                 }
-             )
-         else:  # Fine_Grained
-             features = datasets.Features(
-                 {
-                     "tweet": datasets.Value("string"),
-                     "label": datasets.features.ClassLabel(
-                         names=["Abusive/Offensive", "Normal", "Religious Hate", "Sexism", "Profane/Untargeted"]
-                     ),
-                 }
-             )
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             # The features differ between the two configurations, so they are defined above.
-             features=features,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
-             task_templates=[TextClassification(text_column="tweet", label_column="label")],
-         )
145
-
146
- def _split_generators(self, dl_manager):
147
-
148
- urls_train = _URLS[self.config.name + "_train"]
149
-
150
- urls_validate = _URLS[self.config.name + "_validation"]
151
-
152
- urls_test = _URLS[self.config.name + "_test"]
153
-
154
- data_dir_train = dl_manager.download_and_extract(urls_train)
155
-
156
- data_dir_validate = dl_manager.download_and_extract(urls_validate)
157
-
158
- data_dir_test = dl_manager.download_and_extract(urls_test)
159
-
160
- return [
161
- datasets.SplitGenerator(
162
- name=datasets.Split.TRAIN,
163
- # These kwargs will be passed to _generate_examples
164
- gen_kwargs={
165
- "filepath": data_dir_train,
166
- "split": "train",
167
- },
168
- ),
169
- datasets.SplitGenerator(
170
- name=datasets.Split.TEST,
171
- # These kwargs will be passed to _generate_examples
172
- gen_kwargs={
173
- "filepath": data_dir_test,
174
- "split": "test",
175
- },
176
- ),
177
- datasets.SplitGenerator(
178
- name=datasets.Split.VALIDATION,
179
- # These kwargs will be passed to _generate_examples
180
- gen_kwargs={
181
- "filepath": data_dir_validate,
182
- "split": "dev",
183
- },
184
- ),
185
- ]
186
-
-     # Method parameters are unpacked from `gen_kwargs` as returned by `_split_generators`.
-     def _generate_examples(self, filepath, split):
-         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-         with open(filepath, encoding="utf-8") as tsv_file:
-             tsv_reader = csv.reader(tsv_file, quotechar="|", delimiter="\t", quoting=csv.QUOTE_ALL)
-             for key, row in enumerate(tsv_reader):
-                 if key == 0:
-                     # Skip the header row.
-                     continue
-                 # Both configurations share the same two-column (tweet, label) layout.
-                 tweet, label = row
-                 label = int(label)
-                 # Yields examples as (key, example) tuples; labels are withheld for the test split.
-                 yield key, {
-                     "tweet": tweet,
-                     "label": None if split == "test" else label,
-                 }
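
For context, the snippet below is a minimal sketch of how this dataset would typically be loaded once the script above is gone, assuming the repository hosts the data files directly (for example, as auto-converted Parquet), which is the usual reason these loading scripts are deleted. The repo id and config names come from the script itself; everything else is standard `datasets` usage, not part of this commit.

    from datasets import load_dataset

    # Coarse-grained (binary) sub-task; config name as defined in BUILDER_CONFIGS above.
    coarse = load_dataset("roman_urdu_hate_speech", "Coarse_Grained")
    print(coarse["train"][0])  # {'tweet': '...', 'label': ...}

    # Fine-grained (five-class) sub-task; inspect the label names declared in _info.
    fine = load_dataset("roman_urdu_hate_speech", "Fine_Grained")
    print(fine["train"].features["label"].names)

Because the features (including the ClassLabel names) were baked into the repository's metadata when the data was converted, loading should behave the same as with the script, minus the remote code execution.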