system HF staff commited on
Commit
7c6fa79
0 Parent(s):

Update files from the datasets library (from 1.2.0)

Browse files

Release notes: https://github.com/huggingface/datasets/releases/tag/1.2.0

Files changed (5) hide show
  1. .gitattributes +27 -0
  2. README.md +160 -0
  3. dataset_infos.json +1 -0
  4. dummy/0.0.0/dummy_data.zip +3 -0
  5. hate_speech18.py +110 -0
.gitattributes ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
5
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.model filter=lfs diff=lfs merge=lfs -text
12
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
13
+ *.onnx filter=lfs diff=lfs merge=lfs -text
14
+ *.ot filter=lfs diff=lfs merge=lfs -text
15
+ *.parquet filter=lfs diff=lfs merge=lfs -text
16
+ *.pb filter=lfs diff=lfs merge=lfs -text
17
+ *.pt filter=lfs diff=lfs merge=lfs -text
18
+ *.pth filter=lfs diff=lfs merge=lfs -text
19
+ *.rar filter=lfs diff=lfs merge=lfs -text
20
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
21
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
22
+ *.tflite filter=lfs diff=lfs merge=lfs -text
23
+ *.tgz filter=lfs diff=lfs merge=lfs -text
24
+ *.xz filter=lfs diff=lfs merge=lfs -text
25
+ *.zip filter=lfs diff=lfs merge=lfs -text
26
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
27
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,160 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ annotations_creators:
3
+ - found
4
+ language_creators:
5
+ - found
6
+ languages:
7
+ - en
8
+ licenses:
9
+ - cc-by-sa-3-0
10
+ multilinguality:
11
+ - monolingual
12
+ size_categories:
13
+ - 10K<n<100K
14
+ source_datasets:
15
+ - original
16
+ task_categories:
17
+ - text-classification
18
+ task_ids:
19
+ - intent-classification
20
+ ---
21
+
22
+ # Dataset Card for [Dataset Name]
23
+
24
+ ## Table of Contents
25
+ - [Dataset Description](#dataset-description)
26
+ - [Dataset Summary](#dataset-summary)
27
+ - [Supported Tasks](#supported-tasks-and-leaderboards)
28
+ - [Languages](#languages)
29
+ - [Dataset Structure](#dataset-structure)
30
+ - [Data Instances](#data-instances)
31
+ - [Data Fields](#data-fields)
32
+ - [Data Splits](#data-splits)
33
+ - [Dataset Creation](#dataset-creation)
34
+ - [Curation Rationale](#curation-rationale)
35
+ - [Source Data](#source-data)
36
+ - [Annotations](#annotations)
37
+ - [Personal and Sensitive Information](#personal-and-sensitive-information)
38
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
39
+ - [Social Impact of Dataset](#social-impact-of-dataset)
40
+ - [Discussion of Biases](#discussion-of-biases)
41
+ - [Other Known Limitations](#other-known-limitations)
42
+ - [Additional Information](#additional-information)
43
+ - [Dataset Curators](#dataset-curators)
44
+ - [Licensing Information](#licensing-information)
45
+ - [Citation Information](#citation-information)
46
+
47
+ ## Dataset Description
48
+
49
+ - **Homepage:** https://github.com/Vicomtech/hate-speech-dataset
50
+ - **Repository:** https://github.com/Vicomtech/hate-speech-dataset
51
+ - **Paper:** https://www.aclweb.org/anthology/W18-51.pdf
52
+ - **Leaderboard:**
53
+ - **Point of Contact:**
54
+
55
+ ### Dataset Summary
56
+
57
+ These files contain text extracted from Stormfront, a white supremacist forum. A random set of forums posts have been sampled from
58
+ several subforums and split into sentences. Those sentences have been manually labelled as containing hate speech or not, according
59
+ to certain annotation guidelines.
60
+
61
+ ### Supported Tasks and Leaderboards
62
+
63
+ [More Information Needed]
64
+
65
+ ### Languages
66
+
67
+ English
68
+
69
+ ## Dataset Structure
70
+
71
+ ### Data Instances
72
+
73
+ [More Information Needed]
74
+
75
+ ### Data Fields
76
+
77
+ - text: the provided sentence
78
+ - user_id: information to make it possible to re-build the conversations these sentences belong to
79
+ - subforum_id: information to make it possible to re-build the conversations these sentences belong to
80
+ - num_contexts: number of previous posts the annotator had to read before making a decision over the category of the sentence
81
+ - label: hate, noHate, relation (the sentence doesn't contain hate speech on its own, but a combination of several sentences does)
82
+ or idk/skip (sentences that are not written in English or that don't contain enough information to be classified into hate or noHate)
83
+
84
+ ### Data Splits
85
+
86
+ [More Information Needed]
87
+
88
+ ## Dataset Creation
89
+
90
+ ### Curation Rationale
91
+
92
+ [More Information Needed]
93
+
94
+ ### Source Data
95
+
96
+ #### Initial Data Collection and Normalization
97
+
98
+ [More Information Needed]
99
+
100
+ #### Who are the source language producers?
101
+
102
+ [More Information Needed]
103
+
104
+ ### Annotations
105
+
106
+ #### Annotation process
107
+
108
+ [More Information Needed]
109
+
110
+ #### Who are the annotators?
111
+
112
+ [More Information Needed]
113
+
114
+ ### Personal and Sensitive Information
115
+
116
+ [More Information Needed]
117
+
118
+ ## Considerations for Using the Data
119
+
120
+ ### Social Impact of Dataset
121
+
122
+ [More Information Needed]
123
+
124
+ ### Discussion of Biases
125
+
126
+ [More Information Needed]
127
+
128
+ ### Other Known Limitations
129
+
130
+ [More Information Needed]
131
+
132
+ ## Additional Information
133
+
134
+ ### Dataset Curators
135
+
136
+ [More Information Needed]
137
+
138
+ ### Licensing Information
139
+
140
+ [More Information Needed]
141
+
142
+ ### Citation Information
143
+
144
+ ```
145
+ @inproceedings{gibert2018hate,
146
+ title = "{Hate Speech Dataset from a White Supremacy Forum}",
147
+ author = "de Gibert, Ona and
148
+ Perez, Naiara and
149
+ Garc{\'\i}a-Pablos, Aitor and
150
+ Cuadros, Montse",
151
+ booktitle = "Proceedings of the 2nd Workshop on Abusive Language Online ({ALW}2)",
152
+ month = oct,
153
+ year = "2018",
154
+ address = "Brussels, Belgium",
155
+ publisher = "Association for Computational Linguistics",
156
+ url = "https://www.aclweb.org/anthology/W18-5102",
157
+ doi = "10.18653/v1/W18-5102",
158
+ pages = "11--20",
159
+ }
160
+ ```
dataset_infos.json ADDED
@@ -0,0 +1 @@
 
1
+ {"default": {"description": "These files contain text extracted from Stormfront, a white supremacist forum. A random set of \nforums posts have been sampled from several subforums and split into sentences. Those sentences \nhave been manually labelled as containing hate speech or not, according to certain annotation guidelines.\n", "citation": "@inproceedings{gibert2018hate,\n title = \"{Hate Speech Dataset from a White Supremacy Forum}\",\n author = \"de Gibert, Ona and\n Perez, Naiara and\n Garc{'\\i}a-Pablos, Aitor and\n Cuadros, Montse\",\n booktitle = \"Proceedings of the 2nd Workshop on Abusive Language Online ({ALW}2)\",\n month = oct,\n year = \"2018\",\n address = \"Brussels, Belgium\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-5102\",\n doi = \"10.18653/v1/W18-5102\",\n pages = \"11--20\",\n}\n", "homepage": "https://github.com/Vicomtech/hate-speech-dataset", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "user_id": {"dtype": "int64", "id": null, "_type": "Value"}, "subforum_id": {"dtype": "int64", "id": null, "_type": "Value"}, "num_contexts": {"dtype": "int64", "id": null, "_type": "Value"}, "label": {"num_classes": 4, "names": ["noHate", "hate", "idk/skip", "relation"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "hate_speech18", "config_name": "default", "version": {"version_str": "0.0.0", "description": null, "major": 0, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1375340, "num_examples": 10944, "dataset_name": "hate_speech18"}}, "download_checksums": {"https://github.com/Vicomtech/hate-speech-dataset/archive/master.zip": {"num_bytes": 3664530, "checksum": "acc0d7ce40e22cf019daa752a5136049a45462b9ba4eab8bf40ea82dcd867eba"}}, "download_size": 3664530, "post_processing_size": null, "dataset_size": 1375340, "size_in_bytes": 5039870}}
dummy/0.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:27604bc68f448b2e00372b185970c19b144b9682110bf3a963ee0ce0367ef20a
3
+ size 2890
hate_speech18.py ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # Lint as: python3
17
+ """Hate speech dataset"""
18
+
19
+ from __future__ import absolute_import, division, print_function
20
+
21
+ import csv
22
+ import os
23
+
24
+ import datasets
25
+
26
+
27
+ _CITATION = """\
28
+ @inproceedings{gibert2018hate,
29
+ title = "{Hate Speech Dataset from a White Supremacy Forum}",
30
+ author = "de Gibert, Ona and
31
+ Perez, Naiara and
32
+ Garcia-Pablos, Aitor and
33
+ Cuadros, Montse",
34
+ booktitle = "Proceedings of the 2nd Workshop on Abusive Language Online ({ALW}2)",
35
+ month = oct,
36
+ year = "2018",
37
+ address = "Brussels, Belgium",
38
+ publisher = "Association for Computational Linguistics",
39
+ url = "https://www.aclweb.org/anthology/W18-5102",
40
+ doi = "10.18653/v1/W18-5102",
41
+ pages = "11--20",
42
+ }
43
+ """
44
+
45
+ _DESCRIPTION = """\
46
+ These files contain text extracted from Stormfront, a white supremacist forum. A random set of
47
+ forums posts have been sampled from several subforums and split into sentences. Those sentences
48
+ have been manually labelled as containing hate speech or not, according to certain annotation guidelines.
49
+ """
50
+
51
+ _DATA_URL = "https://github.com/Vicomtech/hate-speech-dataset/archive/master.zip"
52
+
53
+
54
+ class HateSpeech18(datasets.GeneratorBasedBuilder):
55
+ """Hate speech dataset"""
56
+
57
+ def _info(self):
58
+ return datasets.DatasetInfo(
59
+ description=_DESCRIPTION,
60
+ features=datasets.Features(
61
+ {
62
+ "text": datasets.Value("string"),
63
+ "user_id": datasets.Value("int64"),
64
+ "subforum_id": datasets.Value("int64"),
65
+ "num_contexts": datasets.Value("int64"),
66
+ "label": datasets.features.ClassLabel(names=["noHate", "hate", "idk/skip", "relation"]),
67
+ }
68
+ ),
69
+ supervised_keys=None,
70
+ homepage="https://github.com/Vicomtech/hate-speech-dataset",
71
+ citation=_CITATION,
72
+ )
73
+
74
+ def _split_generators(self, dl_manager):
75
+ dl_dir = dl_manager.download_and_extract(_DATA_URL)
76
+
77
+ return [
78
+ datasets.SplitGenerator(
79
+ name=datasets.Split.TRAIN, gen_kwargs={"filepath": os.path.join(dl_dir, "hate-speech-dataset-master")}
80
+ ),
81
+ ]
82
+
83
+ def _generate_examples(self, filepath):
84
+
85
+ with open(os.path.join(filepath, "annotations_metadata.csv"), encoding="utf-8") as csv_file:
86
+
87
+ csv_reader = csv.reader(
88
+ csv_file, quotechar='"', delimiter=",", quoting=csv.QUOTE_ALL, skipinitialspace=True
89
+ )
90
+
91
+ next(csv_reader)
92
+
93
+ for idx, row in enumerate(csv_reader):
94
+
95
+ file_id, user_id, subforum_id, num_contexts, label = row
96
+
97
+ all_files_path = os.path.join(filepath, "all_files")
98
+
99
+ path = os.path.join(all_files_path, file_id + ".txt")
100
+
101
+ with open(path, encoding="utf-8") as file:
102
+ text = file.read()
103
+
104
+ yield idx, {
105
+ "text": text,
106
+ "user_id": user_id,
107
+ "subforum_id": subforum_id,
108
+ "num_contexts": num_contexts,
109
+ "label": label,
110
+ }