Tasks: Text Classification
Modalities: Text
Sub-tasks: hate-speech-detection
Size: 10K - 100K
# coding=utf-8
# Copyright 2022 Leon Derczynski.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""OffensEval 2020: Multilingual Offensive Language Detection"""
import csv
import os
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@inproceedings{zampieri-etal-2020-semeval,
    title = "{S}em{E}val-2020 Task 12: Multilingual Offensive Language Identification in Social Media ({O}ffens{E}val 2020)",
    author = {Zampieri, Marcos and
      Nakov, Preslav and
      Rosenthal, Sara and
      Atanasova, Pepa and
      Karadzhov, Georgi and
      Mubarak, Hamdy and
      Derczynski, Leon and
      Pitenis, Zeses and
      Coltekin, Cagri},
    booktitle = "Proceedings of the Fourteenth Workshop on Semantic Evaluation",
    month = dec,
    year = "2020",
    address = "Barcelona (online)",
    publisher = "International Committee for Computational Linguistics",
    url = "https://aclanthology.org/2020.semeval-1.188",
    doi = "10.18653/v1/2020.semeval-1.188",
    pages = "1425--1447",
}
"""
_DESCRIPTION = """\
OffensEval 2020 features a multilingual dataset with five languages. The languages included in OffensEval 2020 are:
* Arabic
* Danish
* English
* Greek
* Turkish
The annotation follows the hierarchical tagset proposed in the Offensive Language Identification Dataset (OLID) and used in OffensEval 2019.
In this taxonomy we break down offensive content into the following three sub-tasks taking the type and target of offensive content into account.
The following sub-tasks were organized:
* Sub-task A - Offensive language identification;
* Sub-task B - Automatic categorization of offense types;
* Sub-task C - Offense target identification.
The English training data isn't included here (the text isn't available and needs rehydration of 9 million tweets;
see [https://zenodo.org/record/3950379#.XxZ-aFVKipp](https://zenodo.org/record/3950379#.XxZ-aFVKipp))
"""
# _URL = ""
class OffensEval2020Config(datasets.BuilderConfig):
    """BuilderConfig for OffensEval2020."""

    def __init__(self, **kwargs):
        """BuilderConfig for OffensEval2020.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(OffensEval2020Config, self).__init__(**kwargs)


class OffensEval2020(datasets.GeneratorBasedBuilder):
    """OffensEval2020 dataset."""

    BUILDER_CONFIGS = [
        OffensEval2020Config(name="ar", version=datasets.Version("1.0.0"), description="Offensive language data in Arabic"),
        OffensEval2020Config(name="da", version=datasets.Version("1.0.0"), description="Offensive language data in Danish"),
        OffensEval2020Config(name="en", version=datasets.Version("1.0.0"), description="Offensive language data in English"),
        OffensEval2020Config(name="gr", version=datasets.Version("1.0.0"), description="Offensive language data in Greek"),
        OffensEval2020Config(name="tr", version=datasets.Version("1.0.0"), description="Offensive language data in Turkish"),
    ]
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "original_id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "subtask_a": datasets.features.ClassLabel(
                        names=[
                            "NOT",
                            "OFF",
                        ]
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://sites.google.com/site/offensevalsharedtask/results-and-paper-submission",
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # The training split is a single TSV; the test split pairs a text TSV with a
        # separate CSV of gold labels.
        train_text = dl_manager.download_and_extract(f"offenseval-{self.config.name}-training-v1.tsv")
        test_labels = dl_manager.download_and_extract(f"offenseval-{self.config.name}-labela-v1.csv")
        test_text = dl_manager.download_and_extract(f"offenseval-{self.config.name}-test-v1.tsv")
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_text, "split": "train"}),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": {"labels": test_labels, "text": test_text}, "split": "test"},
            ),
        ]
    def _generate_examples(self, filepath, split=None):
        if split == "train":
            logger.info("⏳ Generating examples from = %s", filepath)
            with open(filepath, encoding="utf-8") as f:
                OffensEval2020_reader = csv.DictReader(f, delimiter="\t", quotechar='"')
                guid = 0
                for instance in OffensEval2020_reader:
                    # Rename the source columns and add a sequential example id.
                    instance["text"] = instance.pop("tweet")
                    instance["original_id"] = instance.pop("id")
                    instance["id"] = str(guid)
                    yield guid, instance
                    guid += 1
        elif split == "test":
            logger.info("⏳ Generating examples from = %s", filepath["text"])
            # Gold labels for the test split ship in a separate CSV, one
            # "original_id,label" pair per line.
            labeldict = {}
            with open(filepath["labels"], encoding="utf-8") as labels:
                for line in labels:
                    parts = line.strip().split(",")
                    # Skip blank or malformed lines.
                    if len(parts) == 2:
                        labeldict[parts[0]] = parts[1]
            with open(filepath["text"], encoding="utf-8") as f:
                OffensEval2020_reader = csv.DictReader(f, delimiter="\t", quotechar='"')
                guid = 0
                for instance in OffensEval2020_reader:
                    instance["text"] = instance.pop("tweet")
                    instance["original_id"] = instance.pop("id")
                    instance["id"] = str(guid)
                    # Attach the gold sub-task A label, keyed by the original tweet id.
                    instance["subtask_a"] = labeldict[instance["original_id"]]
                    yield guid, instance
                    guid += 1
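
if __name__ == "__main__":
    # Minimal smoke test (a sketch, not part of the original loader): build the Danish
    # config from this script and print the sub-task A label distribution of the test
    # split. Assumes the TSV/CSV files referenced in _split_generators are reachable
    # and that the installed `datasets` version still supports script-based datasets.
    from collections import Counter

    ds = datasets.load_dataset(__file__, "da")
    counts = Counter(ds["test"]["subtask_a"])
    print({ds["test"].features["subtask_a"].int2str(k): v for k, v in counts.items()})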