Datasets:

Modalities:
Text
ArXiv:
Libraries:
Datasets
offenseval_2020 / offenseval_2020.py
leondz's picture
reorder classes
b2b906e
raw
history blame contribute delete
No virus
6.49 kB
# coding=utf-8
# Copyright 2022 Leon Derczynski.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""OffensEval 2020: Multilingual Offensive Language Detection"""
import csv
import os
import datasets
logger = datasets.logging.get_logger(__name__)
# BibTeX citation for the OffensEval 2020 task-description paper.
# Fix vs. the original: the `author = {...}` field was never closed (the
# entry ended "Coltekin, Cagri," with no closing brace), which makes the
# BibTeX entry malformed; the brace now closes after the final author.
_CITATION = """\
@inproceedings{zampieri-etal-2020-semeval,
    title = "{S}em{E}val-2020 Task 12: Multilingual Offensive Language Identification in Social Media ({O}ffens{E}val 2020)",
    author = {Zampieri, Marcos and
      Nakov, Preslav and
      Rosenthal, Sara and
      Atanasova, Pepa and
      Karadzhov, Georgi and
      Mubarak, Hamdy and
      Derczynski, Leon and
      Pitenis, Zeses and
      Coltekin, Cagri},
    booktitle = "Proceedings of the Fourteenth Workshop on Semantic Evaluation",
    month = dec,
    year = "2020",
    address = "Barcelona (online)",
    publisher = "International Committee for Computational Linguistics",
    url = "https://aclanthology.org/2020.semeval-1.188",
    doi = "10.18653/v1/2020.semeval-1.188",
    pages = "1425--1447",
}
"""
# Human-readable dataset summary surfaced through `datasets.DatasetInfo`
# (see `_info` below). Note: the English *training* split is deliberately
# absent — only tweet IDs were released and rehydration is required.
_DESCRIPTION = """\
OffensEval 2020 features a multilingual dataset with five languages. The languages included in OffensEval 2020 are:
* Arabic
* Danish
* English
* Greek
* Turkish
The annotation follows the hierarchical tagset proposed in the Offensive Language Identification Dataset (OLID) and used in OffensEval 2019.
In this taxonomy we break down offensive content into the following three sub-tasks taking the type and target of offensive content into account.
The following sub-tasks were organized:
* Sub-task A - Offensive language identification;
* Sub-task B - Automatic categorization of offense types;
* Sub-task C - Offense target identification.
The English training data isn't included here (the text isn't available and needs rehydration of 9 million tweets;
see [https://zenodo.org/record/3950379#.XxZ-aFVKipp](https://zenodo.org/record/3950379#.XxZ-aFVKipp))
"""
# _URL = ""
class OffensEval2020Config(datasets.BuilderConfig):
    """Configuration for one language subset of OffensEval 2020.

    A thin pass-through over :class:`datasets.BuilderConfig`; it adds no
    fields of its own and exists so each language gets a named config.
    """

    def __init__(self, **kwargs):
        """Forward all keyword arguments (name, version, description, ...)
        unchanged to the base ``BuilderConfig``."""
        super().__init__(**kwargs)
class OffensEval2020(datasets.GeneratorBasedBuilder):
    """OffensEval 2020 dataset builder (one config per language).

    Produces examples with a sequential string ``id``, the source file's
    ``original_id``, the tweet ``text``, and the sub-task A label
    (``NOT``/``OFF``). Train examples carry the label inline; test labels
    come from a separate CSV keyed by the original tweet id.
    """

    BUILDER_CONFIGS = [
        OffensEval2020Config(name="ar", version=datasets.Version("1.0.0"), description="Offensive language data in Arabic"),
        OffensEval2020Config(name="da", version=datasets.Version("1.0.0"), description="Offensive language data in Danish"),
        OffensEval2020Config(name="en", version=datasets.Version("1.0.0"), description="Offensive language data in English"),
        OffensEval2020Config(name="gr", version=datasets.Version("1.0.0"), description="Offensive language data in Greek"),
        OffensEval2020Config(name="tr", version=datasets.Version("1.0.0"), description="Offensive language data in Turkish"),
    ]

    def _info(self):
        """Declare the feature schema, homepage, and citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "original_id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "subtask_a": datasets.features.ClassLabel(
                        names=[
                            "NOT",
                            "OFF",
                        ]
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://sites.google.com/site/offensevalsharedtask/results-and-paper-submission",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators.

        Train text/labels live in one TSV; the test split is a TSV of text
        plus a separate CSV mapping tweet id -> label.
        """
        train_text = dl_manager.download_and_extract(f"offenseval-{self.config.name}-training-v1.tsv")
        # NOTE(review): "labela" matches the upstream file naming — do not "fix" the spelling.
        test_labels = dl_manager.download_and_extract(f"offenseval-{self.config.name}-labela-v1.csv")
        test_text = dl_manager.download_and_extract(f"offenseval-{self.config.name}-test-v1.tsv")
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_text, "split": 'train'}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": {'labels': test_labels, 'text': test_text}, "split": 'test'}),
        ]

    def _generate_examples(self, filepath, split=None):
        """Yield ``(guid, example)`` pairs for one split.

        Args:
            filepath: for ``split == 'train'`` a single TSV path; for
                ``split == 'test'`` a dict with ``'labels'`` (CSV) and
                ``'text'`` (TSV) paths.
            split: ``'train'`` or ``'test'``.
        """
        if split == "train":
            logger.info("⏳ Generating examples from = %s", filepath)
            with open(filepath, encoding="utf-8") as f:
                OffensEval2020_reader = csv.DictReader(f, delimiter="\t", quotechar='"')
                guid = 0
                for instance in OffensEval2020_reader:
                    # Rename source columns to the declared feature schema.
                    instance["text"] = instance.pop("tweet")
                    instance["original_id"] = instance.pop("id")
                    instance["id"] = str(guid)
                    yield guid, instance
                    guid += 1
        elif split == 'test':
            logger.info("⏳ Generating examples from = %s", filepath['text'])
            # Build original-id -> label map from the standalone label CSV.
            # Assumes the CSV has no header row — TODO confirm against data.
            labeldict = {}
            # FIX: these two opens previously used the platform default
            # encoding; the data is multilingual (Arabic/Greek/Turkish/...),
            # so UTF-8 must be requested explicitly, as the train branch does.
            with open(filepath['labels'], encoding="utf-8") as labels:
                for line in labels:
                    line = line.strip().split(',')
                    if line:
                        labeldict[line[0]] = line[1]
            with open(filepath['text'], encoding="utf-8") as f:
                OffensEval2020_reader = csv.DictReader(f, delimiter="\t", quotechar='"')
                guid = 0
                for instance in OffensEval2020_reader:
                    instance["text"] = instance.pop("tweet")
                    instance["original_id"] = instance.pop("id")
                    instance["id"] = str(guid)
                    # Raises KeyError if a test tweet has no label — a loud
                    # failure is preferable to silently mislabelling.
                    instance["subtask_a"] = labeldict[instance["original_id"]]
                    yield guid, instance
                    guid += 1