# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Authorship Analysis: Style Change Detection"""
import json
import os
import datasets
_CITATION = """\
@inproceedings{bevendorff2020shared,
title={Shared Tasks on Authorship Analysis at PAN 2020},
author={Bevendorff, Janek and Ghanem, Bilal and Giachanou, Anastasia and Kestemont, Mike and Manjavacas, Enrique and Potthast, Martin and Rangel, Francisco and Rosso, Paolo and Specht, G{\"u}nther and Stamatatos, Efstathios and others},
booktitle={European Conference on Information Retrieval},
pages={508--516},
year={2020},
organization={Springer}
}
"""
_DESCRIPTION = """\
The goal of the style change detection task is to identify text positions within a given multi-author document at which the author switches. Detecting these positions is a crucial part of the authorship identification process, and for multi-author document analysis in general.
Access to the dataset needs to be requested from zenodo.
"""
class StyleChangeDetection(datasets.GeneratorBasedBuilder):
"""Style Change Detection Dataset from PAN20"""
VERSION = datasets.Version("1.0.0")
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="narrow",
version=datasets.Version("1.0.0", "Version 1"),
description="The narrow subset contains texts from a relatively narrow set of subjects matters (all related to technology).",
),
datasets.BuilderConfig(
name="wide",
version=datasets.Version("1.0.0", "Version 1"),
description="The wide subset adds additional subject areas (travel, philosophy, economics, history, etc.).",
),
]
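    # Usage sketch (illustrative; the data_dir below is just an example path):
    # either config defined above can be selected by name when loading, e.g.
    #
    #   datasets.load_dataset(
    #       "style_change_detection",
    #       "narrow",
    #       data_dir="~/.manual_dirs/style_change_detection",
    #   )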
@property
def manual_download_instructions(self):
return """\
You should download the dataset from https://zenodo.org/record/3660984
The dataset needs requesting.
Download each file, extract it and place in a dir of your choice,
which will be used as a manual_dir, e.g. `~/.manual_dirs/style_change_detection`
Style Change Detection can then be loaded via:
`datasets.load_dataset("style_change_detection", data_dir="~/.manual_dirs/style_change_detection")`.
"""
def _info(self):
features = {
"id": datasets.Value("string"),
"text": datasets.Value("string"),
"authors": datasets.Value("int32"),
"structure": datasets.features.Sequence(datasets.Value("string")),
"site": datasets.Value("string"),
"multi-author": datasets.Value("bool"),
"changes": datasets.features.Sequence(datasets.Value("bool")),
}
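        # Illustrative record shape (all values below are invented; real values
        # come from the truth JSON files and article texts):
        #   {"id": "1", "text": "...", "authors": 2, "structure": ["A1", "A2"],
        #    "site": "stackexchange", "multi-author": True, "changes": [True]}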
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(features),
homepage="https://pan.webis.de/clef20/pan20-web/style-change-detection.html",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
train_dir = os.path.join(data_dir, "train", "dataset-" + self.config.name)
val_dir = os.path.join(data_dir, "validation", "dataset-" + self.config.name)
        for split_dir in (train_dir, val_dir):
            if not os.path.exists(split_dir):
                raise FileNotFoundError(
                    f"{split_dir} does not exist. Make sure you insert a manual dir via `datasets.load_dataset('style_change_detection', data_dir=...)` that includes {split_dir}. Manual download instructions: {self.manual_download_instructions}"
                )
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"articles": [f for f in os.listdir(train_dir) if f.endswith(".txt")],
"base_dir": train_dir,
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={"articles": [f for f in os.listdir(val_dir) if f.endswith(".txt")], "base_dir": val_dir},
),
]
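    # Expected manual_dir layout, inferred from the paths constructed above
    # (file numbering is illustrative):
    #   <data_dir>/train/dataset-<config>/problem-1.txt
    #   <data_dir>/train/dataset-<config>/truth-problem-1.json
    #   <data_dir>/validation/dataset-<config>/problem-1.txt
    #   <data_dir>/validation/dataset-<config>/truth-problem-1.json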
def _generate_examples(self, articles=None, base_dir=None):
"""Yields examples."""
for idx, article_filename in enumerate(articles):
label_path = os.path.join(base_dir, "truth-" + article_filename[:-4] + ".json")
with open(label_path, encoding="utf-8") as f:
example = json.load(f)
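            # Article files are named "problem-<id>.txt"; strip the "problem-" prefix and ".txt" suffix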
example["id"] = article_filename[8:-4]
example["text"] = open(os.path.join(base_dir, article_filename), encoding="utf-8").read()
# Convert integers into boolean
example["multi-author"] = example["multi-author"] == 1
for i in range(len(example["changes"])):
example["changes"][i] = example["changes"][i] == 1
yield idx, example
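
# For reference: each truth file is a JSON object whose keys match the features
# declared in _info(), minus "id" and "text", which are derived from the file
# name and article contents. An illustrative, made-up example:
#
#   {
#       "authors": 2,
#       "structure": ["A1", "A2", "A1"],
#       "site": "stackexchange",
#       "multi-author": 1,
#       "changes": [1, 0]
#   }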