# coding=utf-8
# Copyright 2021 The HuggingFace Datasets Authors and Ilya Gusev
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""HeadlineCause: A Dataset of News Headlines for Detecting Causalities."""
# NOTE: fixed "Casualties" -> "Causalities" throughout; the paper
# (arXiv:2108.12626) is about causality detection between news headlines.

import json
import os

import datasets


_CITATION = """
@misc{gusev2021headlinecause,
    title={HeadlineCause: A Dataset of News Headlines for Detecting Causalities},
    author={Ilya Gusev and Alexey Tikhonov},
    year={2021},
    eprint={2108.12626},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
"""

_DESCRIPTION = "A Dataset of News Headlines for Detecting Causalities"
_HOMEPAGE = "https://github.com/IlyaGusev/HeadlineCause"

# Data file paths, relative to the dataset repository root, keyed by
# config name ("<language>_<task>") and then by split.
_URLS = {
    "ru_simple": {
        "train": "ru/simple/train.jsonl",
        "val": "ru/simple/val.jsonl",
        "test": "ru/simple/test.jsonl"
    },
    "ru_full": {
        "train": "ru/full/train.jsonl",
        "val": "ru/full/val.jsonl",
        "test": "ru/full/test.jsonl"
    },
    "en_simple": {
        "train": "en/simple/train.jsonl",
        "val": "en/simple/val.jsonl",
        "test": "en/simple/test.jsonl"
    },
    "en_full": {
        "train": "en/full/train.jsonl",
        "val": "en/full/val.jsonl",
        "test": "en/full/test.jsonl",
    }
}


class HeadlineCauseDataset(datasets.GeneratorBasedBuilder):
    """HeadlineCause Dataset.

    Pairs of news headlines annotated for causal relations, in two
    languages (Russian/English) and two task variants (simple/full),
    one builder config per language-task combination.
    """

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="ru_simple", version=VERSION, description="Russian language, Simple task"),
        datasets.BuilderConfig(name="en_simple", version=VERSION, description="English language, Simple task"),
        datasets.BuilderConfig(name="ru_full", version=VERSION, description="Russian language, Full task"),
        datasets.BuilderConfig(name="en_full", version=VERSION, description="English language, Full task"),
    ]

    DEFAULT_CONFIG_NAME = "en_simple"

    def _info(self):
        """Return dataset metadata, including the feature schema."""
        features = datasets.Features(
            {
                "left_url": datasets.Value("string"),
                "right_url": datasets.Value("string"),
                "left_title": datasets.Value("string"),
                "right_title": datasets.Value("string"),
                "left_timestamp": datasets.Value("timestamp[s]"),
                "right_timestamp": datasets.Value("timestamp[s]"),
                "id": datasets.Value("string"),
                "has_link": datasets.Value("bool"),
                "label": datasets.Value("int8"),
                "result": datasets.Value("string"),
                # Inter-annotator agreement, in [0, 1].
                "agreement": datasets.Value("double")
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the files for the active config and define the splits."""
        downloaded_files = dl_manager.download_and_extract(_URLS[self.config.name])
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["val"]}),
        ]

    def _generate_examples(self, filepath):
        """Yield (id, example) pairs from a JSON-lines file, one object per line."""
        with open(filepath, encoding="utf-8") as f:
            for row in f:
                data = json.loads(row)
                yield data["id"], data