citesum / citesum.py

# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""CiteSum dataset"""
import hashlib
import os
import datasets
logger = datasets.logging.get_logger(__name__)
_HOMEPAGE = "https://github.com/morningmoni/CiteSum"
_DESCRIPTION = """\
Citation Text-guided Scientific Extreme Summarization and Low-resource Domain Adaptation.
CiteSum contains TLDR summaries of scientific papers, derived from their citation texts
without human annotation. CiteSum is around 30 times larger than SciTLDR, the previous
human-curated dataset.
"""
# Citation for the CiteSum paper, which introduces the dataset.
_CITATION = """\
@misc{https://doi.org/10.48550/arxiv.2205.06207,
  doi = {10.48550/ARXIV.2205.06207},
  url = {https://arxiv.org/abs/2205.06207},
  author = {Mao, Yuning and Zhong, Ming and Han, Jiawei},
  keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences},
  title = {CiteSum: Citation Text-guided Scientific Extreme Summarization and Low-resource Domain Adaptation},
  publisher = {arXiv},
  year = {2022},
  copyright = {Creative Commons Attribution 4.0 International}
}
"""
# Google Drive link from the paper's GitHub repository, rewritten into the
# direct-download ("uc?export=download") form so that the file itself is
# fetched rather than the Drive preview page.
_DOWNLOAD_URL = "https://drive.google.com/uc?export=download&id=1ndHCREXGSPnDUNllladh9qCtayqbXAfJ"


class CiteSumConfig(datasets.BuilderConfig):
    """BuilderConfig for CiteSum."""

    def __init__(self, **kwargs):
        """BuilderConfig for CiteSum.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


class CiteSum(datasets.GeneratorBasedBuilder):
    """CiteSum summarization dataset."""

    BUILDER_CONFIGS = [CiteSumConfig(name="citesum", description="Plain text")]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "src": datasets.Value("string"),
                    "tgt": datasets.Value("string"),
                    "paper_id": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "discipline": {
                        "venue": datasets.Value("string"),
                        "journal": datasets.Value("string"),
                        "mag_field_of_study": datasets.features.Sequence(
                            datasets.Value("string")
                        ),
                    },
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )
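
    # For reference, a single record in the underlying JSON-lines data is
    # expected to mirror the features declared above. Illustrative shape only
    # (placeholder values, not real data):
    #
    #   {
    #       "src": "<source text of the paper>",
    #       "tgt": "<one-sentence TLDR summary>",
    #       "paper_id": "<paper identifier>",
    #       "title": "<paper title>",
    #       "discipline": {
    #           "venue": "<venue name>",
    #           "journal": "<journal name>",
    #           "mag_field_of_study": ["<field>", "<field>"],
    #       },
    #   }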

    def _split_generators(self, dl_manager):
        # The Google Drive archive is assumed to contain one JSON-lines file
        # per split (train.json / val.json / test.json); adjust the names if
        # the extracted archive is laid out differently.
        archive_dir = dl_manager.download_and_extract(_DOWNLOAD_URL)
        split_files = {
            datasets.Split.TRAIN: "train.json",
            datasets.Split.VALIDATION: "val.json",
            datasets.Split.TEST: "test.json",
        }
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={"filepath": os.path.join(archive_dir, filename)},
            )
            for split, filename in split_files.items()
        ]

    def _generate_examples(self, filepath):
        # Each line of a split file is assumed to be one JSON object whose
        # keys match the features declared in _info().
        with open(filepath, encoding="utf-8") as f:
            for idx, line in enumerate(f):
                line = line.strip()
                if not line:
                    continue
                example = json.loads(line)
                yield idx, {
                    "src": example["src"],
                    "tgt": example["tgt"],
                    "paper_id": example["paper_id"],
                    "title": example["title"],
                    "discipline": example["discipline"],
                }
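

if __name__ == "__main__":
    # Minimal usage sketch, run as `python citesum.py`: build the dataset
    # through this loader and print the first training example. This assumes
    # the installed `datasets` version accepts a local script path and that
    # the Google Drive file downloads without a manual confirmation step.
    from datasets import load_dataset

    ds = load_dataset(__file__, split="train")
    print(ds[0])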