# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""CiteSum dataset"""
import os
import json
import datasets
logger = datasets.logging.get_logger(__name__)
_HOMEPAGE = "https://github.com/morningmoni/CiteSum"
_DESCRIPTION = """\
CiteSum: Citation Text-guided Scientific Extreme Summarization and Low-resource Domain Adaptation.
CiteSum contains TLDR summaries for scientific papers from their citation texts without human annotation,
making it around 30 times larger than the previous human-curated dataset SciTLDR.
"""

# BibTeX citation for the CiteSum paper (Mao, Zhong, and Han, 2022),
# which introduces the dataset.
_CITATION = """\
@misc{https://doi.org/10.48550/arxiv.2205.06207,
doi = {10.48550/ARXIV.2205.06207},
url = {https://arxiv.org/abs/2205.06207},
author = {Mao, Yuning and Zhong, Ming and Han, Jiawei},
keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences},
title = {CiteSum: Citation Text-guided Scientific Extreme Summarization and Low-resource Domain Adaptation},
publisher = {arXiv},
year = {2022},
copyright = {Creative Commons Attribution 4.0 International}
}
"""
_DOWNLOAD_URL = (
"https://drive.google.com/uc?export=download&id=1ndHCREXGSPnDUNllladh9qCtayqbXAfJ"
)
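# Assumption, based on the split mapping and line-wise JSON parsing below: the
# Google Drive archive extracts to a directory containing train.json, val.json,
# and test.json, each in JSON Lines format (one record per line).
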
class CiteSumConfig(datasets.BuilderConfig):
    """BuilderConfig for CiteSum."""

    def __init__(self, **kwargs):
        """BuilderConfig for CiteSum.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


class CiteSum(datasets.GeneratorBasedBuilder):
    """CiteSum summarization dataset."""

    BUILDER_CONFIGS = [CiteSumConfig(name="citesum", description="Plain text")]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "src": datasets.Value("string"),
                    "tgt": datasets.Value("string"),
                    "paper_id": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "discipline": {
                        "venue": datasets.Value("string"),
                        "journal": datasets.Value("string"),
                        "mag_field_of_study": datasets.features.Sequence(
                            datasets.Value("string")
                        ),
                    },
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )
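
    # Illustrative sketch (field values hypothetical) of one record as it is
    # expected to appear on each line of train/val/test.json, matching the
    # features declared above:
    #
    #   {"src": "<paper source text>",
    #    "tgt": "<short TLDR summary>",
    #    "paper_id": "<id>",
    #    "title": "<paper title>",
    #    "discipline": {"venue": "<venue>", "journal": "<journal>",
    #                   "mag_field_of_study": ["<field>", ...]}}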

    def _split_generators(self, dl_manager):
        dl_path = dl_manager.download_and_extract(_DOWNLOAD_URL)

        file_mapping = {
            datasets.Split.TRAIN: "train.json",
            datasets.Split.VALIDATION: "val.json",
            datasets.Split.TEST: "test.json",
        }

        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    "filepath": os.path.join(dl_path, file_mapping[split]),
                },
            )
            for split in [
                datasets.Split.TRAIN,
                datasets.Split.VALIDATION,
                datasets.Split.TEST,
            ]
        ]

    def _generate_examples(self, filepath):
        # Each split file is in JSON Lines format: one JSON record per line.
        with open(filepath, "r", encoding="utf-8") as fp:
            for idx, line in enumerate(fp):
                yield idx, json.loads(line)
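

if __name__ == "__main__":
    # Minimal smoke test, not part of the loading script proper. It assumes
    # this file is loaded as a local dataset script (supported by older
    # `datasets` releases; newer ones may require trust_remote_code=True)
    # and that the Google Drive mirror above is reachable.
    ds = datasets.load_dataset(__file__, name="citesum", split="validation")
    print(ds[0]["title"])
    print(ds[0]["tgt"])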