import json
import os

import datasets
from tqdm import tqdm


_ARTICLE_ID = "article_id"
_ARTICLE_WORDS = "article_words"
_ARTICLE_BBOXES = "article_bboxes"
_ARTICLE_NORM_BBOXES = "article_norm_bboxes"
_ABSTRACT = "abstract"
_ARTICLE_PDF_URL = "article_pdf_url"

def normalize_bbox(bbox, size):
    """Scale an (x0, y0, x1, y1) pixel box to the 0-1000 grid, given a (width, height) page size."""
    return [
        int(1000 * bbox[0] / size[0]),
        int(1000 * bbox[1] / size[1]),
        int(1000 * bbox[2] / size[0]),
        int(1000 * bbox[3] / size[1]),
    ]

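# A worked example for normalize_bbox (illustrative values, not dataset data):
#
#     normalize_bbox([100, 200, 300, 400], (600, 800))
#     # -> [166, 250, 500, 500]
#
# This 0-1000 coordinate grid is the convention used by layout-aware models
# such as LayoutLM.
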
class ArxivLaySummarizationConfig(datasets.BuilderConfig):
    """BuilderConfig for ArxivLaySummarization."""

    def __init__(self, **kwargs):
        """BuilderConfig for ArxivLaySummarization.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(ArxivLaySummarizationConfig, self).__init__(**kwargs)

class ArxivLaySummarizationDataset(datasets.GeneratorBasedBuilder):
    """ArxivLaySummarization Dataset."""

    _TRAIN_ARCHIVE = "train.zip"
    _VAL_ARCHIVE = "val.zip"
    _TEST_ARCHIVE = "test.zip"
    _TRAIN_ABSTRACTS = "train.txt"
    _VAL_ABSTRACTS = "validation.txt"
    _TEST_ABSTRACTS = "test.txt"

    BUILDER_CONFIGS = [
        ArxivLaySummarizationConfig(
            name="arxiv_lay",
            version=datasets.Version("1.0.0"),
            description="Layout-augmented Arxiv dataset for summarization",
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    _ARTICLE_ID: datasets.Value("string"),
                    _ARTICLE_WORDS: datasets.Sequence(datasets.Value("string")),
                    _ARTICLE_BBOXES: datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
                    _ARTICLE_NORM_BBOXES: datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
                    _ABSTRACT: datasets.Value("string"),
                    _ARTICLE_PDF_URL: datasets.Value("string"),
                }
            ),
            supervised_keys=None,
        )

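    # Shape of one yielded record (values illustrative, not real dataset rows;
    # norm boxes assume a 612x792 letter-size page):
    #
    #     {
    #         "article_id": "1701.00001",
    #         "article_words": ["Deep", "learning", ...],
    #         "article_bboxes": [[56, 72, 102, 88], ...],        # page-pixel coords
    #         "article_norm_bboxes": [[91, 90, 166, 111], ...],  # 0-1000 grid
    #         "abstract": "...",
    #         "article_pdf_url": "https://arxiv.org/pdf/...",
    #     }
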
    def _split_generators(self, dl_manager):
        # Relative paths like "train.zip" are resolved by dl_manager against
        # the location of this script / dataset repository.
        train_dir = os.path.join(dl_manager.download_and_extract(self._TRAIN_ARCHIVE), "train")
        val_dir = os.path.join(dl_manager.download_and_extract(self._VAL_ARCHIVE), "val")
        test_dir = os.path.join(dl_manager.download_and_extract(self._TEST_ARCHIVE), "test")

        train_abstracts = dl_manager.download_and_extract(self._TRAIN_ABSTRACTS)
        val_abstracts = dl_manager.download_and_extract(self._VAL_ABSTRACTS)
        test_abstracts = dl_manager.download_and_extract(self._TEST_ABSTRACTS)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_path": train_dir, "abstract_path": train_abstracts},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"data_path": val_dir, "abstract_path": val_abstracts},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"data_path": test_dir, "abstract_path": test_abstracts},
            ),
        ]

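    # Expected on-disk layout, as implied by the parsing below (a sketch; the
    # actual archives ship with the dataset):
    #
    #     train.zip -> train/<article_id>.txt, one token per line:
    #                  word \t x0 \t y0 \t x1 \t y1 \t page_width \t page_height
    #     train.txt -> one JSON object per line with "id", "abstract", "pdf_url"
    #
    # (val/test follow the same pattern.)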
    def _generate_examples(self, data_path, abstract_path):
        """Generate ArxivLaySummarization examples."""
        filenames = sorted(os.listdir(data_path))

        guid = 0
        with open(abstract_path, "r", encoding="utf-8") as abstract_file:
            for line in tqdm(abstract_file, total=len(filenames), desc=f"Reading files in {data_path}"):
                guid += 1
                item = json.loads(line)
                fname = item["id"] + ".txt"
                filepath = os.path.join(data_path, fname)

                words = []
                bboxes = []
                norm_bboxes = []

                with open(filepath, encoding="utf-8") as f:
                    # Each row: word \t x0 \t y0 \t x1 \t y1 \t page_width \t page_height
                    # ("row" rather than "line" so the outer loop variable is not shadowed)
                    for row in f:
                        splits = row.split("\t")
                        word = splits[0]
                        bbox = [int(b) for b in splits[1:5]]
                        page_width, page_height = int(splits[5]), int(splits[6])
                        norm_bbox = normalize_bbox(bbox, (page_width, page_height))

                        words.append(word)
                        bboxes.append(bbox)
                        norm_bboxes.append(norm_bbox)

                assert len(words) == len(bboxes)
                assert len(bboxes) == len(norm_bboxes)

                yield guid, {
                    _ARTICLE_ID: item["id"],
                    _ARTICLE_WORDS: words,
                    _ARTICLE_BBOXES: bboxes,
                    _ARTICLE_NORM_BBOXES: norm_bboxes,
                    _ABSTRACT: item["abstract"],
                    _ARTICLE_PDF_URL: item["pdf_url"],
                }
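

# A minimal smoke-test sketch. The script filename below is hypothetical; point
# load_dataset at wherever this file actually lives, with the archives and
# abstract files available alongside it.
if __name__ == "__main__":
    dataset = datasets.load_dataset("arxiv_lay_summarization.py", "arxiv_lay")
    sample = dataset["train"][0]
    print(sample[_ARTICLE_ID], sample[_ARTICLE_WORDS][:10])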