# (extraction artifacts removed: file-size line, commit hash, line-number gutter)
from __future__ import absolute_import, division, print_function
import json
import os
import sys
import datasets
from pyarrow import csv
_DESCRIPTION = """Papers with aspects from paperswithcode.com dataset"""
_HOMEPAGE = "https://github.com/malteos/aspect-document-embeddings"
_CITATION = '''@InProceedings{Ostendorff2022,
title = {Specialized Document Embeddings for Aspect-based Similarity of Research Papers},
booktitle = {Proceedings of the {ACM}/{IEEE} {Joint} {Conference} on {Digital} {Libraries} ({JCDL})},
author = {Ostendorff, Malte and Blume, Till, Ruas, Terry and Gipp, Bela and Rehm, Georg},
year = {2022},
}'''
DATA_URL = "http://datasets.fiq.de/paperswithcode_aspects.tar.gz"
DOC_A_COL = "from_paper_id"
DOC_B_COL = "to_paper_id"
LABEL_COL = "label"
# binary classification (y=similar, n=dissimilar)
LABEL_CLASSES = labels = ['y', 'n']
ASPECTS = ['task', 'method', 'dataset']
def get_train_split(aspect, k):
    """Return the `datasets.Split` naming the train part of fold *k* for *aspect*."""
    split_name = f'fold_{aspect}_{k}_train'
    return datasets.Split(split_name)
def get_test_split(aspect, k):
    """Return the `datasets.Split` naming the test part of fold *k* for *aspect*."""
    split_name = f'fold_{aspect}_{k}_test'
    return datasets.Split(split_name)
class PWCConfig(datasets.BuilderConfig):
    """BuilderConfig that additionally carries the feature schema, the
    download URL of the data archive, and the list of aspects."""

    def __init__(self, features, data_url, aspects, **kwargs):
        """Store config extras; version is pinned to 0.1.0.

        :param features: mapping of feature name -> `datasets` feature type
        :param data_url: URL of the tar.gz archive with the raw data
        :param aspects: aspect names the folds are organized by
        """
        super().__init__(version=datasets.Version("0.1.0"), **kwargs)
        self.aspects = aspects
        self.data_url = data_url
        self.features = features
class PWCAspects(datasets.GeneratorBasedBuilder):
    """Paper aspects dataset.

    Two configurations:
      * ``docs``      -- one JSONL record per paper (metadata + aspect lists).
      * ``relations`` -- CSV pairs of paper ids with a y/n similarity label,
                         organized as train/test folds per aspect.
    """

    BUILDER_CONFIGS = [
        PWCConfig(
            name="docs",
            description="document text and meta data",
            # Metadata format from paperswithcode.com
            # see https://github.com/paperswithcode/paperswithcode-data
            features={
                "paper_id": datasets.Value("string"),
                "paper_url": datasets.Value("string"),
                "title": datasets.Value("string"),
                "abstract": datasets.Value("string"),
                "arxiv_id": datasets.Value("string"),
                "url_abs": datasets.Value("string"),
                "url_pdf": datasets.Value("string"),
                "aspect_tasks": datasets.Sequence(datasets.Value('string', id='task')),
                "aspect_methods": datasets.Sequence(datasets.Value('string', id='method')),
                "aspect_datasets": datasets.Sequence(datasets.Value('string', id='dataset')),
            },
            data_url=DATA_URL,
            aspects=ASPECTS,
        ),
        PWCConfig(
            name="relations",
            description=" relation data",
            features={
                DOC_A_COL: datasets.Value("string"),
                DOC_B_COL: datasets.Value("string"),
                LABEL_COL: datasets.Value("string"),
            },
            data_url=DATA_URL,
            aspects=ASPECTS,
        ),
    ]

    def _info(self):
        """Return dataset metadata for the selected configuration."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION + self.config.description,
            features=datasets.Features(self.config.features),
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and build the split generators.

        For "relations": one train + one test split per (aspect, fold),
        where fold is "sample" (small debug fold) or 1-4.
        For "docs": a single split over docs.jsonl.

        :raises ValueError: if the config name matches neither case.
        """
        arch_path = dl_manager.download_and_extract(self.config.data_url)

        if "relations" in self.config.name:
            train_file = "train.csv"
            test_file = "test.csv"
            generators = []
            for aspect in self.config.aspects:
                # "sample" is a small debug fold shipped alongside folds 1-4.
                for k in ["sample", 1, 2, 3, 4]:
                    folds_path = os.path.join(arch_path, 'folds', aspect, str(k))
                    generators += [
                        datasets.SplitGenerator(
                            name=get_train_split(aspect, k),
                            gen_kwargs={'filepath': os.path.join(folds_path, train_file)}
                        ),
                        datasets.SplitGenerator(
                            name=get_test_split(aspect, k),
                            gen_kwargs={'filepath': os.path.join(folds_path, test_file)}
                        )
                    ]
            return generators
        elif "docs" in self.config.name:
            docs_file = os.path.join(arch_path, "docs.jsonl")
            return [
                datasets.SplitGenerator(name=datasets.Split('docs'), gen_kwargs={"filepath": docs_file}),
            ]
        else:
            # Fail loudly with context instead of a bare ValueError().
            raise ValueError(f"Unknown config name: {self.config.name!r}")

    @staticmethod
    def get_dict_value(d, key, default=None):
        """Return ``d[key]`` or *default* if the key is missing.

        Equivalent to ``dict.get``; kept as a method for backward compatibility.
        """
        return d.get(key, default)

    def _generate_examples(self, filepath):
        """Yield (key, example) pairs for the docs or relations config."""
        if "relations" in self.config.name:
            # pyarrow parses the CSV at C speed; pandas only for row iteration.
            df = csv.read_csv(filepath).to_pandas()
            for idx, row in df.iterrows():
                yield idx, {
                    DOC_A_COL: str(row[DOC_A_COL]),
                    DOC_B_COL: str(row[DOC_B_COL]),
                    LABEL_COL: row[LABEL_COL],
                }
        elif self.config.name == "docs":
            # Encoding pinned to UTF-8 so decoding does not depend on the
            # platform default; the archive ships UTF-8 JSONL.
            with open(filepath, 'r', encoding='utf-8') as f:
                for i, line in enumerate(f):
                    doc = json.loads(line)
                    # Missing feature keys become None so the schema stays fixed.
                    yield i, {k: doc.get(k) for k in self.config.features}