Datasets:
Tasks:
- Token Classification
Sub-tasks:
- parsing
Languages:
- English
Multilinguality:
- monolingual
Size Categories:
- unknown
Language Creators:
- found
Annotations Creators:
- expert-generated
Source Datasets:
- original
import glob
import json
import os
from dataclasses import dataclass

import datasets
from datasets import BuilderConfig, SplitGenerator
_CITATION = """\ | |
@article{yang2018scidtb, | |
title={Scidtb: Discourse dependency treebank for scientific abstracts}, | |
author={Yang, An and Li, Sujian}, | |
journal={arXiv preprint arXiv:1806.03653}, | |
year={2018} | |
} | |
""" | |
_DESCRIPTION = """Annotation corpus for discourse relations benefits NLP tasks such as machine translation and question | |
answering. SciDTB is a domain-specific discourse treebank annotated on scientific articles. | |
Different from widely-used RST-DT and PDTB, SciDTB uses dependency trees to represent discourse structure, which is | |
flexible and simplified to some extent but do not sacrifice structural integrity. We discuss the labeling framework, | |
annotation workflow and some statistics about SciDTB. Furthermore, our treebank is made as a benchmark for evaluating | |
discourse dependency parsers, on which we provide several baselines as fundamental work.""" | |
_URL = "https://codeload.github.com/PKU-TANGENT/SciDTB/zip/refs/heads/master" | |
_HOMEPAGE = "https://github.com/PKU-TANGENT/SciDTB" | |
class SciDTBConfig(BuilderConfig):
    """BuilderConfig for SciDTB.

    Args:
        subdirectory_mapping (dict): Maps a split name (e.g. "train", "dev",
            "test") to the subdirectory inside the extracted archive that
            holds that split's ``*.dep`` annotation files.
        encoding (str): Text encoding used when reading the annotation files.
        **kwargs: Forwarded to :class:`datasets.BuilderConfig`
            (name, version, description, ...).
    """

    def __init__(self, subdirectory_mapping, encoding, **kwargs):
        super().__init__(**kwargs)
        self.subdirectory_mapping = subdirectory_mapping
        self.encoding = encoding
class SciDTBDataset(datasets.GeneratorBasedBuilder):
    """Scientific Discourse Treebank (SciDTB) dataset builder.

    Downloads the PKU-TANGENT/SciDTB GitHub repository archive and yields one
    example per ``*.dep`` file (one annotated scientific abstract each).
    """

    BUILDER_CONFIGS = [
        SciDTBConfig(
            name="SciDTB",
            version=datasets.Version("1.0.0", ""),
            description=_DESCRIPTION,
            subdirectory_mapping={
                "train": "SciDTB-master/dataset/train",
                "dev": "SciDTB-master/dataset/dev/gold",
                "test": "SciDTB-master/dataset/test/gold",
            },
            # "utf-8-sig" strips a leading UTF-8 BOM if present in the files.
            encoding="utf-8-sig",
        ),
    ]

    def _info(self):
        """Return the :class:`datasets.DatasetInfo` (feature schema + metadata)."""
        features = datasets.Features(
            {
                # "root" mirrors the top-level key of the .dep JSON files:
                # a flat list of discourse-unit nodes forming a dependency tree.
                "root": datasets.Sequence(
                    {
                        "id": datasets.Value("int32"),
                        "parent": datasets.Value("int32"),
                        "text": datasets.Value("string"),
                        "relation": datasets.Value("string"),
                    }
                ),
                "file_name": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download and extract the archive; return one generator per split."""
        data_dir = dl_manager.download_and_extract(_URL)
        return [
            SplitGenerator(
                name=split,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "dir_path": os.path.join(data_dir, subdir),
                },
            )
            for split, subdir in self.config.subdirectory_mapping.items()
        ]

    def _generate_examples(self, dir_path):
        """Yield ``(key, example)`` pairs from every ``*.dep`` file in *dir_path*.

        The key is the file's basename. Files are sorted because glob order is
        filesystem-dependent; sorting makes example order deterministic.
        """
        for file_path in sorted(glob.glob(os.path.join(dir_path, "*.dep"))):
            with open(file_path, mode="r", encoding=self.config.encoding) as f:
                annotations = json.load(f)
            annotations["file_name"] = os.path.basename(file_path)
            yield annotations["file_name"], annotations