Dataset: surrey-nlp/PLOD-filtered
Tasks: Token Classification
Modalities: Text
Languages: English
Size: 100K - 1M
Tags: abbreviation-detection
import os
import datasets
from typing import List
import json
logger = datasets.logging.get_logger(__name__)
_CITATION = """
"""
_DESCRIPTION = """
This is the dataset repository for the PLOD dataset, accepted for publication at LREC 2022.
The dataset can help build sequence labelling models for the task of abbreviation detection.
"""
class PLODfilteredConfig(datasets.BuilderConfig):
    """BuilderConfig for the PLOD filtered dataset."""

    def __init__(self, **kwargs):
        """BuilderConfig for the PLOD filtered dataset.

        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super(PLODfilteredConfig, self).__init__(**kwargs)
class PLODfiltered(datasets.GeneratorBasedBuilder):
    """PLOD Filtered dataset."""

    BUILDER_CONFIGS = [
        PLODfilteredConfig(name="PLODfiltered", version=datasets.Version("0.0.2"), description="PLOD filtered dataset"),
    ]
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    # Part-of-speech tag inventory: Universal POS tags plus CONJ and SPACE.
                    "pos_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "ADJ",
                                "ADP",
                                "ADV",
                                "AUX",
                                "CONJ",
                                "CCONJ",
                                "DET",
                                "INTJ",
                                "NOUN",
                                "NUM",
                                "PART",
                                "PRON",
                                "PROPN",
                                "PUNCT",
                                "SCONJ",
                                "SYM",
                                "VERB",
                                "X",
                                "SPACE",
                            ]
                        )
                    ),
                    # BIO-style labels: AC marks abbreviations, LF marks their long forms.
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "B-O",
                                "B-AC",
                                "I-AC",
                                "B-LF",
                                "I-LF",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/surrey-nlp/PLOD-AbbreviationDetection",
            citation=_CITATION,
        )
    _URL = "https://huggingface.co/datasets/surrey-nlp/PLOD-filtered/resolve/main/data/"
    _URLS = {
        "train": _URL + "PLOS-train70-filtered-pos_bio.json",
        "dev": _URL + "PLOS-val15-filtered-pos_bio.json",
        "test": _URL + "PLOS-test15-filtered-pos_bio.json",
    }
    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        urls_to_download = self._URLS
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]
    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        logger.info("generating examples from = %s", filepath)
        with open(filepath) as f:
            plod = json.load(f)
            # Each record already carries tokenized text with aligned POS and NER tag ids.
            for record in plod:
                id_ = int(record['id'])
                yield id_, {
                    "id": str(id_),
                    "tokens": record['tokens'],
                    "pos_tags": record['pos_tags'],
                    "ner_tags": record['ner_tags'],
                }
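
# Minimal sketch of decoding the integer class labels back to their string
# names, assuming the features declared in _info() above:
#
#   from datasets import load_dataset
#   ds = load_dataset("surrey-nlp/PLOD-filtered", split="train")
#   pos_labels = ds.features["pos_tags"].feature
#   ner_labels = ds.features["ner_tags"].feature
#   example = ds[0]
#   for token, pos, ner in zip(example["tokens"], example["pos_tags"], example["ner_tags"]):
#       print(token, pos_labels.int2str(pos), ner_labels.int2str(ner))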