Dataset: surrey-nlp/PLOD-filtered
Tasks: Token Classification
Modalities: Text
Languages: English
Size: 100K - 1M
Tags: abbreviation-detection
dipteshkanojia committed commit 79750b5 ("changes") · 1 parent: 4575131

Files changed: PLOD-filtered.py (+37 −21)
PLOD-filtered.py CHANGED
@@ -14,14 +14,6 @@ This is the dataset repository for PLOD Dataset accepted to be published at LREC
 The dataset can help build sequence labelling models for the task Abbreviation Detection.
 """
 
-_TRAINING_FILE_URL = "https://huggingface.co/datasets/surrey-nlp/PLOD-filtered/resolve/main/data/PLOS-train70-filtered-pos_bio.json"
-_DEV_FILE_URL = "https://huggingface.co/datasets/surrey-nlp/PLOD-filtered/resolve/main/data/PLOS-val15-filtered-pos_bio.json"
-_TEST_FILE_URL = "https://huggingface.co/datasets/surrey-nlp/PLOD-filtered/resolve/main/data/PLOS-test15-filtered-pos_bio.json"
-
-_TRAINING_FILE = "PLOS-train70-filtered-pos_bio.json"
-_DEV_FILE = "PLOS-val15-filtered-pos_bio.json"
-_TEST_FILE = "PLOS-test15-filtered-pos_bio.json"
-
 class PLODfilteredConfig(datasets.BuilderConfig):
     """BuilderConfig for Conll2003"""
 
@@ -89,25 +81,49 @@ class PLODfilteredConfig(datasets.GeneratorBasedBuilder):
             homepage="https://github.com/surrey-nlp/PLOD-AbbreviationDetection",
             citation=_CITATION,
         )
+# _TRAINING_FILE_URL = "https://huggingface.co/datasets/surrey-nlp/PLOD-filtered/resolve/main/data/PLOS-train70-filtered-pos_bio.json"
+# _DEV_FILE_URL = "https://huggingface.co/datasets/surrey-nlp/PLOD-filtered/resolve/main/data/PLOS-val15-filtered-pos_bio.json"
+# _TEST_FILE_URL = "https://huggingface.co/datasets/surrey-nlp/PLOD-filtered/resolve/main/data/PLOS-test15-filtered-pos_bio.json"
 
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        downloaded_train = dl_manager.download_and_extract(_TRAINING_FILE_URL)
-        downloaded_val = dl_manager.download_and_extract(_DEV_FILE_URL)
-        downloaded_test = dl_manager.download_and_extract(_TEST_FILE_URL)
+# _TRAINING_FILE = "PLOS-train70-filtered-pos_bio.json"
+# _DEV_FILE = "PLOS-val15-filtered-pos_bio.json"
+# _TEST_FILE = "PLOS-test15-filtered-pos_bio.json"
 
-        data_files = {
-            "train": _TRAINING_FILE,
-            "dev": _DEV_FILE,
-            "test": _TEST_FILE,
-        }
+    _URL = "https://huggingface.co/datasets/surrey-nlp/PLOD-filtered/resolve/main/data/"
+    _URLS = {
+        "train": _URL + "PLOS-train70-filtered-pos_bio.json",
+        "dev": _URL + "PLOS-val15-filtered-pos_bio.json",
+        "test": _URL + "PLOS-test15-filtered-pos_bio.json"
+    }
+
+    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+        urls_to_download = self._URLS
+        downloaded_files = dl_manager.download_and_extract(urls_to_download)
 
         return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["train"]}),
-            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_files["dev"]}),
-            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["test"]}),
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
+            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]})
         ]
 
+    # def _split_generators(self, dl_manager):
+    #     """Returns SplitGenerators."""
+    #     downloaded_train = dl_manager.download_and_extract(_TRAINING_FILE_URL)
+    #     downloaded_val = dl_manager.download_and_extract(_DEV_FILE_URL)
+    #     downloaded_test = dl_manager.download_and_extract(_TEST_FILE_URL)
+
+    # data_files = {
+    #     "train": _TRAINING_FILE,
+    #     "dev": _DEV_FILE,
+    #     "test": _TEST_FILE,
+    # }
+
+    # return [
+    #     datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["train"]}),
+    #     datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_files["dev"]}),
+    #     datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["test"]}),
+    # ]
+
     def _generate_examples(self, filepath):
         logger.info("⏳ Generating examples from = %s", filepath)
         with open(filepath, encoding="utf-8") as f:
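The capture of the diff ends inside `_generate_examples`. For orientation only, here is a minimal sketch of how a JSON-backed token-classification generator of this shape typically continues. Everything below the `with open(...)` line is an assumption: the field names `tokens`, `pos_tags`, and `ner_tags` are guessed from the dataset's task tags, not copied from the author's script.

import json
import logging

logger = logging.getLogger(__name__)

def generate_examples(filepath):
    # Mirrors the loader's logging line, then reads the whole split file.
    logger.info("⏳ Generating examples from = %s", filepath)
    with open(filepath, encoding="utf-8") as f:
        data = json.load(f)  # assumption: each split file is one JSON array
    for idx, record in enumerate(data):
        # Hypothetical feature names; the real ones are declared in _info().
        yield idx, {
            "id": str(idx),
            "tokens": record["tokens"],
            "pos_tags": record["pos_tags"],
            "ner_tags": record["ner_tags"],
        }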
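The heart of the refactor is that `DownloadManager.download_and_extract` accepts a nested structure of URLs (here a dict) and returns cached local paths with the same keys, so the three separate download calls collapse into one. A standalone sketch of that behaviour, assuming the `datasets` library and network access; constructing a `DownloadManager` by hand is for illustration only, since a loading script receives one from the library:

from datasets import DownloadManager

_URL = "https://huggingface.co/datasets/surrey-nlp/PLOD-filtered/resolve/main/data/"
_URLS = {
    "train": _URL + "PLOS-train70-filtered-pos_bio.json",
    "dev": _URL + "PLOS-val15-filtered-pos_bio.json",
    "test": _URL + "PLOS-test15-filtered-pos_bio.json",
}

dl_manager = DownloadManager()
downloaded_files = dl_manager.download_and_extract(_URLS)  # dict in, dict out
print(downloaded_files["train"])  # local cache path of the train split file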
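For completeness, the end-to-end path that exercises both `_split_generators` and `_generate_examples`, assuming the `datasets` library is installed (the repository id comes from the URLs above):

from datasets import load_dataset

plod = load_dataset("surrey-nlp/PLOD-filtered")
print(plod)              # DatasetDict with train / validation / test splits
print(plod["train"][0])  # first example as a dict of features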
|