Datasets:
Tasks:
Token Classification
Modalities:
Text
Formats:
parquet
Sub-tasks:
named-entity-recognition
Languages:
English
Size:
1K - 10K
ArXiv:
License:
dipteshkanojia
committed on
Commit
•
8810dba
1
Parent(s):
f985260
modify reader
Browse files
- PLOD-CW.py: +40 −18
PLOD-CW.py
CHANGED
@@ -16,10 +16,10 @@ The dataset can help build sequence labelling models for the task Abbreviation a
|
|
16 |
"""
|
17 |
|
18 |
class PLODfilteredConfig(datasets.BuilderConfig):
|
19 |
-
"""BuilderConfig for
|
20 |
|
21 |
def __init__(self, **kwargs):
|
22 |
-
"""BuilderConfig
|
23 |
Args:
|
24 |
**kwargs: keyword arguments forwarded to super.
|
25 |
"""
|
@@ -27,10 +27,10 @@ class PLODfilteredConfig(datasets.BuilderConfig):
|
|
27 |
|
28 |
|
29 |
class PLODfilteredConfig(datasets.GeneratorBasedBuilder):
|
30 |
-
"""PLOD
|
31 |
|
32 |
BUILDER_CONFIGS = [
|
33 |
-
PLODfilteredConfig(name="
|
34 |
]
|
35 |
|
36 |
def _info(self):
|
@@ -85,9 +85,9 @@ class PLODfilteredConfig(datasets.GeneratorBasedBuilder):
|
|
85 |
|
86 |
_URL = "https://huggingface.co/datasets/surrey-nlp/PLOD-CW/resolve/main/data/"
|
87 |
_URLS = {
|
88 |
-
"train": _URL + "
|
89 |
-
"dev": _URL + "
|
90 |
-
"test": _URL + "
|
91 |
}
|
92 |
|
93 |
def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
|
@@ -101,15 +101,37 @@ class PLODfilteredConfig(datasets.GeneratorBasedBuilder):
|
|
101 |
]
|
102 |
|
103 |
def _generate_examples(self, filepath):
|
104 |
-
"
|
105 |
-
|
106 |
-
|
107 |
-
|
108 |
-
|
109 |
-
|
110 |
-
|
111 |
-
|
112 |
-
|
113 |
-
|
114 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
115 |
}
|
|
|
16 |
"""
|
17 |
|
18 |
class PLODfilteredConfig(datasets.BuilderConfig):
|
19 |
+
"""BuilderConfig for PLOD-CW"""
|
20 |
|
21 |
def __init__(self, **kwargs):
|
22 |
+
"""BuilderConfig for PLOD-CW.
|
23 |
Args:
|
24 |
**kwargs: keyword arguments forwarded to super.
|
25 |
"""
|
|
|
27 |
|
28 |
|
29 |
class PLODfilteredConfig(datasets.GeneratorBasedBuilder):
|
30 |
+
"""PLOD CW dataset."""
|
31 |
|
32 |
BUILDER_CONFIGS = [
|
33 |
+
PLODfilteredConfig(name="PLOD-CW", version=datasets.Version("0.0.5"), description="PLOD CW dataset for NLP 2024"),
|
34 |
]
|
35 |
|
36 |
def _info(self):
|
|
|
85 |
|
86 |
# Base location of the raw CoNLL split files on the Hugging Face Hub.
_URL = "https://huggingface.co/datasets/surrey-nlp/PLOD-CW/resolve/main/data/"
# One remote file per dataset split.
_URLS = dict(
    train=_URL + "train.conll",
    dev=_URL + "dev.conll",
    test=_URL + "test.conll",
)
|
92 |
|
93 |
def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
|
|
|
101 |
]
|
102 |
|
103 |
def _generate_examples(self, filepath):
    """Parse a CoNLL-style split file and yield ``(guid, example)`` pairs.

    Each non-blank line is expected to hold space-separated columns:
    token, POS tag, chunk tag, NER tag. The chunk column (``splits[2]``)
    is present in the file but deliberately not exposed by this dataset,
    so it is simply skipped here. Blank lines and ``-DOCSTART-`` markers
    delimit sentences.

    Args:
        filepath: Path to the downloaded ``.conll`` split file.

    Yields:
        Tuples ``(guid, example)`` where ``example`` has the keys
        ``"id"``, ``"tokens"``, ``"pos_tags"`` and ``"ner_tags"``.
    """
    logger.info("⏳ Generating examples from = %s", filepath)
    with open(filepath, encoding="utf-8") as f:
        guid = 0
        tokens = []
        pos_tags = []
        ner_tags = []
        for line in f:
            # Sentence boundary: a document marker or a blank line.
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                if tokens:
                    yield guid, {
                        "id": str(guid),
                        "tokens": tokens,
                        "pos_tags": pos_tags,
                        "ner_tags": ner_tags,
                    }
                    guid += 1
                    tokens = []
                    pos_tags = []
                    ner_tags = []
            else:
                # Columns: token, POS, chunk (unused), NER.
                splits = line.split(" ")
                tokens.append(splits[0])
                pos_tags.append(splits[1])
                # NER tag is the last column; strip the trailing newline.
                ner_tags.append(splits[3].rstrip())
        # Emit the final sentence when the file does not end with a blank line.
        if tokens:
            yield guid, {
                "id": str(guid),
                "tokens": tokens,
                "pos_tags": pos_tags,
                "ner_tags": ner_tags,
            }
|