dipteshkanojia committed
Commit 8810dba
1 Parent(s): f985260

modify reader

Files changed (1)
  1. PLOD-CW.py +40 -18
PLOD-CW.py CHANGED
@@ -16,10 +16,10 @@ The dataset can help build sequence labelling models for the task Abbreviation a
 """
 
 class PLODfilteredConfig(datasets.BuilderConfig):
-    """BuilderConfig for Conll2003"""
+    """BuilderConfig for PLOD-CW"""
 
     def __init__(self, **kwargs):
-        """BuilderConfig forConll2003.
+        """BuilderConfig for PLOD-CW.
         Args:
           **kwargs: keyword arguments forwarded to super.
         """
@@ -27,10 +27,10 @@ class PLODfilteredConfig(datasets.BuilderConfig):
 
 
 class PLODfilteredConfig(datasets.GeneratorBasedBuilder):
-    """PLOD Filtered dataset."""
+    """PLOD CW dataset."""
 
     BUILDER_CONFIGS = [
-        PLODfilteredConfig(name="PLODfiltered", version=datasets.Version("0.0.2"), description="PLOD filtered dataset"),
+        PLODfilteredConfig(name="PLOD-CW", version=datasets.Version("0.0.5"), description="PLOD CW dataset for NLP 2024"),
     ]
 
     def _info(self):
@@ -85,9 +85,9 @@ class PLODfilteredConfig(datasets.GeneratorBasedBuilder):
 
     _URL = "https://huggingface.co/datasets/surrey-nlp/PLOD-CW/resolve/main/data/"
     _URLS = {
-        "train": _URL + "data/train.conll",
-        "dev": _URL + "data/train.conll",
-        "test": _URL + "data/train.conll"
+        "train": _URL + "train.conll",
+        "dev": _URL + "dev.conll",
+        "test": _URL + "test.conll"
     }
 
     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
@@ -101,15 +101,37 @@ class PLODfilteredConfig(datasets.GeneratorBasedBuilder):
         ]
 
     def _generate_examples(self, filepath):
-        """This function returns the examples in the raw (text) form."""
-        logger.info("generating examples from = %s", filepath)
-        with open(filepath) as f:
-            plod = json.load(f)
-            for object in plod:
-                id_ = int(object['id'])
-                yield id_, {
-                    "id": str(id_),
-                    "tokens": object['tokens'],
-                    "pos_tags": object['pos_tags'],
-                    "ner_tags": object['ner_tags'],
+        logger.info(" Generating examples from = %s", filepath)
+        with open(filepath, encoding="utf-8") as f:
+            guid = 0
+            tokens = []
+            pos_tags = []
+            chunk_tags = []
+            ner_tags = []
+            for line in f:
+                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
+                    if tokens:
+                        yield guid, {
+                            "id": str(guid),
+                            "tokens": tokens,
+                            "pos_tags": pos_tags,
+                            "ner_tags": ner_tags,
+                        }
+                        guid += 1
+                        tokens = []
+                        pos_tags = []
+                        chunk_tags = []
+                        ner_tags = []
+                else:
+                    splits = line.split(" ")
+                    tokens.append(splits[0])
+                    pos_tags.append(splits[1])
+                    ner_tags.append(splits[3].rstrip())
+            # last example
+            if tokens:
+                yield guid, {
+                    "id": str(guid),
+                    "tokens": tokens,
+                    "pos_tags": pos_tags,
+                    "ner_tags": ner_tags,
                 }
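
For reference, the rewritten _generate_examples expects whitespace-separated, CoNLL-style rows in which column 0 is the token, column 1 the POS tag and column 3 the abbreviation/NER tag (the third column is never read, and chunk_tags stays empty and is not yielded). A minimal sketch of that per-line parsing on two hypothetical rows; the example tokens and tag strings are illustrative, not taken from the released files:

# Two hypothetical rows in the 4-column layout implied by splits[0] /
# splits[1] / splits[3]; the real PLOD-CW files may use different tags.
sample = [
    "RNA NN _ B-AC\n",
    "interference NN _ B-LF\n",
    "\n",                      # blank line terminates the sentence
]

tokens, pos_tags, ner_tags = [], [], []
sentences = []
for line in sample:
    if line.startswith("-DOCSTART-") or line == "" or line == "\n":
        if tokens:
            sentences.append({"tokens": tokens, "pos_tags": pos_tags, "ner_tags": ner_tags})
            tokens, pos_tags, ner_tags = [], [], []
    else:
        splits = line.split(" ")
        tokens.append(splits[0])              # column 0: token
        pos_tags.append(splits[1])            # column 1: POS tag
        ner_tags.append(splits[3].rstrip())   # column 3: abbreviation tag

print(sentences)
# [{'tokens': ['RNA', 'interference'], 'pos_tags': ['NN', 'NN'], 'ner_tags': ['B-AC', 'B-LF']}]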
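
The diff above fixes _URLS but does not include the body of _split_generators; in a GeneratorBasedBuilder it is typically wired up as below. This is a sketch of the usual pattern, not necessarily the exact body in this script:

import datasets
from typing import List

def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
    # Download the train/dev/test .conll files listed in _URLS and pass
    # each local path on to _generate_examples via gen_kwargs.
    downloaded_files = dl_manager.download_and_extract(self._URLS)
    return [
        datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
        datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
        datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
    ]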
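
With the corrected URLs and reader, the dataset should load through the standard datasets API; the repository id comes from the _URL in the script, and the split names assume the usual train/validation/test wiring:

from datasets import load_dataset

# Load all three splits of surrey-nlp/PLOD-CW.
plod_cw = load_dataset("surrey-nlp/PLOD-CW")
print(plod_cw)                              # DatasetDict with the three splits
print(plod_cw["train"][0]["ner_tags"][:10])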