holylovenia committed
Commit e099352
1 Parent(s): c62c270

Upload indqner.py with huggingface_hub

Files changed (1):
  indqner.py (+11, -11)
indqner.py CHANGED

@@ -19,10 +19,10 @@ from typing import List
 
 import datasets
 
-from nusacrowd.utils import schemas
-from nusacrowd.utils.common_parser import load_conll_data
-from nusacrowd.utils.configs import NusantaraConfig
-from nusacrowd.utils.constants import Tasks
+from seacrowd.utils import schemas
+from seacrowd.utils.common_parser import load_conll_data
+from seacrowd.utils.configs import SEACrowdConfig
+from seacrowd.utils.constants import Tasks
 
 _CITATION = """\
 @misc{,
@@ -66,7 +66,7 @@ _URLs = {
 }
 _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION]
 _SOURCE_VERSION = "1.0.0"
-_NUSANTARA_VERSION = "1.0.0"
+_SEACROWD_VERSION = "2024.06.20"
 
 
 class IndqnerDataset(datasets.GeneratorBasedBuilder):
@@ -109,18 +109,18 @@ class IndqnerDataset(datasets.GeneratorBasedBuilder):
     ]
 
     BUILDER_CONFIGS = [
-        NusantaraConfig(
+        SEACrowdConfig(
             name="indqner_source",
             version=datasets.Version(_SOURCE_VERSION),
             description="NER dataset from Indonesian translation Quran source schema",
             schema="source",
             subset_id="indqner",
         ),
-        NusantaraConfig(
-            name="indqner_nusantara_seq_label",
+        SEACrowdConfig(
+            name="indqner_seacrowd_seq_label",
             version=datasets.Version(_SOURCE_VERSION),
             description="NER dataset from Indonesian translation Quran Nusantara schema",
-            schema="nusantara_seq_label",
+            schema="seacrowd_seq_label",
             subset_id="indqner",
         ),
     ]
@@ -130,7 +130,7 @@ class IndqnerDataset(datasets.GeneratorBasedBuilder):
     def _info(self):
         if self.config.schema == "source":
             features = datasets.Features({"index": datasets.Value("string"), "tokens": [datasets.Value("string")], "ner_tag": [datasets.Value("string")]})
-        elif self.config.schema == "nusantara_seq_label":
+        elif self.config.schema == "seacrowd_seq_label":
            features = schemas.seq_label_features(self.label_classes)
 
         return datasets.DatasetInfo(
@@ -173,7 +173,7 @@ class IndqnerDataset(datasets.GeneratorBasedBuilder):
             for index, row in enumerate(conll_dataset):
                 ex = {"index": str(index), "tokens": row["sentence"], "ner_tag": row["label"]}
                 yield index, ex
-        elif self.config.schema == "nusantara_seq_label":
+        elif self.config.schema == "seacrowd_seq_label":
             for index, row in enumerate(conll_dataset):
                 ex = {"id": str(index), "tokens": row["sentence"], "labels": row["label"]}
                 yield index, ex
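
For reference, after this change the loader is selected via the new SEACrowd config names instead of the old Nusantara ones. A minimal usage sketch, assuming the updated indqner.py and the seacrowd package are available locally and that you load it as a script with the datasets library (trust_remote_code is needed on recent datasets releases; split names are defined elsewhere in the script and are not shown in this diff):

    # Sketch: load IndQNER with the new SEACrowd sequence-labeling schema.
    import datasets

    indqner = datasets.load_dataset(
        "indqner.py",                        # path to the updated loading script
        name="indqner_seacrowd_seq_label",   # previously "indqner_nusantara_seq_label"
        trust_remote_code=True,              # required by newer `datasets` versions for script loaders
    )

    # Examples under the seacrowd_seq_label schema carry "id", "tokens", and "labels",
    # matching the _generate_examples branch above.
    print(indqner)

The "indqner_source" config still yields the original "index" / "tokens" / "ner_tag" fields, so only consumers of the Nusantara-schema config need to update their config and field names.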