holylovenia committed
Commit
b3e6654
1 Parent(s): bbdf347

Upload indonlu_nergrit.py with huggingface_hub

Files changed (1)
  1. indonlu_nergrit.py +12 -12
indonlu_nergrit.py CHANGED
@@ -19,10 +19,10 @@ from typing import List
 
 import datasets
 
-from nusacrowd.utils import schemas
-from nusacrowd.utils.common_parser import load_conll_data
-from nusacrowd.utils.configs import NusantaraConfig
-from nusacrowd.utils.constants import Tasks
+from seacrowd.utils import schemas
+from seacrowd.utils.common_parser import load_conll_data
+from seacrowd.utils.configs import SEACrowdConfig
+from seacrowd.utils.constants import Tasks
 
 _CITATION = """\
 @inproceedings{wilie2020indonlu,
@@ -58,7 +58,7 @@ _URLs = {
 }
 _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION]
 _SOURCE_VERSION = "1.0.0"
-_NUSANTARA_VERSION = "1.0.0"
+_SEACROWD_VERSION = "2024.06.20"
 
 
 class IndonluNergritDataset(datasets.GeneratorBasedBuilder):
@@ -67,18 +67,18 @@ class IndonluNergritDataset(datasets.GeneratorBasedBuilder):
     label_classes = ["I-PERSON", "B-ORGANISATION", "I-ORGANISATION", "B-PLACE", "I-PLACE", "O", "B-PERSON"]
 
     BUILDER_CONFIGS = [
-        NusantaraConfig(
+        SEACrowdConfig(
             name="indonlu_nergrit_source",
             version=datasets.Version(_SOURCE_VERSION),
             description="IndoNLU NERGrit source schema",
             schema="source",
             subset_id="indonlu_nergrit",
         ),
-        NusantaraConfig(
-            name="indonlu_nergrit_nusantara_seq_label",
-            version=datasets.Version(_NUSANTARA_VERSION),
+        SEACrowdConfig(
+            name="indonlu_nergrit_seacrowd_seq_label",
+            version=datasets.Version(_SEACROWD_VERSION),
             description="IndoNLU NERGrit Nusantara schema",
-            schema="nusantara_seq_label",
+            schema="seacrowd_seq_label",
             subset_id="indonlu_nergrit",
         ),
     ]
@@ -89,7 +89,7 @@ class IndonluNergritDataset(datasets.GeneratorBasedBuilder):
         features = None
         if self.config.schema == "source":
             features = datasets.Features({"index": datasets.Value("string"), "tokens": [datasets.Value("string")], "ner_tag": [datasets.Value("string")]})
-        elif self.config.schema == "nusantara_seq_label":
+        elif self.config.schema == "seacrowd_seq_label":
             features = schemas.seq_label_features(self.label_classes)
 
         return datasets.DatasetInfo(
@@ -132,7 +132,7 @@ class IndonluNergritDataset(datasets.GeneratorBasedBuilder):
             for index, row in enumerate(conll_dataset):
                 ex = {"index": str(index), "tokens": row["sentence"], "ner_tag": row["label"]}
                 yield index, ex
-        elif self.config.schema == "nusantara_seq_label":
+        elif self.config.schema == "seacrowd_seq_label":
            for index, row in enumerate(conll_dataset):
                 ex = {"id": str(index), "tokens": row["sentence"], "labels": row["label"]}
                 yield index, ex
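
For reference, a minimal sketch of how the renamed SEACrowd config might be loaded once this script is in place. The local script path, the trust_remote_code flag, and the split name are assumptions and are not part of this commit; only the config name and the yielded field names come from the diff above.

import datasets

# Hypothetical usage: point load_dataset at the uploaded script (or at the
# dataset repo that hosts it) and select the config renamed in this commit.
ds = datasets.load_dataset(
    "indonlu_nergrit.py",                       # assumed local path to the script
    name="indonlu_nergrit_seacrowd_seq_label",  # config name introduced in this commit
    trust_remote_code=True,                     # assumption: required for script-based datasets
)

# The seacrowd_seq_label branch of _generate_examples yields examples with
# "id", "tokens", and "labels"; the "train" split name is an assumption.
print(ds["train"][0])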