holylovenia committed
Commit 1558eb0
1 Parent(s): decd96f

Upload nerp.py with huggingface_hub

Files changed (1):
  1. nerp.py +130 -0
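
The commit message says the file was uploaded with huggingface_hub. A minimal sketch of such an upload (the repo id shown is a hypothetical placeholder, not taken from this commit):

    from huggingface_hub import HfApi

    api = HfApi()
    # Upload the loading script to the root of the dataset repository.
    api.upload_file(
        path_or_fileobj="nerp.py",
        path_in_repo="nerp.py",
        repo_id="holylovenia/nerp",  # hypothetical repo id
        repo_type="dataset",
    )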
nerp.py ADDED
@@ -0,0 +1,130 @@
from pathlib import Path
from typing import List

import datasets

from nusacrowd.utils import schemas
from nusacrowd.utils.common_parser import load_conll_data
from nusacrowd.utils.configs import NusantaraConfig
from nusacrowd.utils.constants import (DEFAULT_NUSANTARA_VIEW_NAME,
                                       DEFAULT_SOURCE_VIEW_NAME, Tasks)

_DATASETNAME = "nerp"
_SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
_UNIFIED_VIEW_NAME = DEFAULT_NUSANTARA_VIEW_NAME

_LANGUAGES = ["ind"]  # ISO 639-3 code for Indonesian
_LOCAL = False
_CITATION = """\
@inproceedings{hoesen2018investigating,
  title={Investigating bi-lstm and crf with pos tag embedding for indonesian named entity tagger},
  author={Hoesen, Devin and Purwarianti, Ayu},
  booktitle={2018 International Conference on Asian Language Processing (IALP)},
  pages={35--38},
  year={2018},
  organization={IEEE}
}
"""

_DESCRIPTION = """\
The NERP dataset (Hoesen and Purwarianti, 2018) contains texts collected from several Indonesian news websites, annotated with five labels:
- PER (name of person)
- LOC (name of location)
- IND (name of product or brand)
- EVT (name of event)
- FNB (name of food and beverage)
NERP uses the IOB chunking format, like the TermA dataset.
"""
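
# Illustrative IOB-tagged sentence (a constructed example, not drawn from the
# corpus). Tags follow label_classes below; PPL/PLC appear to correspond to
# the PER/LOC labels in the description above:
#   Budi        B-PPL
#   berkunjung  O
#   ke          O
#   Jakarta     B-PLC
#   Pusat       I-PLC
#   .           O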

_HOMEPAGE = "https://github.com/IndoNLP/indonlu"

_LICENSE = "Creative Commons Attribution Share-Alike 4.0 International"

_URLs = {
    "train": "https://raw.githubusercontent.com/IndoNLP/indonlu/master/dataset/nerp_ner-prosa/train_preprocess.txt",
    "validation": "https://raw.githubusercontent.com/IndoNLP/indonlu/master/dataset/nerp_ner-prosa/valid_preprocess.txt",
    # As the filename suggests, the test split ships with masked labels.
    "test": "https://raw.githubusercontent.com/IndoNLP/indonlu/master/dataset/nerp_ner-prosa/test_preprocess_masked_label.txt",
}

_SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION]

_SOURCE_VERSION = "1.0.0"
_NUSANTARA_VERSION = "1.0.0"


class NerpDataset(datasets.GeneratorBasedBuilder):
    """NERP is an NER tagging dataset containing about 6,720 train, 840 validation, and 840 test sentences, annotated with 11 classes."""

    label_classes = ["B-PPL", "B-PLC", "B-EVT", "B-IND", "B-FNB", "I-PPL", "I-PLC", "I-EVT", "I-IND", "I-FNB", "O"]

    BUILDER_CONFIGS = [
        NusantaraConfig(
            name="nerp_source",
            version=datasets.Version(_SOURCE_VERSION),
            description="NERP source schema",
            schema="source",
            subset_id="nerp",
        ),
        NusantaraConfig(
            name="nerp_nusantara_seq_label",
            version=datasets.Version(_NUSANTARA_VERSION),
            description="NERP Nusantara schema",
            schema="nusantara_seq_label",
            subset_id="nerp",
        ),
    ]

    DEFAULT_CONFIG_NAME = "nerp_source"

    def _info(self):
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "index": datasets.Value("string"),
                    "tokens": [datasets.Value("string")],
                    "ner_tag": [datasets.Value("string")],
                }
            )
        elif self.config.schema == "nusantara_seq_label":
            features = schemas.seq_label_features(self.label_classes)

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        train_tsv_path = Path(dl_manager.download_and_extract(_URLs["train"]))
        validation_tsv_path = Path(dl_manager.download_and_extract(_URLs["validation"]))
        test_tsv_path = Path(dl_manager.download_and_extract(_URLs["test"]))
        data_files = {
            "train": train_tsv_path,
            "validation": validation_tsv_path,
            "test": test_tsv_path,
        }

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": data_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": data_files["validation"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": data_files["test"]},
            ),
        ]

    def _generate_examples(self, filepath: Path):
        # load_conll_data yields one dict per sentence, with "sentence" holding
        # the tokens and "label" the per-token tags.
        conll_dataset = load_conll_data(filepath)

        if self.config.schema == "source":
            for i, row in enumerate(conll_dataset):
                ex = {"index": str(i), "tokens": row["sentence"], "ner_tag": row["label"]}
                yield i, ex
        elif self.config.schema == "nusantara_seq_label":
            for i, row in enumerate(conll_dataset):
                ex = {"id": str(i), "tokens": row["sentence"], "labels": row["label"]}
                yield i, ex
        else:
            raise ValueError(f"Invalid config: {self.config.name}")
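
With the script in place, the dataset loads through the standard datasets API. A usage sketch, assuming nusacrowd and its dependencies are installed and the script is available locally (newer datasets releases may additionally require trust_remote_code=True):

    import datasets

    # "nerp_source" is the default config; pass name="nerp_nusantara_seq_label"
    # for the unified sequence-labeling schema instead.
    nerp = datasets.load_dataset("nerp.py", name="nerp_source")
    print(nerp["train"][0])  # {"index": "0", "tokens": [...], "ner_tag": [...]}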