Commit 6bea591
Parent: f685d71
Upload saltik.py with huggingface_hub
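For context, a commit message like the one above is what the huggingface_hub upload API writes by default; a minimal sketch of how such a commit is produced, assuming the file sits in the working directory (the repo_id below is a placeholder, not taken from this page):

from huggingface_hub import HfApi

api = HfApi()
# Hypothetical repo_id; substitute the actual dataset repository.
api.upload_file(
    path_or_fileobj="saltik.py",
    path_in_repo="saltik.py",
    repo_id="your-username/saltik",
    repo_type="dataset",
    commit_message="Upload saltik.py with huggingface_hub",
)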
saltik.py ADDED
import json
from pathlib import Path
from typing import Dict, List, Tuple

import datasets
import jsonlines

from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """\
@article{,
  author = {Audah, Hanif Arkan and Yuliawati, Arlisa and Alfina, Ika},
  title = {A Comparison Between SymSpell and a Combination of Damerau-Levenshtein Distance With the Trie Data Structure},
  journal = {2023 10th International Conference on Advanced Informatics: Concept, Theory and Application (ICAICTA)},
  volume = {},
  year = {2023},
  url = {https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=10390399&casa_token=HtJUCIGGlWYAAAAA:q8ll1RWmpHtSAq2Qp5uQAE1NJETx7tUYFZIvTO1IWoaYy4eqFETSsm9p6C7tJwLZBGq5y8zc3A&tag=1},
  doi = {},
  biburl = {https://github.com/ir-nlp-csui/saltik?tab=readme-ov-file#references},
  bibsource = {https://github.com/ir-nlp-csui/saltik?tab=readme-ov-file#references}
}
"""

_DATASETNAME = "saltik"

_DESCRIPTION = """\
Saltik is a dataset for benchmarking the accuracy of non-word error correction methods on Indonesian words.
It consists of 58,532 non-word errors generated from 3,000 of the most popular Indonesian words.
"""

_HOMEPAGE = "https://github.com/ir-nlp-csui/saltik"

_LANGUAGES = ["ind"]

_LICENSE = Licenses.AGPL_3_0.value

_LOCAL = False

_URLS = {
    _DATASETNAME: "https://raw.githubusercontent.com/ir-nlp-csui/saltik/main/saltik.json",
}

_SUPPORTED_TASKS = []

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"


class Saltik(datasets.GeneratorBasedBuilder):
    """Saltik consists of 58,532 non-word errors generated from 3,000 of the most popular Indonesian words."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}",
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            # Each record pairs a correct word with the non-word errors derived from it.
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "word": datasets.Value("string"),
                    "errors": [
                        {
                            "typo": datasets.Value("string"),
                            "error_type": datasets.Value("string"),
                        }
                    ],
                }
            )
        else:
            raise NotImplementedError()

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        urls = _URLS[_DATASETNAME]
        file_path = dl_manager.download(urls)

        # saltik.json maps each word to a list of {"typo", "error_type"} entries;
        # flatten it into one record per word and cache it as JSON Lines.
        data = self._read_json(file_path)
        processed_data = []
        for idx, word in enumerate(data.keys()):
            # Cast the id to str to match the "id" string feature declared in _info.
            processed_data.append({"id": str(idx), "word": word, "errors": data[word]})
        self._write_jsonl(file_path + ".jsonl", processed_data)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # Whatever you put in gen_kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": file_path + ".jsonl",
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""
        if self.config.schema == "source":
            with jsonlines.open(filepath) as f:
                for i, each_data in enumerate(f.iter()):
                    yield i, {
                        "id": each_data["id"],
                        "word": each_data["word"],
                        "errors": each_data["errors"],
                    }

    def _read_json(self, filepath: Path):
        """Reads the raw saltik.json file, which is a single JSON object, not JSON Lines."""
        with open(filepath) as user_file:
            parsed_json = json.load(user_file)
        return parsed_json

    def _write_jsonl(self, filepath, values):
        """Writes one JSON object per line to filepath."""
        with jsonlines.open(filepath, "w") as writer:
            for line in values:
                writer.write(line)
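For reference, a script-based loader like this is normally exercised through datasets.load_dataset pointed at the script file; a minimal usage sketch, assuming the seacrowd package is installed and saltik.py is in the working directory (recent datasets releases also require trust_remote_code for script loaders):

import datasets

# Load the source-schema config defined above; the builder exposes a single "train" split.
saltik = datasets.load_dataset("saltik.py", name="saltik_source", trust_remote_code=True)
print(saltik["train"][0])  # e.g. {"id": "0", "word": ..., "errors": [{"typo": ..., "error_type": ...}, ...]}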