holylovenia committed
Commit 08325f6
1 Parent(s): 9f49104

Upload kopi_nllb.py with huggingface_hub

Files changed (1)
  1. kopi_nllb.py +159 -0
kopi_nllb.py ADDED
@@ -0,0 +1,159 @@
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""KoPI-NLLB corpus."""
import json

import datasets
import zstandard as zstd

from nusacrowd.utils import schemas
from nusacrowd.utils.configs import NusantaraConfig
from nusacrowd.utils.constants import (DEFAULT_NUSANTARA_VIEW_NAME,
                                       DEFAULT_SOURCE_VIEW_NAME, Tasks)

logger = datasets.logging.get_logger(__name__)

_CITATION = """
Heffernan et al., Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages. arXiv, https://arxiv.org/abs/2205.12654, 2022.
NLLB Team et al., No Language Left Behind: Scaling Human-Centered Machine Translation. arXiv, https://arxiv.org/abs/2207.04672, 2022.
"""
_DESCRIPTION = """\
KoPI (Korpus Perayapan Indonesia)-NLLB contains only the Indonesian language family (Acehnese, Balinese, Banjarese, Indonesian, Javanese, Minangkabau, and Sundanese), extracted from the NLLB dataset, allenai/nllb.

Each language set is additionally filtered with deduplication techniques: exact-hash (MD5) deduplication and MinHash LSH near-deduplication.
"""
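
# The two filtering steps named above, sketched for illustration only: the
# published subsets were already filtered upstream, nothing below is used by
# this loader, and the `datasketch` dependency is an assumption, not a known
# part of the actual pipeline.
def _exact_dedup_key(text):
    """MD5 hash of the document text, used to drop exact duplicates."""
    import hashlib

    return hashlib.md5(text.encode("utf-8")).hexdigest()


def _make_near_dup_checker(threshold=0.8, num_perm=128):
    """MinHash LSH near-duplicate check over whitespace tokens."""
    from datasketch import MinHash, MinHashLSH

    lsh = MinHashLSH(threshold=threshold, num_perm=num_perm)

    def seen_before(doc_id, text):
        m = MinHash(num_perm=num_perm)
        for token in set(text.split()):
            m.update(token.encode("utf-8"))
        if lsh.query(m):  # any indexed doc above the Jaccard threshold?
            return True
        lsh.insert(doc_id, m)
        return False

    return seen_before
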
_TYPE = ["raw", "dedup", "neardup"]

_CONF_LANG = ["ace_Latn", "ban_Latn", "bjn_Latn", "ind_Latn", "jav_Latn", "min_Latn", "sun_Latn"]

_CONFIGS = []
for j in _CONF_LANG:
    for m in _TYPE:
        _CONFIGS.append(j + "-" + m)

_ALL_CONFIG = ["all-raw", "all-dedup", "all-neardup"] + _CONFIGS

_HOMEPAGE = "https://huggingface.co/datasets/munggok/KoPI-NLLB"

_LICENSE = "ODC_C"

_BASE_URL = "https://huggingface.co/datasets/munggok/KoPI-NLLB/resolve/main/{tipe}/{lang}.json.zst"

_DATASETNAME = "kopi_nllb"

_SUPPORTED_TASKS = [Tasks.SELF_SUPERVISED_PRETRAINING]

_LANGUAGES = ["ind", "jav", "ace", "ban", "bjn", "min", "sun"]

_NUSANTARA_VERSION = "1.0.0"

_SOURCE_VERSION = "2022.09.13"

_LOCAL = False

_SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME

_UNIFIED_VIEW_NAME = DEFAULT_NUSANTARA_VIEW_NAME

_URL = "https://huggingface.co/datasets/allenai/nllb"


def nusantara_config_constructor(lang, schema, version):
    """Construct NusantaraConfig"""
    if schema != "source" and schema != "nusantara_ssp":
        raise ValueError(f"Invalid schema: {schema}")

    if lang == "":
        raise ValueError(f"Subset is required. Choose one of these subsets: {_ALL_CONFIG}.")
    elif lang in _ALL_CONFIG:
        return NusantaraConfig(
            name=f"{_DATASETNAME}_{lang}_{schema}",
            version=datasets.Version(version),
            description=f"KoPI-NLLB with {schema} schema for {lang}",
            schema=schema,
            subset_id="kopi_nllb",
        )
    else:
        raise ValueError(f"Invalid subset: {lang}. Choose one of these subsets: {_ALL_CONFIG}.")

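
# For illustration: nusantara_config_constructor("ace_Latn-dedup", "source", _SOURCE_VERSION)
# builds a config named "kopi_nllb_ace_Latn-dedup_source".
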

class KoPINLLBConfig(datasets.BuilderConfig):
    """BuilderConfig for the Clean KoPI corpus."""

    def __init__(self, **kwargs):
        """BuilderConfig for Clean KoPI corpus.
        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


class KoPINLLB(datasets.GeneratorBasedBuilder):
    """KoPI NLLB corpus."""

    BUILDER_CONFIGS = [nusantara_config_constructor(sn, "source", _SOURCE_VERSION) for sn in _ALL_CONFIG] + [nusantara_config_constructor(sn, "nusantara_ssp", _NUSANTARA_VERSION) for sn in _ALL_CONFIG]

    def _info(self):
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "url": datasets.Value("string"),
                    "score": datasets.Value("float32"),
                    "source": datasets.Value("string"),
                }
            )
        elif self.config.schema == "nusantara_ssp":
            features = schemas.self_supervised_pretraining.features
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )


    def _split_generators(self, dl_manager):
        # Strip the schema suffix and the dataset-name prefix from the config
        # name, leaving e.g. "ace_Latn-dedup", whose two halves select the
        # language file and the dedup variant in _BASE_URL
        # (here: .../dedup/ace_Latn.json.zst).
        name = self.config.name.replace("_" + self.config.schema, "")
        name = name.replace(_DATASETNAME + "_", "")
        split_name = name.split("-")
        if split_name[0] == "all":
            train = [_BASE_URL.format(tipe=split_name[1], lang=m) for m in _CONF_LANG]
        else:
            train = [_BASE_URL.format(tipe=split_name[1], lang=split_name[0])]
        train_downloaded_files = dl_manager.download(train)
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_downloaded_files})]


    def _generate_examples(self, filepaths):
        """This function returns the examples in the raw (text) form by iterating on all the files."""
        id_ = 0
        for filepath in filepaths:
            logger.info(f"Generating examples from {filepath}")
            # Each file is a zstd-compressed JSON-lines file; stream-decompress
            # it rather than loading it into memory all at once.
            with zstd.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
                for line in f:
                    if line:
                        example = json.loads(line)
                        if self.config.schema == "nusantara_ssp":
                            yield id_, {"id": str(id_), "text": example["text"]}
                        else:
                            yield id_, {"text": example["text"], "url": example["url"], "source": example["source"], "score": float(example["score"])}
                        id_ += 1
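
A minimal usage sketch (assuming a local copy of kopi_nllb.py and a datasets version that still supports loading scripts; the config name below follows the "{dataset}_{lang}-{type}_{schema}" pattern built by this script):

    import datasets

    # Deduplicated Acehnese subset with the source schema; decompression
    # requires the zstandard package.
    ds = datasets.load_dataset(
        "kopi_nllb.py",
        name="kopi_nllb_ace_Latn-dedup_source",
        split="train",
    )
    print(ds[0]["text"], ds[0]["url"], ds[0]["score"])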