holylovenia committed on
Commit
5ee6042
1 Parent(s): 463c0fc

Upload indo_general_mt_en_id.py with huggingface_hub

Files changed (1)
indo_general_mt_en_id.py +168 -0
indo_general_mt_en_id.py ADDED
@@ -0,0 +1,168 @@
+from pathlib import Path
+from typing import List
+
+import datasets
+
+from nusacrowd.utils import schemas
+from nusacrowd.utils.configs import NusantaraConfig
+from nusacrowd.utils.constants import Tasks
+
+_CITATION = """\
+@inproceedings{guntara-etal-2020-benchmarking,
+    title = "Benchmarking Multidomain {E}nglish-{I}ndonesian Machine Translation",
+    author = "Guntara, Tri Wahyu and
+      Aji, Alham Fikri and
+      Prasojo, Radityo Eko",
+    booktitle = "Proceedings of the 13th Workshop on Building and Using Comparable Corpora",
+    month = may,
+    year = "2020",
+    address = "Marseille, France",
+    publisher = "European Language Resources Association",
+    url = "https://aclanthology.org/2020.bucc-1.6",
+    pages = "35--43",
+    language = "English",
+    ISBN = "979-10-95546-42-9",
+}
+"""
+
+_LOCAL = False
+_LANGUAGES = ["ind"]  # We follow the ISO 639-3 language codes (https://iso639-3.sil.org/code_tables/639/data)
+_DATASETNAME = "indo_general_mt_en_id"
+_DESCRIPTION = """\
+"In the context of Machine Translation (MT) from-and-to English, Bahasa Indonesia has been considered a low-resource language,
+and therefore applying Neural Machine Translation (NMT), which typically requires a large training dataset, proves to be problematic.
+In this paper, we show otherwise by collecting large, publicly-available datasets from the Web, which we split into several domains: news, religion, general, and
+conversation, to train and benchmark some variants of transformer-based NMT models across the domains.
+We show using BLEU that our models perform well across them, outperform the baseline Statistical Machine Translation (SMT) models,
+and perform comparably with Google Translate. Our datasets (with the standard split for training, validation, and testing), code, and models are available on https://github.com/gunnxx/indonesian-mt-data."
+"""
+
+_HOMEPAGE = "https://github.com/gunnxx/indonesian-mt-data"
+
+_LICENSE = "Creative Commons Attribution Share-Alike 4.0 International"
+
+_URLS = {
+    _DATASETNAME: "https://github.com/gunnxx/indonesian-mt-data/archive/refs/heads/master.zip",
+}
+
+_SUPPORTED_TASKS = [Tasks.MACHINE_TRANSLATION]
+# Dataset does not have versioning
+_SOURCE_VERSION = "1.0.0"
+_NUSANTARA_VERSION = "1.0.0"
+
+
+class IndoGeneralMTEnId(datasets.GeneratorBasedBuilder):
+    """Indonesian General Domain MT En-Id is a machine translation dataset containing English-Indonesian parallel sentences collected from general manuscripts."""
+
+    BUILDER_CONFIGS = [
+        NusantaraConfig(
+            name="indo_general_mt_en_id_source",
+            version=datasets.Version(_SOURCE_VERSION),
+            description="Indonesian General Domain MT En-Id source schema",
+            schema="source",
+            subset_id="indo_general_mt_en_id",
+        ),
+        NusantaraConfig(
+            name="indo_general_mt_en_id_nusantara_t2t",
+            version=datasets.Version(_NUSANTARA_VERSION),
+            description="Indonesian General Domain MT Nusantara schema",
+            schema="nusantara_t2t",
+            subset_id="indo_general_mt_en_id",
+        ),
+    ]
+
+    DEFAULT_CONFIG_NAME = "indo_general_mt_en_id_source"
+
+    def _info(self):
+        if self.config.schema == "source":
+            features = datasets.Features(
+                {
+                    "id": datasets.Value("string"),
+                    "src": datasets.Value("string"),
+                    "tgt": datasets.Value("string"),
+                }
+            )
+        elif self.config.schema == "nusantara_t2t":
+            features = schemas.text2text_features
+        else:
+            raise ValueError(f"Invalid schema: {self.config.schema}")
+
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            homepage=_HOMEPAGE,
+            license=_LICENSE,
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+        urls = _URLS[_DATASETNAME]
+        data_dir = Path(dl_manager.download_and_extract(urls)) / "indonesian-mt-data-master" / "general"
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    # The training split is sharded into four parallel files per language.
+                    "filepath": {
+                        "en": [data_dir / "train.en.0", data_dir / "train.en.1", data_dir / "train.en.2", data_dir / "train.en.3"],
+                        "id": [data_dir / "train.id.0", data_dir / "train.id.1", data_dir / "train.id.2", data_dir / "train.id.3"],
+                    }
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={
+                    "filepath": {
+                        "en": [data_dir / "test.en"],
+                        "id": [data_dir / "test.id"],
+                    }
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={
+                    "filepath": {
+                        "en": [data_dir / "valid.en"],
+                        "id": [data_dir / "valid.id"],
+                    }
+                },
+            ),
+        ]
+
+    def _generate_examples(self, filepath: dict):
+        # Concatenate all shards per language; lines are aligned across the two sides.
+        data_en = []
+        for file in filepath["en"]:
+            with open(file, "r", encoding="utf-8") as f:
+                data_en += f.readlines()
+
+        data_id = []
+        for file in filepath["id"]:
+            with open(file, "r", encoding="utf-8") as f:
+                data_id += f.readlines()
+
+        data_en = list(map(str.strip, data_en))
+        data_id = list(map(str.strip, data_id))
+
+        if self.config.schema == "source":
+            for idx, (src, tgt) in enumerate(zip(data_en, data_id)):
+                row = {
+                    "id": str(idx),
+                    "src": src,
+                    "tgt": tgt,
+                }
+                yield idx, row
+        elif self.config.schema == "nusantara_t2t":
+            for idx, (src, tgt) in enumerate(zip(data_en, data_id)):
+                row = {
+                    "id": str(idx),
+                    "text_1": src,
+                    "text_2": tgt,
+                    "text_1_name": "eng",
+                    "text_2_name": "ind",
+                }
+                yield idx, row
+        else:
+            raise ValueError(f"Invalid config: {self.config.name}")
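
For reference, a minimal sketch of how a loader script like this could be exercised with the Hugging Face datasets library. The local script path here is an assumption; point it at wherever the uploaded file lives, and pick either of the two config names defined in BUILDER_CONFIGS:

import datasets

# Hypothetical local path to the script uploaded in this commit; adjust as needed.
dset = datasets.load_dataset(
    "indo_general_mt_en_id.py",
    name="indo_general_mt_en_id_source",  # or "indo_general_mt_en_id_nusantara_t2t"
)

# Under the source schema, each example carries "id", "src" (English),
# and "tgt" (Indonesian); the t2t schema uses "text_1"/"text_2" instead.
print(dset["train"][0])

The nusantara_t2t config yields the same sentence pairs, only remapped onto the shared NusaCrowd text-to-text schema so downstream benchmark code can treat all MT subsets uniformly.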