holylovenia committed
Commit aa21961
Parent: 2a43b49

Upload parallel_asian_treebank.py with huggingface_hub

Files changed (1): parallel_asian_treebank.py (+178, -0)
parallel_asian_treebank.py ADDED
@@ -0,0 +1,178 @@
import itertools
from pathlib import Path
from typing import List, Tuple

import datasets
import pandas as pd

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_DATASETNAME = "parallel_asian_treebank"

_LANGUAGES = ["khm", "lao", "mya", "ind", "fil", "zlm", "tha", "vie"]
_LANGUAGES_TO_FILENAME_LANGUAGE_CODE = {
    "khm": "khm",
    "lao": "lo",
    "mya": "my",
    "ind": "id",
    "fil": "fil",
    "zlm": "ms",
    "tha": "th",
    "vie": "vi",
    "eng": "en",
    "hin": "hi",
    "jpn": "ja",
    "zho": "zh",
}
_LOCAL = False
_CITATION = """\
@inproceedings{riza2016introduction,
  title={Introduction of the asian language treebank},
  author={Riza, Hammam and Purwoadi, Michael and Uliniansyah, Teduh and Ti, Aw Ai and Aljunied, Sharifah Mahani and Mai, Luong Chi and Thang, Vu Tat and Thai, Nguyen Phuong and Chea, Vichet and Sam, Sethserey and others},
  booktitle={2016 Conference of The Oriental Chapter of International Committee for Coordination and Standardization of Speech Databases and Assessment Techniques (O-COCOSDA)},
  pages={1--6},
  year={2016},
  organization={IEEE}
}
"""

_DESCRIPTION = """\
The ALT project aims to advance the state-of-the-art Asian natural language processing (NLP) techniques through open collaboration for developing and using ALT.
It was first conducted by NICT and UCSY as described in Ye Kyaw Thu, Win Pa Pa, Masao Utiyama, Andrew Finch and Eiichiro Sumita (2016).
It was then further developed under ASEAN IVO.
The process of building ALT began by sampling about 20,000 sentences from English Wikinews, which were then translated into the other languages.
ALT now covers 13 languages: Bengali, English, Filipino, Hindi, Bahasa Indonesia, Japanese, Khmer, Lao, Malay, Myanmar (Burmese), Thai, Vietnamese, and Chinese (Simplified Chinese).
"""

_HOMEPAGE = "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/"

_LICENSE = Licenses.CC_BY_4_0.value

_URLS = {
    "data": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/ALT-Parallel-Corpus-20191206.zip",
    "train": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/URL-train.txt",
    "dev": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/URL-dev.txt",
    "test": "https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/URL-test.txt",
}

_SUPPORTED_TASKS = [Tasks.MACHINE_TRANSLATION]

_SOURCE_VERSION = "1.0.0"
_SEACROWD_VERSION = "2024.06.20"


class ParallelAsianTreebankDataset(datasets.GeneratorBasedBuilder):
    """The ALT project aims to advance the state-of-the-art Asian natural language processing (NLP) techniques through open collaboration for developing and using ALT."""

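    # Build one source config and one seacrowd_t2t config for every language pair
    # that includes at least one SEA language.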
    BUILDER_CONFIGS = []
    lang_combinations = list(itertools.combinations(_LANGUAGES_TO_FILENAME_LANGUAGE_CODE.keys(), 2))
    for lang_a, lang_b in lang_combinations:
        if lang_a not in _LANGUAGES and lang_b not in _LANGUAGES:
            # Don't create a subset if neither language is from SEA
            pass
        else:
            BUILDER_CONFIGS.append(
                SEACrowdConfig(
                    name=f"{_DATASETNAME}_{lang_a}_{lang_b}_source",
                    version=_SOURCE_VERSION,
                    description=f"{_DATASETNAME} source schema",
                    schema="source",
                    subset_id=f"{_DATASETNAME}_{lang_a}_{lang_b}_source",
                )
            )
            BUILDER_CONFIGS.append(
                SEACrowdConfig(
                    name=f"{_DATASETNAME}_{lang_a}_{lang_b}_seacrowd_t2t",
                    version=_SEACROWD_VERSION,
                    description=f"{_DATASETNAME} seacrowd schema",
                    schema="seacrowd_t2t",
                    subset_id=f"{_DATASETNAME}_{lang_a}_{lang_b}_seacrowd_t2t",
                )
            )

    def _info(self):
        # The features are the same for both source and seacrowd schemas
        features = schemas.text2text_features
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        def _split_at_n(text: str, n: int) -> Tuple[str, str]:
            """Split text at the n-th underscore."""
            return ("_".join(text.split("_")[:n]), "_".join(text.split("_")[n:]))

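        # subset_id has the form f"{_DATASETNAME}_{lang_a}_{lang_b}_{schema}",
        # e.g. "parallel_asian_treebank_khm_lao_source", so strip the dataset name
        # first and then take the language pair.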
        _, subset = _split_at_n(self.config.subset_id, 3)
        lang_pair, _ = _split_at_n(subset, 2)
        lang_a, lang_b = lang_pair.split("_")

        data_dir = Path(dl_manager.download_and_extract(_URLS["data"])) / "ALT-Parallel-Corpus-20191206"

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_dir": data_dir, "lang_a": lang_a, "lang_b": lang_b, "split_file": dl_manager.download(_URLS["train"])},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"data_dir": data_dir, "lang_a": lang_a, "lang_b": lang_b, "split_file": dl_manager.download(_URLS["test"])},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"data_dir": data_dir, "lang_a": lang_a, "lang_b": lang_b, "split_file": dl_manager.download(_URLS["dev"])},
            ),
        ]

    def _generate_examples(self, data_dir: Path, lang_a: str, lang_b: str, split_file: str):
        def _get_texts(lang: str) -> pd.DataFrame:
            with open(data_dir / f"data_{_LANGUAGES_TO_FILENAME_LANGUAGE_CODE[lang]}.txt", "r") as f:
                rows = [line.strip().split("\t") for line in f.readlines()]

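            # The first column is a dot-separated ID; field 1 identifies the source
            # article (URL) and the last field the sentence index within it.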
            url_id = [row[0].split(".")[1] for row in rows]
            sent_id = [row[0].split(".")[-1] for row in rows]
            text = []
            for row in rows:
                # There are rows with an empty text, but they are still tagged with an ID,
                # so we keep them and just use an empty string.
                sent = row[1] if len(row) > 1 else ""
                text.append(sent)

            df = pd.DataFrame({"url_id": url_id, "sent_id": sent_id, "text": text})
            return df

        with open(split_file, "r") as f:
            url_texts = [line.strip() for line in f.readlines()]
        # Get the valid URL ids for the current split
        urlids_for_current_split = [row.split("\t")[0].split(".")[1] for row in url_texts]

        lang_a_df = _get_texts(lang_a)
        lang_b_df = _get_texts(lang_b)

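        # All sentences that share a URL id are concatenated, so each example is an
        # article-level pair rather than a sentence-level pair.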
        for idx, urlid in enumerate(urlids_for_current_split):
            lang_a_df_split = lang_a_df[lang_a_df["url_id"] == urlid]
            lang_b_df_split = lang_b_df[lang_b_df["url_id"] == urlid]

            if len(lang_a_df_split) == 0 or len(lang_b_df_split) == 0:
                # Sometimes, not all languages have values for a specific ID
                pass
            else:
                text_a = " ".join(lang_a_df_split["text"].to_list())
                text_b = " ".join(lang_b_df_split["text"].to_list())

                # Same schema for both source and SEACrowd
                yield idx, {
                    "id": idx,
                    "text_1": text_a,
                    "text_2": text_b,
                    "text_1_name": lang_a,
                    "text_2_name": lang_b,
                }
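
For reference, each generated subset should be loadable by its config name, which follows the f"{_DATASETNAME}_{lang_a}_{lang_b}_{schema}" pattern built in BUILDER_CONFIGS above. A minimal sketch, assuming a local copy of this script, the seacrowd helper package installed, and a datasets version that still supports loading dataset scripts:

import datasets

# Hypothetical example: the Khmer-Lao pair in the source schema.
alt = datasets.load_dataset("parallel_asian_treebank.py", name="parallel_asian_treebank_khm_lao_source")
print(alt["train"][0])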