File size: 13,154 Bytes
e0f2365
 
 
 
 
8115cd6
e0f2365
 
 
 
 
 
 
 
 
 
 
 
 
8115cd6
c45e282
e0f2365
 
 
 
c45e282
 
e0f2365
 
 
ecb5a0a
 
e0f2365
 
d35b8a7
4a785b4
92e2550
 
 
 
 
 
 
 
1a16c8a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
03d8b46
1a16c8a
 
03d8b46
1a16c8a
cb8b66a
03d8b46
cb8b66a
5be2bf6
03d8b46
1a16c8a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
03d8b46
1a16c8a
 
03d8b46
1a16c8a
cb8b66a
03d8b46
cb8b66a
d8b7fa3
03d8b46
92e2550
1a16c8a
 
 
 
 
 
 
92e2550
 
 
1a16c8a
 
 
4faee69
1a16c8a
 
4faee69
1a16c8a
cb8b66a
4faee69
cb8b66a
4faee69
 
92e2550
1a16c8a
 
 
 
 
 
 
92e2550
1a16c8a
 
 
6bdb191
1a16c8a
 
6bdb191
1a16c8a
cb8b66a
6bdb191
cb8b66a
6bdb191
 
1a16c8a
 
 
 
 
 
 
 
0fdd47d
1a16c8a
0fdd47d
 
1a16c8a
 
 
 
 
 
 
 
2a43003
1a16c8a
2a43003
 
92e2550
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5be2bf6
 
 
 
 
 
 
 
d8b7fa3
 
 
 
 
 
 
 
4faee69
 
 
 
 
 
 
 
6bdb191
 
 
 
 
 
 
 
0fdd47d
 
 
 
2a43003
 
 
 
92e2550
 
 
 
 
 
 
 
 
e0f2365
 
 
 
 
 
 
d35b8a7
e0f2365
 
 
d35b8a7
 
e0f2365
 
 
 
 
 
 
 
 
 
 
 
 
677d293
e0f2365
 
 
 
 
c9a1930
e0f2365
 
 
 
 
 
 
 
 
 
 
 
 
 
8115cd6
e0f2365
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
677d293
 
 
1a16c8a
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
---
annotations_creators:
- expert-generated
language_creators:
- crowdsourced
language:
- bn
- en
- fil
- hi
- id
- ja
- km
- lo
- ms
- my
- th
- vi
- zh
license:
- cc-by-4.0
multilinguality:
- multilingual
- translation
size_categories:
- 100K<n<1M
- 10K<n<100K
source_datasets:
- original
task_categories:
- translation
- token-classification
task_ids:
- parsing
paperswithcode_id: alt
pretty_name: Asian Language Treebank
config_names:
- alt-en
- alt-jp
- alt-km
- alt-my
- alt-my-transliteration
- alt-my-west-transliteration
- alt-parallel
dataset_info:
- config_name: alt-en
  features:
  - name: SNT.URLID
    dtype: string
  - name: SNT.URLID.SNTID
    dtype: string
  - name: url
    dtype: string
  - name: status
    dtype: string
  - name: value
    dtype: string
  splits:
  - name: train
    num_bytes: 10075569
    num_examples: 17889
  - name: validation
    num_bytes: 544719
    num_examples: 988
  - name: test
    num_bytes: 567272
    num_examples: 1017
  download_size: 3781814
  dataset_size: 11187560
- config_name: alt-jp
  features:
  - name: SNT.URLID
    dtype: string
  - name: SNT.URLID.SNTID
    dtype: string
  - name: url
    dtype: string
  - name: status
    dtype: string
  - name: value
    dtype: string
  - name: word_alignment
    dtype: string
  - name: jp_tokenized
    dtype: string
  - name: en_tokenized
    dtype: string
  splits:
  - name: train
    num_bytes: 21888277
    num_examples: 17202
  - name: validation
    num_bytes: 1181555
    num_examples: 953
  - name: test
    num_bytes: 1175592
    num_examples: 931
  download_size: 10355366
  dataset_size: 24245424
- config_name: alt-km
  features:
  - name: SNT.URLID
    dtype: string
  - name: SNT.URLID.SNTID
    dtype: string
  - name: url
    dtype: string
  - name: km_pos_tag
    dtype: string
  - name: km_tokenized
    dtype: string
  splits:
  - name: train
    num_bytes: 12015371
    num_examples: 18088
  - name: validation
    num_bytes: 655212
    num_examples: 1000
  - name: test
    num_bytes: 673733
    num_examples: 1018
  download_size: 4344096
  dataset_size: 13344316
- config_name: alt-my
  features:
  - name: SNT.URLID
    dtype: string
  - name: SNT.URLID.SNTID
    dtype: string
  - name: url
    dtype: string
  - name: value
    dtype: string
  splits:
  - name: train
    num_bytes: 20433243
    num_examples: 18088
  - name: validation
    num_bytes: 1111394
    num_examples: 1000
  - name: test
    num_bytes: 1135193
    num_examples: 1018
  download_size: 6569025
  dataset_size: 22679830
- config_name: alt-my-transliteration
  features:
  - name: en
    dtype: string
  - name: my
    sequence: string
  splits:
  - name: train
    num_bytes: 4249316
    num_examples: 84022
  download_size: 2163951
  dataset_size: 4249316
- config_name: alt-my-west-transliteration
  features:
  - name: en
    dtype: string
  - name: my
    sequence: string
  splits:
  - name: train
    num_bytes: 7411911
    num_examples: 107121
  download_size: 2857511
  dataset_size: 7411911
- config_name: alt-parallel
  features:
  - name: SNT.URLID
    dtype: string
  - name: SNT.URLID.SNTID
    dtype: string
  - name: url
    dtype: string
  - name: translation
    dtype:
      translation:
        languages:
        - bg
        - en
        - en_tok
        - fil
        - hi
        - id
        - ja
        - khm
        - lo
        - ms
        - my
        - th
        - vi
        - zh
  splits:
  - name: train
    num_bytes: 68445916
    num_examples: 18088
  - name: validation
    num_bytes: 3710979
    num_examples: 1000
  - name: test
    num_bytes: 3814431
    num_examples: 1019
  download_size: 34707907
  dataset_size: 75971326
configs:
- config_name: alt-en
  data_files:
  - split: train
    path: alt-en/train-*
  - split: validation
    path: alt-en/validation-*
  - split: test
    path: alt-en/test-*
- config_name: alt-jp
  data_files:
  - split: train
    path: alt-jp/train-*
  - split: validation
    path: alt-jp/validation-*
  - split: test
    path: alt-jp/test-*
- config_name: alt-km
  data_files:
  - split: train
    path: alt-km/train-*
  - split: validation
    path: alt-km/validation-*
  - split: test
    path: alt-km/test-*
- config_name: alt-my
  data_files:
  - split: train
    path: alt-my/train-*
  - split: validation
    path: alt-my/validation-*
  - split: test
    path: alt-my/test-*
- config_name: alt-my-transliteration
  data_files:
  - split: train
    path: alt-my-transliteration/train-*
- config_name: alt-my-west-transliteration
  data_files:
  - split: train
    path: alt-my-west-transliteration/train-*
- config_name: alt-parallel
  data_files:
  - split: train
    path: alt-parallel/train-*
  - split: validation
    path: alt-parallel/validation-*
  - split: test
    path: alt-parallel/test-*
  default: true
---

# Dataset Card for Asian Language Treebank (ALT)

## Table of Contents
- [Dataset Description](#dataset-description)
  - [Dataset Summary](#dataset-summary)
  - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
  - [Languages](#languages)
- [Dataset Structure](#dataset-structure)
  - [Data Instances](#data-instances)
  - [Data Fields](#data-fields)
  - [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
  - [Curation Rationale](#curation-rationale)
  - [Source Data](#source-data)
  - [Annotations](#annotations)
  - [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
  - [Social Impact of Dataset](#social-impact-of-dataset)
  - [Discussion of Biases](#discussion-of-biases)
  - [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
  - [Dataset Curators](#dataset-curators)
  - [Licensing Information](#licensing-information)
  - [Citation Information](#citation-information)
  - [Contributions](#contributions)

## Dataset Description

- **Homepage:** https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/
- **Leaderboard:** 
- **Paper:** [Introduction of the Asian Language Treebank](https://ieeexplore.ieee.org/abstract/document/7918974)
- **Point of Contact:** [ALT info](mailto:alt-info@khn.nict.go.jp)

### Dataset Summary
The ALT project aims to advance the state-of-the-art Asian natural language processing (NLP) techniques through the open collaboration for developing and using ALT. It was first conducted by NICT and UCSY as described in Ye Kyaw Thu, Win Pa Pa, Masao Utiyama, Andrew Finch and Eiichiro Sumita (2016). Then, it was developed under [ASEAN IVO](https://www.nict.go.jp/en/asean_ivo/index.html) as described in this Web page. 

The process of building ALT began with sampling about 20,000 sentences from English Wikinews, and then these sentences were translated into the other languages.

### Supported Tasks and Leaderboards

Machine Translation, Dependency Parsing


### Languages

It supports 13 languages: 
  * Bengali
  * English
  * Filipino
  * Hindi
  * Bahasa Indonesia
  * Japanese
  * Khmer
  * Lao
  * Malay
  * Myanmar (Burmese)
  * Thai
  * Vietnamese
  * Chinese (Simplified Chinese).

## Dataset Structure

### Data Instances

#### ALT Parallel Corpus 
```
{
    "SNT.URLID": "80188",
    "SNT.URLID.SNTID": "1",
    "url": "http://en.wikinews.org/wiki/2007_Rugby_World_Cup:_Italy_31_-_5_Portugal",
    "bg": "[translated sentence]",
    "en": "[translated sentence]",
    "en_tok": "[translated sentence]",
    "fil": "[translated sentence]",
    "hi": "[translated sentence]",
    "id": "[translated sentence]",
    "ja": "[translated sentence]",
    "khm": "[translated sentence]",
    "lo": "[translated sentence]",
    "ms": "[translated sentence]",
    "my": "[translated sentence]",
    "th": "[translated sentence]",
    "vi": "[translated sentence]",
    "zh": "[translated sentence]"
}
```

#### ALT Treebank 
```
{
    "SNT.URLID": "80188",
    "SNT.URLID.SNTID": "1",
    "url": "http://en.wikinews.org/wiki/2007_Rugby_World_Cup:_Italy_31_-_5_Portugal",
    "status": "draft/reviewed",
    "value": "(S (S (BASENP (NNP Italy)) (VP (VBP have) (VP (VP (VP (VBN defeated) (BASENP (NNP Portugal))) (ADVP (RB 31-5))) (PP (IN in) (NP (BASENP (NNP Pool) (NNP C)) (PP (IN of) (NP (BASENP (DT the) (NN 2007) (NNP Rugby) (NNP World) (NNP Cup)) (PP (IN at) (NP (BASENP (NNP Parc) (FW des) (NNP Princes)) (COMMA ,) (BASENP (NNP Paris) (COMMA ,) (NNP France))))))))))) (PERIOD .))"
}
```

#### ALT Myanmar transliteration
```
{
    "en": "CASINO",
    "my": [
      "ကက်စီနို",
      "ကစီနို",
      "ကာစီနို",
      "ကာဆီနို"
    ]
}
```

### Data Fields


#### ALT Parallel Corpus 
- SNT.URLID: URL link to the source article listed in [URL.txt](https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/ALT-Parallel-Corpus-20191206/URL.txt)
- SNT.URLID.SNTID: index number from 1 to 20000. It is a selected sentence from `SNT.URLID`

and bg, en, fil, hi, id, ja, khm, lo, ms, my, th, vi, zh correspond to the target language

#### ALT Treebank
- status: it indicates how a sentence is annotated; `draft` sentences are annotated by one annotator and `reviewed` sentences are annotated by two annotators 

The annotation differs from language to language; please see [their guidelines](https://www2.nict.go.jp/astrec-att/member/mutiyama/ALT/) for more detail.

### Data Splits

|           | train | valid | test  |
|-----------|-------|-------|-------|
| # articles | 1698 |  98   |  97   |
| # sentences | 18088 | 1000  | 1018  |


## Dataset Creation

### Curation Rationale

The ALT project was initiated by the [National Institute of Information and Communications Technology, Japan](https://www.nict.go.jp/en/) (NICT) in 2014. NICT started to build Japanese and English ALT and worked with the University of Computer Studies, Yangon, Myanmar (UCSY) to build Myanmar ALT in 2014. Then, the Badan Pengkajian dan Penerapan Teknologi, Indonesia (BPPT), the Institute for Infocomm Research, Singapore (I2R), the Institute of Information Technology, Vietnam (IOIT), and the National Institute of Posts, Telecoms and ICT, Cambodia (NIPTICT) joined to make ALT for Indonesian, Malay, Vietnamese, and Khmer in 2015. 


### Source Data

#### Initial Data Collection and Normalization

[More Information Needed]

#### Who are the source language producers?

The dataset was sampled from English Wikinews in 2014. The sentences were annotated with word segmentation, POS tags, and syntax information, in addition to word alignment information, by linguistic experts from
* National Institute of Information and Communications Technology, Japan (NICT) for Japanese and English
* University of Computer Studies, Yangon, Myanmar (UCSY) for Myanmar
* the Badan Pengkajian dan Penerapan Teknologi, Indonesia (BPPT) for Indonesian
* the Institute for Infocomm Research, Singapore (I2R) for Malay
* the Institute of Information Technology, Vietnam (IOIT) for Vietnamese
* the National Institute of Posts, Telecoms and ICT, Cambodia for Khmer

### Annotations

#### Annotation process

[More Information Needed]

#### Who are the annotators?

[More Information Needed]

### Personal and Sensitive Information

[More Information Needed]

## Considerations for Using the Data

### Social Impact of Dataset

[More Information Needed]

### Discussion of Biases

[More Information Needed]

### Other Known Limitations

[More Information Needed]


## Additional Information

### Dataset Curators

* National Institute of Information and Communications Technology, Japan (NICT) for Japanese and English
* University of Computer Studies, Yangon, Myanmar (UCSY) for Myanmar
* the Badan Pengkajian dan Penerapan Teknologi, Indonesia (BPPT) for Indonesian
* the Institute for Infocomm Research, Singapore (I2R) for Malay
* the Institute of Information Technology, Vietnam (IOIT) for Vietnamese
* the National Institute of Posts, Telecoms and ICT, Cambodia for Khmer

### Licensing Information

[Creative Commons Attribution 4.0 International (CC BY 4.0)](https://creativecommons.org/licenses/by/4.0/)

### Citation Information

Please cite the following if you make use of the dataset:

Hammam Riza, Michael Purwoadi, Gunarso, Teduh Uliniansyah, Aw Ai Ti, Sharifah Mahani Aljunied, Luong Chi Mai, Vu Tat Thang, Nguyen Phuong Thai, Vichet Chea, Rapid Sun, Sethserey Sam, Sopheap Seng, Khin Mar Soe, Khin Thandar Nwet, Masao Utiyama, Chenchen Ding. (2016) "Introduction of the Asian Language Treebank" Oriental COCOSDA.

BibTeX:
```
@inproceedings{riza2016introduction,
  title={Introduction of the asian language treebank},
  author={Riza, Hammam and Purwoadi, Michael and Uliniansyah, Teduh and Ti, Aw Ai and Aljunied, Sharifah Mahani and Mai, Luong Chi and Thang, Vu Tat and Thai, Nguyen Phuong and Chea, Vichet and Sam, Sethserey and others},
  booktitle={2016 Conference of The Oriental Chapter of International Committee for Coordination and Standardization of Speech Databases and Assessment Techniques (O-COCOSDA)},
  pages={1--6},
  year={2016},
  organization={IEEE}
}
```

### Contributions

Thanks to [@chameleonTK](https://github.com/chameleonTK) for adding this dataset.