parquet-converter committed
Commit 25cc091
Parent: da458d1

Update parquet files

Files changed (40):
  1. .gitattributes +0 -54
  2. data/v3/wikititles-v3.ca-ro.tsv.gz → bn-hi/wikititles-train.parquet +2 -2
  3. ca-es/wikititles-train.parquet +3 -0
  4. ca-pt/wikititles-train.parquet +3 -0
  5. data/v3/wikititles-v3.ca-es.tsv.gz → ca-ro/wikititles-train.parquet +2 -2
  6. cs-en/wikititles-train.parquet +3 -0
  7. data/v3/wikititles-v3.cs-en.tsv.gz +0 -3
  8. data/v3/wikititles-v3.de-en.tsv.gz +0 -3
  9. data/v3/wikititles-v3.de-fr.tsv.gz +0 -3
  10. data/v3/wikititles-v3.es-pt.tsv.gz +0 -3
  11. data/v3/wikititles-v3.es-ro.tsv.gz +0 -3
  12. data/v3/wikititles-v3.is-en.tsv.gz +0 -3
  13. data/v3/wikititles-v3.ja-en.tsv.gz +0 -3
  14. data/v3/wikititles-v3.ps-en.tsv.gz +0 -3
  15. data/v3/wikititles-v3.pt-ro.tsv.gz +0 -3
  16. data/v3/wikititles-v3.ru-en.tsv.gz +0 -3
  17. data/v3/wikititles-v3.zh-en.tsv.gz +0 -3
  18. de-en/wikititles-train.parquet +3 -0
  19. de-fr/wikititles-train.parquet +3 -0
  20. es-pt/wikititles-train.parquet +3 -0
  21. data/v3/wikititles-v3.ca-pt.tsv.gz → es-ro/wikititles-train.parquet +2 -2
  22. data/v3/wikititles-v3.bn-hi.tsv.gz → ha-en/wikititles-train.parquet +2 -2
  23. data/v3/wikititles-v3.ha-en.tsv.gz → ig-en/wikititles-train.parquet +2 -2
  24. is-en/wikititles-train.parquet +3 -0
  25. ja-en/wikititles-train.parquet +3 -0
  26. data/v3/wikititles-v3.ig-en.tsv.gz → ps-en/wikititles-train.parquet +2 -2
  27. pt-ro/wikititles-train.parquet +3 -0
  28. ru-en/wikititles-train.parquet +3 -0
  29. v1/wikititles-v1.cs-en.tsv.gz +0 -3
  30. v1/wikititles-v1.de-en.tsv.gz +0 -3
  31. v1/wikititles-v1.fi-en.tsv.gz +0 -3
  32. v1/wikititles-v1.gu-en.tsv.gz +0 -3
  33. v1/wikititles-v1.kk-en.tsv.gz +0 -3
  34. v1/wikititles-v1.lt-en.tsv.gz +0 -3
  35. v1/wikititles-v1.ru-en.tsv.gz +0 -3
  36. v1/wikititles-v1.zh-en.tsv.gz +0 -3
  37. wikititles.py +0 -41
  38. wmt_utils.py +0 -1050
  39. data/v3/wikititles-v3.xh-zu.tsv.gz → xh-zu/wikititles-train.parquet +2 -2
  40. zh-en/wikititles-train.parquet +3 -0
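
The practical effect of this conversion is that each language pair now ships as a self-contained Parquet file instead of a gzipped TSV read through a loading script. As a minimal sketch (not part of this commit), one of the new files can be pulled and inspected directly; the `wmt/wikititles` repo id is inferred from the URLs in the deleted `wmt_utils.py` below, and `huggingface_hub`, `pandas`, and `pyarrow` are assumed to be installed:

```python
# Sketch only: download one of the per-pair Parquet files added in this
# commit and inspect it with pandas. The repo id is an assumption based on
# the URLs referenced in the deleted wmt_utils.py.
from huggingface_hub import hf_hub_download
import pandas as pd

local_path = hf_hub_download(
    repo_id="wmt/wikititles",                   # assumed dataset repo id
    repo_type="dataset",
    filename="cs-en/wikititles-train.parquet",  # file 6 in the list above
)

df = pd.read_parquet(local_path)
print(df.shape)
print(df.head())
```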
.gitattributes DELETED
@@ -1,54 +0,0 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.lz4 filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
- # Audio files - uncompressed
- *.pcm filter=lfs diff=lfs merge=lfs -text
- *.sam filter=lfs diff=lfs merge=lfs -text
- *.raw filter=lfs diff=lfs merge=lfs -text
- # Audio files - compressed
- *.aac filter=lfs diff=lfs merge=lfs -text
- *.flac filter=lfs diff=lfs merge=lfs -text
- *.mp3 filter=lfs diff=lfs merge=lfs -text
- *.ogg filter=lfs diff=lfs merge=lfs -text
- *.wav filter=lfs diff=lfs merge=lfs -text
- # Image files - uncompressed
- *.bmp filter=lfs diff=lfs merge=lfs -text
- *.gif filter=lfs diff=lfs merge=lfs -text
- *.png filter=lfs diff=lfs merge=lfs -text
- *.tiff filter=lfs diff=lfs merge=lfs -text
- # Image files - compressed
- *.jpg filter=lfs diff=lfs merge=lfs -text
- *.jpeg filter=lfs diff=lfs merge=lfs -text
- *.webp filter=lfs diff=lfs merge=lfs -text
data/v3/wikititles-v3.ca-ro.tsv.gz → bn-hi/wikititles-train.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bd1477bb43aac6868547969224aef6bf54e4ba41dc2409d3f963c42f8f71b4d8
- size 1915137
+ oid sha256:23265448163ca3b4ef4180895988c56840ae588ba6c0ccb17653ed05cef0984d
+ size 1646325
ca-es/wikititles-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e0950cc6f90187d05472e29546b6285376ebde11a2fb5d03d1ca55f9edb7f8e0
+ size 15327132
ca-pt/wikititles-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6684d4a7903a914e2b1fb42eef2224ab67eac48de72120477899561a75036e8f
+ size 10032833
data/v3/wikititles-v3.ca-es.tsv.gz → ca-ro/wikititles-train.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:40a6cf43060719716e09a0e8e261e563e2ed145d6fd590c6f7767b130acb2cc5
- size 6152304
+ oid sha256:208ca25f4620bb1328893f3b6309ab42a418abbdeb9dd577b8b624796b00701d
+ size 4582728
cs-en/wikititles-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2f4492a51c7c754f5c77ded9bc67a525f4e58922911988c76d3059677fe820f3
+ size 13416935
data/v3/wikititles-v3.cs-en.tsv.gz DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:ed9360f8d2bf6f4688f442b46485b2d88b36ee04e81386d2a79ce256840ed594
- size 5848486
data/v3/wikititles-v3.de-en.tsv.gz DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:09d7d283895b761b03ce1f377eddc5d3ea67cd0d0501bc88700721329afda7bf
- size 20300948
data/v3/wikititles-v3.de-fr.tsv.gz DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:fc245a1edd2553ac7901192fd6022349ca9fc4b487a037dff925c5ba8eb4a153
- size 13983690
data/v3/wikititles-v3.es-pt.tsv.gz DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:3fedb01247ee7ecb3dff7ec325dfc6fe6719bf6334a21c6c13a66dbf50286912
- size 8907995
data/v3/wikititles-v3.es-ro.tsv.gz DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:a028f0a497078b52c7e67585600c2616b71371096bc8af6913f4bacc412cd59a
- size 3171339
data/v3/wikititles-v3.is-en.tsv.gz DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:1e3f49c17875f39a84932f2e9e96102600e39d26ac0862732e6a4ac8b94e7147
- size 598944
data/v3/wikititles-v3.ja-en.tsv.gz DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:aa3e2b5b276841944235d4ec3416afddfeacb14adfb8f3b8c7cc21f96ebe73ba
- size 14114462
data/v3/wikititles-v3.ps-en.tsv.gz DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:485832d37845223e4745e44d3a03dc7675c9194d6f104bce13c3039bad8f44f1
- size 144996
data/v3/wikititles-v3.pt-ro.tsv.gz DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:35c1dea6a6776ee1a40d1d654f7faead80b2db6cb0986361701e77da888977ff
- size 2997499
data/v3/wikititles-v3.ru-en.tsv.gz DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:a0152f0bd1dde65de1293de311c85c976ab96b36b8bc3229f7cfbc97f7ce958c
- size 23529774
data/v3/wikititles-v3.zh-en.tsv.gz DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:c8302d9f5e893e8bb8537219e6b56b4b307b203b74884ac86eb298316362c0bf
- size 15712736
de-en/wikititles-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:05017c4aef34002384459830952f4bab60981206490d2140bf2535d2188d31d1
+ size 49506665
de-fr/wikititles-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d7b4b8802ce60e8ced1f914551c120f87054746a9836e40dc85f3b9fd9aabf7a
+ size 33535887
es-pt/wikititles-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:977f573057887a422d1f0b28f54ea454de31d241a95cbd52361c4a442fb29794
+ size 22409694
data/v3/wikititles-v3.ca-pt.tsv.gz → es-ro/wikititles-train.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:091a2cafe611f660d5351cb9c34a57bd02f82f5a837118152d40a291d2ae4416
- size 4077476
+ oid sha256:5383264e3c5609e6996cfdf57ccea8e9b9d441e9c80e93261d4060b2f47e861f
+ size 7689927
data/v3/wikititles-v3.bn-hi.tsv.gz → ha-en/wikititles-train.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:765c0a936f876201985c2ed7829fe53e4d2f85c0ba721939320638db970ffcb7
- size 819305
+ oid sha256:8fd58279dbda17222c98027d8ff071eb613d75d8222a620ab868b44339de7607
+ size 195069
data/v3/wikititles-v3.ha-en.tsv.gz → ig-en/wikititles-train.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f2fc225aac5ab1c2435167e8523e74811f9d0d3776d4306d656d7be33a492230
- size 74546
+ oid sha256:c859686756e02135fe68f82053b32db946d7b051fa3d4acaa1b8b585d15f0c0f
+ size 50514
is-en/wikititles-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fc121f98872583f97508ccc51b4b9ec983ca42fbe51931365577ba39be2c7555
+ size 1379447
ja-en/wikititles-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c6659b29269d3b03accbfd6fe7fe72b77901cff6b6902faab0868254f3895d9b
+ size 25843970
data/v3/wikititles-v3.ig-en.tsv.gz → ps-en/wikititles-train.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6f0a1bcc13e07ec9dd448d0a8f7f43155cabe6ae5d6e592697ace8441aed2357
- size 19280
+ oid sha256:78a9996321fda0191d6f74c6249d147e80e0195821bea0b1bc33128fc35c551e
+ size 286365
pt-ro/wikititles-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5361ff6a09ede16ab2db981194fe83f54049f26fafd277d82bd04f8addfa537c
+ size 7335528
ru-en/wikititles-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8d2b05758ee8f5e8c7087076db5cc686563c02f15ee38cdb0e6298262781bc02
+ size 45006042
v1/wikititles-v1.cs-en.tsv.gz DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:54c9899b3cf897aaa520645436843d57e36ba9cce22f2c544a63a62493e18002
- size 5112423
v1/wikititles-v1.de-en.tsv.gz DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:24b0222ee87b6b8b3142c92f1b6f3c49374503fb066c46f190f00bf717a67102
- size 17919359
v1/wikititles-v1.fi-en.tsv.gz DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:01906ddfc88b20039939e69652c5ba9b335c4671be5131c00d76ec0420760fad
- size 5101486
v1/wikititles-v1.gu-en.tsv.gz DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:d07da4ff7e648e1e1bc3ac1303cd7c51df15704a13b8e6ce7ed46e938ef4ae8d
- size 177183
v1/wikititles-v1.kk-en.tsv.gz DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:6117eb91ca5538298c15fb366fb7f15e5294c12ef7993f249f307f93c52ae504
- size 1575037
v1/wikititles-v1.lt-en.tsv.gz DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:4ee533538e441811cb98caf08ad749e8d407905952d3b880e2eb1e795f1f28fc
- size 1725255
v1/wikititles-v1.ru-en.tsv.gz DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:5da4f340b3ac684dca016bb5fc0730b00fc9808f0a4891a6d745aaa686a600e4
- size 20299017
v1/wikititles-v1.zh-en.tsv.gz DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:78a8d371c0c1de14786bcbaaa08049d7f77ed58ecef3bc95a305ad17944f1fcb
- size 12974754
wikititles.py DELETED
@@ -1,41 +0,0 @@
- import datasets
-
- from .wmt_utils import Wmt, WmtConfig
-
-
- _URL = "http://www.statmt.org/wmt22/translation-task.html"
- # TODO: Update with citation of overview paper once it is published.
- _CITATION = """
- @ONLINE {wmt22translate,
-     author = {Wikimedia Foundation},
-     title = {EMNLP 2022 Seventh Conference on Machine Translation (WMT22), Shared Task: General Machine Translation},
-     url = {http://www.statmt.org/wmt22/translation-task.html}
- }
- """
-
-
- _LANGUAGE_PAIRS = (
-     {(src, "en") for src in ["cs", "de", "ha", "ig", "is", "ja", "ps", "ru", "zh"]}
-     | {("ca", "es"), ("de", "fr"), ("bn", "hi")}
-     | {(src, "pt") for src in ["ca", "es"]}
-     | {(src, "ro") for src in ["ca", "es", "pt"]}
-     | {("xh", "zu")}
- )
-
-
- class Wikititles(Wmt):
-
-     BUILDER_CONFIGS = [
-         WmtConfig(
-             description="Wikititles v3 {0}-{1} translation dataset".format(*language_pair),
-             url=_URL,
-             citation=_CITATION,
-             language_pair=language_pair,
-             version=datasets.Version("3.0.0"),
-         )
-         for language_pair in _LANGUAGE_PAIRS
-     ]
-
-     @property
-     def _subsets(self):
-         return {datasets.Split.TRAIN: ["wikititles_v3"]}
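
For reference, the script deleted above was the entry point of the old script-based loading path that this commit replaces with plain Parquet files. A minimal sketch of how it was typically invoked before the conversion; the config name follows the `src-tgt` pattern built by `WmtConfig`, and `trust_remote_code=True` is an assumption about how recent `datasets` versions gate loading scripts:

```python
# Sketch of the pre-conversion, script-based usage removed by this commit.
# Shown for reference only.
from datasets import load_dataset

ds = load_dataset(
    "wmt/wikititles",        # assumed dataset repo id
    "cs-en",                 # config name from WmtConfig ("src-tgt")
    split="train",
    trust_remote_code=True,  # assumption: needed for loading scripts
)
print(ds[0]["translation"])  # {"cs": ..., "en": ...}
```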
wmt_utils.py DELETED
@@ -1,1050 +0,0 @@
1
- """WMT: Translate dataset."""
2
-
3
-
4
- import codecs
5
- import functools
6
- import glob
7
- import gzip
8
- import itertools
9
- import os
10
- import re
11
- import xml.etree.cElementTree as ElementTree
12
-
13
- import datasets
14
-
15
-
16
- logger = datasets.logging.get_logger(__name__)
17
-
18
-
19
- _DESCRIPTION = """\
20
- Translation dataset based on the data from statmt.org.
21
-
22
- Versions exist for different years using a combination of data
23
- sources. The base `wmt` allows you to create a custom dataset by choosing
24
- your own data/language pair. This can be done as follows:
25
-
26
- ```python
27
- from datasets import inspect_dataset, load_dataset_builder
28
-
29
- inspect_dataset("wmt19", "path/to/scripts")
30
- builder = load_dataset_builder(
31
- "path/to/scripts/wmt_utils.py",
32
- language_pair=("fr", "de"),
33
- subsets={
34
- datasets.Split.TRAIN: ["commoncrawl_frde"],
35
- datasets.Split.VALIDATION: ["euelections_dev2019"],
36
- },
37
- )
38
-
39
- # Standard version
40
- builder.download_and_prepare()
41
- ds = builder.as_dataset()
42
-
43
- # Streamable version
44
- ds = builder.as_streaming_dataset()
45
- ```
46
-
47
- """
48
-
49
-
50
- CWMT_SUBSET_NAMES = ["casia2015", "casict2011", "casict2015", "datum2015", "datum2017", "neu2017"]
51
-
52
-
53
- class SubDataset:
54
- """Class to keep track of information on a sub-dataset of WMT."""
55
-
56
- def __init__(self, name, url, path="", target=None, sources=None, language_pairs=None, manual_dl_files=None):
57
- """Sub-dataset of WMT.
58
-
59
- Args:
60
- name: `string`, a unique dataset identifier.
61
- target: `string`, the target language code.
62
- sources: `set<string>`, the set of source language codes.
63
- url: `string` or `(string, string)`, URL(s) or URL template(s) specifying
64
- where to download the raw data from. If two strings are provided, the
65
- first is used for the source language and the second for the target.
66
- Template strings can either contain '{src}' placeholders that will be
67
- filled in with the source language code, '{0}' and '{1}' placeholders
68
- that will be filled in with the source and target language codes in
69
- alphabetical order, or all 3.
70
- path: `string` or `(string, string)`, path(s) or path template(s)
71
- specifing the path to the raw data relative to the root of the
72
- downloaded archive. If two strings are provided, the dataset is assumed
73
- to be made up of parallel text files, the first being the source and the
74
- second the target. If one string is provided, both languages are assumed
75
- to be stored within the same file and the extension is used to determine
76
- how to parse it. Template strings should be formatted the same as in
77
- `url`.
78
- manual_dl_files: `<list>(string)` (optional), the list of files that must
79
- be manually downloaded to the data directory.
80
- """
81
- self._paths = (path,) if isinstance(path, str) else path
82
- self._urls = (url,) if isinstance(url, str) else url
83
- self._manual_dl_files = manual_dl_files if manual_dl_files else []
84
- self.name = name
85
- self.target = target
86
- self.sources = set(sources) if sources else sources
87
- self.language_pairs = language_pairs if language_pairs else {(src, target) for src in self.sources}
88
-
89
- def _inject_language(self, src, tgt, strings):
90
- """Injects languages into (potentially) template strings."""
91
- if (src, tgt) not in self.language_pairs:
92
- raise ValueError(f"Invalid source for '{self.name}': ({src}-{tgt})")
93
-
94
- def _format_string(s):
95
- if "{0}" in s and "{1}" in s and "{src}" in s:
96
- return s.format(*sorted([src, tgt]), src=src)
97
- elif "{0}" in s and "{1}" in s:
98
- return s.format(*sorted([src, tgt]))
99
- elif "{src}" in s:
100
- return s.format(src=src, tgt=tgt)
101
- else:
102
- return s
103
-
104
- return [_format_string(s) for s in strings]
105
-
106
- def get_url(self, src, tgt):
107
- return self._inject_language(src=src, tgt=tgt, strings=self._urls)
108
-
109
- def get_manual_dl_files(self, src, tgt):
110
- return self._inject_language(src=src, tgt=tgt, strings=self._manual_dl_files)
111
-
112
- def get_path(self, src, tgt):
113
- return self._inject_language(src=src, tgt=tgt, strings=self._paths)
114
-
115
-
116
- # Subsets used in the training sets for various years of WMT.
117
- _TRAIN_SUBSETS = [
118
- # pylint:disable=line-too-long
119
- SubDataset(
120
- name="commoncrawl",
121
- target="en", # fr-de pair in commoncrawl_frde
122
- sources={"cs", "de", "es", "fr", "ru"},
123
- url="https://huggingface.co/datasets/wmt/wmt13/resolve/main-zip/training-parallel-commoncrawl.zip",
124
- path=("commoncrawl.{src}-en.{src}", "commoncrawl.{src}-en.en"),
125
- ),
126
- SubDataset(
127
- name="commoncrawl_frde",
128
- target="de",
129
- sources={"fr"},
130
- url=(
131
- "https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/fr-de/bitexts/commoncrawl.fr.gz",
132
- "https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/fr-de/bitexts/commoncrawl.de.gz",
133
- ),
134
- path=("", ""),
135
- ),
136
- SubDataset(
137
- name="czeng_10",
138
- target="en",
139
- sources={"cs"},
140
- url="http://ufal.mff.cuni.cz/czeng/czeng10",
141
- manual_dl_files=["data-plaintext-format.%d.tar" % i for i in range(10)],
142
- # Each tar contains multiple files, which we process specially in
143
- # _parse_czeng.
144
- path=("data.plaintext-format/??train.gz",) * 10,
145
- ),
146
- SubDataset(
147
- name="czeng_16pre",
148
- target="en",
149
- sources={"cs"},
150
- url="http://ufal.mff.cuni.cz/czeng/czeng16pre",
151
- manual_dl_files=["czeng16pre.deduped-ignoring-sections.txt.gz"],
152
- path="",
153
- ),
154
- SubDataset(
155
- name="czeng_16",
156
- target="en",
157
- sources={"cs"},
158
- url="http://ufal.mff.cuni.cz/czeng",
159
- manual_dl_files=["data-plaintext-format.%d.tar" % i for i in range(10)],
160
- # Each tar contains multiple files, which we process specially in
161
- # _parse_czeng.
162
- path=("data.plaintext-format/??train.gz",) * 10,
163
- ),
164
- SubDataset(
165
- # This dataset differs from the above in the filtering that is applied
166
- # during parsing.
167
- name="czeng_17",
168
- target="en",
169
- sources={"cs"},
170
- url="http://ufal.mff.cuni.cz/czeng",
171
- manual_dl_files=["data-plaintext-format.%d.tar" % i for i in range(10)],
172
- # Each tar contains multiple files, which we process specially in
173
- # _parse_czeng.
174
- path=("data.plaintext-format/??train.gz",) * 10,
175
- ),
176
- SubDataset(
177
- name="dcep_v1",
178
- target="en",
179
- sources={"lv"},
180
- url="https://huggingface.co/datasets/wmt/wmt17/resolve/main-zip/translation-task/dcep.lv-en.v1.zip",
181
- path=("dcep.en-lv/dcep.lv", "dcep.en-lv/dcep.en"),
182
- ),
183
- SubDataset(
184
- name="europarl_v7",
185
- target="en",
186
- sources={"cs", "de", "es", "fr"},
187
- url="https://huggingface.co/datasets/wmt/wmt13/resolve/main-zip/training-parallel-europarl-v7.zip",
188
- path=("training/europarl-v7.{src}-en.{src}", "training/europarl-v7.{src}-en.en"),
189
- ),
190
- SubDataset(
191
- name="europarl_v7_frde",
192
- target="de",
193
- sources={"fr"},
194
- url=(
195
- "https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/fr-de/bitexts/europarl-v7.fr.gz",
196
- "https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/fr-de/bitexts/europarl-v7.de.gz",
197
- ),
198
- path=("", ""),
199
- ),
200
- SubDataset(
201
- name="europarl_v8_18",
202
- target="en",
203
- sources={"et", "fi"},
204
- url="https://huggingface.co/datasets/wmt/wmt18/resolve/main-zip/translation-task/training-parallel-ep-v8.zip",
205
- path=("training/europarl-v8.{src}-en.{src}", "training/europarl-v8.{src}-en.en"),
206
- ),
207
- SubDataset(
208
- name="europarl_v8_16",
209
- target="en",
210
- sources={"fi", "ro"},
211
- url="https://huggingface.co/datasets/wmt/wmt16/resolve/main-zip/translation-task/training-parallel-ep-v8.zip",
212
- path=("training-parallel-ep-v8/europarl-v8.{src}-en.{src}", "training-parallel-ep-v8/europarl-v8.{src}-en.en"),
213
- ),
214
- SubDataset(
215
- name="europarl_v9",
216
- target="en",
217
- sources={"cs", "de", "fi", "lt"},
218
- url="https://huggingface.co/datasets/wmt/europarl/resolve/main/v9/training/europarl-v9.{src}-en.tsv.gz",
219
- path="",
220
- ),
221
- SubDataset(
222
- name="gigafren",
223
- target="en",
224
- sources={"fr"},
225
- url="https://huggingface.co/datasets/wmt/wmt10/resolve/main-zip/training-giga-fren.zip",
226
- path=("giga-fren.release2.fixed.fr.gz", "giga-fren.release2.fixed.en.gz"),
227
- ),
228
- SubDataset(
229
- name="hindencorp_01",
230
- target="en",
231
- sources={"hi"},
232
- url="http://ufallab.ms.mff.cuni.cz/~bojar/hindencorp",
233
- manual_dl_files=["hindencorp0.1.gz"],
234
- path="",
235
- ),
236
- SubDataset(
237
- name="leta_v1",
238
- target="en",
239
- sources={"lv"},
240
- url="https://huggingface.co/datasets/wmt/wmt17/resolve/main-zip/translation-task/leta.v1.zip",
241
- path=("LETA-lv-en/leta.lv", "LETA-lv-en/leta.en"),
242
- ),
243
- SubDataset(
244
- name="multiun",
245
- target="en",
246
- sources={"es", "fr"},
247
- url="https://huggingface.co/datasets/wmt/wmt13/resolve/main-zip/training-parallel-un.zip",
248
- path=("un/undoc.2000.{src}-en.{src}", "un/undoc.2000.{src}-en.en"),
249
- ),
250
- SubDataset(
251
- name="newscommentary_v9",
252
- target="en",
253
- sources={"cs", "de", "fr", "ru"},
254
- url="https://huggingface.co/datasets/wmt/wmt14/resolve/main-zip/training-parallel-nc-v9.zip",
255
- path=("training/news-commentary-v9.{src}-en.{src}", "training/news-commentary-v9.{src}-en.en"),
256
- ),
257
- SubDataset(
258
- name="newscommentary_v10",
259
- target="en",
260
- sources={"cs", "de", "fr", "ru"},
261
- url="https://huggingface.co/datasets/wmt/wmt15/resolve/main-zip/training-parallel-nc-v10.zip",
262
- path=("news-commentary-v10.{src}-en.{src}", "news-commentary-v10.{src}-en.en"),
263
- ),
264
- SubDataset(
265
- name="newscommentary_v11",
266
- target="en",
267
- sources={"cs", "de", "ru"},
268
- url="https://huggingface.co/datasets/wmt/wmt16/resolve/main-zip/translation-task/training-parallel-nc-v11.zip",
269
- path=(
270
- "training-parallel-nc-v11/news-commentary-v11.{src}-en.{src}",
271
- "training-parallel-nc-v11/news-commentary-v11.{src}-en.en",
272
- ),
273
- ),
274
- SubDataset(
275
- name="newscommentary_v12",
276
- target="en",
277
- sources={"cs", "de", "ru", "zh"},
278
- url="https://huggingface.co/datasets/wmt/wmt17/resolve/main-zip/translation-task/training-parallel-nc-v12.zip",
279
- path=("training/news-commentary-v12.{src}-en.{src}", "training/news-commentary-v12.{src}-en.en"),
280
- ),
281
- SubDataset(
282
- name="newscommentary_v13",
283
- target="en",
284
- sources={"cs", "de", "ru", "zh"},
285
- url="https://huggingface.co/datasets/wmt/wmt18/resolve/main-zip/translation-task/training-parallel-nc-v13.zip",
286
- path=(
287
- "training-parallel-nc-v13/news-commentary-v13.{src}-en.{src}",
288
- "training-parallel-nc-v13/news-commentary-v13.{src}-en.en",
289
- ),
290
- ),
291
- SubDataset(
292
- name="newscommentary_v14",
293
- target="en", # fr-de pair in newscommentary_v14_frde
294
- sources={"cs", "de", "kk", "ru", "zh"},
295
- url="http://data.statmt.org/news-commentary/v14/training/news-commentary-v14.{0}-{1}.tsv.gz",
296
- path="",
297
- ),
298
- SubDataset(
299
- name="newscommentary_v14_frde",
300
- target="de",
301
- sources={"fr"},
302
- url="http://data.statmt.org/news-commentary/v14/training/news-commentary-v14.de-fr.tsv.gz",
303
- path="",
304
- ),
305
- SubDataset(
306
- name="onlinebooks_v1",
307
- target="en",
308
- sources={"lv"},
309
- url="https://huggingface.co/datasets/wmt/wmt17/resolve/main-zip/translation-task/books.lv-en.v1.zip",
310
- path=("farewell/farewell.lv", "farewell/farewell.en"),
311
- ),
312
- SubDataset(
313
- name="paracrawl_v1",
314
- target="en",
315
- sources={"cs", "de", "et", "fi", "ru"},
316
- url="https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-{src}.zipporah0-dedup-clean.tgz", # TODO(QL): use gzip for streaming
317
- path=(
318
- "paracrawl-release1.en-{src}.zipporah0-dedup-clean.{src}",
319
- "paracrawl-release1.en-{src}.zipporah0-dedup-clean.en",
320
- ),
321
- ),
322
- SubDataset(
323
- name="paracrawl_v1_ru",
324
- target="en",
325
- sources={"ru"},
326
- url="https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-ru.zipporah0-dedup-clean.tgz", # TODO(QL): use gzip for streaming
327
- path=(
328
- "paracrawl-release1.en-ru.zipporah0-dedup-clean.ru",
329
- "paracrawl-release1.en-ru.zipporah0-dedup-clean.en",
330
- ),
331
- ),
332
- SubDataset(
333
- name="paracrawl_v3",
334
- target="en", # fr-de pair in paracrawl_v3_frde
335
- sources={"cs", "de", "fi", "lt"},
336
- url="https://s3.amazonaws.com/web-language-models/paracrawl/release3/en-{src}.bicleaner07.tmx.gz",
337
- path="",
338
- ),
339
- SubDataset(
340
- name="paracrawl_v3_frde",
341
- target="de",
342
- sources={"fr"},
343
- url=(
344
- "https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/fr-de/bitexts/de-fr.bicleaner07.de.gz",
345
- "https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/fr-de/bitexts/de-fr.bicleaner07.fr.gz",
346
- ),
347
- path=("", ""),
348
- ),
349
- SubDataset(
350
- name="rapid_2016",
351
- target="en",
352
- sources={"de", "et", "fi"},
353
- url="https://huggingface.co/datasets/wmt/wmt18/resolve/main-zip/translation-task/rapid2016.zip",
354
- path=("rapid2016.{0}-{1}.{src}", "rapid2016.{0}-{1}.en"),
355
- ),
356
- SubDataset(
357
- name="rapid_2016_ltfi",
358
- target="en",
359
- sources={"fi", "lt"},
360
- url="https://tilde-model.s3-eu-west-1.amazonaws.com/rapid2016.en-{src}.tmx.zip",
361
- path="rapid2016.en-{src}.tmx",
362
- ),
363
- SubDataset(
364
- name="rapid_2019",
365
- target="en",
366
- sources={"de"},
367
- url="https://s3-eu-west-1.amazonaws.com/tilde-model/rapid2019.de-en.zip",
368
- path=("rapid2019.de-en.de", "rapid2019.de-en.en"),
369
- ),
370
- SubDataset(
371
- name="setimes_2",
372
- target="en",
373
- sources={"ro", "tr"},
374
- url="https://opus.nlpl.eu/download.php?f=SETIMES/v2/tmx/en-{src}.tmx.gz",
375
- path="",
376
- ),
377
- SubDataset(
378
- name="uncorpus_v1",
379
- target="en",
380
- sources={"ru", "zh"},
381
- url="https://huggingface.co/datasets/wmt/uncorpus/resolve/main-zip/UNv1.0.en-{src}.zip",
382
- path=("en-{src}/UNv1.0.en-{src}.{src}", "en-{src}/UNv1.0.en-{src}.en"),
383
- ),
384
- SubDataset(
385
- name="wikiheadlines_fi",
386
- target="en",
387
- sources={"fi"},
388
- url="https://huggingface.co/datasets/wmt/wmt15/resolve/main-zip/wiki-titles.zip",
389
- path="wiki/fi-en/titles.fi-en",
390
- ),
391
- SubDataset(
392
- name="wikiheadlines_hi",
393
- target="en",
394
- sources={"hi"},
395
- url="https://huggingface.co/datasets/wmt/wmt14/resolve/main-zip/wiki-titles.zip",
396
- path="wiki/hi-en/wiki-titles.hi-en",
397
- ),
398
- SubDataset(
399
- # Verified that wmt14 and wmt15 files are identical.
400
- name="wikiheadlines_ru",
401
- target="en",
402
- sources={"ru"},
403
- url="https://huggingface.co/datasets/wmt/wmt15/resolve/main-zip/wiki-titles.zip",
404
- path="wiki/ru-en/wiki.ru-en",
405
- ),
406
- SubDataset(
407
- name="wikititles_v1",
408
- target="en",
409
- sources={"cs", "de", "fi", "gu", "kk", "lt", "ru", "zh"},
410
- url="https://huggingface.co/datasets/wmt/wikititles/resolve/main/v1/wikititles-v1.{src}-en.tsv.gz",
411
- path="",
412
- ),
413
- SubDataset(
414
- name="wikititles_v3",
415
- language_pairs={(src, "en") for src in ["cs", "de", "ha", "ig", "is", "ja", "ps", "ru", "zh"]}
416
- | {("ca", "es"), ("de", "fr"), ("bn", "hi")}
417
- | {(src, "pt") for src in ["ca", "es"]}
418
- | {(src, "ro") for src in ["ca", "es", "pt"]}
419
- | {("xh", "zu")},
420
- url="https://huggingface.co/datasets/wmt/wikititles/resolve/main/data/v3/wikititles-v3.{src}-{tgt}.tsv.gz",
421
- ),
422
- SubDataset(
423
- name="yakut",
424
- target="ru",
425
- sources={"sah"},
426
- url="https://huggingface.co/datasets/wmt/yakut/resolve/main/data/yakut.zip",
427
- path="yakut/sah-ru.parallel.uniq.tsv",
428
- ),
429
- SubDataset(
430
- name="yandexcorpus",
431
- target="en",
432
- sources={"ru"},
433
- url="https://translate.yandex.ru/corpus?lang=en",
434
- manual_dl_files=["1mcorpus.zip"],
435
- path=("corpus.en_ru.1m.ru", "corpus.en_ru.1m.en"),
436
- ),
437
- # pylint:enable=line-too-long
438
- ] + [
439
- SubDataset( # pylint:disable=g-complex-comprehension
440
- name=ss,
441
- target="en",
442
- sources={"zh"},
443
- url="https://huggingface.co/datasets/wmt/wmt18/resolve/main/cwmt-wmt/%s.zip" % ss,
444
- path=("%s/*_c[hn].txt" % ss, "%s/*_en.txt" % ss),
445
- )
446
- for ss in CWMT_SUBSET_NAMES
447
- ]
448
-
449
- _DEV_SUBSETS = [
450
- SubDataset(
451
- name="euelections_dev2019",
452
- target="de",
453
- sources={"fr"},
454
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
455
- path=("dev/euelections_dev2019.fr-de.src.fr", "dev/euelections_dev2019.fr-de.tgt.de"),
456
- ),
457
- SubDataset(
458
- name="newsdev2014",
459
- target="en",
460
- sources={"hi"},
461
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
462
- path=("dev/newsdev2014.hi", "dev/newsdev2014.en"),
463
- ),
464
- SubDataset(
465
- name="newsdev2015",
466
- target="en",
467
- sources={"fi"},
468
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
469
- path=("dev/newsdev2015-fien-src.{src}.sgm", "dev/newsdev2015-fien-ref.en.sgm"),
470
- ),
471
- SubDataset(
472
- name="newsdiscussdev2015",
473
- target="en",
474
- sources={"ro", "tr"},
475
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
476
- path=("dev/newsdiscussdev2015-{src}en-src.{src}.sgm", "dev/newsdiscussdev2015-{src}en-ref.en.sgm"),
477
- ),
478
- SubDataset(
479
- name="newsdev2016",
480
- target="en",
481
- sources={"ro", "tr"},
482
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
483
- path=("dev/newsdev2016-{src}en-src.{src}.sgm", "dev/newsdev2016-{src}en-ref.en.sgm"),
484
- ),
485
- SubDataset(
486
- name="newsdev2017",
487
- target="en",
488
- sources={"lv", "zh"},
489
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
490
- path=("dev/newsdev2017-{src}en-src.{src}.sgm", "dev/newsdev2017-{src}en-ref.en.sgm"),
491
- ),
492
- SubDataset(
493
- name="newsdev2018",
494
- target="en",
495
- sources={"et"},
496
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
497
- path=("dev/newsdev2018-{src}en-src.{src}.sgm", "dev/newsdev2018-{src}en-ref.en.sgm"),
498
- ),
499
- SubDataset(
500
- name="newsdev2019",
501
- target="en",
502
- sources={"gu", "kk", "lt"},
503
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
504
- path=("dev/newsdev2019-{src}en-src.{src}.sgm", "dev/newsdev2019-{src}en-ref.en.sgm"),
505
- ),
506
- SubDataset(
507
- name="newsdiscussdev2015",
508
- target="en",
509
- sources={"fr"},
510
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
511
- path=("dev/newsdiscussdev2015-{src}en-src.{src}.sgm", "dev/newsdiscussdev2015-{src}en-ref.en.sgm"),
512
- ),
513
- SubDataset(
514
- name="newsdiscusstest2015",
515
- target="en",
516
- sources={"fr"},
517
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
518
- path=("dev/newsdiscusstest2015-{src}en-src.{src}.sgm", "dev/newsdiscusstest2015-{src}en-ref.en.sgm"),
519
- ),
520
- SubDataset(
521
- name="newssyscomb2009",
522
- target="en",
523
- sources={"cs", "de", "es", "fr"},
524
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
525
- path=("dev/newssyscomb2009.{src}", "dev/newssyscomb2009.en"),
526
- ),
527
- SubDataset(
528
- name="newstest2008",
529
- target="en",
530
- sources={"cs", "de", "es", "fr", "hu"},
531
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
532
- path=("dev/news-test2008.{src}", "dev/news-test2008.en"),
533
- ),
534
- SubDataset(
535
- name="newstest2009",
536
- target="en",
537
- sources={"cs", "de", "es", "fr"},
538
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
539
- path=("dev/newstest2009.{src}", "dev/newstest2009.en"),
540
- ),
541
- SubDataset(
542
- name="newstest2010",
543
- target="en",
544
- sources={"cs", "de", "es", "fr"},
545
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
546
- path=("dev/newstest2010.{src}", "dev/newstest2010.en"),
547
- ),
548
- SubDataset(
549
- name="newstest2011",
550
- target="en",
551
- sources={"cs", "de", "es", "fr"},
552
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
553
- path=("dev/newstest2011.{src}", "dev/newstest2011.en"),
554
- ),
555
- SubDataset(
556
- name="newstest2012",
557
- target="en",
558
- sources={"cs", "de", "es", "fr", "ru"},
559
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
560
- path=("dev/newstest2012.{src}", "dev/newstest2012.en"),
561
- ),
562
- SubDataset(
563
- name="newstest2013",
564
- target="en",
565
- sources={"cs", "de", "es", "fr", "ru"},
566
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
567
- path=("dev/newstest2013.{src}", "dev/newstest2013.en"),
568
- ),
569
- SubDataset(
570
- name="newstest2014",
571
- target="en",
572
- sources={"cs", "de", "es", "fr", "hi", "ru"},
573
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
574
- path=("dev/newstest2014-{src}en-src.{src}.sgm", "dev/newstest2014-{src}en-ref.en.sgm"),
575
- ),
576
- SubDataset(
577
- name="newstest2015",
578
- target="en",
579
- sources={"cs", "de", "fi", "ru"},
580
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
581
- path=("dev/newstest2015-{src}en-src.{src}.sgm", "dev/newstest2015-{src}en-ref.en.sgm"),
582
- ),
583
- SubDataset(
584
- name="newsdiscusstest2015",
585
- target="en",
586
- sources={"fr"},
587
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
588
- path=("dev/newsdiscusstest2015-{src}en-src.{src}.sgm", "dev/newsdiscusstest2015-{src}en-ref.en.sgm"),
589
- ),
590
- SubDataset(
591
- name="newstest2016",
592
- target="en",
593
- sources={"cs", "de", "fi", "ro", "ru", "tr"},
594
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
595
- path=("dev/newstest2016-{src}en-src.{src}.sgm", "dev/newstest2016-{src}en-ref.en.sgm"),
596
- ),
597
- SubDataset(
598
- name="newstestB2016",
599
- target="en",
600
- sources={"fi"},
601
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
602
- path=("dev/newstestB2016-enfi-ref.{src}.sgm", "dev/newstestB2016-enfi-src.en.sgm"),
603
- ),
604
- SubDataset(
605
- name="newstest2017",
606
- target="en",
607
- sources={"cs", "de", "fi", "lv", "ru", "tr", "zh"},
608
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
609
- path=("dev/newstest2017-{src}en-src.{src}.sgm", "dev/newstest2017-{src}en-ref.en.sgm"),
610
- ),
611
- SubDataset(
612
- name="newstestB2017",
613
- target="en",
614
- sources={"fi"},
615
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
616
- path=("dev/newstestB2017-fien-src.fi.sgm", "dev/newstestB2017-fien-ref.en.sgm"),
617
- ),
618
- SubDataset(
619
- name="newstest2018",
620
- target="en",
621
- sources={"cs", "de", "et", "fi", "ru", "tr", "zh"},
622
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
623
- path=("dev/newstest2018-{src}en-src.{src}.sgm", "dev/newstest2018-{src}en-ref.en.sgm"),
624
- ),
625
- ]
626
-
627
- DATASET_MAP = {dataset.name: dataset for dataset in _TRAIN_SUBSETS + _DEV_SUBSETS}
628
-
629
- _CZENG17_FILTER = SubDataset(
630
- name="czeng17_filter",
631
- target="en",
632
- sources={"cs"},
633
- url="http://ufal.mff.cuni.cz/czeng/download.php?f=convert_czeng16_to_17.pl.zip",
634
- path="convert_czeng16_to_17.pl",
635
- )
636
-
637
-
638
- class WmtConfig(datasets.BuilderConfig):
639
- """BuilderConfig for WMT."""
640
-
641
- def __init__(self, url=None, citation=None, description=None, language_pair=(None, None), subsets=None, **kwargs):
642
- """BuilderConfig for WMT.
643
-
644
- Args:
645
- url: The reference URL for the dataset.
646
- citation: The paper citation for the dataset.
647
- description: The description of the dataset.
648
- language_pair: pair of languages that will be used for translation. Should
649
- contain 2 letter coded strings. For example: ("en", "de").
650
- configuration for the `datasets.features.text.TextEncoder` used for the
651
- `datasets.features.text.Translation` features.
652
- subsets: Dict[split, list[str]]. List of the subset to use for each of the
653
- split. Note that WMT subclasses overwrite this parameter.
654
- **kwargs: keyword arguments forwarded to super.
655
- """
656
- name = "%s-%s" % (language_pair[0], language_pair[1])
657
- if "name" in kwargs: # Add name suffix for custom configs
658
- name += "." + kwargs.pop("name")
659
-
660
- super(WmtConfig, self).__init__(name=name, description=description, **kwargs)
661
-
662
- self.url = url or "http://www.statmt.org"
663
- self.citation = citation
664
- self.language_pair = language_pair
665
- self.subsets = subsets
666
-
667
- # TODO(PVP): remove when manual dir works
668
- # +++++++++++++++++++++
669
- if language_pair[1] in ["cs", "hi", "ru"]:
670
- assert NotImplementedError(f"The dataset for {language_pair[1]}-en is currently not fully supported.")
671
- # +++++++++++++++++++++
672
-
673
-
674
- class Wmt(datasets.GeneratorBasedBuilder):
675
- """WMT translation dataset."""
676
-
677
- BUILDER_CONFIG_CLASS = WmtConfig
678
-
679
- def __init__(self, *args, **kwargs):
680
- super(Wmt, self).__init__(*args, **kwargs)
681
-
682
- @property
683
- def _subsets(self):
684
- """Subsets that make up each split of the dataset."""
685
- raise NotImplementedError("This is a abstract method")
686
-
687
- @property
688
- def subsets(self):
689
- """Subsets that make up each split of the dataset for the language pair."""
690
- language_pair = self.config.language_pair
691
- filtered_subsets = {}
692
- subsets = self._subsets if self.config.subsets is None else self.config.subsets
693
- for split, ss_names in subsets.items():
694
- filtered_subsets[split] = []
695
- for ss_name in ss_names:
696
- dataset = DATASET_MAP[ss_name]
697
- if language_pair not in dataset.language_pairs:
698
- logger.info("Skipping sub-dataset that does not include language pair: %s", ss_name)
699
- else:
700
- filtered_subsets[split].append(ss_name)
701
- logger.info("Using sub-datasets: %s", filtered_subsets)
702
- return filtered_subsets
703
-
704
- def _info(self):
705
- src, target = self.config.language_pair
706
- return datasets.DatasetInfo(
707
- description=_DESCRIPTION,
708
- features=datasets.Features(
709
- {"translation": datasets.features.Translation(languages=self.config.language_pair)}
710
- ),
711
- supervised_keys=(src, target),
712
- homepage=self.config.url,
713
- citation=self.config.citation,
714
- )
715
-
716
- def _vocab_text_gen(self, split_subsets, extraction_map, language):
717
- for _, ex in self._generate_examples(split_subsets, extraction_map, with_translation=False):
718
- yield ex[language]
719
-
720
- def _split_generators(self, dl_manager):
721
- source, target = self.config.language_pair
722
- manual_paths_dict = {}
723
- urls_to_download = {}
724
- for ss_name in itertools.chain.from_iterable(self.subsets.values()):
725
- if ss_name == "czeng_17":
726
- # CzEng1.7 is CzEng1.6 with some blocks filtered out. We must download
727
- # the filtering script so we can parse out which blocks need to be
728
- # removed.
729
- urls_to_download[_CZENG17_FILTER.name] = _CZENG17_FILTER.get_url(source, target)
730
-
731
- # get dataset
732
- dataset = DATASET_MAP[ss_name]
733
- if dataset.get_manual_dl_files(source, target):
734
- # TODO(PVP): following two lines skip configs that are incomplete for now
735
- # +++++++++++++++++++++
736
- logger.info("Skipping {dataset.name} for now. Incomplete dataset for {self.config.name}")
737
- continue
738
- # +++++++++++++++++++++
739
-
740
- manual_dl_files = dataset.get_manual_dl_files(source, target)
741
- manual_paths = [
742
- os.path.join(os.path.abspath(os.path.expanduser(dl_manager.manual_dir)), fname)
743
- for fname in manual_dl_files
744
- ]
745
- assert all(
746
- os.path.exists(path) for path in manual_paths
747
- ), f"For {dataset.name}, you must manually download the following file(s) from {dataset.get_url(source, target)} and place them in {dl_manager.manual_dir}: {', '.join(manual_dl_files)}"
748
-
749
- # set manual path for correct subset
750
- manual_paths_dict[ss_name] = manual_paths
751
- else:
752
- urls_to_download[ss_name] = dataset.get_url(source, target)
753
-
754
- # Download and extract files from URLs.
755
- downloaded_files = dl_manager.download_and_extract(urls_to_download)
756
- # Extract manually downloaded files.
757
- manual_files = dl_manager.extract(manual_paths_dict)
758
- extraction_map = dict(downloaded_files, **manual_files)
759
-
760
- for language in self.config.language_pair:
761
- self._vocab_text_gen(self.subsets[datasets.Split.TRAIN], extraction_map, language)
762
-
763
- return [
764
- datasets.SplitGenerator( # pylint:disable=g-complex-comprehension
765
- name=split, gen_kwargs={"split_subsets": split_subsets, "extraction_map": extraction_map}
766
- )
767
- for split, split_subsets in self.subsets.items()
768
- ]
769
-
770
- def _generate_examples(self, split_subsets, extraction_map, with_translation=True):
771
- """Returns the examples in the raw (text) form."""
772
- source, target = self.config.language_pair
773
-
774
- def _get_local_paths(dataset, extract_dirs):
775
- rel_paths = dataset.get_path(source, target)
776
- if len(extract_dirs) == 1:
777
- extract_dirs = extract_dirs * len(rel_paths)
778
- return [
779
- os.path.join(ex_dir, rel_path) if rel_path else ex_dir
780
- for ex_dir, rel_path in zip(extract_dirs, rel_paths)
781
- ]
782
-
783
- def _get_filenames(dataset):
784
- rel_paths = dataset.get_path(source, target)
785
- urls = dataset.get_url(source, target)
786
- if len(urls) == 1:
787
- urls = urls * len(rel_paths)
788
- return [rel_path if rel_path else os.path.basename(url) for url, rel_path in zip(urls, rel_paths)]
789
-
790
- for ss_name in split_subsets:
791
- # TODO(PVP) remove following five lines when manual data works
792
- # +++++++++++++++++++++
793
- dataset = DATASET_MAP[ss_name]
794
- source, _ = self.config.language_pair
795
- if dataset.get_manual_dl_files(source, target):
796
- logger.info(f"Skipping {dataset.name} for now. Incomplete dataset for {self.config.name}")
797
- continue
798
- # +++++++++++++++++++++
799
-
800
- logger.info("Generating examples from: %s", ss_name)
801
- dataset = DATASET_MAP[ss_name]
802
- extract_dirs = extraction_map[ss_name]
803
- files = _get_local_paths(dataset, extract_dirs)
804
- filenames = _get_filenames(dataset)
805
-
806
- sub_generator_args = tuple(files)
807
-
808
- if ss_name.startswith("czeng"):
809
- if ss_name.endswith("16pre"):
810
- sub_generator = functools.partial(_parse_tsv, language_pair=("en", "cs"))
811
- sub_generator_args += tuple(filenames)
812
- elif ss_name.endswith("17"):
813
- filter_path = _get_local_paths(_CZENG17_FILTER, extraction_map[_CZENG17_FILTER.name])[0]
814
- sub_generator = functools.partial(_parse_czeng, filter_path=filter_path)
815
- else:
816
- sub_generator = _parse_czeng
817
- elif ss_name == "hindencorp_01":
818
- sub_generator = _parse_hindencorp
819
- elif ss_name == "yakut":
820
- sub_generator, sub_generator_args = YakutParser.create_generator(
821
- sub_generator_args=sub_generator_args, config=self.config
822
- )
823
- elif ss_name == "wikititles_v3":
824
- sub_generator, sub_generator_args = WikititlesV3Parser.create_generator(
825
- sub_generator_args=sub_generator_args, config=self.config
826
- )
827
- elif len(files) == 2:
828
- if ss_name.endswith("_frde"):
829
- sub_generator = _parse_frde_bitext
830
- else:
831
- sub_generator = _parse_parallel_sentences
832
- sub_generator_args += tuple(filenames)
833
- elif len(files) == 1:
834
- fname = filenames[0]
835
- # Note: Due to formatting used by `download_manager`, the file
836
- # extension may not be at the end of the file path.
837
- if ".tsv" in fname:
838
- sub_generator = _parse_tsv
839
- sub_generator_args += tuple(filenames)
840
- elif (
841
- ss_name.startswith("newscommentary_v14")
842
- or ss_name.startswith("europarl_v9")
843
- or ss_name.startswith("wikititles_v1")
844
- ):
845
- sub_generator = functools.partial(_parse_tsv, language_pair=self.config.language_pair)
846
- sub_generator_args += tuple(filenames)
847
- elif "tmx" in fname or ss_name.startswith("paracrawl_v3"):
848
- sub_generator = _parse_tmx
849
- elif ss_name.startswith("wikiheadlines"):
850
- sub_generator = _parse_wikiheadlines
851
- else:
852
- raise ValueError("Unsupported file format: %s" % fname)
853
- else:
854
- raise ValueError("Invalid number of files: %d" % len(files))
855
-
856
- for sub_key, ex in sub_generator(*sub_generator_args):
857
- if not all(ex.values()):
858
- continue
859
- # TODO(adarob): Add subset feature.
860
- # ex["subset"] = subset
861
- key = f"{ss_name}/{sub_key}"
862
- if with_translation is True:
863
- ex = {"translation": ex}
864
- yield key, ex
865
-
866
-
867
- def _parse_parallel_sentences(f1, f2, filename1, filename2):
868
- """Returns examples from parallel SGML or text files, which may be gzipped."""
869
-
870
- def _parse_text(path, original_filename):
871
- """Returns the sentences from a single text file, which may be gzipped."""
872
- split_path = original_filename.split(".")
873
-
874
- if split_path[-1] == "gz":
875
- lang = split_path[-2]
876
-
877
- def gen():
878
- with open(path, "rb") as f, gzip.GzipFile(fileobj=f) as g:
879
- for line in g:
880
- yield line.decode("utf-8").rstrip()
881
-
882
- return gen(), lang
883
-
884
- if split_path[-1] == "txt":
885
- # CWMT
886
- lang = split_path[-2].split("_")[-1]
887
- lang = "zh" if lang in ("ch", "cn", "c[hn]") else lang
888
- else:
889
- lang = split_path[-1]
890
-
891
- def gen():
892
- with open(path, "rb") as f:
893
- for line in f:
894
- yield line.decode("utf-8").rstrip()
895
-
896
- return gen(), lang
897
-
898
- def _parse_sgm(path, original_filename):
899
- """Returns sentences from a single SGML file."""
900
- lang = original_filename.split(".")[-2]
901
- # Note: We can't use the XML parser since some of the files are badly
902
- # formatted.
903
- seg_re = re.compile(r"<seg id=\"\d+\">(.*)</seg>")
904
-
905
- def gen():
906
- with open(path, encoding="utf-8") as f:
907
- for line in f:
908
- seg_match = re.match(seg_re, line)
909
- if seg_match:
910
- assert len(seg_match.groups()) == 1
911
- yield seg_match.groups()[0]
912
-
913
- return gen(), lang
914
-
915
- parse_file = _parse_sgm if os.path.basename(f1).endswith(".sgm") else _parse_text
916
-
917
- # Some datasets (e.g., CWMT) contain multiple parallel files specified with
918
- # a wildcard. We sort both sets to align them and parse them one by one.
919
- f1_files = sorted(glob.glob(f1))
920
- f2_files = sorted(glob.glob(f2))
921
-
922
- assert f1_files and f2_files, "No matching files found: %s, %s." % (f1, f2)
923
- assert len(f1_files) == len(f2_files), "Number of files do not match: %d vs %d for %s vs %s." % (
924
- len(f1_files),
925
- len(f2_files),
926
- f1,
927
- f2,
928
- )
929
-
930
- for f_id, (f1_i, f2_i) in enumerate(zip(sorted(f1_files), sorted(f2_files))):
931
- l1_sentences, l1 = parse_file(f1_i, filename1)
932
- l2_sentences, l2 = parse_file(f2_i, filename2)
933
-
934
- for line_id, (s1, s2) in enumerate(zip(l1_sentences, l2_sentences)):
935
- key = f"{f_id}/{line_id}"
936
- yield key, {l1: s1, l2: s2}
937
-
938
-
939
- def _parse_frde_bitext(fr_path, de_path):
940
- with open(fr_path, encoding="utf-8") as fr_f:
941
- with open(de_path, encoding="utf-8") as de_f:
942
- for line_id, (s1, s2) in enumerate(zip(fr_f, de_f)):
943
- yield line_id, {"fr": s1.rstrip(), "de": s2.rstrip()}
944
-
945
-
946
- def _parse_tmx(path):
947
- """Generates examples from TMX file."""
948
-
949
- def _get_tuv_lang(tuv):
950
- for k, v in tuv.items():
951
- if k.endswith("}lang"):
952
- return v
953
- raise AssertionError("Language not found in `tuv` attributes.")
954
-
955
- def _get_tuv_seg(tuv):
956
- segs = tuv.findall("seg")
957
- assert len(segs) == 1, "Invalid number of segments: %d" % len(segs)
958
- return segs[0].text
959
-
960
- with open(path, "rb") as f:
961
- # Workaround due to: https://github.com/tensorflow/tensorflow/issues/33563
962
- utf_f = codecs.getreader("utf-8")(f)
963
- for line_id, (_, elem) in enumerate(ElementTree.iterparse(utf_f)):
964
- if elem.tag == "tu":
965
- yield line_id, {_get_tuv_lang(tuv): _get_tuv_seg(tuv) for tuv in elem.iterfind("tuv")}
966
- elem.clear()
967
-
968
-
969
- def _parse_tsv(path, filename=None, language_pair=None, skiprows=None):
970
- """Generates examples from TSV file."""
971
- if language_pair is None:
972
- lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])\.tsv", filename)
973
- assert lang_match is not None, "Invalid TSV filename: %s" % filename
974
- l1, l2 = lang_match.groups()
975
- else:
976
- l1, l2 = language_pair
977
- with open(path, encoding="utf-8") as f:
978
- for key, line in enumerate(f):
979
- if skiprows and key < skiprows:
980
- continue
981
- cols = line.split("\t")
982
- if len(cols) != 2:
983
- logger.warning("Skipping line %d in TSV (%s) with %d != 2 columns.", key, path, len(cols))
984
- continue
985
- s1, s2 = cols
986
- yield key, {l1: s1.strip(), l2: s2.strip()}
987
-
988
-
989
- def _parse_wikiheadlines(path):
990
- """Generates examples from Wikiheadlines dataset file."""
991
- lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])$", path)
992
- assert lang_match is not None, "Invalid Wikiheadlines filename: %s" % path
993
- l1, l2 = lang_match.groups()
994
- with open(path, encoding="utf-8") as f:
995
- for line_id, line in enumerate(f):
996
- s1, s2 = line.split("|||")
997
- yield line_id, {l1: s1.strip(), l2: s2.strip()}
998
-
999
-
1000
- def _parse_czeng(*paths, **kwargs):
1001
- """Generates examples from CzEng v1.6, with optional filtering for v1.7."""
1002
- filter_path = kwargs.get("filter_path", None)
1003
- if filter_path:
1004
- re_block = re.compile(r"^[^-]+-b(\d+)-\d\d[tde]")
1005
- with open(filter_path, encoding="utf-8") as f:
1006
- bad_blocks = {blk for blk in re.search(r"qw{([\s\d]*)}", f.read()).groups()[0].split()}
1007
- logger.info("Loaded %d bad blocks to filter from CzEng v1.6 to make v1.7.", len(bad_blocks))
1008
-
1009
- for path in paths:
1010
- for gz_path in sorted(glob.glob(path)):
1011
- with open(gz_path, "rb") as g, gzip.GzipFile(fileobj=g) as f:
1012
- filename = os.path.basename(gz_path)
1013
- for line_id, line in enumerate(f):
1014
- line = line.decode("utf-8") # required for py3
1015
- if not line.strip():
1016
- continue
1017
- id_, unused_score, cs, en = line.split("\t")
1018
- if filter_path:
1019
- block_match = re.match(re_block, id_)
1020
- if block_match and block_match.groups()[0] in bad_blocks:
1021
- continue
1022
- sub_key = f"{filename}/{line_id}"
1023
- yield sub_key, {
1024
- "cs": cs.strip(),
1025
- "en": en.strip(),
1026
- }
1027
-
1028
-
1029
- def _parse_hindencorp(path):
1030
- with open(path, encoding="utf-8") as f:
1031
- for line_id, line in enumerate(f):
1032
- split_line = line.split("\t")
1033
- if len(split_line) != 5:
1034
- logger.warning("Skipping invalid HindEnCorp line: %s", line)
1035
- continue
1036
- yield line_id, {"translation": {"en": split_line[3].strip(), "hi": split_line[4].strip()}}
1037
-
1038
-
1039
- class YakutParser:
1040
- @staticmethod
1041
- def create_generator(sub_generator_args=None, config=None):
1042
- sub_generator = functools.partial(_parse_tsv, language_pair=config.language_pair, skiprows=1)
1043
- return sub_generator, sub_generator_args
1044
-
1045
-
1046
- class WikititlesV3Parser:
1047
- @staticmethod
1048
- def create_generator(sub_generator_args=None, config=None):
1049
- sub_generator = functools.partial(_parse_tsv, language_pair=config.language_pair)
1050
- return sub_generator, sub_generator_args
data/v3/wikititles-v3.xh-zu.tsv.gz → xh-zu/wikititles-train.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:597b10d7aa26de560a48ce2a89e85ccfba445d290cfb418dd8cae43a1ef535cc
- size 8819
+ oid sha256:8b5f61893a09b808f0178b22fbf6769a4cdd4b6a67e5fbe7e7e66d0fc0398859
+ size 22940
zh-en/wikititles-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bf6a246a12d3d568ea01e7f1a3455959a154eb89c43b07e82bfb49feaceea1dc
+ size 28986037