diff --git a/README.md b/README.md index bf293061ffa937d6cf5abb51255f64730471ab21..e4dec4a3ec301d01c4eb78e57c9fa0530b585f1f 100644 --- a/README.md +++ b/README.md @@ -28,8 +28,8 @@ source_datasets: task_categories: - translation task_ids: [] -pretty_name: WMT18 paperswithcode_id: wmt-2018 +pretty_name: WMT18 dataset_info: - config_name: cs-en features: @@ -41,16 +41,16 @@ dataset_info: - en splits: - name: train - num_bytes: 1461016186 + num_bytes: 1461007346 num_examples: 11046024 - name: validation - num_bytes: 674430 + num_bytes: 674422 num_examples: 3005 - name: test - num_bytes: 696229 + num_bytes: 696221 num_examples: 2983 - download_size: 2030359086 - dataset_size: 1462386845 + download_size: 738874648 + dataset_size: 1462377989 - config_name: de-en features: - name: translation @@ -61,16 +61,16 @@ dataset_info: - en splits: - name: train - num_bytes: 8187552108 + num_bytes: 8187518284 num_examples: 42271874 - name: validation - num_bytes: 729519 + num_bytes: 729511 num_examples: 3004 - name: test - num_bytes: 757649 + num_bytes: 757641 num_examples: 2998 - download_size: 3808612335 - dataset_size: 8189039276 + download_size: 4436297213 + dataset_size: 8189005436 - config_name: et-en features: - name: translation @@ -81,16 +81,16 @@ dataset_info: - en splits: - name: train - num_bytes: 647992667 + num_bytes: 647990923 num_examples: 2175873 - name: validation - num_bytes: 459398 + num_bytes: 459390 num_examples: 2000 - name: test - num_bytes: 489394 + num_bytes: 489386 num_examples: 2000 - download_size: 524534404 - dataset_size: 648941459 + download_size: 283931426 + dataset_size: 648939699 - config_name: fi-en features: - name: translation @@ -101,16 +101,16 @@ dataset_info: - en splits: - name: train - num_bytes: 857171881 + num_bytes: 857169249 num_examples: 3280600 - name: validation - num_bytes: 1388828 + num_bytes: 1388820 num_examples: 6004 - name: test - num_bytes: 691841 + num_bytes: 691833 num_examples: 3000 - download_size: 491874780 - dataset_size: 859252550 + download_size: 488708706 + dataset_size: 859249902 - config_name: kk-en features: - name: translation @@ -135,16 +135,16 @@ dataset_info: - en splits: - name: train - num_bytes: 13665367647 + num_bytes: 13665338159 num_examples: 36858512 - name: validation - num_bytes: 1040195 + num_bytes: 1040187 num_examples: 3001 - name: test - num_bytes: 1085596 + num_bytes: 1085588 num_examples: 3000 - download_size: 4195144356 - dataset_size: 13667493438 + download_size: 6130744133 + dataset_size: 13667463934 - config_name: tr-en features: - name: translation @@ -155,16 +155,16 @@ dataset_info: - en splits: - name: train - num_bytes: 60416617 + num_bytes: 60416449 num_examples: 205756 - name: validation - num_bytes: 752773 + num_bytes: 752765 num_examples: 3007 - name: test - num_bytes: 770313 + num_bytes: 770305 num_examples: 3000 - download_size: 62263061 - dataset_size: 61939703 + download_size: 37733844 + dataset_size: 61939519 - config_name: zh-en features: - name: translation @@ -175,16 +175,73 @@ dataset_info: - en splits: - name: train - num_bytes: 5536169801 + num_bytes: 6342987000 num_examples: 25160346 - name: validation - num_bytes: 540347 + num_bytes: 540339 num_examples: 2001 - name: test - num_bytes: 1107522 + num_bytes: 1107514 num_examples: 3981 - download_size: 2259428767 - dataset_size: 5537817670 + download_size: 3581074494 + dataset_size: 6344634853 +configs: +- config_name: cs-en + data_files: + - split: train + path: cs-en/train-* + - split: validation + path: cs-en/validation-* + - split: test + path: 
cs-en/test-* +- config_name: de-en + data_files: + - split: train + path: de-en/train-* + - split: validation + path: de-en/validation-* + - split: test + path: de-en/test-* +- config_name: et-en + data_files: + - split: train + path: et-en/train-* + - split: validation + path: et-en/validation-* + - split: test + path: et-en/test-* +- config_name: fi-en + data_files: + - split: train + path: fi-en/train-* + - split: validation + path: fi-en/validation-* + - split: test + path: fi-en/test-* +- config_name: ru-en + data_files: + - split: train + path: ru-en/train-* + - split: validation + path: ru-en/validation-* + - split: test + path: ru-en/test-* +- config_name: tr-en + data_files: + - split: train + path: tr-en/train-* + - split: validation + path: tr-en/validation-* + - split: test + path: tr-en/test-* +- config_name: zh-en + data_files: + - split: train + path: zh-en/train-* + - split: validation + path: zh-en/validation-* + - split: test + path: zh-en/test-* --- # Dataset Card for "wmt18" diff --git a/cs-en/test-00000-of-00001.parquet b/cs-en/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e0ffe0a7abdf675bc6b18c9bd9fa406ad370515e --- /dev/null +++ b/cs-en/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc3e136dfcb05e2883b5676b19b57c183af5407625f9a2e0cffe22a9d306ab40 +size 473321 diff --git a/cs-en/train-00000-of-00003.parquet b/cs-en/train-00000-of-00003.parquet new file mode 100644 index 0000000000000000000000000000000000000000..58bac0c849b0b5e3ddfa52605aef7c2e833b92db --- /dev/null +++ b/cs-en/train-00000-of-00003.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d233036f697bfc8086aef14af8d478e4fcd943a4aac0e855ecb6c7a737d6615 +size 286501932 diff --git a/cs-en/train-00001-of-00003.parquet b/cs-en/train-00001-of-00003.parquet new file mode 100644 index 0000000000000000000000000000000000000000..0163da35aae8d1fd6d0175b3bbf868b93e6b67e3 --- /dev/null +++ b/cs-en/train-00001-of-00003.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1566bc7e03a31b4df716cb53746fb1b9d03c593deeefc7f7467bd0660bd23297 +size 199423610 diff --git a/cs-en/train-00002-of-00003.parquet b/cs-en/train-00002-of-00003.parquet new file mode 100644 index 0000000000000000000000000000000000000000..90a0a727e2d694ac3390b412ae520e16c6ae3b99 --- /dev/null +++ b/cs-en/train-00002-of-00003.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f01e9912db5059330241fa367eadbb628fdf7aa537da291cc349c64fe6ace944 +size 252022714 diff --git a/cs-en/validation-00000-of-00001.parquet b/cs-en/validation-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..79c4b4934ad5c630ee1621baa722f000ce2b2694 --- /dev/null +++ b/cs-en/validation-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e7ad0362955ee9c4c7ad1ebbcd93f6ff24f77e07f84a34f3dd9f200da7b523b +size 453071 diff --git a/de-en/test-00000-of-00001.parquet b/de-en/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..5be7d00ae464ae25a3960b449246cfafad69abd0 --- /dev/null +++ b/de-en/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a7df8d261a8de56c50a39ec14baa1d53bad8a2628164478e0f7f20ebc45c215 +size 494994 diff --git a/de-en/train-00000-of-00017.parquet b/de-en/train-00000-of-00017.parquet new file mode 100644 index 
0000000000000000000000000000000000000000..058a03f5d44825cd0bd4c9c6d9e023582c789e0b --- /dev/null +++ b/de-en/train-00000-of-00017.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f718e91e17f9925e127871050bd0d558a4ea6063c403d63e2a092b1a8888de5 +size 417124040 diff --git a/de-en/train-00001-of-00017.parquet b/de-en/train-00001-of-00017.parquet new file mode 100644 index 0000000000000000000000000000000000000000..451d3864955ab2e096734d233bec5a856aad299e --- /dev/null +++ b/de-en/train-00001-of-00017.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:492205048d66386b94b216894f4ce9633cf5dafd6653fced999facbf0399c61d +size 187427117 diff --git a/de-en/train-00002-of-00017.parquet b/de-en/train-00002-of-00017.parquet new file mode 100644 index 0000000000000000000000000000000000000000..574366212321820f015ea7ca38d5e3cb4cab4565 --- /dev/null +++ b/de-en/train-00002-of-00017.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:432da7277ce5c3d51d4cbd97fcc07ed9a29d88aafe017a11b3e111b76fe82a84 +size 184704329 diff --git a/de-en/train-00003-of-00017.parquet b/de-en/train-00003-of-00017.parquet new file mode 100644 index 0000000000000000000000000000000000000000..c9e6edf77c7a4c00849356b7a0cc4a53d5104a16 --- /dev/null +++ b/de-en/train-00003-of-00017.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ec151fa1d8b2f29c2b0465063294f001123ca4849d8c81e713b7ed6d5169707 +size 221585066 diff --git a/de-en/train-00004-of-00017.parquet b/de-en/train-00004-of-00017.parquet new file mode 100644 index 0000000000000000000000000000000000000000..f4729be3670172b4ddde72a19b1b2fd89bf8ddee --- /dev/null +++ b/de-en/train-00004-of-00017.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0c814f615e0fd8a36078adc376059ecc784d64e1cfb89255e5a831e4dac59a3f +size 283637452 diff --git a/de-en/train-00005-of-00017.parquet b/de-en/train-00005-of-00017.parquet new file mode 100644 index 0000000000000000000000000000000000000000..fb640cd121ff4ee0568eabaa215058ada6ed3448 --- /dev/null +++ b/de-en/train-00005-of-00017.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a2cc6d1234f2db76a504ba1f3ce67eec99287cd2c95e740ca7bcab5fe0f9e35 +size 260253494 diff --git a/de-en/train-00006-of-00017.parquet b/de-en/train-00006-of-00017.parquet new file mode 100644 index 0000000000000000000000000000000000000000..5ae769c2db1ad33424ac7b43e6fe6121c7eff421 --- /dev/null +++ b/de-en/train-00006-of-00017.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bfab7685abedc7d1cb405aafa02e860461e244e1ccf877612098db5578be4dec +size 255444757 diff --git a/de-en/train-00007-of-00017.parquet b/de-en/train-00007-of-00017.parquet new file mode 100644 index 0000000000000000000000000000000000000000..748ac02bc6ad7ff921f1a6feade56473ac3a03fd --- /dev/null +++ b/de-en/train-00007-of-00017.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:897d643c42fce65729e74fe7e86dc10476c4cf5dcd07fb92206dcdf679a54cb3 +size 288152538 diff --git a/de-en/train-00008-of-00017.parquet b/de-en/train-00008-of-00017.parquet new file mode 100644 index 0000000000000000000000000000000000000000..9ee9bab3c28608164dc6d9842dbb2d96481ec8e6 --- /dev/null +++ b/de-en/train-00008-of-00017.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec9a60aa073f4a5985e50d54a0211303857c7a1a887817c596c34ed24d18eade +size 198850903 diff --git 
a/de-en/train-00009-of-00017.parquet b/de-en/train-00009-of-00017.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e68a5876bf10817d25f1cefc7d765a90807b4abf --- /dev/null +++ b/de-en/train-00009-of-00017.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41f2c02fbe3195f6be9e7c49bc59dcf443c6d98c908a42ccd9845584b33a80af +size 237581500 diff --git a/de-en/train-00010-of-00017.parquet b/de-en/train-00010-of-00017.parquet new file mode 100644 index 0000000000000000000000000000000000000000..a6bc96c53c4aea31fdc650d3eab1c630710039e6 --- /dev/null +++ b/de-en/train-00010-of-00017.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e00498c2e783a5430f134110dae17b675cd3aca644aec247f5dae1d2f1b64c82 +size 205769970 diff --git a/de-en/train-00011-of-00017.parquet b/de-en/train-00011-of-00017.parquet new file mode 100644 index 0000000000000000000000000000000000000000..0abe126953e4d4d3372c8dd48d94334932d8acd1 --- /dev/null +++ b/de-en/train-00011-of-00017.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c901b7bf3e45645a6a300daf6d48ab3bc21376e1ae0fe1d9de896b37f4a3c06c +size 249167083 diff --git a/de-en/train-00012-of-00017.parquet b/de-en/train-00012-of-00017.parquet new file mode 100644 index 0000000000000000000000000000000000000000..79740f3b49a5a29482f6ba8b4e7d15ff27c2421d --- /dev/null +++ b/de-en/train-00012-of-00017.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:635677152b66af20b091730a166b59ad524d4ec6f8b27b86f003945f2eaa158e +size 252189411 diff --git a/de-en/train-00013-of-00017.parquet b/de-en/train-00013-of-00017.parquet new file mode 100644 index 0000000000000000000000000000000000000000..a312e69e65e0da59d52346ad172feb5f7203eecc --- /dev/null +++ b/de-en/train-00013-of-00017.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f7487483cfbe19a2e74ddb9b708c40707d868620d24be0283f7cdc64d9a41b0 +size 192040035 diff --git a/de-en/train-00014-of-00017.parquet b/de-en/train-00014-of-00017.parquet new file mode 100644 index 0000000000000000000000000000000000000000..4ebbdb3649e4d8cc28c000e15e6bbdf6d34b025e --- /dev/null +++ b/de-en/train-00014-of-00017.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d01c4e486194675e89e5eca02596bba6faa32ada895eeaec241cae6f08f1c0fe +size 243531244 diff --git a/de-en/train-00015-of-00017.parquet b/de-en/train-00015-of-00017.parquet new file mode 100644 index 0000000000000000000000000000000000000000..74b96054190fce48943520ce87848d5770d1b35b --- /dev/null +++ b/de-en/train-00015-of-00017.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a9b37c85d6f5153e0c8483c89a75194daf556b1a94af0f4c0d91dc8942453945 +size 367184979 diff --git a/de-en/train-00016-of-00017.parquet b/de-en/train-00016-of-00017.parquet new file mode 100644 index 0000000000000000000000000000000000000000..fd39ae7c1873b6eec2762c4e7f8addb43f3cb0c0 --- /dev/null +++ b/de-en/train-00016-of-00017.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:56fb0415d92721adf9ed4d6028c0063b77d8ce3c46fd31f88ae274486fb728bd +size 390687202 diff --git a/de-en/validation-00000-of-00001.parquet b/de-en/validation-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..3c9c3a94f4a55a64886fa29760828805811ead5f --- /dev/null +++ b/de-en/validation-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:6746a421a33ac6c77982a04bfd8b91e9ce218afe3be117ca6da0494712f70c78 +size 471099 diff --git a/et-en/test-00000-of-00001.parquet b/et-en/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..cbe63bbc7996f28154003a4870095d7f1635b75e --- /dev/null +++ b/et-en/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e05c2420d56cd35aac9ba003da8dfb6d175fa8325e6102120f615d76eda0e24 +size 323792 diff --git a/et-en/train-00000-of-00002.parquet b/et-en/train-00000-of-00002.parquet new file mode 100644 index 0000000000000000000000000000000000000000..79232864b68758e1fe0e22cf5a2796f3bbdeb614 --- /dev/null +++ b/et-en/train-00000-of-00002.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4a7417b972f63793303393ffc28ac43c488daa72662d4681de2dba8401d8e69e +size 145421172 diff --git a/et-en/train-00001-of-00002.parquet b/et-en/train-00001-of-00002.parquet new file mode 100644 index 0000000000000000000000000000000000000000..2d44d86f5f225dbbb9fbfe69c15fa0c41e02566b --- /dev/null +++ b/et-en/train-00001-of-00002.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:541da395cea632e8282130152e91ffac62d32f3daf5e9e4d3c8c8cc894dfaad8 +size 137876661 diff --git a/et-en/validation-00000-of-00001.parquet b/et-en/validation-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..0c6eabdebb3276ad238027392e338a9aeaa8e724 --- /dev/null +++ b/et-en/validation-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab47f9ce23278ea589534391062072219c3188d2b95c45c532e867722bbe83fe +size 309801 diff --git a/fi-en/test-00000-of-00001.parquet b/fi-en/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..75315b8d90200b686dde899bef84d2b88ac970d5 --- /dev/null +++ b/fi-en/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:24eadd768ec4f481fa16ce8e1469bd2a077432cf6201a417ead5f7ae7e1ea3f0 +size 444918 diff --git a/fi-en/train-00000-of-00002.parquet b/fi-en/train-00000-of-00002.parquet new file mode 100644 index 0000000000000000000000000000000000000000..89c90a1595773b36f33c935d81d4266b53993e54 --- /dev/null +++ b/fi-en/train-00000-of-00002.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a1599c2dfaceb0e9b1c2e8cc30022a424ecc1f4f7524a215a8abaa1a06c4abe5 +size 292754856 diff --git a/fi-en/train-00001-of-00002.parquet b/fi-en/train-00001-of-00002.parquet new file mode 100644 index 0000000000000000000000000000000000000000..9c10f3294365e02d8f55be5ff0277b15d13e15be --- /dev/null +++ b/fi-en/train-00001-of-00002.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:15fc4a983826522390277bac038c32650d5a27c35bb02adff6d69c6bb0bc442b +size 194620571 diff --git a/fi-en/validation-00000-of-00001.parquet b/fi-en/validation-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..0a0a534ffb5e36854548e2abc84fe1acbd67adf1 --- /dev/null +++ b/fi-en/validation-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e5ba5f7ff8be4871125e564e2c43ceba6d435f2db439aed8edb48cd4bf18c07a +size 888361 diff --git a/ru-en/test-00000-of-00001.parquet b/ru-en/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..b560a4d92cb33e6c71d4eedbea66922718423117 --- /dev/null +++ 
b/ru-en/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98e2e6c7646408aae7fe5ed1334181e1d585ec722d162793ceebc6220adb2cbd +size 610785 diff --git a/ru-en/train-00000-of-00028.parquet b/ru-en/train-00000-of-00028.parquet new file mode 100644 index 0000000000000000000000000000000000000000..54dde82078e3d31840261c650fb511aac4a594c0 --- /dev/null +++ b/ru-en/train-00000-of-00028.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d64f1d03baee6cf8228321535ba1993768c7aaba0fe83869100dfcd12a284c7 +size 133603323 diff --git a/ru-en/train-00001-of-00028.parquet b/ru-en/train-00001-of-00028.parquet new file mode 100644 index 0000000000000000000000000000000000000000..d2881b49bfebeb932e2bf44dcbd31b129ad7b765 --- /dev/null +++ b/ru-en/train-00001-of-00028.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:35095d3ffd3284d8d2f22cc4004bbade6479201bdce81a6c845b838c7f523d27 +size 147572714 diff --git a/ru-en/train-00002-of-00028.parquet b/ru-en/train-00002-of-00028.parquet new file mode 100644 index 0000000000000000000000000000000000000000..d67f682176bcc6c97de0a624e5537c0637737771 --- /dev/null +++ b/ru-en/train-00002-of-00028.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e5f5c98d42973ebbd8de5c1194b149f6e0e2cd36b117457d6b1f7537f6a6893c +size 123992553 diff --git a/ru-en/train-00003-of-00028.parquet b/ru-en/train-00003-of-00028.parquet new file mode 100644 index 0000000000000000000000000000000000000000..ccc7119377fc2aca994545e33dfd5a114b0b8e52 --- /dev/null +++ b/ru-en/train-00003-of-00028.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d14e32562d891d9c1f48fa9468747ff583bb74c0bd482f109d338bfcbbbdea0 +size 165357467 diff --git a/ru-en/train-00004-of-00028.parquet b/ru-en/train-00004-of-00028.parquet new file mode 100644 index 0000000000000000000000000000000000000000..0a7255305bfc4d919b071f7206c084fb8919e6f0 --- /dev/null +++ b/ru-en/train-00004-of-00028.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bbc5ec78133e2b26b0589aa84e3570323a25f9bd5c3dd34bd2f336a3c3a9c234 +size 159761716 diff --git a/ru-en/train-00005-of-00028.parquet b/ru-en/train-00005-of-00028.parquet new file mode 100644 index 0000000000000000000000000000000000000000..d6fa3af968ecaa00b490ece80e06b411432b363e --- /dev/null +++ b/ru-en/train-00005-of-00028.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:99ea468fade44f9e32b951ff3e973de7dcce6c328e37e9e02c770d523a02c54f +size 146569638 diff --git a/ru-en/train-00006-of-00028.parquet b/ru-en/train-00006-of-00028.parquet new file mode 100644 index 0000000000000000000000000000000000000000..713fa01321a3dc78ee78c8c0a1240202fc690463 --- /dev/null +++ b/ru-en/train-00006-of-00028.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:002a2e3edfffe7e673095e59ff4b1c930464555a2e42e06030ef4da45992a817 +size 126643611 diff --git a/ru-en/train-00007-of-00028.parquet b/ru-en/train-00007-of-00028.parquet new file mode 100644 index 0000000000000000000000000000000000000000..63fdd8df490da3b7aa80d81ae70d8abf0eee0a3f --- /dev/null +++ b/ru-en/train-00007-of-00028.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc24182bc25b1ce697ac307057c57f58a39efa3f7f7fdeab52d12ec993d28588 +size 129885805 diff --git a/ru-en/train-00008-of-00028.parquet b/ru-en/train-00008-of-00028.parquet new file mode 100644 index 
0000000000000000000000000000000000000000..c1d6c909a665a7b903c5b7209b461335cbf2b6c3 --- /dev/null +++ b/ru-en/train-00008-of-00028.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:633f11c9f33f908e63c6efaba90afc720c7704c237c7c2046637ca614771eab8 +size 118785246 diff --git a/ru-en/train-00009-of-00028.parquet b/ru-en/train-00009-of-00028.parquet new file mode 100644 index 0000000000000000000000000000000000000000..486e4369e43c4777dd9b26e6489760b0ea0bab56 --- /dev/null +++ b/ru-en/train-00009-of-00028.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:06e3218e7126da467ecb3248eeef6ec6f0277d62faf00961308e99e9a4aba1a7 +size 232575890 diff --git a/ru-en/train-00010-of-00028.parquet b/ru-en/train-00010-of-00028.parquet new file mode 100644 index 0000000000000000000000000000000000000000..6a5795365c7807ead10d349c6d1db3ba8f6bb882 --- /dev/null +++ b/ru-en/train-00010-of-00028.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d1bb4b258331d4be3fe5591c741acb92e37b84ad906f6cbd249994df30d6113c +size 187753961 diff --git a/ru-en/train-00011-of-00028.parquet b/ru-en/train-00011-of-00028.parquet new file mode 100644 index 0000000000000000000000000000000000000000..51daa9eb6920e74a5452d000fa67144b10542ce8 --- /dev/null +++ b/ru-en/train-00011-of-00028.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3c8856e47f810a199ccd25e0cc6a12abaea4ffd6d64fe2786a5e346f81007b60 +size 258877053 diff --git a/ru-en/train-00012-of-00028.parquet b/ru-en/train-00012-of-00028.parquet new file mode 100644 index 0000000000000000000000000000000000000000..5d0e23931a55c9c7df7d53e9d978e89fef6bb6f7 --- /dev/null +++ b/ru-en/train-00012-of-00028.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2fd5294588be93d0d8f8b288e156f44cbb3bcda05a5600262790049b4f0aa878 +size 253735796 diff --git a/ru-en/train-00013-of-00028.parquet b/ru-en/train-00013-of-00028.parquet new file mode 100644 index 0000000000000000000000000000000000000000..675778714703e436c975776faafa5c3f69362817 --- /dev/null +++ b/ru-en/train-00013-of-00028.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e024233fcd444c675929905354421c6601c453f01c7d0df61a8374468ce106ac +size 265661019 diff --git a/ru-en/train-00014-of-00028.parquet b/ru-en/train-00014-of-00028.parquet new file mode 100644 index 0000000000000000000000000000000000000000..27496b49843964277c40b8097ec31c781953774e --- /dev/null +++ b/ru-en/train-00014-of-00028.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:deebe793436a76d624fe6a7c4a1c02b2aafd54256b6a91d260cab6c862c69038 +size 250329563 diff --git a/ru-en/train-00015-of-00028.parquet b/ru-en/train-00015-of-00028.parquet new file mode 100644 index 0000000000000000000000000000000000000000..f979cd70c9700588b8dbcd74c45fcf325976d14a --- /dev/null +++ b/ru-en/train-00015-of-00028.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f33e41bb265ebb1a2570cb5ae14547588002531c4a82a050beb40ebca8edca5d +size 260835702 diff --git a/ru-en/train-00016-of-00028.parquet b/ru-en/train-00016-of-00028.parquet new file mode 100644 index 0000000000000000000000000000000000000000..bba6747b8a6eb8e12f71a0a23d91dbabaad408e2 --- /dev/null +++ b/ru-en/train-00016-of-00028.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd332e0f3fb37eff27c6d1c6464072f9da6987ff8d094d33ccea6b011ca10efb +size 254907628 diff --git 
a/ru-en/train-00017-of-00028.parquet b/ru-en/train-00017-of-00028.parquet new file mode 100644 index 0000000000000000000000000000000000000000..f537c0c861e30628b6333e3113132c1e5ea83ff7 --- /dev/null +++ b/ru-en/train-00017-of-00028.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d130c4e2a64e54b73da1e9484a572a2b47401e496e7a183497d33fcf9b8d3689 +size 254691448 diff --git a/ru-en/train-00018-of-00028.parquet b/ru-en/train-00018-of-00028.parquet new file mode 100644 index 0000000000000000000000000000000000000000..540e013b18f177e0234af7f8a22ff27f98665e11 --- /dev/null +++ b/ru-en/train-00018-of-00028.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a80bc34de3f288d5938a49457ad1d3324fbcf29d6f191f04f6f8ab93fce98032 +size 265382233 diff --git a/ru-en/train-00019-of-00028.parquet b/ru-en/train-00019-of-00028.parquet new file mode 100644 index 0000000000000000000000000000000000000000..84cdda2efdf9226b8d260befc9cb3d5ac7cf42d5 --- /dev/null +++ b/ru-en/train-00019-of-00028.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6f747c8ca2a969203f219ba92db76bfc721ad4fc1427d4631671ded8607cd07e +size 260475202 diff --git a/ru-en/train-00020-of-00028.parquet b/ru-en/train-00020-of-00028.parquet new file mode 100644 index 0000000000000000000000000000000000000000..0a7b30e2ae210ae4d21f6ca9a4cce82f4285c1c8 --- /dev/null +++ b/ru-en/train-00020-of-00028.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:156c689add81cda93a415ec1a62cc062f26a243b089e0f09ff149232e234283f +size 265758895 diff --git a/ru-en/train-00021-of-00028.parquet b/ru-en/train-00021-of-00028.parquet new file mode 100644 index 0000000000000000000000000000000000000000..91582ff78104dce75d11a871a2f29699314a57b7 --- /dev/null +++ b/ru-en/train-00021-of-00028.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:72e6a7bf9008e22ec579eda8b144a1a17e0b3c813d315efb6186e026efc29590 +size 265024278 diff --git a/ru-en/train-00022-of-00028.parquet b/ru-en/train-00022-of-00028.parquet new file mode 100644 index 0000000000000000000000000000000000000000..bd5cc62e6a1754b030b89523ab6f11c2d7769d28 --- /dev/null +++ b/ru-en/train-00022-of-00028.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3aca8921d6564aea07ca181554e364d7f02eeb62e16cfcf326f4843a29c3b8be +size 265198381 diff --git a/ru-en/train-00023-of-00028.parquet b/ru-en/train-00023-of-00028.parquet new file mode 100644 index 0000000000000000000000000000000000000000..78c8f35aa77f07260b753c604920991a92f2e04d --- /dev/null +++ b/ru-en/train-00023-of-00028.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:217b20b5ec4d0b89e9fd489dff34466aaba6874340498701daed1b3de4c76122 +size 268995464 diff --git a/ru-en/train-00024-of-00028.parquet b/ru-en/train-00024-of-00028.parquet new file mode 100644 index 0000000000000000000000000000000000000000..4dd473078946b5360f1e885f0f7d885e38070a73 --- /dev/null +++ b/ru-en/train-00024-of-00028.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e05a91169bc027a82fd20c81b4e71cde4e7484d65c063de452bc6733cbb785e1 +size 263444514 diff --git a/ru-en/train-00025-of-00028.parquet b/ru-en/train-00025-of-00028.parquet new file mode 100644 index 0000000000000000000000000000000000000000..0b6bb5646c65d4c9d03e8483cd0b9ca6d6e7a25a --- /dev/null +++ b/ru-en/train-00025-of-00028.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:85da54373f8ff316286d36742d3c24024b63adfbb566fb31d695fcd259f46360 +size 265203934 diff --git a/ru-en/train-00026-of-00028.parquet b/ru-en/train-00026-of-00028.parquet new file mode 100644 index 0000000000000000000000000000000000000000..2606f06eb7c61def595c596565d4669298878c97 --- /dev/null +++ b/ru-en/train-00026-of-00028.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e49c53177f08bfa7eb9d11dbdabba4764088fc1a28dec96f3bda8f423105d093 +size 269895546 diff --git a/ru-en/train-00027-of-00028.parquet b/ru-en/train-00027-of-00028.parquet new file mode 100644 index 0000000000000000000000000000000000000000..78f9859d4d5ef1b684b180a82879bb4fdb4b887e --- /dev/null +++ b/ru-en/train-00027-of-00028.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9fd665e9590ab5aae9ba6c32685697651326f958f6bf3800854afaf912ad795b +size 268628206 diff --git a/ru-en/validation-00000-of-00001.parquet b/ru-en/validation-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e7b8a21dfc302e116e872807f843bfea50df873e --- /dev/null +++ b/ru-en/validation-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1e9980c73410ccdce609d853d59c0d81fce32d03d39989e3152b76f0dc8973d9 +size 586562 diff --git a/tr-en/test-00000-of-00001.parquet b/tr-en/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..8b3e43468962ee2a728f9026a450a7e683c6c4a5 --- /dev/null +++ b/tr-en/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9034c271562106ee1bafb0175bbe1efbc505fe2ce55d5125cfefcb7e865287e5 +size 493398 diff --git a/tr-en/train-00000-of-00001.parquet b/tr-en/train-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..8462f9d94013d1c501904006a91a8c7ca3d21c6a --- /dev/null +++ b/tr-en/train-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0195fe43ea5ed06b7d243112459a021f33780351543d853b0d6420ab19759def +size 36771180 diff --git a/tr-en/validation-00000-of-00001.parquet b/tr-en/validation-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..584d8cb99a8352195264a18816d214c44e08a16b --- /dev/null +++ b/tr-en/validation-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:862775d5af514c4391042a3ea121a094ad0f7c83b0a9eaed1028d36fff3e78c9 +size 469266 diff --git a/wmt18.py b/wmt18.py deleted file mode 100644 index c97a0bbc55e6470f6d44e884308dbabeaad674cb..0000000000000000000000000000000000000000 --- a/wmt18.py +++ /dev/null @@ -1,86 +0,0 @@ -# coding=utf-8 -# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# Lint as: python3 -"""WMT18: Translate dataset.""" - -import datasets - -from .wmt_utils import CWMT_SUBSET_NAMES, Wmt, WmtConfig - - -_URL = "http://www.statmt.org/wmt18/translation-task.html" -_CITATION = """\ -@InProceedings{bojar-EtAl:2018:WMT1, - author = {Bojar, Ond\v{r}ej and Federmann, Christian and Fishel, Mark - and Graham, Yvette and Haddow, Barry and Huck, Matthias and - Koehn, Philipp and Monz, Christof}, - title = {Findings of the 2018 Conference on Machine Translation (WMT18)}, - booktitle = {Proceedings of the Third Conference on Machine Translation, - Volume 2: Shared Task Papers}, - month = {October}, - year = {2018}, - address = {Belgium, Brussels}, - publisher = {Association for Computational Linguistics}, - pages = {272--307}, - url = {http://www.aclweb.org/anthology/W18-6401} -} -""" - -_LANGUAGE_PAIRS = [(lang, "en") for lang in ["cs", "de", "et", "fi", "kk", "ru", "tr", "zh"]] - - -class Wmt18(Wmt): - """WMT 18 translation datasets for all {xx, "en"} language pairs.""" - - # Version history: - # 1.0.0: S3 (new shuffling, sharding and slicing mechanism). - BUILDER_CONFIGS = [ - WmtConfig( # pylint:disable=g-complex-comprehension - description="WMT 2018 %s-%s translation task dataset." % (l1, l2), - url=_URL, - citation=_CITATION, - language_pair=(l1, l2), - version=datasets.Version("1.0.0"), - ) - for l1, l2 in _LANGUAGE_PAIRS - ] - - @property - def manual_download_instructions(self): - if self.config.language_pair[1] in ["cs", "hi", "ru"]: - return "Please download the data manually as explained. TODO(PVP)" - - @property - def _subsets(self): - return { - datasets.Split.TRAIN: [ - "europarl_v7", - "europarl_v8_18", - "paracrawl_v1", - "commoncrawl", - "newscommentary_v13", - "czeng_17", - "yandexcorpus", - "wikiheadlines_fi", - "wikiheadlines_ru", - "setimes_2", - "uncorpus_v1", - "rapid_2016", - ] - + CWMT_SUBSET_NAMES, - datasets.Split.VALIDATION: ["newsdev2018", "newstest2017", "newstestB2017"], - datasets.Split.TEST: ["newstest2018"], - } diff --git a/wmt_utils.py b/wmt_utils.py deleted file mode 100644 index 3f5049758ae723f1a9441fec40232384c2e7f5da..0000000000000000000000000000000000000000 --- a/wmt_utils.py +++ /dev/null @@ -1,1025 +0,0 @@ -# coding=utf-8 -# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Lint as: python3 -"""WMT: Translate dataset.""" - - -import codecs -import functools -import glob -import gzip -import itertools -import os -import re -import xml.etree.cElementTree as ElementTree - -import datasets - - -logger = datasets.logging.get_logger(__name__) - - -_DESCRIPTION = """\ -Translation dataset based on the data from statmt.org. - -Versions exist for different years using a combination of data -sources. The base `wmt` allows you to create a custom dataset by choosing -your own data/language pair. 
This can be done as follows: - -```python -from datasets import inspect_dataset, load_dataset_builder - -inspect_dataset("wmt18", "path/to/scripts") -builder = load_dataset_builder( - "path/to/scripts/wmt_utils.py", - language_pair=("fr", "de"), - subsets={ - datasets.Split.TRAIN: ["commoncrawl_frde"], - datasets.Split.VALIDATION: ["euelections_dev2019"], - }, -) - -# Standard version -builder.download_and_prepare() -ds = builder.as_dataset() - -# Streamable version -ds = builder.as_streaming_dataset() -``` - -""" - - -CWMT_SUBSET_NAMES = ["casia2015", "casict2011", "casict2015", "datum2015", "datum2017", "neu2017"] - - -class SubDataset: - """Class to keep track of information on a sub-dataset of WMT.""" - - def __init__(self, name, target, sources, url, path, manual_dl_files=None): - """Sub-dataset of WMT. - - Args: - name: `string`, a unique dataset identifier. - target: `string`, the target language code. - sources: `set`, the set of source language codes. - url: `string` or `(string, string)`, URL(s) or URL template(s) specifying - where to download the raw data from. If two strings are provided, the - first is used for the source language and the second for the target. - Template strings can either contain '{src}' placeholders that will be - filled in with the source language code, '{0}' and '{1}' placeholders - that will be filled in with the source and target language codes in - alphabetical order, or all 3. - path: `string` or `(string, string)`, path(s) or path template(s) - specifying the path to the raw data relative to the root of the - downloaded archive. If two strings are provided, the dataset is assumed - to be made up of parallel text files, the first being the source and the - second the target. If one string is provided, both languages are assumed - to be stored within the same file and the extension is used to determine - how to parse it. Template strings should be formatted the same as in - `url`. - manual_dl_files: `(string)` (optional), the list of files that must - be manually downloaded to the data directory. - """ - self._paths = (path,) if isinstance(path, str) else path - self._urls = (url,) if isinstance(url, str) else url - self._manual_dl_files = manual_dl_files if manual_dl_files else [] - self.name = name - self.target = target - self.sources = set(sources) - - def _inject_language(self, src, strings): - """Injects languages into (potentially) template strings.""" - if src not in self.sources: - raise ValueError(f"Invalid source for '{self.name}': {src}") - - def _format_string(s): - if "{0}" in s and "{1}" in s and "{src}" in s: - return s.format(*sorted([src, self.target]), src=src) - elif "{0}" in s and "{1}" in s: - return s.format(*sorted([src, self.target])) - elif "{src}" in s: - return s.format(src=src) - else: - return s - - return [_format_string(s) for s in strings] - - def get_url(self, src): - return self._inject_language(src, self._urls) - - def get_manual_dl_files(self, src): - return self._inject_language(src, self._manual_dl_files) - - def get_path(self, src): - return self._inject_language(src, self._paths) - - -# Subsets used in the training sets for various years of WMT. 
-_TRAIN_SUBSETS = [ - # pylint:disable=line-too-long - SubDataset( - name="commoncrawl", - target="en", # fr-de pair in commoncrawl_frde - sources={"cs", "de", "es", "fr", "ru"}, - url="https://huggingface.co/datasets/wmt/wmt13/resolve/main-zip/training-parallel-commoncrawl.zip", - path=("commoncrawl.{src}-en.{src}", "commoncrawl.{src}-en.en"), - ), - SubDataset( - name="commoncrawl_frde", - target="de", - sources={"fr"}, - url=( - "https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/fr-de/bitexts/commoncrawl.fr.gz", - "https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/fr-de/bitexts/commoncrawl.de.gz", - ), - path=("", ""), - ), - SubDataset( - name="czeng_10", - target="en", - sources={"cs"}, - url="http://ufal.mff.cuni.cz/czeng/czeng10", - manual_dl_files=["data-plaintext-format.%d.tar" % i for i in range(10)], - # Each tar contains multiple files, which we process specially in - # _parse_czeng. - path=("data.plaintext-format/??train.gz",) * 10, - ), - SubDataset( - name="czeng_16pre", - target="en", - sources={"cs"}, - url="http://ufal.mff.cuni.cz/czeng/czeng16pre", - manual_dl_files=["czeng16pre.deduped-ignoring-sections.txt.gz"], - path="", - ), - SubDataset( - name="czeng_16", - target="en", - sources={"cs"}, - url="http://ufal.mff.cuni.cz/czeng", - manual_dl_files=["data-plaintext-format.%d.tar" % i for i in range(10)], - # Each tar contains multiple files, which we process specially in - # _parse_czeng. - path=("data.plaintext-format/??train.gz",) * 10, - ), - SubDataset( - # This dataset differs from the above in the filtering that is applied - # during parsing. - name="czeng_17", - target="en", - sources={"cs"}, - url="http://ufal.mff.cuni.cz/czeng", - manual_dl_files=["data-plaintext-format.%d.tar" % i for i in range(10)], - # Each tar contains multiple files, which we process specially in - # _parse_czeng. 
- path=("data.plaintext-format/??train.gz",) * 10, - ), - SubDataset( - name="dcep_v1", - target="en", - sources={"lv"}, - url="https://huggingface.co/datasets/wmt/wmt17/resolve/main-zip/translation-task/dcep.lv-en.v1.zip", - path=("dcep.en-lv/dcep.lv", "dcep.en-lv/dcep.en"), - ), - SubDataset( - name="europarl_v7", - target="en", - sources={"cs", "de", "es", "fr"}, - url="https://huggingface.co/datasets/wmt/wmt13/resolve/main-zip/training-parallel-europarl-v7.zip", - path=("training/europarl-v7.{src}-en.{src}", "training/europarl-v7.{src}-en.en"), - ), - SubDataset( - name="europarl_v7_frde", - target="de", - sources={"fr"}, - url=( - "https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/fr-de/bitexts/europarl-v7.fr.gz", - "https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/fr-de/bitexts/europarl-v7.de.gz", - ), - path=("", ""), - ), - SubDataset( - name="europarl_v8_18", - target="en", - sources={"et", "fi"}, - url="https://huggingface.co/datasets/wmt/wmt18/resolve/main-zip/translation-task/training-parallel-ep-v8.zip", - path=("training/europarl-v8.{src}-en.{src}", "training/europarl-v8.{src}-en.en"), - ), - SubDataset( - name="europarl_v8_16", - target="en", - sources={"fi", "ro"}, - url="https://huggingface.co/datasets/wmt/wmt16/resolve/main-zip/translation-task/training-parallel-ep-v8.zip", - path=("training-parallel-ep-v8/europarl-v8.{src}-en.{src}", "training-parallel-ep-v8/europarl-v8.{src}-en.en"), - ), - SubDataset( - name="europarl_v9", - target="en", - sources={"cs", "de", "fi", "lt"}, - url="https://huggingface.co/datasets/wmt/europarl/resolve/main/v9/training/europarl-v9.{src}-en.tsv.gz", - path="", - ), - SubDataset( - name="gigafren", - target="en", - sources={"fr"}, - url="https://huggingface.co/datasets/wmt/wmt10/resolve/main-zip/training-giga-fren.zip", - path=("giga-fren.release2.fixed.fr.gz", "giga-fren.release2.fixed.en.gz"), - ), - SubDataset( - name="hindencorp_01", - target="en", - sources={"hi"}, - url="http://ufallab.ms.mff.cuni.cz/~bojar/hindencorp", - manual_dl_files=["hindencorp0.1.gz"], - path="", - ), - SubDataset( - name="leta_v1", - target="en", - sources={"lv"}, - url="https://huggingface.co/datasets/wmt/wmt17/resolve/main-zip/translation-task/leta.v1.zip", - path=("LETA-lv-en/leta.lv", "LETA-lv-en/leta.en"), - ), - SubDataset( - name="multiun", - target="en", - sources={"es", "fr"}, - url="https://huggingface.co/datasets/wmt/wmt13/resolve/main-zip/training-parallel-un.zip", - path=("un/undoc.2000.{src}-en.{src}", "un/undoc.2000.{src}-en.en"), - ), - SubDataset( - name="newscommentary_v9", - target="en", - sources={"cs", "de", "fr", "ru"}, - url="https://huggingface.co/datasets/wmt/wmt14/resolve/main-zip/training-parallel-nc-v9.zip", - path=("training/news-commentary-v9.{src}-en.{src}", "training/news-commentary-v9.{src}-en.en"), - ), - SubDataset( - name="newscommentary_v10", - target="en", - sources={"cs", "de", "fr", "ru"}, - url="https://huggingface.co/datasets/wmt/wmt15/resolve/main-zip/training-parallel-nc-v10.zip", - path=("news-commentary-v10.{src}-en.{src}", "news-commentary-v10.{src}-en.en"), - ), - SubDataset( - name="newscommentary_v11", - target="en", - sources={"cs", "de", "ru"}, - url="https://huggingface.co/datasets/wmt/wmt16/resolve/main-zip/translation-task/training-parallel-nc-v11.zip", - path=( - "training-parallel-nc-v11/news-commentary-v11.{src}-en.{src}", - "training-parallel-nc-v11/news-commentary-v11.{src}-en.en", - ), - ), - SubDataset( - name="newscommentary_v12", - target="en", - 
sources={"cs", "de", "ru", "zh"}, - url="https://huggingface.co/datasets/wmt/wmt17/resolve/main-zip/translation-task/training-parallel-nc-v12.zip", - path=("training/news-commentary-v12.{src}-en.{src}", "training/news-commentary-v12.{src}-en.en"), - ), - SubDataset( - name="newscommentary_v13", - target="en", - sources={"cs", "de", "ru", "zh"}, - url="https://huggingface.co/datasets/wmt/wmt18/resolve/main-zip/translation-task/training-parallel-nc-v13.zip", - path=( - "training-parallel-nc-v13/news-commentary-v13.{src}-en.{src}", - "training-parallel-nc-v13/news-commentary-v13.{src}-en.en", - ), - ), - SubDataset( - name="newscommentary_v14", - target="en", # fr-de pair in newscommentary_v14_frde - sources={"cs", "de", "kk", "ru", "zh"}, - url="http://data.statmt.org/news-commentary/v14/training/news-commentary-v14.{0}-{1}.tsv.gz", - path="", - ), - SubDataset( - name="newscommentary_v14_frde", - target="de", - sources={"fr"}, - url="http://data.statmt.org/news-commentary/v14/training/news-commentary-v14.de-fr.tsv.gz", - path="", - ), - SubDataset( - name="onlinebooks_v1", - target="en", - sources={"lv"}, - url="https://huggingface.co/datasets/wmt/wmt17/resolve/main-zip/translation-task/books.lv-en.v1.zip", - path=("farewell/farewell.lv", "farewell/farewell.en"), - ), - SubDataset( - name="paracrawl_v1", - target="en", - sources={"cs", "de", "et", "fi", "ru"}, - url="https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-{src}.zipporah0-dedup-clean.tgz", # TODO(QL): use gzip for streaming - path=( - "paracrawl-release1.en-{src}.zipporah0-dedup-clean.{src}", - "paracrawl-release1.en-{src}.zipporah0-dedup-clean.en", - ), - ), - SubDataset( - name="paracrawl_v1_ru", - target="en", - sources={"ru"}, - url="https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-ru.zipporah0-dedup-clean.tgz", # TODO(QL): use gzip for streaming - path=( - "paracrawl-release1.en-ru.zipporah0-dedup-clean.ru", - "paracrawl-release1.en-ru.zipporah0-dedup-clean.en", - ), - ), - SubDataset( - name="paracrawl_v3", - target="en", # fr-de pair in paracrawl_v3_frde - sources={"cs", "de", "fi", "lt"}, - url="https://s3.amazonaws.com/web-language-models/paracrawl/release3/en-{src}.bicleaner07.tmx.gz", - path="", - ), - SubDataset( - name="paracrawl_v3_frde", - target="de", - sources={"fr"}, - url=( - "https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/fr-de/bitexts/de-fr.bicleaner07.de.gz", - "https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/fr-de/bitexts/de-fr.bicleaner07.fr.gz", - ), - path=("", ""), - ), - SubDataset( - name="rapid_2016", - target="en", - sources={"de", "et", "fi"}, - url="https://huggingface.co/datasets/wmt/wmt18/resolve/main-zip/translation-task/rapid2016.zip", - path=("rapid2016.{0}-{1}.{src}", "rapid2016.{0}-{1}.en"), - ), - SubDataset( - name="rapid_2016_ltfi", - target="en", - sources={"fi", "lt"}, - url="https://tilde-model.s3-eu-west-1.amazonaws.com/rapid2016.en-{src}.tmx.zip", - path="rapid2016.en-{src}.tmx", - ), - SubDataset( - name="rapid_2019", - target="en", - sources={"de"}, - url="https://s3-eu-west-1.amazonaws.com/tilde-model/rapid2019.de-en.zip", - path=("rapid2019.de-en.de", "rapid2019.de-en.en"), - ), - SubDataset( - name="setimes_2", - target="en", - sources={"ro", "tr"}, - url="https://object.pouta.csc.fi/OPUS-SETIMES/v2/tmx/en-{src}.tmx.gz", - path="", - ), - SubDataset( - name="uncorpus_v1", - target="en", - sources={"ru", "zh"}, - 
url="https://huggingface.co/datasets/wmt/uncorpus/resolve/main-zip/UNv1.0.en-{src}.zip", - path=("en-{src}/UNv1.0.en-{src}.{src}", "en-{src}/UNv1.0.en-{src}.en"), - ), - SubDataset( - name="wikiheadlines_fi", - target="en", - sources={"fi"}, - url="https://huggingface.co/datasets/wmt/wmt15/resolve/main-zip/wiki-titles.zip", - path="wiki/fi-en/titles.fi-en", - ), - SubDataset( - name="wikiheadlines_hi", - target="en", - sources={"hi"}, - url="https://huggingface.co/datasets/wmt/wmt14/resolve/main-zip/wiki-titles.zip", - path="wiki/hi-en/wiki-titles.hi-en", - ), - SubDataset( - # Verified that wmt14 and wmt15 files are identical. - name="wikiheadlines_ru", - target="en", - sources={"ru"}, - url="https://huggingface.co/datasets/wmt/wmt15/resolve/main-zip/wiki-titles.zip", - path="wiki/ru-en/wiki.ru-en", - ), - SubDataset( - name="wikititles_v1", - target="en", - sources={"cs", "de", "fi", "gu", "kk", "lt", "ru", "zh"}, - url="https://huggingface.co/datasets/wmt/wikititles/resolve/main/v1/wikititles-v1.{src}-en.tsv.gz", - path="", - ), - SubDataset( - name="yandexcorpus", - target="en", - sources={"ru"}, - url="https://translate.yandex.ru/corpus?lang=en", - manual_dl_files=["1mcorpus.zip"], - path=("corpus.en_ru.1m.ru", "corpus.en_ru.1m.en"), - ), - # pylint:enable=line-too-long -] + [ - SubDataset( # pylint:disable=g-complex-comprehension - name=ss, - target="en", - sources={"zh"}, - url="https://huggingface.co/datasets/wmt/wmt18/resolve/main-zip/cwmt-wmt/%s.zip" % ss, - path=("%s/*_c[hn].txt" % ss, "%s/*_en.txt" % ss), - ) - for ss in CWMT_SUBSET_NAMES -] - -_DEV_SUBSETS = [ - SubDataset( - name="euelections_dev2019", - target="de", - sources={"fr"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/euelections_dev2019.fr-de.src.fr", "dev/euelections_dev2019.fr-de.tgt.de"), - ), - SubDataset( - name="newsdev2014", - target="en", - sources={"hi"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newsdev2014.hi", "dev/newsdev2014.en"), - ), - SubDataset( - name="newsdev2015", - target="en", - sources={"fi"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newsdev2015-fien-src.{src}.sgm", "dev/newsdev2015-fien-ref.en.sgm"), - ), - SubDataset( - name="newsdiscussdev2015", - target="en", - sources={"ro", "tr"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newsdiscussdev2015-{src}en-src.{src}.sgm", "dev/newsdiscussdev2015-{src}en-ref.en.sgm"), - ), - SubDataset( - name="newsdev2016", - target="en", - sources={"ro", "tr"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newsdev2016-{src}en-src.{src}.sgm", "dev/newsdev2016-{src}en-ref.en.sgm"), - ), - SubDataset( - name="newsdev2017", - target="en", - sources={"lv", "zh"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newsdev2017-{src}en-src.{src}.sgm", "dev/newsdev2017-{src}en-ref.en.sgm"), - ), - SubDataset( - name="newsdev2018", - target="en", - sources={"et"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newsdev2018-{src}en-src.{src}.sgm", "dev/newsdev2018-{src}en-ref.en.sgm"), - ), - SubDataset( - name="newsdev2019", - target="en", - sources={"gu", "kk", "lt"}, - 
url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newsdev2019-{src}en-src.{src}.sgm", "dev/newsdev2019-{src}en-ref.en.sgm"), - ), - SubDataset( - name="newsdiscussdev2015", - target="en", - sources={"fr"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newsdiscussdev2015-{src}en-src.{src}.sgm", "dev/newsdiscussdev2015-{src}en-ref.en.sgm"), - ), - SubDataset( - name="newsdiscusstest2015", - target="en", - sources={"fr"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newsdiscusstest2015-{src}en-src.{src}.sgm", "dev/newsdiscusstest2015-{src}en-ref.en.sgm"), - ), - SubDataset( - name="newssyscomb2009", - target="en", - sources={"cs", "de", "es", "fr"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newssyscomb2009.{src}", "dev/newssyscomb2009.en"), - ), - SubDataset( - name="newstest2008", - target="en", - sources={"cs", "de", "es", "fr", "hu"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/news-test2008.{src}", "dev/news-test2008.en"), - ), - SubDataset( - name="newstest2009", - target="en", - sources={"cs", "de", "es", "fr"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newstest2009.{src}", "dev/newstest2009.en"), - ), - SubDataset( - name="newstest2010", - target="en", - sources={"cs", "de", "es", "fr"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newstest2010.{src}", "dev/newstest2010.en"), - ), - SubDataset( - name="newstest2011", - target="en", - sources={"cs", "de", "es", "fr"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newstest2011.{src}", "dev/newstest2011.en"), - ), - SubDataset( - name="newstest2012", - target="en", - sources={"cs", "de", "es", "fr", "ru"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newstest2012.{src}", "dev/newstest2012.en"), - ), - SubDataset( - name="newstest2013", - target="en", - sources={"cs", "de", "es", "fr", "ru"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newstest2013.{src}", "dev/newstest2013.en"), - ), - SubDataset( - name="newstest2014", - target="en", - sources={"cs", "de", "es", "fr", "hi", "ru"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newstest2014-{src}en-src.{src}.sgm", "dev/newstest2014-{src}en-ref.en.sgm"), - ), - SubDataset( - name="newstest2015", - target="en", - sources={"cs", "de", "fi", "ru"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newstest2015-{src}en-src.{src}.sgm", "dev/newstest2015-{src}en-ref.en.sgm"), - ), - SubDataset( - name="newsdiscusstest2015", - target="en", - sources={"fr"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newsdiscusstest2015-{src}en-src.{src}.sgm", "dev/newsdiscusstest2015-{src}en-ref.en.sgm"), - ), - SubDataset( - name="newstest2016", - target="en", - sources={"cs", "de", "fi", "ro", "ru", "tr"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - 
path=("dev/newstest2016-{src}en-src.{src}.sgm", "dev/newstest2016-{src}en-ref.en.sgm"), - ), - SubDataset( - name="newstestB2016", - target="en", - sources={"fi"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newstestB2016-enfi-ref.{src}.sgm", "dev/newstestB2016-enfi-src.en.sgm"), - ), - SubDataset( - name="newstest2017", - target="en", - sources={"cs", "de", "fi", "lv", "ru", "tr", "zh"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newstest2017-{src}en-src.{src}.sgm", "dev/newstest2017-{src}en-ref.en.sgm"), - ), - SubDataset( - name="newstestB2017", - target="en", - sources={"fi"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newstestB2017-fien-src.fi.sgm", "dev/newstestB2017-fien-ref.en.sgm"), - ), - SubDataset( - name="newstest2018", - target="en", - sources={"cs", "de", "et", "fi", "ru", "tr", "zh"}, - url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip", - path=("dev/newstest2018-{src}en-src.{src}.sgm", "dev/newstest2018-{src}en-ref.en.sgm"), - ), -] - -DATASET_MAP = {dataset.name: dataset for dataset in _TRAIN_SUBSETS + _DEV_SUBSETS} - -_CZENG17_FILTER = SubDataset( - name="czeng17_filter", - target="en", - sources={"cs"}, - url="http://ufal.mff.cuni.cz/czeng/download.php?f=convert_czeng16_to_17.pl.zip", - path="convert_czeng16_to_17.pl", -) - - -class WmtConfig(datasets.BuilderConfig): - """BuilderConfig for WMT.""" - - def __init__(self, url=None, citation=None, description=None, language_pair=(None, None), subsets=None, **kwargs): - """BuilderConfig for WMT. - - Args: - url: The reference URL for the dataset. - citation: The paper citation for the dataset. - description: The description of the dataset. - language_pair: pair of languages that will be used for translation. Should - contain 2 letter coded strings. For example: ("en", "de"). - configuration for the `datasets.features.text.TextEncoder` used for the - `datasets.features.text.Translation` features. - subsets: Dict[split, list[str]]. List of the subset to use for each of the - split. Note that WMT subclasses overwrite this parameter. - **kwargs: keyword arguments forwarded to super. - """ - name = "%s-%s" % (language_pair[0], language_pair[1]) - if "name" in kwargs: # Add name suffix for custom configs - name += "." 
+ kwargs.pop("name") - - super(WmtConfig, self).__init__(name=name, description=description, **kwargs) - - self.url = url or "http://www.statmt.org" - self.citation = citation - self.language_pair = language_pair - self.subsets = subsets - - # TODO(PVP): remove when manual dir works - # +++++++++++++++++++++ - if language_pair[1] in ["cs", "hi", "ru"]: - assert NotImplementedError(f"The dataset for {language_pair[1]}-en is currently not fully supported.") - # +++++++++++++++++++++ - - -class Wmt(datasets.GeneratorBasedBuilder): - """WMT translation dataset.""" - - BUILDER_CONFIG_CLASS = WmtConfig - - def __init__(self, *args, **kwargs): - super(Wmt, self).__init__(*args, **kwargs) - - @property - def _subsets(self): - """Subsets that make up each split of the dataset.""" - raise NotImplementedError("This is a abstract method") - - @property - def subsets(self): - """Subsets that make up each split of the dataset for the language pair.""" - source, target = self.config.language_pair - filtered_subsets = {} - subsets = self._subsets if self.config.subsets is None else self.config.subsets - for split, ss_names in subsets.items(): - filtered_subsets[split] = [] - for ss_name in ss_names: - dataset = DATASET_MAP[ss_name] - if dataset.target != target or source not in dataset.sources: - logger.info("Skipping sub-dataset that does not include language pair: %s", ss_name) - else: - filtered_subsets[split].append(ss_name) - logger.info("Using sub-datasets: %s", filtered_subsets) - return filtered_subsets - - def _info(self): - src, target = self.config.language_pair - return datasets.DatasetInfo( - description=_DESCRIPTION, - features=datasets.Features( - {"translation": datasets.features.Translation(languages=self.config.language_pair)} - ), - supervised_keys=(src, target), - homepage=self.config.url, - citation=self.config.citation, - ) - - def _vocab_text_gen(self, split_subsets, extraction_map, language): - for _, ex in self._generate_examples(split_subsets, extraction_map, with_translation=False): - yield ex[language] - - def _split_generators(self, dl_manager): - source, _ = self.config.language_pair - manual_paths_dict = {} - urls_to_download = {} - for ss_name in itertools.chain.from_iterable(self.subsets.values()): - if ss_name == "czeng_17": - # CzEng1.7 is CzEng1.6 with some blocks filtered out. We must download - # the filtering script so we can parse out which blocks need to be - # removed. - urls_to_download[_CZENG17_FILTER.name] = _CZENG17_FILTER.get_url(source) - - # get dataset - dataset = DATASET_MAP[ss_name] - if dataset.get_manual_dl_files(source): - # TODO(PVP): following two lines skip configs that are incomplete for now - # +++++++++++++++++++++ - logger.info("Skipping {dataset.name} for now. Incomplete dataset for {self.config.name}") - continue - # +++++++++++++++++++++ - - manual_dl_files = dataset.get_manual_dl_files(source) - manual_paths = [ - os.path.join(os.path.abspath(os.path.expanduser(dl_manager.manual_dir)), fname) - for fname in manual_dl_files - ] - assert all( - os.path.exists(path) for path in manual_paths - ), f"For {dataset.name}, you must manually download the following file(s) from {dataset.get_url(source)} and place them in {dl_manager.manual_dir}: {', '.join(manual_dl_files)}" - - # set manual path for correct subset - manual_paths_dict[ss_name] = manual_paths - else: - urls_to_download[ss_name] = dataset.get_url(source) - - # Download and extract files from URLs. 
-
-    def _generate_examples(self, split_subsets, extraction_map, with_translation=True):
-        """Returns the examples in the raw (text) form."""
-        source, _ = self.config.language_pair
-
-        def _get_local_paths(dataset, extract_dirs):
-            rel_paths = dataset.get_path(source)
-            if len(extract_dirs) == 1:
-                extract_dirs = extract_dirs * len(rel_paths)
-            return [
-                os.path.join(ex_dir, rel_path) if rel_path else ex_dir
-                for ex_dir, rel_path in zip(extract_dirs, rel_paths)
-            ]
-
-        def _get_filenames(dataset):
-            rel_paths = dataset.get_path(source)
-            urls = dataset.get_url(source)
-            if len(urls) == 1:
-                urls = urls * len(rel_paths)
-            return [rel_path if rel_path else os.path.basename(url) for url, rel_path in zip(urls, rel_paths)]
-
-        for ss_name in split_subsets:
-            # TODO(PVP): remove the following five lines when manual data works
-            # +++++++++++++++++++++
-            dataset = DATASET_MAP[ss_name]
-            source, _ = self.config.language_pair
-            if dataset.get_manual_dl_files(source):
-                logger.info(f"Skipping {dataset.name} for now. Incomplete dataset for {self.config.name}")
-                continue
-            # +++++++++++++++++++++
-
-            logger.info("Generating examples from: %s", ss_name)
-            dataset = DATASET_MAP[ss_name]
-            extract_dirs = extraction_map[ss_name]
-            files = _get_local_paths(dataset, extract_dirs)
-            filenames = _get_filenames(dataset)
-
-            sub_generator_args = tuple(files)
-
-            if ss_name.startswith("czeng"):
-                if ss_name.endswith("16pre"):
-                    sub_generator = functools.partial(_parse_tsv, language_pair=("en", "cs"))
-                    sub_generator_args += tuple(filenames)
-                elif ss_name.endswith("17"):
-                    filter_path = _get_local_paths(_CZENG17_FILTER, extraction_map[_CZENG17_FILTER.name])[0]
-                    sub_generator = functools.partial(_parse_czeng, filter_path=filter_path)
-                else:
-                    sub_generator = _parse_czeng
-            elif ss_name == "hindencorp_01":
-                sub_generator = _parse_hindencorp
-            elif len(files) == 2:
-                if ss_name.endswith("_frde"):
-                    sub_generator = _parse_frde_bitext
-                else:
-                    sub_generator = _parse_parallel_sentences
-                    sub_generator_args += tuple(filenames)
-            elif len(files) == 1:
-                fname = filenames[0]
-                # Note: Due to formatting used by `download_manager`, the file
-                # extension may not be at the end of the file path.
-                if ".tsv" in fname:
-                    sub_generator = _parse_tsv
-                    sub_generator_args += tuple(filenames)
-                elif (
-                    ss_name.startswith("newscommentary_v14")
-                    or ss_name.startswith("europarl_v9")
-                    or ss_name.startswith("wikititles_v1")
-                ):
-                    sub_generator = functools.partial(_parse_tsv, language_pair=self.config.language_pair)
-                    sub_generator_args += tuple(filenames)
-                elif "tmx" in fname or ss_name.startswith("paracrawl_v3"):
-                    sub_generator = _parse_tmx
-                elif ss_name.startswith("wikiheadlines"):
-                    sub_generator = _parse_wikiheadlines
-                else:
-                    raise ValueError("Unsupported file format: %s" % fname)
-            else:
-                raise ValueError("Invalid number of files: %d" % len(files))
-
-            for sub_key, ex in sub_generator(*sub_generator_args):
-                if not all(ex.values()):
-                    continue
-                # TODO(adarob): Add subset feature.
-                # ex["subset"] = subset
-                key = f"{ss_name}/{sub_key}"
-                if with_translation is True:
-                    ex = {"translation": ex}
-                yield key, ex
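A hedged sketch of one `(key, example)` pair the generator above yields for a `de-en` test split; the sentence content is invented for illustration:

```python
# Hypothetical yielded pair: keys are "<subset>/<sub_key>" and examples are
# wrapped in "translation" when with_translation is True.
key = "newstest2018/0/0"
example = {"translation": {"de": "Guten Morgen.", "en": "Good morning."}}
assert all(example["translation"].values())  # mirrors the all(ex.values()) filter above
```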
- if ".tsv" in fname: - sub_generator = _parse_tsv - sub_generator_args += tuple(filenames) - elif ( - ss_name.startswith("newscommentary_v14") - or ss_name.startswith("europarl_v9") - or ss_name.startswith("wikititles_v1") - ): - sub_generator = functools.partial(_parse_tsv, language_pair=self.config.language_pair) - sub_generator_args += tuple(filenames) - elif "tmx" in fname or ss_name.startswith("paracrawl_v3"): - sub_generator = _parse_tmx - elif ss_name.startswith("wikiheadlines"): - sub_generator = _parse_wikiheadlines - else: - raise ValueError("Unsupported file format: %s" % fname) - else: - raise ValueError("Invalid number of files: %d" % len(files)) - - for sub_key, ex in sub_generator(*sub_generator_args): - if not all(ex.values()): - continue - # TODO(adarob): Add subset feature. - # ex["subset"] = subset - key = f"{ss_name}/{sub_key}" - if with_translation is True: - ex = {"translation": ex} - yield key, ex - - -def _parse_parallel_sentences(f1, f2, filename1, filename2): - """Returns examples from parallel SGML or text files, which may be gzipped.""" - - def _parse_text(path, original_filename): - """Returns the sentences from a single text file, which may be gzipped.""" - split_path = original_filename.split(".") - - if split_path[-1] == "gz": - lang = split_path[-2] - - def gen(): - with open(path, "rb") as f, gzip.GzipFile(fileobj=f) as g: - for line in g: - yield line.decode("utf-8").rstrip() - - return gen(), lang - - if split_path[-1] == "txt": - # CWMT - lang = split_path[-2].split("_")[-1] - lang = "zh" if lang in ("ch", "cn", "c[hn]") else lang - else: - lang = split_path[-1] - - def gen(): - with open(path, "rb") as f: - for line in f: - yield line.decode("utf-8").rstrip() - - return gen(), lang - - def _parse_sgm(path, original_filename): - """Returns sentences from a single SGML file.""" - lang = original_filename.split(".")[-2] - # Note: We can't use the XML parser since some of the files are badly - # formatted. - seg_re = re.compile(r"(.*)") - - def gen(): - with open(path, encoding="utf-8") as f: - for line in f: - seg_match = re.match(seg_re, line) - if seg_match: - assert len(seg_match.groups()) == 1 - yield seg_match.groups()[0] - - return gen(), lang - - parse_file = _parse_sgm if os.path.basename(f1).endswith(".sgm") else _parse_text - - # Some datasets (e.g., CWMT) contain multiple parallel files specified with - # a wildcard. We sort both sets to align them and parse them one by one. - f1_files = sorted(glob.glob(f1)) - f2_files = sorted(glob.glob(f2)) - - assert f1_files and f2_files, "No matching files found: %s, %s." % (f1, f2) - assert len(f1_files) == len(f2_files), "Number of files do not match: %d vs %d for %s vs %s." 
-
-
-def _parse_frde_bitext(fr_path, de_path):
-    with open(fr_path, encoding="utf-8") as fr_f:
-        with open(de_path, encoding="utf-8") as de_f:
-            for line_id, (s1, s2) in enumerate(zip(fr_f, de_f)):
-                yield line_id, {"fr": s1.rstrip(), "de": s2.rstrip()}
-
-
-def _parse_tmx(path):
-    """Generates examples from TMX file."""
-
-    def _get_tuv_lang(tuv):
-        for k, v in tuv.items():
-            if k.endswith("}lang"):
-                return v
-        raise AssertionError("Language not found in `tuv` attributes.")
-
-    def _get_tuv_seg(tuv):
-        segs = tuv.findall("seg")
-        assert len(segs) == 1, "Invalid number of segments: %d" % len(segs)
-        return segs[0].text
-
-    with open(path, "rb") as f:
-        # Workaround due to: https://github.com/tensorflow/tensorflow/issues/33563
-        utf_f = codecs.getreader("utf-8")(f)
-        for line_id, (_, elem) in enumerate(ElementTree.iterparse(utf_f)):
-            if elem.tag == "tu":
-                yield line_id, {_get_tuv_lang(tuv): _get_tuv_seg(tuv) for tuv in elem.iterfind("tuv")}
-                elem.clear()
-
-
-def _parse_tsv(path, filename, language_pair=None):
-    """Generates examples from TSV file."""
-    if language_pair is None:
-        lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])\.tsv", filename)
-        assert lang_match is not None, "Invalid TSV filename: %s" % filename
-        l1, l2 = lang_match.groups()
-    else:
-        l1, l2 = language_pair
-    with open(path, encoding="utf-8") as f:
-        for j, line in enumerate(f):
-            cols = line.split("\t")
-            if len(cols) != 2:
-                logger.warning("Skipping line %d in TSV (%s) with %d != 2 columns.", j, path, len(cols))
-                continue
-            s1, s2 = cols
-            yield j, {l1: s1.strip(), l2: s2.strip()}
-
-
-def _parse_wikiheadlines(path):
-    """Generates examples from Wikiheadlines dataset file."""
-    lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])$", path)
-    assert lang_match is not None, "Invalid Wikiheadlines filename: %s" % path
-    l1, l2 = lang_match.groups()
-    with open(path, encoding="utf-8") as f:
-        for line_id, line in enumerate(f):
-            s1, s2 = line.split("|||")
-            yield line_id, {l1: s1.strip(), l2: s2.strip()}
-
-
-def _parse_czeng(*paths, **kwargs):
-    """Generates examples from CzEng v1.6, with optional filtering for v1.7."""
-    filter_path = kwargs.get("filter_path", None)
-    if filter_path:
-        re_block = re.compile(r"^[^-]+-b(\d+)-\d\d[tde]")
-        with open(filter_path, encoding="utf-8") as f:
-            bad_blocks = {blk for blk in re.search(r"qw{([\s\d]*)}", f.read()).groups()[0].split()}
-        logger.info("Loaded %d bad blocks to filter from CzEng v1.6 to make v1.7.", len(bad_blocks))
-
-    for path in paths:
-        for gz_path in sorted(glob.glob(path)):
-            with open(gz_path, "rb") as g, gzip.GzipFile(fileobj=g) as f:
-                filename = os.path.basename(gz_path)
-                for line_id, line in enumerate(f):
-                    line = line.decode("utf-8")  # required for py3
-                    if not line.strip():
-                        continue
-                    id_, unused_score, cs, en = line.split("\t")
-                    if filter_path:
-                        block_match = re.match(re_block, id_)
-                        if block_match and block_match.groups()[0] in bad_blocks:
-                            continue
-                    sub_key = f"{filename}/{line_id}"
-                    yield sub_key, {
-                        "cs": cs.strip(),
-                        "en": en.strip(),
-                    }
-
-
-def _parse_hindencorp(path):
-    with open(path, encoding="utf-8") as f:
-        for line_id, line in enumerate(f):
-            split_line = line.split("\t")
-            if len(split_line) != 5:
-                logger.warning("Skipping invalid HindEnCorp line: %s", line)
-                continue
-            # Yield the bare pair; _generate_examples wraps it in "translation".
-            yield line_id, {"en": split_line[3].strip(), "hi": split_line[4].strip()}
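A small, self-contained sketch exercising `_parse_tsv`, the simplest of the parsers above, on a throwaway file:

```python
# Hedged sketch: feed _parse_tsv a tiny temp file whose name carries the
# language pair, as the real downloaded TSVs do.
import os
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".de-en.tsv", delete=False, encoding="utf-8") as tmp:
    tmp.write("Guten Morgen.\tGood morning.\n")
    path = tmp.name

print(list(_parse_tsv(path, os.path.basename(path))))
# [(0, {'de': 'Guten Morgen.', 'en': 'Good morning.'})]
os.remove(path)
```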
line.split("\t") - if len(split_line) != 5: - logger.warning("Skipping invalid HindEnCorp line: %s", line) - continue - yield line_id, {"translation": {"en": split_line[3].strip(), "hi": split_line[4].strip()}} diff --git a/zh-en/test-00000-of-00001.parquet b/zh-en/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..16ca0902f4ebb5190a83ca32f862c769f26c50be --- /dev/null +++ b/zh-en/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:338679811a459e781d91b2b10af0f43922f8fd9e118086cf3377b249978e1c2b +size 728463 diff --git a/zh-en/train-00000-of-00013.parquet b/zh-en/train-00000-of-00013.parquet new file mode 100644 index 0000000000000000000000000000000000000000..de6ae9319d5ee708b88f78dd8daf906286ffd1d5 --- /dev/null +++ b/zh-en/train-00000-of-00013.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5cda4bff9382721d406c48cdd049dd13fb979e539ea5925d6802931826833dd4 +size 286371772 diff --git a/zh-en/train-00001-of-00013.parquet b/zh-en/train-00001-of-00013.parquet new file mode 100644 index 0000000000000000000000000000000000000000..3a53351a3e1b1b8a2926a6d96ba1a280c2d29a15 --- /dev/null +++ b/zh-en/train-00001-of-00013.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c841596f5990d3f9fab045a238640eb11f84bbf8b50e58dd7276f14a07806263 +size 273203425 diff --git a/zh-en/train-00002-of-00013.parquet b/zh-en/train-00002-of-00013.parquet new file mode 100644 index 0000000000000000000000000000000000000000..3227132aeffa72ec5a6ae7ac84df823d9072c0f9 --- /dev/null +++ b/zh-en/train-00002-of-00013.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:49b74d174e230fdbdd22e14f9bf80ef5d9e0e77ac7913a08e2359de1f269d767 +size 280397887 diff --git a/zh-en/train-00003-of-00013.parquet b/zh-en/train-00003-of-00013.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e554eedef87b6ca38cd6ab55533285c55f84c088 --- /dev/null +++ b/zh-en/train-00003-of-00013.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7859afbe510d8768cec5d6e2112a18dfd2111bb040418232087fcbc261de5231 +size 278397413 diff --git a/zh-en/train-00004-of-00013.parquet b/zh-en/train-00004-of-00013.parquet new file mode 100644 index 0000000000000000000000000000000000000000..d5fae89e5002c9daee7a98fcb0f1526f50e88d45 --- /dev/null +++ b/zh-en/train-00004-of-00013.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41e7db3c5449fcfc40ccb3f09d7d6ff52e4d613a054f7c7b351ce335f96c496e +size 278327818 diff --git a/zh-en/train-00005-of-00013.parquet b/zh-en/train-00005-of-00013.parquet new file mode 100644 index 0000000000000000000000000000000000000000..3b4b25fadad927af442430eab7c447bd421cf9fc --- /dev/null +++ b/zh-en/train-00005-of-00013.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:64815519ffbf232cc893a0acc16f4189cc788ef2761be0379bc29c93b3fcf04f +size 281032050 diff --git a/zh-en/train-00006-of-00013.parquet b/zh-en/train-00006-of-00013.parquet new file mode 100644 index 0000000000000000000000000000000000000000..93a5410fe60fec967f5c63223476df43aca79658 --- /dev/null +++ b/zh-en/train-00006-of-00013.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c1eec7f179658146e980acccb48187b8be7865b5e93c3bd22d33099b7daf3444 +size 282330612 diff --git a/zh-en/train-00007-of-00013.parquet b/zh-en/train-00007-of-00013.parquet new file mode 100644 index 
diff --git a/zh-en/train-00007-of-00013.parquet b/zh-en/train-00007-of-00013.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..af2b8fb3d26ca9c3234cf58e846eede1ebd8a7f4
--- /dev/null
+++ b/zh-en/train-00007-of-00013.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f87fe04ec11658b72e693254bb25d425449e894db159f6942f0406a8d5010f38
+size 281106001
diff --git a/zh-en/train-00008-of-00013.parquet b/zh-en/train-00008-of-00013.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..b99095610000a042e3acf2b00580f97489e751e3
--- /dev/null
+++ b/zh-en/train-00008-of-00013.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9262ddba5608c524160bf22bdf6496ae26f1146d6f9c668f0834fe634259f350
+size 294619463
diff --git a/zh-en/train-00009-of-00013.parquet b/zh-en/train-00009-of-00013.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..5ae894d063f7d60dd3deff0f327d1c0fab272412
--- /dev/null
+++ b/zh-en/train-00009-of-00013.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:403dcb8e90c7af1f091dde86ad39f11d8b8473ff43213947e38a0e0842e50eb9
+size 272081906
diff --git a/zh-en/train-00010-of-00013.parquet b/zh-en/train-00010-of-00013.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..be64e7481844f5cece183b5e1caa86d2861527d4
--- /dev/null
+++ b/zh-en/train-00010-of-00013.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:172ff92bc42e6067e9915b2399e4a48ea25293947c69cbfb501bddd0a0b271e5
+size 190232424
diff --git a/zh-en/train-00011-of-00013.parquet b/zh-en/train-00011-of-00013.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..243561d6b5cdc82cf1fe539017e659c9cce6a79b
--- /dev/null
+++ b/zh-en/train-00011-of-00013.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3f2b0beb917010742dd9e9bc030a62c71a777bc5179d724bb2fff01de2a658d0
+size 327619889
diff --git a/zh-en/train-00012-of-00013.parquet b/zh-en/train-00012-of-00013.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..dc115272878e70ba53255795718037cfb04e9a9f
--- /dev/null
+++ b/zh-en/train-00012-of-00013.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8eea20412d0967c73d638a9ce19addbd472d0baa2edc2f0d51c02e639801bac4
+size 254263650
diff --git a/zh-en/validation-00000-of-00001.parquet b/zh-en/validation-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..8c99d3aed5a6449a800b7ebf0321cc95a32684bd
--- /dev/null
+++ b/zh-en/validation-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c7315f0fed1d99c9c553050bafe4d80764a1761a1e11d1de75fa3ee1da30139e
+size 361721
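With the loading script removed, the configs are read straight from these Parquet shards. A minimal sketch of consuming the `zh-en` config, assuming the repo id `wmt/wmt18` used elsewhere in this README:

```python
# Hedged sketch: loading the Parquet-backed zh-en config directly.
from datasets import load_dataset

ds = load_dataset("wmt/wmt18", "zh-en", split="test")
print(ds[0]["translation"])  # {'zh': '...', 'en': '...'}
```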