parquet-converter committed on
Commit: 6968453
1 parent: 7617090

Update parquet files

.gitattributes CHANGED
@@ -14,3 +14,6 @@
  *.pb filter=lfs diff=lfs merge=lfs -text
  *.pt filter=lfs diff=lfs merge=lfs -text
  *.pth filter=lfs diff=lfs merge=lfs -text
+ ende/wmt14-en-de-pre-processed-train-00002-of-00003.parquet filter=lfs diff=lfs merge=lfs -text
+ ende/wmt14-en-de-pre-processed-train-00000-of-00003.parquet filter=lfs diff=lfs merge=lfs -text
+ ende/wmt14-en-de-pre-processed-train-00001-of-00003.parquet filter=lfs diff=lfs merge=lfs -text
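Each added line maps one parquet shard to the Git LFS filters, so the binary payload is stored out-of-band. A minimal sketch of how such rules could be appended programmatically, equivalent to running `git lfs track <path>` once per shard (paths taken from the diff above):

```python
# Sketch: append LFS tracking rules for the new parquet shards to
# .gitattributes, mirroring the lines `git lfs track <path>` would write.
shards = [
    "ende/wmt14-en-de-pre-processed-train-00000-of-00003.parquet",
    "ende/wmt14-en-de-pre-processed-train-00001-of-00003.parquet",
    "ende/wmt14-en-de-pre-processed-train-00002-of-00003.parquet",
]
with open(".gitattributes", "a", encoding="utf-8") as f:
    for path in shards:
        f.write(f"{path} filter=lfs diff=lfs merge=lfs -text\n")
```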
README.md DELETED
@@ -1,16 +0,0 @@
- # WMT14 English-German Translation Data w/ further preprocessing
-
- The original pre-processing script is [here](https://github.com/pytorch/fairseq/blob/master/examples/translation/prepare-wmt14en2de.sh).
-
- This pre-processed dataset was created by running:
-
- ```
- git clone https://github.com/pytorch/fairseq
- cd fairseq
- cd examples/translation/
- ./prepare-wmt14en2de.sh
- ```
-
- It was originally used by `transformers` [`finetune_trainer.py`](https://github.com/huggingface/transformers/blob/641f418e102218c4bf16fcd3124bfebed6217ef6/examples/seq2seq/finetune_trainer.py)
-
- The data itself resides at https://cdn-datasets.huggingface.co/translation/wmt_en_de.tgz
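The deleted README pointed at the raw archive on the CDN. A minimal sketch of fetching and unpacking it directly, assuming the URL from the README (the checksum recorded in dataset_infos.json below can be used to verify the download):

```python
import tarfile
import urllib.request

# URL from the deleted README; the archive is ~476 MB per dataset_infos.json,
# and unpacks to wmt_en_de/{train,val,test}.{source,target}.
url = "https://cdn-datasets.huggingface.co/translation/wmt_en_de.tgz"
urllib.request.urlretrieve(url, "wmt_en_de.tgz")
with tarfile.open("wmt_en_de.tgz", "r:gz") as tar:
    tar.extractall("data")
```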
dataset_infos.json DELETED
@@ -1 +0,0 @@
- {"ende": {"description": "WMT14 English-German Translation Data with further preprocessing", "citation": "@InProceedings{huggingface:dataset,\ntitle = {WMT14 English-German Translation Data with further preprocessing},\nauthors={},\nyear={2016}\n}\n", "homepage": "http://www.statmt.org/wmt16/", "license": "", "features": {"translation": {"languages": ["en", "de"], "id": null, "_type": "Translation"}}, "post_processed": null, "supervised_keys": {"input": "en", "output": "de"}, "builder_name": "wmt14_en_de_pre_processed", "config_name": "ende", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1373123263, "num_examples": 4548885, "dataset_name": "wmt14_en_de_pre_processed"}, "validation": {"name": "validation", "num_bytes": 522989, "num_examples": 2169, "dataset_name": "wmt14_en_de_pre_processed"}, "test": {"name": "test", "num_bytes": 735516, "num_examples": 2999, "dataset_name": "wmt14_en_de_pre_processed"}}, "download_checksums": {"https://cdn-datasets.huggingface.co/translation/wmt_en_de.tgz": {"num_bytes": 476503157, "checksum": "6011fdebebcd196040c77aa1853e7631adf47e078acbc5cbc7c7264019130c51"}}, "download_size": 476503157, "post_processing_size": null, "dataset_size": 1374381768, "size_in_bytes": 1850884925}}
 
 
ende/wmt14-en-de-pre-processed-test.parquet ADDED
Binary file (475 kB).
 
ende/wmt14-en-de-pre-processed-train-00000-of-00003.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1cb31c5ba9b6af86133d0f6c045192cb8e18fbd933696e59f518706d1e941304
+ size 283127090
ende/wmt14-en-de-pre-processed-train-00001-of-00003.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c281b35245161c77eea1148cca0ab92e99a0d28e3ef95ab31db2c0ee9854df41
+ size 304859947
ende/wmt14-en-de-pre-processed-train-00002-of-00003.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:15d83e3f4de65f5f11de074db23c7c2f38872c917b449937c144a4237faa24b0
+ size 238379852
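The three blobs above are Git LFS pointer files, not the parquet data itself; the `oid` and `size` fields identify the real object. A small sketch that parses a pointer into its fields, based only on the three-line format shown above:

```python
def parse_lfs_pointer(path: str) -> dict:
    """Parse a Git LFS pointer file into its "version", "oid", "size" fields."""
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            if key:
                fields[key] = value
    assert fields.get("version") == "https://git-lfs.github.com/spec/v1"
    return fields

# e.g. parse_lfs_pointer("ende/wmt14-en-de-pre-processed-train-00000-of-00003.parquet")
# -> {'version': '...', 'oid': 'sha256:1cb31c5b...', 'size': '283127090'}
```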
ende/wmt14-en-de-pre-processed-validation.parquet ADDED
Binary file (343 kB).
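Once the LFS objects are fetched, the shards are ordinary parquet files. A minimal sketch reading the small validation shard with pandas; the single `translation` column holding `en`/`de` pairs is an assumption carried over from the deleted dataset_infos.json:

```python
import pandas as pd

df = pd.read_parquet("ende/wmt14-en-de-pre-processed-validation.parquet")
print(len(df))                    # 2169 examples per the deleted dataset_infos.json
print(df.iloc[0]["translation"])  # expected shape: {'en': '...', 'de': '...'}
```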
 
process.txt DELETED
@@ -1,21 +0,0 @@
-
- # how this build script and dataset_infos.json were generated
-
- # this is translation, so let's adapt flores - it has an almost identical input format, just the files are named differently:
- cp https://github.com/huggingface/datasets/blob/master/datasets/flores/flores.py wmt14-en-de-pre-processed.py
- perl -pi -e 's|Flores|wmt14-en-de-pre-processed|g' wmt14-en-de-pre-processed.py
-
- # (good models for other tasks can be found here: https://huggingface.co/docs/datasets/add_dataset.html#dataset-scripts-of-reference)
-
- # then edit to change the language pairs, the file template and the data url
- git add wmt14-en-de-pre-processed.py
- git commit -m "build script" wmt14-en-de-pre-processed.py
- git push
-
- # finally, test
- datasets-cli test stas/wmt14-en-de-pre-processed --save_infos --all_configs
-
- # add and push the generated config
- git add dataset_infos.json
- git commit -m "add dataset_infos.json" dataset_infos.json
- git push
wmt14-en-de-pre-processed.py DELETED
@@ -1,133 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """WMT14 English-German Translation Data with further preprocessing"""
-
- from __future__ import absolute_import, division, print_function
-
- import csv
- import json
- import os
-
- import datasets
-
- _CITATION = """\
- @InProceedings{huggingface:dataset,
- title = {WMT14 English-German Translation Data with further preprocessing},
- authors={},
- year={2016}
- }
- """
-
- _DESCRIPTION = "WMT14 English-German Translation Data with further preprocessing"
- _HOMEPAGE = "http://www.statmt.org/wmt16/"
- _LICENSE = ""
-
-
- _DATA_URL = "https://cdn-datasets.huggingface.co/translation/wmt_en_de.tgz"
-
-
- class Wmt14EnDePreProcessedConfig(datasets.BuilderConfig):
-     """BuilderConfig for wmt14-en-de-pre-processed."""
-
-     def __init__(self, language_pair=(None, None), **kwargs):
-         """BuilderConfig for wmt14-en-de-pre-processed.
-
-         Args:
-             language_pair: pair of languages that will be used for translation. Should
-                 contain 2-letter coded strings. First will be used as source and second
-                 as target in supervised mode. For example: ("en", "de").
-             **kwargs: keyword arguments forwarded to super.
-         """
-         name = "%s%s" % (language_pair[0], language_pair[1])
-
-         description = "Translation dataset from %s to %s" % (language_pair[0], language_pair[1])
-         super(Wmt14EnDePreProcessedConfig, self).__init__(
-             name=name,
-             description=description,
-             version=datasets.Version("1.1.0", ""),
-             **kwargs,
-         )
-
-         # Validate the language pair.
-         assert "en" in language_pair, "Config language pair must contain `en`, got: %s" % (language_pair,)
-         source, target = language_pair
-         non_en = source if target == "en" else target
-         assert non_en in ["de"], "Invalid non-en language in pair: %s" % non_en
-
-         self.language_pair = language_pair
-
-
- class Wmt14EnDePreProcessed(datasets.GeneratorBasedBuilder):
-
-     BUILDER_CONFIGS = [
-         Wmt14EnDePreProcessedConfig(
-             language_pair=("en", "de"),
-         ),
-     ]
-
-     def _info(self):
-         source, target = self.config.language_pair
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {"translation": datasets.features.Translation(languages=self.config.language_pair)}
-             ),
-             supervised_keys=(source, target),
-             homepage=_HOMEPAGE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         dl_dir = dl_manager.download_and_extract(_DATA_URL)
-
-         path_tmpl = "{dl_dir}/wmt_en_de/{split}.{type}"
-
-         files = {}
-         for split in ("train", "val", "test"):
-             files[split] = {
-                 "source_file": path_tmpl.format(dl_dir=dl_dir, split=split, type="source"),
-                 "target_file": path_tmpl.format(dl_dir=dl_dir, split=split, type="target"),
-             }
-
-         return [
-             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs=files["train"]),
-             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs=files["val"]),
-             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs=files["test"]),
-         ]
-
-     def _generate_examples(self, source_file, target_file):
-         """Yield the examples in raw (text) form."""
-         with open(source_file, mode="rb") as f:
-             source_sentences = f.read().decode("utf8").split("\n")
-         with open(target_file, mode="rb") as f:
-             target_sentences = f.read().decode("utf8").split("\n")
-
-         assert len(target_sentences) == len(source_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (
-             len(source_sentences),
-             len(target_sentences),
-             source_file,
-             target_file,
-         )
-
-         source, target = self.config.language_pair
-         for idx, (l1, l2) in enumerate(zip(source_sentences, target_sentences)):
-             result = {"translation": {source: l1, target: l2}}
-             # Make sure that both translations are non-empty.
-             if l1 and l2:
-                 yield idx, result
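With the repository converted to parquet, a loading script like the one deleted above is no longer needed; `datasets` can read the shards directly from the Hub. A usage sketch, assuming the repo id `stas/wmt14-en-de-pre-processed` from the deleted process.txt:

```python
from datasets import load_dataset

# The Hub now serves the parquet shards directly; no custom builder required.
ds = load_dataset("stas/wmt14-en-de-pre-processed", "ende", split="validation")
print(ds[0]["translation"])  # {'en': '...', 'de': '...'}
```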