renatk commited on
Commit
1ed18cc
0 Parent(s):

sharing...

Browse files
Files changed (5) hide show
  1. .gitattributes +54 -0
  2. .gitignore +3 -0
  3. README.md +34 -0
  4. dataset_infos.json +1 -0
  5. translate_enaz_10m.py +78 -0
.gitattributes ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
12
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
13
+ *.model filter=lfs diff=lfs merge=lfs -text
14
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
15
+ *.npy filter=lfs diff=lfs merge=lfs -text
16
+ *.npz filter=lfs diff=lfs merge=lfs -text
17
+ *.onnx filter=lfs diff=lfs merge=lfs -text
18
+ *.ot filter=lfs diff=lfs merge=lfs -text
19
+ *.parquet filter=lfs diff=lfs merge=lfs -text
20
+ *.pb filter=lfs diff=lfs merge=lfs -text
21
+ *.pickle filter=lfs diff=lfs merge=lfs -text
22
+ *.pkl filter=lfs diff=lfs merge=lfs -text
23
+ *.pt filter=lfs diff=lfs merge=lfs -text
24
+ *.pth filter=lfs diff=lfs merge=lfs -text
25
+ *.rar filter=lfs diff=lfs merge=lfs -text
26
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
27
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ # Audio files - uncompressed
37
+ *.pcm filter=lfs diff=lfs merge=lfs -text
38
+ *.sam filter=lfs diff=lfs merge=lfs -text
39
+ *.raw filter=lfs diff=lfs merge=lfs -text
40
+ # Audio files - compressed
41
+ *.aac filter=lfs diff=lfs merge=lfs -text
42
+ *.flac filter=lfs diff=lfs merge=lfs -text
43
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
44
+ *.ogg filter=lfs diff=lfs merge=lfs -text
45
+ *.wav filter=lfs diff=lfs merge=lfs -text
46
+ # Image files - uncompressed
47
+ *.bmp filter=lfs diff=lfs merge=lfs -text
48
+ *.gif filter=lfs diff=lfs merge=lfs -text
49
+ *.png filter=lfs diff=lfs merge=lfs -text
50
+ *.tiff filter=lfs diff=lfs merge=lfs -text
51
+ # Image files - compressed
52
+ *.jpg filter=lfs diff=lfs merge=lfs -text
53
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
54
+ *.webp filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ *.zip
2
+ *.tsv
3
+ *.gz
README.md ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: openrail
3
+ task_categories:
4
+ - translation
5
+ - text-generation
6
+ - text2text-generation
7
+ language:
8
+ - en
9
+ - az
10
+ tags:
11
+ - azerbaijani books
12
+ - azerbaijani news
13
+ - azerbaijani poems
14
+ - azerbaijani articles
15
+ - azerbaijani dataset
16
+ pretty_name: English-Azerbaijani Dataset
17
+ size_categories:
18
+ - 1M<n<10M
19
+ ---
20
+
21
+ # Description
22
+ Dataset used to train our mT5 based model for machine translation, extracted from various text sources of National Library of Azerbaijan: [mT5-translation-enaz](https://huggingface.co/learningmachineaz/mt5-enaz-10m) \
23
+ It contains only clean text. Wiki articles weren't used, as they contain a lot of irrelevant data.
24
+
25
+ | Key point | Info |
26
+ |-------------------------|---------|
27
+ | Rows | ~10mil. EN-AZ sentence pairs |
28
+ | Size | 975M (zipped) / 2.8G (unzipped) |
29
+ | Format | TSV (tab separated pairs) |
30
+ | English | Google Translate |
31
+ | Azerbaijani | Original cleaned text |
32
+
33
+ ## Author
34
+ Collected and prepared by [Renat Kalimulin](https://www.linkedin.com/in/rinat-kalimulin-16853358/)
dataset_infos.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"default": {"description": "Machine translation EN-AZ dataset based on Google Translate and National Library of Azerbaijan.\n", "citation": "@InProceedings{\nhuggingface:dataset,\ntitle={Machine translation EN-AZ dataset},\nauthor={Learning Machine LLC},\nyear={2022}\n}\n", "homepage": "https://huggingface.co/datasets/learningmachineaz/translate_enaz_10m", "license": "Apache", "features": {"translation": {"dtype": "string", "id": null, "_type": "Value"}, "source_text": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "translate_enaz_10m", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3007177626, "num_examples": 9962727, "dataset_name": "translate_enaz_10m"}}, "download_checksums": {"https://learningmachine.az/datasets/translate_enaz_10m.zip": {"num_bytes": 1022192839, "checksum": "93031000aa80000abf9d2d0df033053c91af86104e2c7f36b9fbbd2283d24bf4"}}, "download_size": 1022192839, "post_processing_size": null, "dataset_size": 3007177626, "size_in_bytes": 4029370465}}
translate_enaz_10m.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """Machine translation EN-AZ dataset based on Google Translate and National Library of Azerbaijan."""
15
+
16
+
17
+ import os
18
+ import datasets
19
+
20
+
21
+ _CITATION = """\
22
+ @InProceedings{
23
+ huggingface:dataset,
24
+ title={Machine translation EN-AZ dataset},
25
+ author={Learning Machine LLC},
26
+ year={2022}
27
+ }
28
+ """
29
+
30
+ _DESCRIPTION = """\
31
+ Machine translation EN-AZ dataset based on Google Translate and National Library of Azerbaijan.
32
+ """
33
+
34
+ _HOMEPAGE = "https://huggingface.co/datasets/learningmachineaz/translate_enaz_10m"
35
+
36
+ _LICENSE = "Apache"
37
+
38
+ _URL = "https://learningmachine.az/datasets/translate_enaz_10m.zip"
39
+
40
+
41
class TranslateEnaz10m(datasets.GeneratorBasedBuilder):
    """English-Azerbaijani machine translation dataset (~10M sentence pairs).

    Single ``train`` split read from a tab-separated file where each row is
    ``<english translation>\\t<azerbaijani source text>``.
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return DatasetInfo describing the two string columns."""
        features = datasets.Features(
            {
                # English text produced by Google Translate (per README).
                "translation": datasets.Value("string"),
                # Original (cleaned) Azerbaijani text.
                "source_text": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download + extract the zip archive and expose one TRAIN split."""
        data_dir = dl_manager.download_and_extract(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "file_path": os.path.join(data_dir, "dataset_enaz_10m.tsv")
                },
            )
        ]

    def _generate_examples(self, file_path):
        """Yield ``(id, example)`` pairs from the tab-separated file.

        Fix over the original: a line without a tab separator (e.g. a stray
        blank or truncated line) used to raise ``IndexError`` on ``row[1]``;
        such lines are now skipped. IDs come from the file line number, so
        they remain unique (with gaps where lines were skipped).
        """
        with open(file_path, "r", encoding="utf-8") as f:
            for id_, line in enumerate(f):
                fields = line.split("\t")
                if len(fields) < 2:
                    # Malformed row: no tab separator — skip rather than crash.
                    continue
                yield id_, {
                    "translation": fields[0].strip(),
                    "source_text": fields[1].strip(),
                }