cartesinus committed
Commit
0cc561c
1 Parent(s): 4a69685

release of en2pl with massive filtering and en2es without massive filtering

Files changed (2)
  1. README.md +98 -0
  2. iva_mt_wslot-exp.py +141 -0
README.md ADDED
@@ -0,0 +1,98 @@
+ ---
+ dataset_info:
+   features:
+   - name: id
+     dtype: string
+   - name: locale
+     dtype: string
+   - name: origin
+     dtype: string
+   - name: partition
+     dtype: string
+   - name: translation_utt
+     dtype:
+       translation:
+         languages:
+         - en
+         - pl
+   - name: translation_xml
+     dtype:
+       translation:
+         languages:
+         - en
+         - pl
+   - name: src_bio
+     dtype: string
+   - name: tgt_bio
+     dtype: string
+ task_categories:
+ - translation
+ language:
+ - en
+ - pl
+ - de
+ - es
+ - sv
+ tags:
+ - machine translation
+ - nlu
+ - natural-language-understanding
+ - virtual assistant
+ pretty_name: Machine translation for NLU with slot transfer
+ size_categories:
+ - 10K<n<100K
+ license: cc-by-4.0
+ ---
+ # Machine translation dataset for NLU (Virtual Assistant) with slot transfer between languages
+ ## Dataset Summary
+
+ Disclaimer: this dataset is intended for research purposes only. Please see the License section below; some of the datasets used to construct IVA_MT have an unknown license.
+
+ IVA_MT is a machine translation dataset that can be used to train, adapt, and evaluate MT models used in a Virtual Assistant NLU context (e.g., to translate the training corpus of an NLU system). A minimal loading sketch is shown after the card.
+
+ ## Dataset Composition
+
+ ### en-pl
+
+ | Corpus | Train | Dev | Test |
+ |----------------------------------------------------------------------|--------|-------|-------|
+ | [Massive 1.1](https://huggingface.co/datasets/AmazonScience/massive) | 11514 | 2033 | 2974 |
+ | [Leyzer 0.2.0](https://github.com/cartesinus/leyzer/tree/0.2.0) | 3974 | 701 | 1380 |
+ | [OpenSubtitles from OPUS](https://opus.nlpl.eu/OpenSubtitles-v1.php) | 2329 | 411 | 500 |
+ | [KDE from OPUS](https://opus.nlpl.eu/KDE4.php) | 1154 | 241 | 241 |
+ | [CCMatrix from OPUS](https://opus.nlpl.eu/CCMatrix.php) | 1096 | 232 | 237 |
+ | [Ubuntu from OPUS](https://opus.nlpl.eu/Ubuntu.php) | 281 | 60 | 59 |
+ | [Gnome from OPUS](https://opus.nlpl.eu/GNOME.php) | 14 | 3 | 3 |
+ | *total* | 20362 | 3681 | 5394 |
+
+ ### en-de
+
+ | Corpus | Train | Dev | Test |
+ |----------------------------------------------------------------------|--------|-------|-------|
+ | [Massive 1.1](https://huggingface.co/datasets/AmazonScience/massive) | 7536 | 1346 | 1955 |
+
+ ### en-es
+
+ | Corpus | Train | Dev | Test |
+ |----------------------------------------------------------------------|--------|-------|-------|
+ | [Massive 1.1](https://huggingface.co/datasets/AmazonScience/massive) | 8415 | 1526 | 2202 |
+
+ ### en-sv
+
+ | Corpus | Train | Dev | Test |
+ |----------------------------------------------------------------------|--------|-------|-------|
+ | [Massive 1.1](https://huggingface.co/datasets/AmazonScience/massive) | 7540 | 1360 | 1921 |
+
+
+ ## Tools
+ Scripts used to generate this dataset can be found on [GitHub](https://github.com/cartesinus/iva_mt).
+
+ ## License
+ This dataset is a composition of 7 corpora, and each part retains the license of its original release:
+ - MASSIVE: [CC-BY 4.0](https://huggingface.co/datasets/AmazonScience/massive/blob/main/LICENSE)
+ - Leyzer: [CC BY-NC 4.0](https://github.com/cartesinus/leyzer/blob/master/LICENSE)
+ - OpenSubtitles: unknown
+ - KDE: [GNU General Public License](https://l10n.kde.org/about.php)
+ - CCMatrix: no license given, therefore the LASER project license [BSD](https://github.com/facebookresearch/LASER/blob/main/LICENSE) is assumed
+ - Ubuntu: [GNU General Public License](https://help.launchpad.net/Legal)
+ - Gnome: unknown
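
A minimal loading sketch for the summary above, assuming the card is published on the Hugging Face Hub as `cartesinus/iva_mt_wslot` (the repository id and config name here are assumptions taken from this commit, not confirmed by it):

```python
from datasets import load_dataset

# Pick one of the pairs listed in the composition tables: en-pl, en-de, en-es, en-sv.
ds = load_dataset("cartesinus/iva_mt_wslot", "en-pl")  # assumed repository id

print(ds)  # train / validation / test splits with the features declared in the YAML header
example = ds["train"][0]
print(example["translation_utt"])                # {'en': ..., 'pl': ...} plain utterances
print(example["translation_xml"])                # the same pair with slot markup preserved
print(example["src_bio"], example["tgt_bio"])    # BIO slot tags for both sides
```
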
iva_mt_wslot-exp.py ADDED
@@ -0,0 +1,141 @@
+ # coding=utf-8
+ """IVA_MT_WSLOT dataset loading script."""
+
+
+ import json
+
+ import datasets
+
+
+ _DESCRIPTION = """\
+ Machine translation dataset for NLU (Virtual Assistant) with slot transfer between languages.
+ """
+
+ _URL = "https://github.com/cartesinus/iva_mt/raw/main/release/0.2/iva_mt_wslot-dataset-0.2.1.tar.gz"
+
+ _LANGUAGE_PAIRS = ["en-pl", "en-de", "en-es", "en-sv"]
+
+
+ class IVA_MTConfig(datasets.BuilderConfig):
+     """BuilderConfig for IVA_MT"""
+
+     def __init__(self, language_pair, **kwargs):
+         """
+         Args:
+             language_pair: language pair you want to load
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super().__init__(**kwargs)
+         self.language_pair = language_pair
+
+
+ class IVA_MT(datasets.GeneratorBasedBuilder):
+     """IVA_MT_WSLOT is English-centric: every configuration pairs English with one target language."""
+
+     VERSION = datasets.Version("0.2.1")
+
+     BUILDER_CONFIG_CLASS = IVA_MTConfig
+     BUILDER_CONFIGS = [
+         IVA_MTConfig(name=pair, description=_DESCRIPTION, language_pair=pair)
+         for pair in _LANGUAGE_PAIRS
+     ]
+
+     def _info(self):
+         src_tag, tgt_tag = self.config.language_pair.split("-")
+         return datasets.DatasetInfo(
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("int64"),
+                     "locale": datasets.Value("string"),
+                     "origin": datasets.Value("string"),
+                     "partition": datasets.Value("string"),
+                     "translation_utt": datasets.features.Translation(languages=(src_tag, tgt_tag)),
+                     "translation_xml": datasets.features.Translation(languages=(src_tag, tgt_tag)),
+                     "src_bio": datasets.Value("string"),
+                     "tgt_bio": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=(src_tag, tgt_tag),
+         )
+
+     def _split_generators(self, dl_manager):
+         lang_pair = self.config.language_pair
+
+         archive = dl_manager.download(_URL)
+         data_dir = "/".join(["iva_mt_wslot-dataset", "0.2.1", lang_pair])
+         output = []
+
+         test = datasets.SplitGenerator(
+             name=datasets.Split.TEST,
+             # These kwargs will be passed to _generate_examples
+             gen_kwargs={
+                 "filepath": f"{data_dir}/iva_mt_wslot-{lang_pair}-test.jsonl",
+                 "files": dl_manager.iter_archive(archive),
+                 "split": "test",
+             },
+         )
+         output.append(test)
+
+         train = datasets.SplitGenerator(
+             name=datasets.Split.TRAIN,
+             gen_kwargs={
+                 "filepath": f"{data_dir}/iva_mt_wslot-{lang_pair}-train.jsonl",
+                 "files": dl_manager.iter_archive(archive),
+                 "split": "train",
+             },
+         )
+         output.append(train)
+
+         valid = datasets.SplitGenerator(
+             name=datasets.Split.VALIDATION,
+             gen_kwargs={
+                 "filepath": f"{data_dir}/iva_mt_wslot-{lang_pair}-valid.jsonl",
+                 "files": dl_manager.iter_archive(archive),
+                 "split": "valid",
+             },
+         )
+         output.append(valid)
+
+         return output
+
+     def _generate_examples(self, filepath, files, split):
+         """Yields examples."""
+         src_tag, tgt_tag = self.config.language_pair.split("-")
+         key_ = 0
+
+         for path, f in files:
+             # The pair token in the archive member name uses "2" instead of "-" (hence the
+             # replace below); normalise it to the config form, e.g. "en-pl".
+             pair = path.split("/")[-1].split("-")[1].replace("2", "-")
+
+             if pair != self.config.language_pair:
+                 continue
+
+             # Read the whole JSONL file for this language pair
+             lines = f.read().decode(encoding="utf-8").split("\n")
+
+             for line in lines:
+                 if not line:
+                     continue
+
+                 data = json.loads(line)
+
+                 # Keep only records that belong to the requested split
+                 if data["partition"] != split:
+                     continue
+
+                 yield key_, {
+                     "id": data["id"],
+                     "locale": data["locale"],
+                     "origin": data["origin"],
+                     "partition": data["partition"],
+                     "translation_utt": {src_tag: str(data["src_utt"]), tgt_tag: str(data["tgt_utt"])},
+                     "translation_xml": {src_tag: str(data["src_xml"]), tgt_tag: str(data["tgt_xml"])},
+                     "src_bio": str(data["src_bio"]),
+                     "tgt_bio": str(data["tgt_bio"]),
+                 }
+
+                 key_ += 1
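
For reference, here is a sketch of the JSONL record layout that `_generate_examples` reads. The field names are taken from the code above; the values are invented placeholders, not rows from the released archive:

```python
import json

# One illustrative line of an iva_mt_wslot-*-*.jsonl member (placeholder values only).
record = {
    "id": 1,               # yielded unchanged as the "id" feature
    "locale": "pl-PL",     # placeholder locale string
    "origin": "massive",   # placeholder source-corpus name
    "partition": "train",  # compared against the requested split
    "src_utt": "...",      # English utterance, plain text
    "tgt_utt": "...",      # target-language utterance, plain text
    "src_xml": "...",      # English utterance with slot markup
    "tgt_xml": "...",      # target utterance with slot markup
    "src_bio": "...",      # BIO slot tags for the source side
    "tgt_bio": "...",      # BIO slot tags for the target side
}
print(json.dumps(record))
```
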