nouamanetazi (HF staff) committed
Commit 472a874
1 Parent(s): e0387bf

Upload amazon_massive_intent.py

Files changed (1)
  1. amazon_massive_intent.py +218 -0
amazon_massive_intent.py ADDED
@@ -0,0 +1,218 @@
+ # coding=utf-8
+
+ """MASSIVE: A 1M-Example Multilingual Natural Language Understanding Dataset with 51 Typologically-Diverse Languages"""
+
+ import json
+ import datasets
+
+ logger = datasets.logging.get_logger(__name__)
+
+ _DESCRIPTION = """\
+ MASSIVE is a parallel dataset of > 1M utterances across 51 languages with annotations
+ for the Natural Language Understanding tasks of intent prediction and slot annotation.
+ Utterances span 60 intents and include 55 slot types. MASSIVE was created by localizing
+ the SLURP dataset, composed of general Intelligent Voice Assistant single-shot interactions.
+ """
+ _URL = "https://amazon-massive-nlu-dataset.s3.amazonaws.com/amazon-massive-dataset-1.0.tar.gz"
+
+
+ _LANGUAGES = {
+     "af": "af-ZA",
+     "am": "am-ET",
+     "ar": "ar-SA",
+     "az": "az-AZ",
+     "bn": "bn-BD",
+     "cy": "cy-GB",
+     "da": "da-DK",
+     "de": "de-DE",
+     "el": "el-GR",
+     "en": "en-US",
+     "es": "es-ES",
+     "fa": "fa-IR",
+     "fi": "fi-FI",
+     "fr": "fr-FR",
+     "he": "he-IL",
+     "hi": "hi-IN",
+     "hu": "hu-HU",
+     "hy": "hy-AM",
+     "id": "id-ID",
+     "is": "is-IS",
+     "it": "it-IT",
+     "ja": "ja-JP",
+     "jv": "jv-ID",
+     "ka": "ka-GE",
+     "km": "km-KH",
+     "kn": "kn-IN",
+     "ko": "ko-KR",
+     "lv": "lv-LV",
+     "ml": "ml-IN",
+     "mn": "mn-MN",
+     "ms": "ms-MY",
+     "my": "my-MM",
+     "nb": "nb-NO",
+     "nl": "nl-NL",
+     "pl": "pl-PL",
+     "pt": "pt-PT",
+     "ro": "ro-RO",
+     "ru": "ru-RU",
+     "sl": "sl-SL",
+     "sq": "sq-AL",
+     "sv": "sv-SE",
+     "sw": "sw-KE",
+     "ta": "ta-IN",
+     "te": "te-IN",
+     "th": "th-TH",
+     "tl": "tl-PH",
+     "tr": "tr-TR",
+     "ur": "ur-PK",
+     "vi": "vi-VN",
+     "zh-CN": "zh-CN",
+     "zh-TW": "zh-TW",
+ }
+
+ _INTENTS = [
+     "datetime_query",
+     "iot_hue_lightchange",
+     "transport_ticket",
+     "takeaway_query",
+     "qa_stock",
+     "general_greet",
+     "recommendation_events",
+     "music_dislikeness",
+     "iot_wemo_off",
+     "cooking_recipe",
+     "qa_currency",
+     "transport_traffic",
+     "general_quirky",
+     "weather_query",
+     "audio_volume_up",
+     "email_addcontact",
+     "takeaway_order",
+     "email_querycontact",
+     "iot_hue_lightup",
+     "recommendation_locations",
+     "play_audiobook",
+     "lists_createoradd",
+     "news_query",
+     "alarm_query",
+     "iot_wemo_on",
+     "general_joke",
+     "qa_definition",
+     "social_query",
+     "music_settings",
+     "audio_volume_other",
+     "calendar_remove",
+     "iot_hue_lightdim",
+     "calendar_query",
+     "email_sendemail",
+     "iot_cleaning",
+     "audio_volume_down",
+     "play_radio",
+     "cooking_query",
+     "datetime_convert",
+     "qa_maths",
+     "iot_hue_lightoff",
+     "iot_hue_lighton",
+     "transport_query",
+     "music_likeness",
+     "email_query",
+     "play_music",
+     "audio_volume_mute",
+     "social_post",
+     "alarm_set",
+     "qa_factoid",
+     "calendar_set",
+     "play_game",
+     "alarm_remove",
+     "lists_remove",
+     "transport_taxi",
+     "recommendation_movies",
+     "iot_coffee",
+     "music_query",
+     "play_podcasts",
+     "lists_query",
+ ]
+
+
+ class MASSIVE(datasets.GeneratorBasedBuilder):
+     """MASSIVE: A 1M-Example Multilingual Natural Language Understanding Dataset with 51 Typologically-Diverse Languages"""
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             name=name,
+             version=datasets.Version("1.0.0"),
+             description=f"The MASSIVE corpus for {name}",
+         )
+         for name in _LANGUAGES.keys()
+     ]
+
+     DEFAULT_CONFIG_NAME = "en"
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "label": datasets.features.ClassLabel(names=_INTENTS),
+                     "label_text": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                 },
+             ),
+             supervised_keys=None,
+             homepage="https://github.com/alexa/massive",
+             citation="_CITATION",
+             license="_LICENSE",
+         )
+
+     def _split_generators(self, dl_manager):
+
+         # path = dl_manager.download_and_extract(_URL)
+         archive_path = dl_manager.download(_URL)
+         files = dl_manager.iter_archive(archive_path)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "files": files,
+                     "split": "train",
+                     "lang": self.config.name,
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "files": files,
+                     "split": "dev",
+                     "lang": self.config.name,
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "files": files,
+                     "split": "test",
+                     "lang": self.config.name,
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, files, split, lang):
+         filepath = "1.0/data/" + _LANGUAGES[lang] + ".jsonl"
+         logger.info("⏳ Generating examples from = %s", filepath)
+         for path, f in files:
+             if path == filepath:
+                 lines = f.readlines()
+                 key_ = 0
+                 for line in lines:
+                     data = json.loads(line)
+                     if data["partition"] != split:
+                         continue
+                     yield key_, {
+                         "id": data["id"],
+                         "label": data["intent"],
+                         "label_text": data["intent"],
+                         "text": data["utt"],
+                     }
+                     key_ += 1
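
For reference, a minimal usage sketch of this loading script with the `datasets` library, assuming a local copy of amazon_massive_intent.py (a Hub repo id pointing at this script should work the same way); the "en" config name comes from the _LANGUAGES keys defined above:

from datasets import load_dataset

# Load the English ("en") config of the script above. Depending on the
# `datasets` version, trust_remote_code=True may additionally be required
# for script-based datasets.
dataset = load_dataset("amazon_massive_intent.py", "en")

# The script yields train/validation/test splits with "id", "label",
# "label_text" and "text" columns.
example = dataset["train"][0]
print(example["text"], example["label_text"])

# "label" is a ClassLabel index over the 60 intents in _INTENTS;
# int2str maps it back to the intent name (the same string as "label_text").
label_feature = dataset["train"].features["label"]
print(label_feature.int2str(example["label"]))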