Datasets:
mteb
/

Modalities:
Text
Formats:
json
Libraries:
Datasets
Dask
amazon_massive_intent / amazon_massive_intent.py
nouamanetazi's picture
nouamanetazi HF staff
Update amazon_massive_intent.py
31efe3c
raw
history blame
5.81 kB
# coding=utf-8
"""MASSIVE: A 1M-Example Multilingual Natural Language Understanding Dataset with 51 Typologically-Diverse Languages"""
import json
import datasets
# NOTE(review): `os` appears unused in this file — kept in case another chunk
# of the repository relies on it being imported here; confirm before removing.
import os
# Module-level logger following the datasets-library convention.
logger = datasets.logging.get_logger(__name__)
# Human-readable dataset card description surfaced through DatasetInfo.
_DESCRIPTION = """\
MASSIVE is a parallel dataset of > 1M utterances across 51 languages with annotations
for the Natural Language Understanding tasks of intent prediction and slot annotation.
Utterances span 60 intents and include 55 slot types. MASSIVE was created by localizing
the SLURP dataset, composed of general Intelligent Voice Assistant single-shot interactions.
"""
# Relative path of the data archive; resolved by the DownloadManager against
# the dataset repository (it is not a full URL here).
_URL = "amazon-massive-dataset-1.0.tar.gz"
# Maps the short builder-config name (e.g. "en") to the locale-qualified file
# stem used inside the archive (e.g. "en-US" -> 1.0/data/en-US.jsonl).
_LANGUAGES = {
    "af": "af-ZA",
    "am": "am-ET",
    "ar": "ar-SA",
    "az": "az-AZ",
    "bn": "bn-BD",
    "cy": "cy-GB",
    "da": "da-DK",
    "de": "de-DE",
    "el": "el-GR",
    "en": "en-US",
    "es": "es-ES",
    "fa": "fa-IR",
    "fi": "fi-FI",
    "fr": "fr-FR",
    "he": "he-IL",
    "hi": "hi-IN",
    "hu": "hu-HU",
    "hy": "hy-AM",
    "id": "id-ID",
    "is": "is-IS",
    "it": "it-IT",
    "ja": "ja-JP",
    "jv": "jv-ID",
    "ka": "ka-GE",
    "km": "km-KH",
    "kn": "kn-IN",
    "ko": "ko-KR",
    "lv": "lv-LV",
    "ml": "ml-IN",
    "mn": "mn-MN",
    "ms": "ms-MY",
    "my": "my-MM",
    "nb": "nb-NO",
    "nl": "nl-NL",
    "pl": "pl-PL",
    "pt": "pt-PT",
    "ro": "ro-RO",
    "ru": "ru-RU",
    # NOTE(review): "sl-SL" looks unusual (Slovenian is usually "sl-SI"), but
    # it must match the file name inside the upstream archive — do not "fix"
    # without checking the archive contents.
    "sl": "sl-SL",
    "sq": "sq-AL",
    "sv": "sv-SE",
    "sw": "sw-KE",
    "ta": "ta-IN",
    "te": "te-IN",
    "th": "th-TH",
    "tl": "tl-PH",
    "tr": "tr-TR",
    "ur": "ur-PK",
    "vi": "vi-VN",
    "zh-CN": "zh-CN",
    "zh-TW": "zh-TW",
}
# Closed set of the 60 intent labels; the ORDER defines the integer ids of the
# ClassLabel feature, so it must never be reordered.
_INTENTS = [
    "datetime_query",
    "iot_hue_lightchange",
    "transport_ticket",
    "takeaway_query",
    "qa_stock",
    "general_greet",
    "recommendation_events",
    "music_dislikeness",
    "iot_wemo_off",
    "cooking_recipe",
    "qa_currency",
    "transport_traffic",
    "general_quirky",
    "weather_query",
    "audio_volume_up",
    "email_addcontact",
    "takeaway_order",
    "email_querycontact",
    "iot_hue_lightup",
    "recommendation_locations",
    "play_audiobook",
    "lists_createoradd",
    "news_query",
    "alarm_query",
    "iot_wemo_on",
    "general_joke",
    "qa_definition",
    "social_query",
    "music_settings",
    "audio_volume_other",
    "calendar_remove",
    "iot_hue_lightdim",
    "calendar_query",
    "email_sendemail",
    "iot_cleaning",
    "audio_volume_down",
    "play_radio",
    "cooking_query",
    "datetime_convert",
    "qa_maths",
    "iot_hue_lightoff",
    "iot_hue_lighton",
    "transport_query",
    "music_likeness",
    "email_query",
    "play_music",
    "audio_volume_mute",
    "social_post",
    "alarm_set",
    "qa_factoid",
    "calendar_set",
    "play_game",
    "alarm_remove",
    "lists_remove",
    "transport_taxi",
    "recommendation_movies",
    "iot_coffee",
    "music_query",
    "play_podcasts",
    "lists_query",
]
class MASSIVE(datasets.GeneratorBasedBuilder):
    """MASSIVE: A 1M-Example Multilingual Natural Language Understanding Dataset with 51 Typologically-Diverse Languages"""

    # One builder config per language short-code; selecting a config picks the
    # corresponding <locale>.jsonl file inside the archive.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=name,
            version=datasets.Version("1.0.0"),
            description=f"The MASSIVE corpora for {name}",
        )
        for name in _LANGUAGES.keys()
    ]

    DEFAULT_CONFIG_NAME = "en"

    def _info(self):
        """Return the dataset metadata (features schema, homepage, etc.)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    # Integer-encoded intent; names (and hence ids) come from
                    # the module-level _INTENTS list.
                    "label": datasets.features.ClassLabel(names=_INTENTS),
                    "label_text": datasets.Value("string"),
                    "text": datasets.Value("string"),
                },
            ),
            supervised_keys=None,
            homepage="https://github.com/alexa/massive",
            # NOTE(review): these are literal strings, not the _CITATION /
            # _LICENSE variables they appear to name; those variables are not
            # defined in this file, so the strings are kept as-is. Confirm
            # whether real citation/license text should be added.
            citation="_CITATION",
            license="_LICENSE",
        )

    def _split_generators(self, dl_manager):
        """Download the archive and define train/validation/test splits.

        The archive is downloaded WITHOUT extraction; members are streamed
        with ``dl_manager.iter_archive``.
        """
        archive_path = dl_manager.download(_URL)
        # BUG FIX: the original called iter_archive() once and passed the SAME
        # generator object to all three splits. Once the first split consumed
        # it, the remaining splits iterated an exhausted generator and yielded
        # no examples. Each split must get its own fresh iterator.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "files": dl_manager.iter_archive(archive_path),
                    "split": "train",
                    "lang": self.config.name,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "files": dl_manager.iter_archive(archive_path),
                    "split": "dev",
                    "lang": self.config.name,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "files": dl_manager.iter_archive(archive_path),
                    "split": "test",
                    "lang": self.config.name,
                },
            ),
        ]

    def _generate_examples(self, files, split, lang):
        """Yield ``(key, example)`` pairs for one language file and one split.

        Args:
            files: iterator of ``(path, file_obj)`` pairs over archive members.
            split: partition value to keep ("train", "dev" or "test").
            lang: short language code used to resolve the target .jsonl file.
        """
        filepath = "1.0/data/" + _LANGUAGES[lang] + ".jsonl"
        logger.info("⏳ Generating examples from = %s", filepath)
        for path, f in files:
            if path != filepath:
                continue
            key_ = 0
            # Iterate the file object lazily instead of readlines() to avoid
            # materializing the whole member in memory.
            for line in f:
                data = json.loads(line)
                # Each .jsonl file holds all partitions; keep only the
                # requested one.
                if data["partition"] != split:
                    continue
                yield key_, {
                    "id": data["id"],
                    "label": data["intent"],
                    "label_text": data["intent"],
                    "text": data["utt"],
                }
                key_ += 1
            # Archive member paths are unique — nothing left to scan.
            break