{"dihana": {"description": "Multilingual dIalogAct benchMark is a collection of resources for training, evaluating, and\nanalyzing natural language understanding systems specifically designed for spoken language. Datasets\nare in English, French, German, Italian and Spanish. They cover a variety of domains including\nspontaneous speech, scripted scenarios, and joint task completion. Some datasets additionally include\nemotion and/or sentimant labels.\n", "citation": "@inproceedings{benedi2006design,\ntitle={Design and acquisition of a telephone spontaneous speech dialogue corpus in Spanish: DIHANA},\nauthor={Bened{\\i}, Jos{'e}-Miguel and Lleida, Eduardo and Varona, Amparo and Castro, Mar{\\i}a-Jos{'e} and Galiano, Isabel and Justo, Raquel and L{'o}pez, I and Miguel, Antonio},\nbooktitle={Fifth International Conference on Language Resources and Evaluation (LREC)},\npages={1636--1639},\nyear={2006}\n}\n@inproceedings{post2013improved,\ntitle={Improved speech-to-text translation with the Fisher and Callhome Spanish--English speech translation corpus},\nauthor={Post, Matt and Kumar, Gaurav and Lopez, Adam and Karakos, Damianos and Callison-Burch, Chris and Khudanpur, Sanjeev},\nbooktitle={Proc. 
IWSLT},\nyear={2013}\n}\n@article{coria2005predicting,\ntitle={Predicting obligation dialogue acts from prosodic and speaker infomation},\nauthor={Coria, S and Pineda, L},\njournal={Research on Computing Science (ISSN 1665-9899), Centro de Investigacion en Computacion, Instituto Politecnico Nacional, Mexico City},\nyear={2005}\n}\n@inproceedings{anonymous,\n title = \"Cross-Lingual Pretraining Methods for Spoken Dialog\",\n author = \"Anonymous\",\n booktitle = \"Transactions of the Association for Computational Linguistics\",\n month = ,\n year = \"\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"\",\n doi = \"\",\n pages = \"\",\n abstract = \"There has been an increasing interest among NLP researchers towards learning generic\n representations. However, in the field of multilingual spoken dialogue systems, this problem\n remains overlooked. Indeed most of the pre-training methods focus on learning representations\n for written and non-conversational data or are restricted to the monolingual setting. In this\n work we (1) generalise existing losses to the multilingual setting, (2) develop a new set of\n losses to leverage parallel conversations when available. These losses improve the learning of\n representations by fostering the deep encoder to better learn contextual dependencies. The\n pre-training relies on OpenSubtitles, a huge multilingual corpus that is composed of 24.3G tokens;\n a by-product of the pre-processing includes multilingual aligned conversations. We also introduce\n two new multilingual tasks and a new benchmark on multilingual dialogue act labels called MIAM.\n We validate our pre-training on the three aforementioned tasks and show that our model using our\n newly designed losses achieves better performances than existing models. 
Our implementation will\n be available on github.com and pre-processed data will be available in Datasets (Wolf et al., 2020).\",\n}\n", "homepage": "", "license": "", "features": {"Speaker": {"dtype": "string", "id": null, "_type": "Value"}, "Utterance": {"dtype": "string", "id": null, "_type": "Value"}, "Dialogue_Act": {"dtype": "string", "id": null, "_type": "Value"}, "Dialogue_ID": {"dtype": "string", "id": null, "_type": "Value"}, "File_ID": {"dtype": "string", "id": null, "_type": "Value"}, "Label": {"num_classes": 11, "names": ["Afirmacion", "Apertura", "Cierre", "Confirmacion", "Espera", "Indefinida", "Negacion", "No_entendido", "Nueva_consulta", "Pregunta", "Respuesta"], "names_file": null, "id": null, "_type": "ClassLabel"}, "Idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "miam", "config_name": "dihana", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1946735, "num_examples": 19063, "dataset_name": "miam"}, "validation": {"name": "validation", "num_bytes": 216498, "num_examples": 2123, "dataset_name": "miam"}, "test": {"name": "test", "num_bytes": 238446, "num_examples": 2361, "dataset_name": "miam"}}, "download_checksums": {"https://raw.githubusercontent.com/eusip/MIAM/main/dihana/train.csv": {"num_bytes": 1441183, "checksum": "4702276f3494926fa1c751492b5385530d49ab4b8d4e583f89d5c1ecc9b69311"}, "https://raw.githubusercontent.com/eusip/MIAM/main/dihana/dev.csv": {"num_bytes": 160244, "checksum": "fa267399dbb66f8b096134a2a3f51d71dc48bfa5332dbc0c7b96b2eb0bd91097"}, "https://raw.githubusercontent.com/eusip/MIAM/main/dihana/test.csv": {"num_bytes": 175840, "checksum": "27e45d5f8f0655ed310589777fa9f9eda6a0727dbae277586c086e937c9aca28"}}, "download_size": 1777267, "post_processing_size": null, "dataset_size": 2401679, "size_in_bytes": 4178946}, "ilisten": {"description": "Multilingual dIalogAct 
benchMark is a collection of resources for training, evaluating, and\nanalyzing natural language understanding systems specifically designed for spoken language. Datasets\nare in English, French, German, Italian and Spanish. They cover a variety of domains including\nspontaneous speech, scripted scenarios, and joint task completion. Some datasets additionally include\nemotion and/or sentimant labels.\n", "citation": "@article{basile2018overview,\ntitle={Overview of the Evalita 2018itaLIan Speech acT labEliNg (iLISTEN) Task},\nauthor={Basile, Pierpaolo and Novielli, Nicole},\njournal={EVALITA Evaluation of NLP and Speech Tools for Italian},\nvolume={12},\npages={44},\nyear={2018}\n}\n@inproceedings{anonymous,\n title = \"Cross-Lingual Pretraining Methods for Spoken Dialog\",\n author = \"Anonymous\",\n booktitle = \"Transactions of the Association for Computational Linguistics\",\n month = ,\n year = \"\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"\",\n doi = \"\",\n pages = \"\",\n abstract = \"There has been an increasing interest among NLP researchers towards learning generic\n representations. However, in the field of multilingual spoken dialogue systems, this problem\n remains overlooked. Indeed most of the pre-training methods focus on learning representations\n for written and non-conversational data or are restricted to the monolingual setting. In this\n work we (1) generalise existing losses to the multilingual setting, (2) develop a new set of\n losses to leverage parallel conversations when available. These losses improve the learning of\n representations by fostering the deep encoder to better learn contextual dependencies. The\n pre-training relies on OpenSubtitles, a huge multilingual corpus that is composed of 24.3G tokens;\n a by-product of the pre-processing includes multilingual aligned conversations. 
We also introduce\n two new multilingual tasks and a new benchmark on multilingual dialogue act labels called MIAM.\n We validate our pre-training on the three aforementioned tasks and show that our model using our\n newly designed losses achieves better performances than existing models. Our implementation will\n be available on github.com and pre-processed data will be available in Datasets (Wolf et al., 2020).\",\n}\n", "homepage": "", "license": "", "features": {"Speaker": {"dtype": "string", "id": null, "_type": "Value"}, "Utterance": {"dtype": "string", "id": null, "_type": "Value"}, "Dialogue_Act": {"dtype": "string", "id": null, "_type": "Value"}, "Dialogue_ID": {"dtype": "string", "id": null, "_type": "Value"}, "Label": {"num_classes": 15, "names": ["AGREE", "ANSWER", "CLOSING", "ENCOURAGE-SORRY", "GENERIC-ANSWER", "INFO-REQUEST", "KIND-ATTITUDE_SMALL-TALK", "OFFER-GIVE-INFO", "OPENING", "PERSUASION-SUGGEST", "QUESTION", "REJECT", "SOLICITATION-REQ_CLARIFICATION", "STATEMENT", "TALK-ABOUT-SELF"], "names_file": null, "id": null, "_type": "ClassLabel"}, "Idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "miam", "config_name": "ilisten", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 244336, "num_examples": 1986, "dataset_name": "miam"}, "validation": {"name": "validation", "num_bytes": 33988, "num_examples": 230, "dataset_name": "miam"}, "test": {"name": "test", "num_bytes": 145376, "num_examples": 971, "dataset_name": "miam"}}, "download_checksums": {"https://raw.githubusercontent.com/eusip/MIAM/main/ilisten/train.csv": {"num_bytes": 198219, "checksum": "75ea972e2ac1cac8b2e77f55574b18d218e355eb4585b9b95d6873ebd9bdcd04"}, "https://raw.githubusercontent.com/eusip/MIAM/main/ilisten/dev.csv": {"num_bytes": 28741, "checksum": "745faed68a4471a81ad65300e5476fba94d4740ec113df529a2bcd5dd2439971"}, 
"https://raw.githubusercontent.com/eusip/MIAM/main/ilisten/test.csv": {"num_bytes": 123033, "checksum": "6d2abc758426747b1d271766783d4126756c7372fd92ad3baa0bda67d1de0c77"}}, "download_size": 349993, "post_processing_size": null, "dataset_size": 423700, "size_in_bytes": 773693}, "loria": {"description": "Multilingual dIalogAct benchMark is a collection of resources for training, evaluating, and\nanalyzing natural language understanding systems specifically designed for spoken language. Datasets\nare in English, French, German, Italian and Spanish. They cover a variety of domains including\nspontaneous speech, scripted scenarios, and joint task completion. Some datasets additionally include\nemotion and/or sentimant labels.\n", "citation": "@inproceedings{barahona2012building,\ntitle={Building and exploiting a corpus of dialog interactions between french speaking virtual and human agents},\nauthor={Barahona, Lina Maria Rojas and Lorenzo, Alejandra and Gardent, Claire},\nbooktitle={The eighth international conference on Language Resources and Evaluation (LREC)},\npages={1428--1435},\nyear={2012}\n}\n@inproceedings{anonymous,\n title = \"Cross-Lingual Pretraining Methods for Spoken Dialog\",\n author = \"Anonymous\",\n booktitle = \"Transactions of the Association for Computational Linguistics\",\n month = ,\n year = \"\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"\",\n doi = \"\",\n pages = \"\",\n abstract = \"There has been an increasing interest among NLP researchers towards learning generic\n representations. However, in the field of multilingual spoken dialogue systems, this problem\n remains overlooked. Indeed most of the pre-training methods focus on learning representations\n for written and non-conversational data or are restricted to the monolingual setting. 
In this\n work we (1) generalise existing losses to the multilingual setting, (2) develop a new set of\n losses to leverage parallel conversations when available. These losses improve the learning of\n representations by fostering the deep encoder to better learn contextual dependencies. The\n pre-training relies on OpenSubtitles, a huge multilingual corpus that is composed of 24.3G tokens;\n a by-product of the pre-processing includes multilingual aligned conversations. We also introduce\n two new multilingual tasks and a new benchmark on multilingual dialogue act labels called MIAM.\n We validate our pre-training on the three aforementioned tasks and show that our model using our\n newly designed losses achieves better performances than existing models. Our implementation will\n be available on github.com and pre-processed data will be available in Datasets (Wolf et al., 2020).\",\n}\n", "homepage": "", "license": "", "features": {"Speaker": {"dtype": "string", "id": null, "_type": "Value"}, "Utterance": {"dtype": "string", "id": null, "_type": "Value"}, "Dialogue_Act": {"dtype": "string", "id": null, "_type": "Value"}, "Dialogue_ID": {"dtype": "string", "id": null, "_type": "Value"}, "File_ID": {"dtype": "string", "id": null, "_type": "Value"}, "Label": {"num_classes": 31, "names": ["ack", "ask", "find_mold", "find_plans", "first_step", "greet", "help", "inform", "inform_engine", "inform_job", "inform_material_space", "informer_conditioner", "informer_decoration", "informer_elcomps", "informer_end_manufacturing", "kindAtt", "manufacturing_reqs", "next_step", "no", "other", "quality_control", "quit", "reqRep", "security_policies", "staff_enterprise", "staff_job", "studies_enterprise", "studies_job", "todo_failure", "todo_irreparable", "yes"], "names_file": null, "id": null, "_type": "ClassLabel"}, "Idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "miam", "config_name": "loria", "version": 
{"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1208730, "num_examples": 8465, "dataset_name": "miam"}, "validation": {"name": "validation", "num_bytes": 133829, "num_examples": 942, "dataset_name": "miam"}, "test": {"name": "test", "num_bytes": 149855, "num_examples": 1047, "dataset_name": "miam"}}, "download_checksums": {"https://raw.githubusercontent.com/eusip/MIAM/main/loria/train.csv": {"num_bytes": 989066, "checksum": "0cda3440cdf0157f1a70617374842f04a3b7cf8c5175cca9f3fe9a33a5105ddf"}, "https://raw.githubusercontent.com/eusip/MIAM/main/loria/dev.csv": {"num_bytes": 109364, "checksum": "02f99287f69dc869926aaf077d6c3d0a81cb2576c80721c9272e39a2b226d989"}, "https://raw.githubusercontent.com/eusip/MIAM/main/loria/test.csv": {"num_bytes": 122702, "checksum": "8e2f5e513761970aad138332fb83488ca52c943492153c08199ddc4ae8fe4209"}}, "download_size": 1221132, "post_processing_size": null, "dataset_size": 1492414, "size_in_bytes": 2713546}, "maptask": {"description": "Multilingual dIalogAct benchMark is a collection of resources for training, evaluating, and\nanalyzing natural language understanding systems specifically designed for spoken language. Datasets\nare in English, French, German, Italian and Spanish. They cover a variety of domains including\nspontaneous speech, scripted scenarios, and joint task completion. 
Some datasets additionally include\nemotion and/or sentimant labels.\n", "citation": "@inproceedings{thompson1993hcrc,\ntitle={The HCRC map task corpus: natural dialogue for speech recognition},\nauthor={Thompson, Henry S and Anderson, Anne H and Bard, Ellen Gurman and Doherty-Sneddon,\nGwyneth and Newlands, Alison and Sotillo, Cathy},\nbooktitle={HUMAN LANGUAGE TECHNOLOGY: Proceedings of a Workshop Held at Plainsboro, New Jersey, March 21-24, 1993},\nyear={1993}\n}\n@inproceedings{anonymous,\n title = \"Cross-Lingual Pretraining Methods for Spoken Dialog\",\n author = \"Anonymous\",\n booktitle = \"Transactions of the Association for Computational Linguistics\",\n month = ,\n year = \"\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"\",\n doi = \"\",\n pages = \"\",\n abstract = \"There has been an increasing interest among NLP researchers towards learning generic\n representations. However, in the field of multilingual spoken dialogue systems, this problem\n remains overlooked. Indeed most of the pre-training methods focus on learning representations\n for written and non-conversational data or are restricted to the monolingual setting. In this\n work we (1) generalise existing losses to the multilingual setting, (2) develop a new set of\n losses to leverage parallel conversations when available. These losses improve the learning of\n representations by fostering the deep encoder to better learn contextual dependencies. The\n pre-training relies on OpenSubtitles, a huge multilingual corpus that is composed of 24.3G tokens;\n a by-product of the pre-processing includes multilingual aligned conversations. We also introduce\n two new multilingual tasks and a new benchmark on multilingual dialogue act labels called MIAM.\n We validate our pre-training on the three aforementioned tasks and show that our model using our\n newly designed losses achieves better performances than existing models. 
Our implementation will\n be available on github.com and pre-processed data will be available in Datasets (Wolf et al., 2020).\",\n}\n", "homepage": "http://groups.inf.ed.ac.uk/maptask/", "license": "", "features": {"Speaker": {"dtype": "string", "id": null, "_type": "Value"}, "Utterance": {"dtype": "string", "id": null, "_type": "Value"}, "Dialogue_Act": {"dtype": "string", "id": null, "_type": "Value"}, "Dialogue_ID": {"dtype": "string", "id": null, "_type": "Value"}, "File_ID": {"dtype": "string", "id": null, "_type": "Value"}, "Label": {"num_classes": 12, "names": ["acknowledge", "align", "check", "clarify", "explain", "instruct", "query_w", "query_yn", "ready", "reply_n", "reply_w", "reply_y"], "names_file": null, "id": null, "_type": "ClassLabel"}, "Idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "miam", "config_name": "maptask", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1910120, "num_examples": 25382, "dataset_name": "miam"}, "validation": {"name": "validation", "num_bytes": 389879, "num_examples": 5221, "dataset_name": "miam"}, "test": {"name": "test", "num_bytes": 396947, "num_examples": 5335, "dataset_name": "miam"}}, "download_checksums": {"https://raw.githubusercontent.com/eusip/MIAM/main/maptask/train.csv": {"num_bytes": 1226569, "checksum": "76ce790d9c5100d2f2b1f535edd8e8b5a6d88d6bbaa7f3a948dd95bff1e2e798"}, "https://raw.githubusercontent.com/eusip/MIAM/main/maptask/dev.csv": {"num_bytes": 249215, "checksum": "895ca2761e8224b02f963df6836912c0a913362f5d44dfc4813391b51919f147"}, "https://raw.githubusercontent.com/eusip/MIAM/main/maptask/test.csv": {"num_bytes": 253237, "checksum": "e11a1fbaa4ffc74c0b438b2d3e6f17f991adee50e368c2c9c57f98ef6a2dd0c3"}}, "download_size": 1729021, "post_processing_size": null, "dataset_size": 2696946, "size_in_bytes": 4425967}, "vm2": {"description": 
"Multilingual dIalogAct benchMark is a collection of resources for training, evaluating, and\nanalyzing natural language understanding systems specifically designed for spoken language. Datasets\nare in English, French, German, Italian and Spanish. They cover a variety of domains including\nspontaneous speech, scripted scenarios, and joint task completion. Some datasets additionally include\nemotion and/or sentimant labels.\n", "citation": "@book{kay1992verbmobil,\ntitle={Verbmobil: A translation system for face-to-face dialog},\nauthor={Kay, Martin},\nyear={1992},\npublisher={University of Chicago Press}\n}\n@inproceedings{anonymous,\n title = \"Cross-Lingual Pretraining Methods for Spoken Dialog\",\n author = \"Anonymous\",\n booktitle = \"Transactions of the Association for Computational Linguistics\",\n month = ,\n year = \"\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"\",\n doi = \"\",\n pages = \"\",\n abstract = \"There has been an increasing interest among NLP researchers towards learning generic\n representations. However, in the field of multilingual spoken dialogue systems, this problem\n remains overlooked. Indeed most of the pre-training methods focus on learning representations\n for written and non-conversational data or are restricted to the monolingual setting. In this\n work we (1) generalise existing losses to the multilingual setting, (2) develop a new set of\n losses to leverage parallel conversations when available. These losses improve the learning of\n representations by fostering the deep encoder to better learn contextual dependencies. The\n pre-training relies on OpenSubtitles, a huge multilingual corpus that is composed of 24.3G tokens;\n a by-product of the pre-processing includes multilingual aligned conversations. 
We also introduce\n two new multilingual tasks and a new benchmark on multilingual dialogue act labels called MIAM.\n We validate our pre-training on the three aforementioned tasks and show that our model using our\n newly designed losses achieves better performances than existing models. Our implementation will\n be available on github.com and pre-processed data will be available in Datasets (Wolf et al., 2020).\",\n}\n", "homepage": "", "license": "", "features": {"Utterance": {"dtype": "string", "id": null, "_type": "Value"}, "Dialogue_Act": {"dtype": "string", "id": null, "_type": "Value"}, "Speaker": {"dtype": "string", "id": null, "_type": "Value"}, "Dialogue_ID": {"dtype": "string", "id": null, "_type": "Value"}, "Label": {"num_classes": 31, "names": ["ACCEPT", "BACKCHANNEL", "BYE", "CLARIFY", "CLOSE", "COMMIT", "CONFIRM", "DEFER", "DELIBERATE", "DEVIATE_SCENARIO", "EXCLUDE", "EXPLAINED_REJECT", "FEEDBACK", "FEEDBACK_NEGATIVE", "FEEDBACK_POSITIVE", "GIVE_REASON", "GREET", "INFORM", "INIT", "INTRODUCE", "NOT_CLASSIFIABLE", "OFFER", "POLITENESS_FORMULA", "REJECT", "REQUEST", "REQUEST_CLARIFY", "REQUEST_COMMENT", "REQUEST_COMMIT", "REQUEST_SUGGEST", "SUGGEST", "THANK"], "names_file": null, "id": null, "_type": "ClassLabel"}, "Idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "miam", "config_name": "vm2", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1869254, "num_examples": 25060, "dataset_name": "miam"}, "validation": {"name": "validation", "num_bytes": 209390, "num_examples": 2860, "dataset_name": "miam"}, "test": {"name": "test", "num_bytes": 209032, "num_examples": 2855, "dataset_name": "miam"}}, "download_checksums": {"https://raw.githubusercontent.com/eusip/MIAM/main/vm2/train.csv": {"num_bytes": 1342990, "checksum": "5b5ddbd333a57f033a01e9b4135c7675d56a3a819258f85cee40e6adef53f7f7"}, 
"https://raw.githubusercontent.com/eusip/MIAM/main/vm2/dev.csv": {"num_bytes": 149358, "checksum": "30c5ba7219d113cdb4260488b0f7e515347f8c06ca5e0345b701ccfc229c41be"}, "https://raw.githubusercontent.com/eusip/MIAM/main/vm2/test.csv": {"num_bytes": 149105, "checksum": "a94db1a8ece862084624ffb0774b9b22d180a30a778a19613b963e4c244c9da3"}}, "download_size": 1641453, "post_processing_size": null, "dataset_size": 2287676, "size_in_bytes": 3929129}}