{"i-am-neo--ami_shard_0of8_phrases_experiment": {"description": "The AMI Meeting Corpus consists of 100 hours of meeting recordings. The recordings use a range of signals\nsynchronized to a common timeline. These include close-talking and far-field microphones, individual and\nroom-view video cameras, and output from a slide projector and an electronic whiteboard. During the meetings,\nthe participants also have unsynchronized pens available to them that record what is written. The meetings\nwere recorded in English using three different rooms with different acoustic properties, and include mostly\nnon-native speakers. \n\nClose talking audio of single headset. This configuration only includes audio belonging to the headset of the person currently speaking.", "citation": "@inproceedings{10.1007/11677482_3,\nauthor = {Carletta, Jean and Ashby, Simone and Bourban, Sebastien and Flynn, Mike and Guillemot, Mael and Hain, Thomas and Kadlec, Jaroslav and Karaiskos, Vasilis and Kraaij, Wessel and Kronenthal, Melissa and Lathoud, Guillaume and Lincoln, Mike and Lisowska, Agnes and McCowan, Iain and Post, Wilfried and Reidsma, Dennis and Wellner, Pierre},\ntitle = {The AMI Meeting Corpus: A Pre-Announcement},\nyear = {2005},\nisbn = {3540325492},\npublisher = {Springer-Verlag},\naddress = {Berlin, Heidelberg},\nurl = {https://doi.org/10.1007/11677482_3},\ndoi = {10.1007/11677482_3},\nabstract = {The AMI Meeting Corpus is a multi-modal data set consisting of 100 hours of meeting\nrecordings. It is being created in the context of a project that is developing meeting\nbrowsing technology and will eventually be released publicly. Some of the meetings\nit contains are naturally occurring, and some are elicited, particularly using a scenario\nin which the participants play different roles in a design team, taking a design project\nfrom kick-off to completion over the course of a day. The corpus is being recorded\nusing a wide range of devices including close-talking and far-field microphones, individual\nand room-view video cameras, projection, a whiteboard, and individual pens, all of\nwhich produce output signals that are synchronized with each other. It is also being\nhand-annotated for many different phenomena, including orthographic transcription,\ndiscourse properties such as named entities and dialogue acts, summaries, emotions,\nand some head and hand gestures. 
We describe the data set, including the rationale\nbehind using elicited material, and explain how the material is being recorded, transcribed\nand annotated.},\nbooktitle = {Proceedings of the Second International Conference on Machine Learning for Multimodal Interaction},\npages = {28\u201339},\nnumpages = {12},\nlocation = {Edinburgh, UK},\nseries = {MLMI'05}\n}\n", "homepage": "https://groups.inf.ed.ac.uk/ami/corpus/", "license": "", "features": {"audio": {"feature": {"dtype": "float32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "ami", "config_name": "headset-single", "version": {"version_str": "1.6.2", "description": "", "major": 1, "minor": 6, "patch": 2}, "splits": {"train": {"name": "train", "num_bytes": 1632493882, "num_examples": 7262, "dataset_name": "ami_shard_0of8_phrases_experiment"}, "validation": {"name": "validation", "num_bytes": 224781995, "num_examples": 1094, "dataset_name": "ami_shard_0of8_phrases_experiment"}, "test": {"name": "test", "num_bytes": 176875231, "num_examples": 773, "dataset_name": "ami_shard_0of8_phrases_experiment"}}, "download_checksums": null, "download_size": 900971106, "post_processing_size": null, "dataset_size": 2034151108, "size_in_bytes": 2935122214}} |