--- annotations_creators: - other language_creators: - other language: - en license: - unknown multilinguality: - monolingual pretty_name: SUPERB size_categories: - unknown source_datasets: - original - extended|librispeech_asr - extended|other-librimix - extended|other-speech_commands task_categories: - automatic-speech-recognition - audio-classification task_ids: - keyword-spotting - speaker-identification - audio-intent-classification - audio-emotion-recognition tags: - query-by-example-spoken-term-detection - audio-slot-filling - speaker-diarization - automatic-speaker-verification dataset_info: - config_name: asr features: - name: file dtype: string - name: audio dtype: audio: sampling_rate: 16000 - name: text dtype: string - name: speaker_id dtype: int64 - name: chapter_id dtype: int64 - name: id dtype: string splits: - name: train num_bytes: 11852430 num_examples: 28539 - name: validation num_bytes: 897213 num_examples: 2703 - name: test num_bytes: 871234 num_examples: 2620 download_size: 7071899769 dataset_size: 13620877 - config_name: sd features: - name: record_id dtype: string - name: file dtype: string - name: start dtype: int64 - name: end dtype: int64 - name: speakers list: - name: speaker_id dtype: string - name: start dtype: int64 - name: end dtype: int64 splits: - name: train num_bytes: 4622013 num_examples: 13901 - name: dev num_bytes: 860472 num_examples: 3014 - name: test num_bytes: 847803 num_examples: 3002 download_size: 7190370211 dataset_size: 6330288 - config_name: ks features: - name: file dtype: string - name: label dtype: class_label: names: 0: 'yes' 1: 'no' 2: up 3: down 4: left 5: right 6: 'on' 7: 'off' 8: stop 9: go 10: _silence_ 11: _unknown_ splits: - name: train num_bytes: 8467781 num_examples: 51094 - name: validation num_bytes: 1126476 num_examples: 6798 - name: test num_bytes: 510619 num_examples: 3081 download_size: 1560367713 dataset_size: 10104876 - config_name: ic features: - name: file dtype: string - name: speaker_id dtype: string - name: text dtype: string - name: action dtype: class_label: names: 0: activate 1: bring 2: change language 3: deactivate 4: decrease 5: increase - name: object dtype: class_label: names: 0: Chinese 1: English 2: German 3: Korean 4: heat 5: juice 6: lamp 7: lights 8: music 9: newspaper 10: none 11: shoes 12: socks 13: volume - name: location dtype: class_label: names: 0: bedroom 1: kitchen 2: none 3: washroom splits: - name: train num_bytes: 7071466 num_examples: 23132 - name: validation num_bytes: 953622 num_examples: 3118 - name: test num_bytes: 1158347 num_examples: 3793 download_size: 1544093324 dataset_size: 9183435 - config_name: si features: - name: file dtype: string - name: label dtype: class_label: names: 0: id10001 1: id10002 2: id10003 3: id10004 4: id10005 5: id10006 6: id10007 7: id10008 8: id10009 9: id10010 10: id10011 11: id10012 12: id10013 13: id10014 14: id10015 15: id10016 16: id10017 17: id10018 18: id10019 19: id10020 20: id10021 21: id10022 22: id10023 23: id10024 24: id10025 25: id10026 26: id10027 27: id10028 28: id10029 29: id10030 30: id10031 31: id10032 32: id10033 33: id10034 34: id10035 35: id10036 36: id10037 37: id10038 38: id10039 39: id10040 40: id10041 41: id10042 42: id10043 43: id10044 44: id10045 45: id10046 46: id10047 47: id10048 48: id10049 49: id10050 50: id10051 51: id10052 52: id10053 53: id10054 54: id10055 55: id10056 56: id10057 57: id10058 58: id10059 59: id10060 60: id10061 61: id10062 62: id10063 63: id10064 64: id10065 65: id10066 66: id10067 67: id10068 68: id10069 
69: id10070 70: id10071 71: id10072 72: id10073 73: id10074 74: id10075 75: id10076 76: id10077 77: id10078 78: id10079 79: id10080 80: id10081 81: id10082 82: id10083 83: id10084 84: id10085 85: id10086 86: id10087 87: id10088 88: id10089 89: id10090 90: id10091 91: id10092 92: id10093 93: id10094 94: id10095 95: id10096 96: id10097 97: id10098 98: id10099 99: id10100 100: id10101 101: id10102 102: id10103 103: id10104 104: id10105 105: id10106 106: id10107 107: id10108 108: id10109 109: id10110 110: id10111 111: id10112 112: id10113 113: id10114 114: id10115 115: id10116 116: id10117 117: id10118 118: id10119 119: id10120 120: id10121 121: id10122 122: id10123 123: id10124 124: id10125 125: id10126 126: id10127 127: id10128 128: id10129 129: id10130 130: id10131 131: id10132 132: id10133 133: id10134 134: id10135 135: id10136 136: id10137 137: id10138 138: id10139 139: id10140 140: id10141 141: id10142 142: id10143 143: id10144 144: id10145 145: id10146 146: id10147 147: id10148 148: id10149 149: id10150 150: id10151 151: id10152 152: id10153 153: id10154 154: id10155 155: id10156 156: id10157 157: id10158 158: id10159 159: id10160 160: id10161 161: id10162 162: id10163 163: id10164 164: id10165 165: id10166 166: id10167 167: id10168 168: id10169 169: id10170 170: id10171 171: id10172 172: id10173 173: id10174 174: id10175 175: id10176 176: id10177 177: id10178 178: id10179 179: id10180 180: id10181 181: id10182 182: id10183 183: id10184 184: id10185 185: id10186 186: id10187 187: id10188 188: id10189 189: id10190 190: id10191 191: id10192 192: id10193 193: id10194 194: id10195 195: id10196 196: id10197 197: id10198 198: id10199 199: id10200 200: id10201 201: id10202 202: id10203 203: id10204 204: id10205 205: id10206 206: id10207 207: id10208 208: id10209 209: id10210 210: id10211 211: id10212 212: id10213 213: id10214 214: id10215 215: id10216 216: id10217 217: id10218 218: id10219 219: id10220 220: id10221 221: id10222 222: id10223 223: id10224 224: id10225 225: id10226 226: id10227 227: id10228 228: id10229 229: id10230 230: id10231 231: id10232 232: id10233 233: id10234 234: id10235 235: id10236 236: id10237 237: id10238 238: id10239 239: id10240 240: id10241 241: id10242 242: id10243 243: id10244 244: id10245 245: id10246 246: id10247 247: id10248 248: id10249 249: id10250 250: id10251 251: id10252 252: id10253 253: id10254 254: id10255 255: id10256 256: id10257 257: id10258 258: id10259 259: id10260 260: id10261 261: id10262 262: id10263 263: id10264 264: id10265 265: id10266 266: id10267 267: id10268 268: id10269 269: id10270 270: id10271 271: id10272 272: id10273 273: id10274 274: id10275 275: id10276 276: id10277 277: id10278 278: id10279 279: id10280 280: id10281 281: id10282 282: id10283 283: id10284 284: id10285 285: id10286 286: id10287 287: id10288 288: id10289 289: id10290 290: id10291 291: id10292 292: id10293 293: id10294 294: id10295 295: id10296 296: id10297 297: id10298 298: id10299 299: id10300 300: id10301 301: id10302 302: id10303 303: id10304 304: id10305 305: id10306 306: id10307 307: id10308 308: id10309 309: id10310 310: id10311 311: id10312 312: id10313 313: id10314 314: id10315 315: id10316 316: id10317 317: id10318 318: id10319 319: id10320 320: id10321 321: id10322 322: id10323 323: id10324 324: id10325 325: id10326 326: id10327 327: id10328 328: id10329 329: id10330 330: id10331 331: id10332 332: id10333 333: id10334 334: id10335 335: id10336 336: id10337 337: id10338 338: id10339 339: id10340 340: id10341 341: id10342 342: id10343 343: id10344 344: 
id10345 345: id10346 346: id10347 347: id10348 348: id10349 349: id10350 350: id10351 351: id10352 352: id10353 353: id10354 354: id10355 355: id10356 356: id10357 357: id10358 358: id10359 359: id10360 360: id10361 361: id10362 362: id10363 363: id10364 364: id10365 365: id10366 366: id10367 367: id10368 368: id10369 369: id10370 370: id10371 371: id10372 372: id10373 373: id10374 374: id10375 375: id10376 376: id10377 377: id10378 378: id10379 379: id10380 380: id10381 381: id10382 382: id10383 383: id10384 384: id10385 385: id10386 386: id10387 387: id10388 388: id10389 389: id10390 390: id10391 391: id10392 392: id10393 393: id10394 394: id10395 395: id10396 396: id10397 397: id10398 398: id10399 399: id10400 400: id10401 401: id10402 402: id10403 403: id10404 404: id10405 405: id10406 406: id10407 407: id10408 408: id10409 409: id10410 410: id10411 411: id10412 412: id10413 413: id10414 414: id10415 415: id10416 416: id10417 417: id10418 418: id10419 419: id10420 420: id10421 421: id10422 422: id10423 423: id10424 424: id10425 425: id10426 426: id10427 427: id10428 428: id10429 429: id10430 430: id10431 431: id10432 432: id10433 433: id10434 434: id10435 435: id10436 436: id10437 437: id10438 438: id10439 439: id10440 440: id10441 441: id10442 442: id10443 443: id10444 444: id10445 445: id10446 446: id10447 447: id10448 448: id10449 449: id10450 450: id10451 451: id10452 452: id10453 453: id10454 454: id10455 455: id10456 456: id10457 457: id10458 458: id10459 459: id10460 460: id10461 461: id10462 462: id10463 463: id10464 464: id10465 465: id10466 466: id10467 467: id10468 468: id10469 469: id10470 470: id10471 471: id10472 472: id10473 473: id10474 474: id10475 475: id10476 476: id10477 477: id10478 478: id10479 479: id10480 480: id10481 481: id10482 482: id10483 483: id10484 484: id10485 485: id10486 486: id10487 487: id10488 488: id10489 489: id10490 490: id10491 491: id10492 492: id10493 493: id10494 494: id10495 495: id10496 496: id10497 497: id10498 498: id10499 499: id10500 500: id10501 501: id10502 502: id10503 503: id10504 504: id10505 505: id10506 506: id10507 507: id10508 508: id10509 509: id10510 510: id10511 511: id10512 512: id10513 513: id10514 514: id10515 515: id10516 516: id10517 517: id10518 518: id10519 519: id10520 520: id10521 521: id10522 522: id10523 523: id10524 524: id10525 525: id10526 526: id10527 527: id10528 528: id10529 529: id10530 530: id10531 531: id10532 532: id10533 533: id10534 534: id10535 535: id10536 536: id10537 537: id10538 538: id10539 539: id10540 540: id10541 541: id10542 542: id10543 543: id10544 544: id10545 545: id10546 546: id10547 547: id10548 548: id10549 549: id10550 550: id10551 551: id10552 552: id10553 553: id10554 554: id10555 555: id10556 556: id10557 557: id10558 558: id10559 559: id10560 560: id10561 561: id10562 562: id10563 563: id10564 564: id10565 565: id10566 566: id10567 567: id10568 568: id10569 569: id10570 570: id10571 571: id10572 572: id10573 573: id10574 574: id10575 575: id10576 576: id10577 577: id10578 578: id10579 579: id10580 580: id10581 581: id10582 582: id10583 583: id10584 584: id10585 585: id10586 586: id10587 587: id10588 588: id10589 589: id10590 590: id10591 591: id10592 592: id10593 593: id10594 594: id10595 595: id10596 596: id10597 597: id10598 598: id10599 599: id10600 600: id10601 601: id10602 602: id10603 603: id10604 604: id10605 605: id10606 606: id10607 607: id10608 608: id10609 609: id10610 610: id10611 611: id10612 612: id10613 613: id10614 614: id10615 615: id10616 616: id10617 617: 
id10618 618: id10619 619: id10620 620: id10621 621: id10622 622: id10623 623: id10624 624: id10625 625: id10626 626: id10627 627: id10628 628: id10629 629: id10630 630: id10631 631: id10632 632: id10633 633: id10634 634: id10635 635: id10636 636: id10637 637: id10638 638: id10639 639: id10640 640: id10641 641: id10642 642: id10643 643: id10644 644: id10645 645: id10646 646: id10647 647: id10648 648: id10649 649: id10650 650: id10651 651: id10652 652: id10653 653: id10654 654: id10655 655: id10656 656: id10657 657: id10658 658: id10659 659: id10660 660: id10661 661: id10662 662: id10663 663: id10664 664: id10665 665: id10666 666: id10667 667: id10668 668: id10669 669: id10670 670: id10671 671: id10672 672: id10673 673: id10674 674: id10675 675: id10676 676: id10677 677: id10678 678: id10679 679: id10680 680: id10681 681: id10682 682: id10683 683: id10684 684: id10685 685: id10686 686: id10687 687: id10688 688: id10689 689: id10690 690: id10691 691: id10692 692: id10693 693: id10694 694: id10695 695: id10696 696: id10697 697: id10698 698: id10699 699: id10700 700: id10701 701: id10702 702: id10703 703: id10704 704: id10705 705: id10706 706: id10707 707: id10708 708: id10709 709: id10710 710: id10711 711: id10712 712: id10713 713: id10714 714: id10715 715: id10716 716: id10717 717: id10718 718: id10719 719: id10720 720: id10721 721: id10722 722: id10723 723: id10724 724: id10725 725: id10726 726: id10727 727: id10728 728: id10729 729: id10730 730: id10731 731: id10732 732: id10733 733: id10734 734: id10735 735: id10736 736: id10737 737: id10738 738: id10739 739: id10740 740: id10741 741: id10742 742: id10743 743: id10744 744: id10745 745: id10746 746: id10747 747: id10748 748: id10749 749: id10750 750: id10751 751: id10752 752: id10753 753: id10754 754: id10755 755: id10756 756: id10757 757: id10758 758: id10759 759: id10760 760: id10761 761: id10762 762: id10763 763: id10764 764: id10765 765: id10766 766: id10767 767: id10768 768: id10769 769: id10770 770: id10771 771: id10772 772: id10773 773: id10774 774: id10775 775: id10776 776: id10777 777: id10778 778: id10779 779: id10780 780: id10781 781: id10782 782: id10783 783: id10784 784: id10785 785: id10786 786: id10787 787: id10788 788: id10789 789: id10790 790: id10791 791: id10792 792: id10793 793: id10794 794: id10795 795: id10796 796: id10797 797: id10798 798: id10799 799: id10800 800: id10801 801: id10802 802: id10803 803: id10804 804: id10805 805: id10806 806: id10807 807: id10808 808: id10809 809: id10810 810: id10811 811: id10812 812: id10813 813: id10814 814: id10815 815: id10816 816: id10817 817: id10818 818: id10819 819: id10820 820: id10821 821: id10822 822: id10823 823: id10824 824: id10825 825: id10826 826: id10827 827: id10828 828: id10829 829: id10830 830: id10831 831: id10832 832: id10833 833: id10834 834: id10835 835: id10836 836: id10837 837: id10838 838: id10839 839: id10840 840: id10841 841: id10842 842: id10843 843: id10844 844: id10845 845: id10846 846: id10847 847: id10848 848: id10849 849: id10850 850: id10851 851: id10852 852: id10853 853: id10854 854: id10855 855: id10856 856: id10857 857: id10858 858: id10859 859: id10860 860: id10861 861: id10862 862: id10863 863: id10864 864: id10865 865: id10866 866: id10867 867: id10868 868: id10869 869: id10870 870: id10871 871: id10872 872: id10873 873: id10874 874: id10875 875: id10876 876: id10877 877: id10878 878: id10879 879: id10880 880: id10881 881: id10882 882: id10883 883: id10884 884: id10885 885: id10886 886: id10887 887: id10888 888: id10889 889: id10890 890: 
id10891 891: id10892 892: id10893 893: id10894 894: id10895 895: id10896 896: id10897 897: id10898 898: id10899 899: id10900 900: id10901 901: id10902 902: id10903 903: id10904 904: id10905 905: id10906 906: id10907 907: id10908 908: id10909 909: id10910 910: id10911 911: id10912 912: id10913 913: id10914 914: id10915 915: id10916 916: id10917 917: id10918 918: id10919 919: id10920 920: id10921 921: id10922 922: id10923 923: id10924 924: id10925 925: id10926 926: id10927 927: id10928 928: id10929 929: id10930 930: id10931 931: id10932 932: id10933 933: id10934 934: id10935 935: id10936 936: id10937 937: id10938 938: id10939 939: id10940 940: id10941 941: id10942 942: id10943 943: id10944 944: id10945 945: id10946 946: id10947 947: id10948 948: id10949 949: id10950 950: id10951 951: id10952 952: id10953 953: id10954 954: id10955 955: id10956 956: id10957 957: id10958 958: id10959 959: id10960 960: id10961 961: id10962 962: id10963 963: id10964 964: id10965 965: id10966 966: id10967 967: id10968 968: id10969 969: id10970 970: id10971 971: id10972 972: id10973 973: id10974 974: id10975 975: id10976 976: id10977 977: id10978 978: id10979 979: id10980 980: id10981 981: id10982 982: id10983 983: id10984 984: id10985 985: id10986 986: id10987 987: id10988 988: id10989 989: id10990 990: id10991 991: id10992 992: id10993 993: id10994 994: id10995 995: id10996 996: id10997 997: id10998 998: id10999 999: id11000 1000: id11001 1001: id11002 1002: id11003 1003: id11004 1004: id11005 1005: id11006 1006: id11007 1007: id11008 1008: id11009 1009: id11010 1010: id11011 1011: id11012 1012: id11013 1013: id11014 1014: id11015 1015: id11016 1016: id11017 1017: id11018 1018: id11019 1019: id11020 1020: id11021 1021: id11022 1022: id11023 1023: id11024 1024: id11025 1025: id11026 1026: id11027 1027: id11028 1028: id11029 1029: id11030 1030: id11031 1031: id11032 1032: id11033 1033: id11034 1034: id11035 1035: id11036 1036: id11037 1037: id11038 1038: id11039 1039: id11040 1040: id11041 1041: id11042 1042: id11043 1043: id11044 1044: id11045 1045: id11046 1046: id11047 1047: id11048 1048: id11049 1049: id11050 1050: id11051 1051: id11052 1052: id11053 1053: id11054 1054: id11055 1055: id11056 1056: id11057 1057: id11058 1058: id11059 1059: id11060 1060: id11061 1061: id11062 1062: id11063 1063: id11064 1064: id11065 1065: id11066 1066: id11067 1067: id11068 1068: id11069 1069: id11070 1070: id11071 1071: id11072 1072: id11073 1073: id11074 1074: id11075 1075: id11076 1076: id11077 1077: id11078 1078: id11079 1079: id11080 1080: id11081 1081: id11082 1082: id11083 1083: id11084 1084: id11085 1085: id11086 1086: id11087 1087: id11088 1088: id11089 1089: id11090 1090: id11091 1091: id11092 1092: id11093 1093: id11094 1094: id11095 1095: id11096 1096: id11097 1097: id11098 1098: id11099 1099: id11100 1100: id11101 1101: id11102 1102: id11103 1103: id11104 1104: id11105 1105: id11106 1106: id11107 1107: id11108 1108: id11109 1109: id11110 1110: id11111 1111: id11112 1112: id11113 1113: id11114 1114: id11115 1115: id11116 1116: id11117 1117: id11118 1118: id11119 1119: id11120 1120: id11121 1121: id11122 1122: id11123 1123: id11124 1124: id11125 1125: id11126 1126: id11127 1127: id11128 1128: id11129 1129: id11130 1130: id11131 1131: id11132 1132: id11133 1133: id11134 1134: id11135 1135: id11136 1136: id11137 1137: id11138 1138: id11139 1139: id11140 1140: id11141 1141: id11142 1142: id11143 1143: id11144 1144: id11145 1145: id11146 1146: id11147 1147: id11148 1148: id11149 1149: id11150 1150: id11151 1151: id11152 
1152: id11153 1153: id11154 1154: id11155 1155: id11156 1156: id11157 1157: id11158 1158: id11159 1159: id11160 1160: id11161 1161: id11162 1162: id11163 1163: id11164 1164: id11165 1165: id11166 1166: id11167 1167: id11168 1168: id11169 1169: id11170 1170: id11171 1171: id11172 1172: id11173 1173: id11174 1174: id11175 1175: id11176 1176: id11177 1177: id11178 1178: id11179 1179: id11180 1180: id11181 1181: id11182 1182: id11183 1183: id11184 1184: id11185 1185: id11186 1186: id11187 1187: id11188 1188: id11189 1189: id11190 1190: id11191 1191: id11192 1192: id11193 1193: id11194 1194: id11195 1195: id11196 1196: id11197 1197: id11198 1198: id11199 1199: id11200 1200: id11201 1201: id11202 1202: id11203 1203: id11204 1204: id11205 1205: id11206 1206: id11207 1207: id11208 1208: id11209 1209: id11210 1210: id11211 1211: id11212 1212: id11213 1213: id11214 1214: id11215 1215: id11216 1216: id11217 1217: id11218 1218: id11219 1219: id11220 1220: id11221 1221: id11222 1222: id11223 1223: id11224 1224: id11225 1225: id11226 1226: id11227 1227: id11228 1228: id11229 1229: id11230 1230: id11231 1231: id11232 1232: id11233 1233: id11234 1234: id11235 1235: id11236 1236: id11237 1237: id11238 1238: id11239 1239: id11240 1240: id11241 1241: id11242 1242: id11243 1243: id11244 1244: id11245 1245: id11246 1246: id11247 1247: id11248 1248: id11249 1249: id11250 1250: id11251 splits: - name: train num_bytes: 12729268 num_examples: 138361 - name: validation num_bytes: 635172 num_examples: 6904 - name: test num_bytes: 759096 num_examples: 8251 download_size: 0 dataset_size: 14123536 --- # Dataset Card for SUPERB ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [http://superbbenchmark.org](http://superbbenchmark.org) - **Repository:** [https://github.com/s3prl/s3prl](https://github.com/s3prl/s3prl) - **Paper:** [SUPERB: Speech processing Universal PERformance Benchmark](https://arxiv.org/abs/2105.01051) - **Leaderboard:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) - **Point of Contact:** [Lewis Tunstall](mailto:lewis@huggingface.co) and [Albert Villanova](mailto:albert@huggingface.co) ### Dataset Summary SUPERB is a leaderboard to benchmark the performance of a shared model across a wide range of speech processing tasks with minimal architecture changes and labeled data. 
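Each task below is exposed as a separate configuration of this dataset. As a minimal sketch of loading one of them with 🤗 Datasets (configuration names match the task sections that follow, e.g. `asr`, `ks`, `ic`, `si`, `sd`, `er`):

```python
from datasets import load_dataset

# Each SUPERB task is a separate config; "asr" is used here as an example.
asr = load_dataset("superb", "asr", split="validation")
print(asr[0]["text"])
```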
### Supported Tasks and Leaderboards

The SUPERB leaderboard can be found at https://superbbenchmark.org/leaderboard and consists of the following tasks:

#### pr

Phoneme Recognition (PR) transcribes an utterance into the smallest content units. This task includes alignment modeling to avoid potentially inaccurate forced alignment. The [LibriSpeech](https://huggingface.co/datasets/librispeech_asr) train-clean-100/dev-clean/test-clean subsets are adopted in SUPERB for training/validation/testing. Phoneme transcriptions are obtained from the LibriSpeech official g2p-model-5 and the conversion script in the Kaldi librispeech s5 recipe. The evaluation metric is phone error rate (PER).

#### asr

Automatic Speech Recognition (ASR) transcribes utterances into words. While PR analyzes the improvement in modeling phonetics, ASR reflects the significance of that improvement in a real-world scenario. The [LibriSpeech](https://huggingface.co/datasets/librispeech_asr) train-clean-100/dev-clean/test-clean subsets are used for training/validation/testing. The evaluation metric is word error rate (WER).

#### ks

Keyword Spotting (KS) detects preregistered keywords by classifying utterances into a predefined set of words. The task is usually performed on-device for a fast response time. Thus, accuracy, model size, and inference time are all crucial. SUPERB uses the widely used [Speech Commands dataset v1.0](https://www.tensorflow.org/datasets/catalog/speech_commands) for this task. The dataset consists of ten classes of keywords, a class for silence, and an unknown class to include false positives. The evaluation metric is accuracy (ACC).

##### Example of usage

Use these auxiliary functions to:
- load the audio file into an audio data array
- sample from long `_silence_` audio clips

For other examples of handling long `_silence_` clips see the [S3PRL](https://github.com/s3prl/s3prl/blob/099ce807a6ffa6bf2482ceecfcaf83dea23da355/s3prl/downstream/speech_commands/dataset.py#L80) or [TFDS](https://github.com/tensorflow/datasets/blob/6b8cfdb7c3c0a04e731caaa8660ce948d0a67b1e/tensorflow_datasets/audio/speech_commands.py#L143) implementations.

```python
from random import randint

import soundfile as sf


def map_to_array(example):
    # Decode the WAV file into a float array and record its sampling rate.
    speech_array, sample_rate = sf.read(example["file"])
    example["speech"] = speech_array
    example["sample_rate"] = sample_rate
    return example


def sample_noise(example):
    # Use this function to extract random 1 sec slices of each _silence_ utterance,
    # e.g. inside `torch.utils.data.Dataset.__getitem__()`.
    # `label` is a `ClassLabel`, so "_silence_" is stored as its integer index 10.
    if example["label"] in ("_silence_", 10):
        random_offset = randint(0, len(example["speech"]) - example["sample_rate"] - 1)
        example["speech"] = example["speech"][random_offset : random_offset + example["sample_rate"]]
    return example
```
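As a minimal usage sketch (assuming the helpers above are in scope), the functions can be chained with `datasets.Dataset.map`; here the random cropping is applied once up front, whereas during training you would typically re-sample per `__getitem__` as the comment in `sample_noise` suggests:

```python
from datasets import load_dataset

ks = load_dataset("superb", "ks", split="test")
ks = ks.map(map_to_array)   # adds the "speech" and "sample_rate" columns
ks = ks.map(sample_noise)   # crops each _silence_ clip to a random 1 s window
print(ks[0]["label"], len(ks[0]["speech"]))
```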
#### qbe

Query by Example Spoken Term Detection (QbE) detects a spoken term (query) in an audio database (documents) by binary discrimination of whether a given pair of query and document is a match. The English subset of the [QUESST 2014 challenge](https://github.com/s3prl/s3prl/tree/master/downstream#qbe-query-by-example-spoken-term-detection) is adopted since we focus on investigating English as the first step. The evaluation metric is maximum term weighted value (MTWV), which balances misses and false alarms.

#### ic

Intent Classification (IC) classifies utterances into predefined classes to determine the intent of speakers. SUPERB uses the [Fluent Speech Commands dataset](https://github.com/s3prl/s3prl/tree/master/downstream#ic-intent-classification---fluent-speech-commands), where each utterance is tagged with three intent labels: action, object, and location. The evaluation metric is accuracy (ACC).

#### sf

Slot Filling (SF) predicts a sequence of semantic slot-types from an utterance, like a slot-type FromLocation for a spoken word Taipei, which is known as a slot-value. Both slot-types and slot-values are essential for an SLU system to function. The evaluation metrics thus include slot-type F1 score and slot-value CER. [Audio SNIPS](https://github.com/s3prl/s3prl/tree/master/downstream#sf-end-to-end-slot-filling) is adopted, which contains synthesized multi-speaker utterances for SNIPS. Following the standard split in SNIPS, US-accented speakers are selected for training, and the others for validation/testing.

#### si

Speaker Identification (SI) classifies each utterance by its speaker identity as a multi-class classification, where speakers are in the same predefined set for both training and testing. The widely used [VoxCeleb1 dataset](https://www.robots.ox.ac.uk/~vgg/data/voxceleb/vox1.html) is adopted, and the evaluation metric is accuracy (ACC).

#### asv

Automatic Speaker Verification (ASV) verifies whether the speakers of a pair of utterances match, as a binary classification, and speakers in the testing set may not appear in the training set. Thus, ASV is more challenging than SI. VoxCeleb1 is used without VoxCeleb2 training data or noise augmentation. The evaluation metric is equal error rate (EER).

#### sd

Speaker Diarization (SD) predicts *who is speaking when* for each timestamp, and multiple speakers can speak simultaneously. The model has to encode rich speaker characteristics for each frame and should be able to represent mixtures of signals. [LibriMix](https://github.com/s3prl/s3prl/tree/master/downstream#sd-speaker-diarization) is adopted, where LibriSpeech train-clean-100/dev-clean/test-clean are used to generate mixtures for training/validation/testing. We focus on the two-speaker scenario as the first step. The time-coded speaker labels were generated using alignments from the Kaldi LibriSpeech ASR model. The evaluation metric is diarization error rate (DER).
##### Example of usage

Use these auxiliary functions to:
- load the audio file into an audio data array
- generate the label array

```python
import numpy as np
import soundfile as sf


def load_audio_file(example, frame_shift=160):
    # `start`/`end` index frames of `frame_shift` samples, so scale them
    # back to sample offsets before reading the segment from disk.
    example["array"], example["sample_rate"] = sf.read(
        example["file"], start=example["start"] * frame_shift, stop=example["end"] * frame_shift
    )
    return example


def generate_label(example, frame_shift=160, num_speakers=2, rate=16000):
    # Build a binary (frame_num, num_speakers) activity matrix for the record,
    # with one column per speaker and one row per frame of the window.
    start = example["start"]
    end = example["end"]
    frame_num = end - start
    speakers = sorted({speaker["speaker_id"] for speaker in example["speakers"]})
    label = np.zeros((frame_num, num_speakers), dtype=np.int32)
    for speaker in example["speakers"]:
        speaker_index = speakers.index(speaker["speaker_id"])
        start_frame = np.rint(speaker["start"] * rate / frame_shift).astype(int)
        end_frame = np.rint(speaker["end"] * rate / frame_shift).astype(int)
        # Boundaries that fall outside the record window stay None, so the
        # slice below extends to the corresponding edge of the window.
        rel_start = rel_end = None
        if start <= start_frame < end:
            rel_start = start_frame - start
        if start < end_frame <= end:
            rel_end = end_frame - start
        if rel_start is not None or rel_end is not None:
            label[rel_start:rel_end, speaker_index] = 1
    example["label"] = label
    return example
```
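The same pattern as for `ks` applies here; a minimal sketch, assuming the helpers above are in scope and the `sd` config has been downloaded:

```python
import numpy as np
from datasets import load_dataset

sd = load_dataset("superb", "sd", split="test")
sd = sd.map(load_audio_file)   # adds "array" and "sample_rate"
sd = sd.map(generate_label)    # adds the per-frame speaker activity matrix
print(np.asarray(sd[0]["label"]).shape)   # (num_frames, num_speakers)
```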
#### er

Emotion Recognition (ER) predicts an emotion class for each utterance. The most widely used ER dataset, [IEMOCAP](https://github.com/s3prl/s3prl/tree/master/downstream#er-emotion-recognition), is adopted, and we follow the conventional evaluation protocol: we drop the unbalanced emotion classes to leave the final four classes with a similar amount of data points, and cross-validate on five folds of the standard splits. The evaluation metric is accuracy (ACC).

### Languages

The language data in SUPERB is in English (BCP-47 `en`).

## Dataset Structure

### Data Instances

#### pr

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

#### asr

An example from each split looks like:

```python
{'chapter_id': 1240,
 'file': 'path/to/file.flac',
 'audio': {'path': 'path/to/file.flac',
           'array': array([-0.00048828, -0.00018311, -0.00137329, ..., 0.00079346, 0.00091553, 0.00085449], dtype=float32),
           'sampling_rate': 16000},
 'id': '103-1240-0000',
 'speaker_id': 103,
 'text': 'CHAPTER ONE MISSUS RACHEL LYNDE IS SURPRISED MISSUS RACHEL LYNDE '
         'LIVED JUST WHERE THE AVONLEA MAIN ROAD DIPPED DOWN INTO A LITTLE '
         'HOLLOW FRINGED WITH ALDERS AND LADIES EARDROPS AND TRAVERSED BY A '
         'BROOK'}
```

#### ks

An example from each split looks like:

```python
{
  'file': '/path/yes/af7a8296_nohash_1.wav',
  'audio': {'path': '/path/yes/af7a8296_nohash_1.wav',
            'array': array([-0.00048828, -0.00018311, -0.00137329, ..., 0.00079346, 0.00091553, 0.00085449], dtype=float32),
            'sampling_rate': 16000},
  'label': 0  # 'yes'
}
```

#### qbe

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

#### ic

An example from each split looks like:

```python
{
  'file': '/path/wavs/speakers/2BqVo8kVB2Skwgyb/063aa8f0-4479-11e9-a9a5-5dbec3b8816a.wav',
  'audio': {'path': '/path/wavs/speakers/2BqVo8kVB2Skwgyb/063aa8f0-4479-11e9-a9a5-5dbec3b8816a.wav',
            'array': array([-0.00048828, -0.00018311, -0.00137329, ..., 0.00079346, 0.00091553, 0.00085449], dtype=float32),
            'sampling_rate': 16000},
  'speaker_id': '2BqVo8kVB2Skwgyb',
  'text': 'Turn the bedroom lights off',
  'action': 3,   # 'deactivate'
  'object': 7,   # 'lights'
  'location': 0  # 'bedroom'
}
```

#### sf

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

#### si

An example from each split looks like:

```python
{
  'file': '/path/wav/id10003/na8-QEFmj44/00003.wav',
  'audio': {'path': '/path/wav/id10003/na8-QEFmj44/00003.wav',
            'array': array([-0.00048828, -0.00018311, -0.00137329, ..., 0.00079346, 0.00091553, 0.00085449], dtype=float32),
            'sampling_rate': 16000},
  'label': 2  # 'id10003'
}
```

#### asv

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

#### sd

An example from each split looks like:

```python
{
  'record_id': '1578-6379-0038_6415-111615-0009',
  'file': 'path/to/file.wav',
  'audio': {'path': 'path/to/file.wav',
            'array': array([-0.00048828, -0.00018311, -0.00137329, ..., 0.00079346, 0.00091553, 0.00085449], dtype=float32),
            'sampling_rate': 16000},
  'start': 0,
  'end': 1590,
  'speakers': [
    {'speaker_id': '1578', 'start': 28, 'end': 657},
    {'speaker_id': '6415', 'start': 28, 'end': 1576}
  ]
}
```

#### er

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

### Data Fields

#### Note about the `audio` fields

When accessing the audio column, e.g. `dataset[0]["audio"]`, the audio file is automatically decoded and resampled to `dataset.features["audio"].sampling_rate`. Decoding and resampling a large number of audio files can take a significant amount of time, so it is important to query the sample index before the `"audio"` column, *i.e.* `dataset[0]["audio"]` should **always** be preferred over `dataset["audio"][0]`.

#### pr

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

#### asr

- `file` (`string`): Path to the audio file.
- `audio` (`dict`): A dictionary containing the path to the downloaded audio file, the decoded audio array, and the sampling rate.
- `text` (`string`): The transcription of the audio file.
- `speaker_id` (`integer`): A unique ID of the speaker. The same speaker ID can be found in multiple data samples.
- `chapter_id` (`integer`): ID of the audiobook chapter which includes the transcription.
- `id` (`string`): A unique ID of the data sample.

#### ks

- `file` (`string`): Path to the WAV audio file.
- `audio` (`dict`): A dictionary containing the path to the downloaded audio file, the decoded audio array, and the sampling rate.
- `label` (`ClassLabel`): Label of the spoken command, stored as an integer index (see the decoding sketch after this list). Possible values:
    - `0: "yes", 1: "no", 2: "up", 3: "down", 4: "left", 5: "right", 6: "on", 7: "off", 8: "stop", 9: "go", 10: "_silence_", 11: "_unknown_"`
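All integer labels in SUPERB are `ClassLabel` features, so the standard `int2str`/`str2int` methods of 🤗 Datasets convert between indices and names; a minimal sketch for `ks`:

```python
from datasets import load_dataset

ks = load_dataset("superb", "ks", split="test")
labels = ks.features["label"]            # ClassLabel feature
print(ks[0]["label"])                    # integer index, e.g. 0
print(labels.int2str(ks[0]["label"]))    # its name, e.g. "yes"
print(labels.str2int("_silence_"))       # 10
```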
#### qbe

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

#### ic

- `file` (`string`): Path to the WAV audio file.
- `audio` (`dict`): A dictionary containing the path to the downloaded audio file, the decoded audio array, and the sampling rate.
- `speaker_id` (`string`): ID of the speaker.
- `text` (`string`): Transcription of the spoken command.
- `action` (`ClassLabel`): Label of the command's action. Possible values:
    - `0: "activate", 1: "bring", 2: "change language", 3: "deactivate", 4: "decrease", 5: "increase"`
- `object` (`ClassLabel`): Label of the command's object. Possible values:
    - `0: "Chinese", 1: "English", 2: "German", 3: "Korean", 4: "heat", 5: "juice", 6: "lamp", 7: "lights", 8: "music", 9: "newspaper", 10: "none", 11: "shoes", 12: "socks", 13: "volume"`
- `location` (`ClassLabel`): Label of the command's location. Possible values:
    - `0: "bedroom", 1: "kitchen", 2: "none", 3: "washroom"`

#### sf

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

#### si

- `file` (`string`): Path to the WAV audio file.
- `audio` (`dict`): A dictionary containing the path to the downloaded audio file, the decoded audio array, and the sampling rate.
- `label` (`ClassLabel`): Label (ID) of the speaker. Possible values:
    - `0: "id10001", 1: "id10002", 2: "id10003", ..., 1250: "id11251"`

#### asv

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

#### sd

The data fields in all splits are:
- `record_id` (`string`): ID of the record.
- `file` (`string`): Path to the WAV audio file.
- `audio` (`dict`): A dictionary containing the path to the downloaded audio file, the decoded audio array, and the sampling rate.
- `start` (`integer`): Start frame of the audio.
- `end` (`integer`): End frame of the audio.
- `speakers` (`list` of `dict`): List of speakers in the audio. Each item contains the fields:
  - `speaker_id` (`string`): ID of the speaker.
  - `start` (`integer`): Frame when the speaker starts speaking.
  - `end` (`integer`): Frame when the speaker stops speaking.

#### er

- `file` (`string`): Path to the WAV audio file.
- `audio` (`dict`): A dictionary containing the path to the downloaded audio file, the decoded audio array, and the sampling rate.
- `label` (`ClassLabel`): Label of the speech emotion. Possible values:
    - `0: "neu", 1: "hap", 2: "ang", 3: "sad"`

### Data Splits

#### pr

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

#### asr

|     | train | validation | test |
|-----|------:|-----------:|-----:|
| asr | 28539 |       2703 | 2620 |

#### ks

|    | train | validation | test |
|----|------:|-----------:|-----:|
| ks | 51094 |       6798 | 3081 |

#### qbe

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

#### ic

|    | train | validation | test |
|----|------:|-----------:|-----:|
| ic | 23132 |       3118 | 3793 |

#### sf

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

#### si

|    |  train | validation | test |
|----|-------:|-----------:|-----:|
| si | 138361 |       6904 | 8251 |

#### asv

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

#### sd

The data is split into "train", "dev" and "test" sets, each containing the following number of examples:

|    | train |  dev | test |
|----|------:|-----:|-----:|
| sd | 13901 | 3014 | 3002 |

#### er

The data is split into 5 sets intended for 5-fold cross-validation (see the sketch after the table):

|    | session1 | session2 | session3 | session4 | session5 |
|----|---------:|---------:|---------:|---------:|---------:|
| er |     1085 |     1023 |     1151 |     1031 |     1241 |
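A sketch of 5-fold cross-validation over these sessions, assuming the split names in the table; the `+` syntax for concatenating splits is standard in 🤗 Datasets:

```python
from datasets import load_dataset

folds = [f"session{i}" for i in range(1, 6)]
for heldout in folds:
    train_splits = "+".join(f for f in folds if f != heldout)
    er_train = load_dataset("superb", "er", split=train_splits)
    er_test = load_dataset("superb", "er", split=heldout)
    print(heldout, len(er_train), len(er_test))
```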
## Dataset Creation

### Curation Rationale

[More Information Needed]

### Source Data

#### Initial Data Collection and Normalization

[More Information Needed]

#### Who are the source language producers?

[More Information Needed]

### Annotations

#### Annotation process

[More Information Needed]

#### Who are the annotators?

[More Information Needed]

### Personal and Sensitive Information

The dataset consists of people who have donated their voice online. You agree not to attempt to determine the identity of speakers in this dataset.

## Considerations for Using the Data

### Social Impact of Dataset

[More Information Needed]

### Discussion of Biases

[More Information Needed]

### Other Known Limitations

The dataset is provided for research purposes only. Please check the license of each constituent dataset for additional information.

## Additional Information

### Dataset Curators

[More Information Needed]

### Licensing Information

#### pr and asr

The license for LibriSpeech is the Creative Commons Attribution 4.0 International license ([CC BY 4.0](https://creativecommons.org/licenses/by/4.0/)).

#### ks

The license for Speech Commands is [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/legalcode).

#### qbe

The license for QUESST 2014 is not known.

#### ic

The license for the Fluent Speech Commands dataset is the [Fluent Speech Commands Public License](https://fluent.ai/wp-content/uploads/2021/04/Fluent_Speech_Commands_Public_License.pdf).

#### sf

The license for the Audio SNIPS dataset is not known.

#### si and asv

The license for the VoxCeleb1 dataset is the Creative Commons Attribution 4.0 International license ([CC BY 4.0](https://creativecommons.org/licenses/by/4.0/)).

#### sd

LibriMix is based on the LibriSpeech (see above) and WHAM! noise datasets. The WHAM! noise dataset is distributed under the Attribution-NonCommercial 4.0 International ([CC BY-NC 4.0](https://creativecommons.org/licenses/by-nc/4.0/)) license.

#### er

IEMOCAP is distributed under [its own license](https://sail.usc.edu/iemocap/Data_Release_Form_IEMOCAP.pdf).

### Citation Information

```
@article{DBLP:journals/corr/abs-2105-01051,
  author    = {Shu{-}Wen Yang and Po{-}Han Chi and Yung{-}Sung Chuang and
               Cheng{-}I Jeff Lai and Kushal Lakhotia and Yist Y. Lin and
               Andy T. Liu and Jiatong Shi and Xuankai Chang and
               Guan{-}Ting Lin and Tzu{-}Hsien Huang and Wei{-}Cheng Tseng and
               Ko{-}tik Lee and Da{-}Rong Liu and Zili Huang and Shuyan Dong and
               Shang{-}Wen Li and Shinji Watanabe and Abdelrahman Mohamed and
               Hung{-}yi Lee},
  title     = {{SUPERB:} Speech processing Universal PERformance Benchmark},
  journal   = {CoRR},
  volume    = {abs/2105.01051},
  year      = {2021},
  url       = {https://arxiv.org/abs/2105.01051},
  archivePrefix = {arXiv},
  eprint    = {2105.01051},
  timestamp = {Thu, 01 Jul 2021 13:30:22 +0200},
  biburl    = {https://dblp.org/rec/journals/corr/abs-2105-01051.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
```

Note that each SUPERB dataset has its own citation. Please see the source for the correct citation of each contained dataset.

### Contributions

Thanks to [@lewtun](https://github.com/lewtun), [@albertvillanova](https://github.com/albertvillanova) and [@anton-l](https://github.com/anton-l) for adding this dataset.