Languages: English
Multilinguality: monolingual
Size Categories: 10K<n<100K
Language Creators: found
Annotations Creators: expert-generated
Source Datasets: original
Tags:
License:
bgstud committed on
Commit 94c29b5
1 Parent(s): 1d322b1

Upload dataset_infos.json with huggingface_hub
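This is the default commit message that huggingface_hub produces when a single file is uploaded. Below is a minimal sketch of how such an upload can be issued; the repo id "bgstud/libri-whisper-raw" is an assumption inferred from the config key in the JSON further down, not something stated in the commit itself.

```python
# Minimal sketch, assuming the dataset repo is "bgstud/libri-whisper-raw"
# (inferred from the config key below) and that dataset_infos.json exists locally.
from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="dataset_infos.json",   # local metadata file
    path_in_repo="dataset_infos.json",      # destination path in the repo
    repo_id="bgstud/libri-whisper-raw",     # assumed dataset repo id
    repo_type="dataset",
    commit_message="Upload dataset_infos.json with huggingface_hub",
)
```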

Files changed (1)
  1. dataset_infos.json +1 -0
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"bgstud--libri-whisper-raw": {"description": "LibriSpeech is a corpus of approximately 1000 hours of read English speech with sampling rate of 16 kHz,\nprepared by Vassil Panayotov with the assistance of Daniel Povey. The data is derived from read\naudiobooks from the LibriVox project, and has been carefully segmented and aligned.87\n", "citation": "@inproceedings{panayotov2015librispeech,\n title={Librispeech: an ASR corpus based on public domain audio books},\n author={Panayotov, Vassil and Chen, Guoguo and Povey, Daniel and Khudanpur, Sanjeev},\n booktitle={Acoustics, Speech and Signal Processing (ICASSP), 2015 IEEE International Conference on},\n pages={5206--5210},\n year={2015},\n organization={IEEE}\n}\n", "homepage": "http://www.openslr.org/12", "license": "", "features": {"audio": {"sampling_rate": 16000, "mono": true, "decode": true, "id": null, "_type": "Audio"}, "sentence": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": {"input": "file", "output": "text"}, "task_templates": [], "builder_name": "librispeech_asr", "config_name": "clean", "version": {"version_str": "2.1.0", "description": "", "major": 2, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2086814529.251971, "num_examples": 9000, "dataset_name": "libri-whisper-raw"}, "validation": {"name": "validation", "num_bytes": 359344187.966, "num_examples": 2703, "dataset_name": "libri-whisper-raw"}, "test": {"name": "test", "num_bytes": 367487198.42, "num_examples": 2620, "dataset_name": "libri-whisper-raw"}}, "download_checksums": null, "download_size": 2717040282, "post_processing_size": null, "dataset_size": 2813645915.637971, "size_in_bytes": 5530686197.637971}}