---
dataset_info:
- config_name: cs
  features:
  - name: audio_id
    dtype: string
  - name: audio
    dtype:
      audio:
        sampling_rate: 16000
  - name: language
    dtype: string
  - name: transcription
    dtype: string
  - name: translation
    dtype: string
  splits:
  - name: train
    num_bytes: 3968868756.0
    num_examples: 12000
  download_size: 3963196917
  dataset_size: 3968868756.0
- config_name: de
  features:
  - name: audio_id
    dtype: string
  - name: audio
    dtype:
      audio:
        sampling_rate: 16000
  - name: language
    dtype: string
  - name: transcription
    dtype: string
  - name: translation
    dtype: string
  splits:
  - name: train
    num_bytes: 3497902679.0
    num_examples: 12000
  download_size: 3487719829
  dataset_size: 3497902679.0
- config_name: en
  features:
  - name: audio_id
    dtype: string
  - name: audio
    dtype:
      audio:
        sampling_rate: 16000
  - name: language
    dtype: string
  - name: transcription
    dtype: string
  - name: translation
    dtype: string
  - name: wer
    dtype: float32
  splits:
  - name: train
    num_bytes: 4000276474.0
    num_examples: 12000
  download_size: 3984332876
  dataset_size: 4000276474.0
- config_name: es
  features:
  - name: audio_id
    dtype: string
  - name: audio
    dtype:
      audio:
        sampling_rate: 16000
  - name: language
    dtype: string
  - name: transcription
    dtype: string
  - name: translation
    dtype: string
  - name: wer
    dtype: float32
  splits:
  - name: train
    num_bytes: 4138004589.0
    num_examples: 12000
  download_size: 4128702065
  dataset_size: 4138004589.0
- config_name: fr
  features:
  - name: audio_id
    dtype: string
  - name: audio
    dtype:
      audio:
        sampling_rate: 16000
  - name: language
    dtype: string
  - name: transcription
    dtype: string
  - name: translation
    dtype: string
  - name: wer
    dtype: float32
  splits:
  - name: train
    num_bytes: 3915210199.0
    num_examples: 12000
  download_size: 3906302179
  dataset_size: 3915210199.0
- config_name: hu
  features:
  - name: audio_id
    dtype: string
  - name: audio
    dtype:
      audio:
        sampling_rate: 16000
  - name: language
    dtype: string
  - name: transcription
    dtype: string
  - name: translation
    dtype: string
  splits:
  - name: train
    num_bytes: 4173785272.0
    num_examples: 12000
  download_size: 4167101687
  dataset_size: 4173785272.0
- config_name: it
  features:
  - name: audio_id
    dtype: string
  - name: audio
    dtype:
      audio:
        sampling_rate: 16000
  - name: language
    dtype: string
  - name: transcription
    dtype: string
  - name: translation
    dtype: string
  splits:
  - name: train
    num_bytes: 4732306152.0
    num_examples: 12000
  download_size: 4721993123
  dataset_size: 4732306152.0
- config_name: nl
  features:
  - name: audio_id
    dtype: string
  - name: audio
    dtype:
      audio:
        sampling_rate: 16000
  - name: language
    dtype: string
  - name: transcription
    dtype: string
  - name: translation
    dtype: string
  - name: wer
    dtype: float32
  splits:
  - name: train
    num_bytes: 3162694343.0
    num_examples: 12000
  download_size: 3154090731
  dataset_size: 3162694343.0
- config_name: pl
  features:
  - name: audio_id
    dtype: string
  - name: audio
    dtype:
      audio:
        sampling_rate: 16000
  - name: language
    dtype: string
  - name: transcription
    dtype: string
  - name: translation
    dtype: string
  splits:
  - name: train
    num_bytes: 4040847257.0
    num_examples: 12000
  download_size: 4033234696
  dataset_size: 4040847257.0
- config_name: ro
  features:
  - name: audio_id
    dtype: string
  - name: audio
    dtype:
      audio:
        sampling_rate: 16000
  - name: language
    dtype: string
  - name: transcription
    dtype: string
  - name: translation
    dtype: string
  - name: wer
    dtype: float32
  splits:
  - name: train
    num_bytes: 4341972777.0
    num_examples: 12000
  download_size: 4334737748
  dataset_size: 4341972777.0
configs:
- config_name: cs
  data_files:
  - split: train
    path: cs/train-*
- config_name: de
  data_files:
  - split: train
    path: de/train-*
- config_name: en
  data_files:
  - split: train
    path: en/train-*
- config_name: es
  data_files:
  - split: train
    path: es/train-*
- config_name: fr
  data_files:
  - split: train
    path: fr/train-*
- config_name: hu
  data_files:
  - split: train
    path: hu/train-*
- config_name: it
  data_files:
  - split: train
    path: it/train-*
- config_name: nl
  data_files:
  - split: train
    path: nl/train-*
- config_name: pl
  data_files:
  - split: train
    path: pl/train-*
- config_name: ro
  data_files:
  - split: train
    path: ro/train-*
---
# Dataset Card for "vp-er-10l"

[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)