---
configs:
  - config_name: default
    data_files:
      - split: train
        path: data/train-*
      - split: validation
        path: data/validation-*
      - split: test
        path: data/test-*
dataset_info:
  features:
    - name: file_id
      dtype: string
    - name: instruction
      dtype: string
    - name: transcription
      dtype: string
    - name: src_speech_tokenizer_0
      sequence: int64
    - name: src_speech_tokenizer_1
      sequence: int64
    - name: src_speech_tokenizer_2
      sequence: int64
    - name: src_speech_tokenizer_3
      sequence: int64
    - name: src_speech_tokenizer_4
      sequence: int64
    - name: src_speech_tokenizer_5
      sequence: int64
    - name: src_speech_tokenizer_6
      sequence: int64
    - name: src_speech_tokenizer_7
      sequence: int64
    - name: tgt_speech_tokenizer_0
      sequence: int64
    - name: tgt_speech_tokenizer_1
      sequence: int64
    - name: tgt_speech_tokenizer_2
      sequence: int64
    - name: tgt_speech_tokenizer_3
      sequence: int64
    - name: tgt_speech_tokenizer_4
      sequence: int64
    - name: tgt_speech_tokenizer_5
      sequence: int64
    - name: tgt_speech_tokenizer_6
      sequence: int64
    - name: tgt_speech_tokenizer_7
      sequence: int64
  splits:
    - name: train
      num_bytes: 2476215704
      num_examples: 90000
    - name: validation
      num_bytes: 135757316
      num_examples: 5000
    - name: test
      num_bytes: 139761511
      num_examples: 5000
  download_size: 147633674
  dataset_size: 2751734531
---

# Dataset Card for "text-guided-vc-google-tts-api-speech_tokenizer"

More Information needed
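
The metadata above describes three splits (90,000 train / 5,000 validation / 5,000 test examples), each example carrying a `file_id`, a text `instruction`, a `transcription`, and eight source and eight target SpeechTokenizer codebook streams stored as sequences of int64 token ids. Below is a minimal sketch of loading and inspecting the dataset with the 🤗 `datasets` library; the repository id is assumed from the uploader name and the card title.

```python
# Minimal sketch, assuming the repo id "hhhaaahhhaa/text-guided-vc-google-tts-api-speech_tokenizer".
from datasets import load_dataset

ds = load_dataset("hhhaaahhhaa/text-guided-vc-google-tts-api-speech_tokenizer")
print(ds)  # DatasetDict with train / validation / test splits

example = ds["train"][0]
print(example["file_id"])
print(example["instruction"])
print(example["transcription"])

# Each example has eight codebook streams for the source and target utterances
# (src_speech_tokenizer_0 ... 7 and tgt_speech_tokenizer_0 ... 7).
src_codes = [example[f"src_speech_tokenizer_{i}"] for i in range(8)]
tgt_codes = [example[f"tgt_speech_tokenizer_{i}"] for i in range(8)]
print(len(src_codes[0]), len(tgt_codes[0]))  # token-sequence lengths of the first stream
```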