Dataset Preview Go to dataset viewer
The dataset preview is not available for this split.
Server error
Status code:   400
Exception:     KeyError
Message:       'builder_name'

Need help to make the dataset viewer work? Open an issue for direct support.

YAML Metadata Warning: empty or missing yaml metadata in repo card (https://huggingface.co/docs/hub/datasets-cards)

This is a processed AMI dataset using the headset-multi (4-channel) configuration. It was created by running:

import librosa
import math
from datasets import load_dataset

# Target sampling rate (Hz) that every headset channel is resampled to.
SAMPLE_RATE = 16_000
# Segments longer than this (seconds) are split further by chunk_into_max_n_seconds.
MAX_LENGTH_IN_SECONDS = 20.0

def chunk_audio(batch):
    """Split one AMI meeting into per-segment audio chunks.

    Intended for ``Dataset.map(batched=True, batch_size=1)``, so every batch
    holds exactly one meeting (hence the pervasive ``[0]`` indexing). For each
    annotated segment whose speaker has a dedicated headset channel (A-D), the
    segment's audio is sliced from that channel, together with the words (and
    word start times) falling inside the segment boundaries.

    Args:
        batch: single-meeting batch with keys "file-0".."file-3", "words",
            "word_start_times", "word_end_times", "segment_start_times",
            "segment_end_times" and "segment_speakers".

    Returns:
        dict with one entry per kept segment: "audio", "words", "speaker",
        "lengths", "word_start_times", "segment_start_times".
    """
    new_batch = {
        "audio": [],
        "words": [],
        "speaker": [],
        "lengths": [],
        "word_start_times": [],
        "segment_start_times": [],
    }

    # One headset channel per speaker, resampled to SAMPLE_RATE.
    speaker_channel = {
        speaker: librosa.load(batch[f"file-{idx}"][0], sr=SAMPLE_RATE)[0]
        for idx, speaker in enumerate("ABCD")
    }

    word_idx = 0
    num_words = len(batch["words"][0])
    for segment_idx in range(len(batch["segment_start_times"][0])):
        start_time = batch["segment_start_times"][0][segment_idx]
        end_time = batch["segment_end_times"][0][segment_idx]
        speaker = batch["segment_speakers"][0][segment_idx]

        # Skip segments by speakers without a dedicated headset channel.
        if speaker not in speaker_channel:
            continue

        # Segments overlap, so word_idx may first have to move backwards ...
        # (fixed off-by-one: the original `word_idx > 1` bound could never
        # rewind to word 0, silently losing the meeting's first word for
        # overlapping segments)
        while word_idx > 0 and start_time < batch["word_end_times"][0][word_idx - 1]:
            word_idx -= 1
        # ... then forwards to the first word starting inside this segment.
        while word_idx < num_words and start_time > batch["word_start_times"][0][word_idx]:
            word_idx += 1

        new_batch["audio"].append(
            speaker_channel[speaker][int(start_time * SAMPLE_RATE):int(end_time * SAMPLE_RATE)]
        )

        # Collect every word that starts before the segment ends.
        words = []
        word_start_times = []
        while word_idx < num_words and batch["word_start_times"][0][word_idx] < end_time:
            words.append(batch["words"][0][word_idx])
            word_start_times.append(batch["word_start_times"][0][word_idx])
            word_idx += 1

        new_batch["lengths"].append(end_time - start_time)
        new_batch["words"].append(words)
        new_batch["speaker"].append(speaker)
        new_batch["word_start_times"].append(word_start_times)
        new_batch["segment_start_times"].append(start_time)

    return new_batch
    
# Load the multi-headset AMI corpus and replace every meeting row with its
# per-segment chunks. batch_size=1 guarantees each map call sees one meeting;
# the original meeting-level columns are dropped.
ami = load_dataset("ami", "headset-multi")
ami = ami.map(chunk_audio, batched=True, batch_size=1, remove_columns=ami["train"].column_names)

followed by

def chunk_into_max_n_seconds(batch, *, sample_rate=16_000, max_length_in_seconds=20.0):
    """Split an over-long segment into roughly equal chunks at word boundaries.

    Intended for ``Dataset.map(batched=True, batch_size=1)``, so every batch
    holds one segment produced by ``chunk_audio``. A segment no longer than
    ``max_length_in_seconds`` passes through unchanged; a longer one is cut
    into ``ceil(length / max_length_in_seconds)`` chunks whose boundaries are
    snapped to word start times.

    Args:
        batch: single-segment batch with keys "audio", "words",
            "word_start_times", "segment_start_times" and "lengths".
        sample_rate: sampling rate (Hz) of ``batch["audio"]``; defaults to the
            module-wide 16 kHz.
        max_length_in_seconds: chunking threshold; defaults to the module-wide
            20 s limit.

    Returns:
        dict with parallel lists "audio" and "text" (words joined by spaces).
    """
    new_batch = {
        "audio": [],
        "text": [],
    }

    sample_length = batch["lengths"][0]
    segment_start = batch["segment_start_times"][0]

    # Short segments are emitted as a single chunk.
    if sample_length <= max_length_in_seconds:
        new_batch["audio"].append(batch["audio"][0])
        new_batch["text"].append(" ".join(batch["words"][0]))
        return new_batch

    num_chunks_per_sample = math.ceil(sample_length / max_length_in_seconds)
    avg_chunk_length = sample_length / num_chunks_per_sample
    num_words = len(batch["words"][0])

    start_word_idx = end_word_idx = 0
    chunk_start = 0  # in samples, relative to the segment start
    for n in range(num_chunks_per_sample):
        if n == num_chunks_per_sample - 1:
            # Final chunk: take everything that is left. (The original code
            # sliced words as [start:end_word_idx] with end_word_idx capped at
            # num_words - 1, which dropped the last word of every long segment
            # and truncated the audio at that word's start time.)
            end_word_idx = num_words
            chunk_end = len(batch["audio"][0])
        else:
            # Advance to the first word starting at/after the time boundary;
            # word start times are absolute, so offset by segment_start.
            boundary = segment_start + (n + 1) * avg_chunk_length
            while end_word_idx < num_words - 1 and batch["word_start_times"][0][end_word_idx] < boundary:
                end_word_idx += 1
            chunk_end = int((batch["word_start_times"][0][end_word_idx] - segment_start) * sample_rate)

        new_batch["audio"].append(batch["audio"][0][chunk_start:chunk_end])
        new_batch["text"].append(" ".join(batch["words"][0][start_word_idx:end_word_idx]))

        chunk_start = chunk_end
        start_word_idx = end_word_idx

    return new_batch
 
# Re-chunk every segment to at most MAX_LENGTH_IN_SECONDS, replacing the
# segment-level columns with the final "audio"/"text" pairs.
ami = ami.map(chunk_into_max_n_seconds, batched=True, batch_size=1, remove_columns=ami["train"].column_names)
Edit dataset card
Evaluate models HF Leaderboard