Dataset Preview Go to dataset viewer
The dataset preview is not available for this split.
Server error
Status code:   400
Exception:     KeyError
Message:       'builder_name'

Need help to make the dataset viewer work? Open an issue for direct support.

YAML Metadata Warning: empty or missing yaml metadata in repo card (https://huggingface.co/docs/hub/datasets-cards)

This dataset is a processed version of the AMI Meeting Corpus, created by running the following script:

import librosa
import math
from datasets import load_dataset

# AMI headset audio is resampled/loaded at 16 kHz (see librosa.load below).
SAMPLE_RATE = 16_000
# Maximum chunk duration in seconds.
# NOTE(review): not referenced in the visible code — presumably consumed by a
# later filtering step; confirm before removing.  `math` is likewise unused here.
MAX_LENGTH_IN_SECONDS = 20.0

def chunk_audio(batch):
    """Split one AMI meeting recording into per-segment audio chunks.

    Designed for ``datasets.map(batched=True, batch_size=1)``: every column
    is accessed via ``batch[...][0]``, i.e. exactly one meeting per call.

    Parameters
    ----------
    batch : dict
        Single-element batch with keys ``file``, ``words``,
        ``word_start_times``, ``word_end_times``, ``segment_start_times``,
        ``segment_end_times`` and ``segment_speakers``.

    Returns
    -------
    dict
        One row per segment: the sliced ``audio`` samples, the ``words``
        starting inside the segment with their ``word_start_times``, the
        segment ``speaker``, its length in seconds (``lengths``) and its
        ``segment_start_times``.
    """
    new_batch = {
        "audio": [],
        "words": [],
        "speaker": [],
        "lengths": [],
        "word_start_times": [],
        "segment_start_times": [],
    }

    # Load the full meeting once; every segment is a slice of this array.
    audio, _ = librosa.load(batch["file"][0], sr=SAMPLE_RATE)

    word_idx = 0
    num_words = len(batch["words"][0])
    for segment_idx in range(len(batch["segment_start_times"][0])):
        words = []
        word_start_times = []
        start_time = batch["segment_start_times"][0][segment_idx]
        end_time = batch["segment_end_times"][0][segment_idx]

        # Segments overlap, so rewind word_idx to the first word whose end
        # lies after this segment's start.
        # FIX: the original condition was ``word_idx > 1``, an off-by-one
        # that could never rewind back to word 0, dropping the file's first
        # word from any later overlapping segment.
        while (word_idx > 0) and (start_time < batch["word_end_times"][0][word_idx - 1]):
            word_idx -= 1

        # ...then skip forward past words that start before this segment.
        while word_idx < num_words and (start_time > batch["word_start_times"][0][word_idx]):
            word_idx += 1

        # Slice the raw samples for this segment (times are in seconds).
        new_batch["audio"].append(audio[int(start_time * SAMPLE_RATE): int(end_time * SAMPLE_RATE)])

        # Collect every word that starts inside the segment.
        while word_idx < num_words and batch["word_start_times"][0][word_idx] < end_time:
            words.append(batch["words"][0][word_idx])
            word_start_times.append(batch["word_start_times"][0][word_idx])
            word_idx += 1

        new_batch["lengths"].append(end_time - start_time)
        new_batch["words"].append(words)
        new_batch["speaker"].append(batch["segment_speakers"][0][segment_idx])
        new_batch["word_start_times"].append(word_start_times)

        new_batch["segment_start_times"].append(batch["segment_start_times"][0][segment_idx])

    return new_batch
    
# Download the single-headset AMI configuration and replace its original
# columns with per-segment chunks.  batch_size=1 matches chunk_audio's
# assumption of exactly one meeting per call.
ami = load_dataset("ami", "headset-single")
ami = ami.map(chunk_audio, batched=True, batch_size=1, remove_columns=ami["train"].column_names)
Edit dataset card
Evaluate models HF Leaderboard