
This is a processed version of the AMI dataset. It was created by running:

import librosa
from datasets import load_dataset

SAMPLE_RATE = 16_000
MAX_LENGTH_IN_SECONDS = 20.0

def chunk_audio(batch):
    new_batch = {
        "audio": [],
        "words": [],
        "speaker": [],
        "lengths": [],
        "word_start_times": [],
        "segment_start_times": [],
    }

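    # load the full meeting audio once at the target sampling rate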
    audio, _ = librosa.load(batch["file"][0], sr=SAMPLE_RATE)

    word_idx = 0
    num_words = len(batch["words"][0])
    for segment_idx in range(len(batch["segment_start_times"][0])):
        words = []
        word_start_times = []
        start_time = batch["segment_start_times"][0][segment_idx]
        end_time = batch["segment_end_times"][0][segment_idx]

        # go back and forth with word_idx since segments overlap with each other
        while (word_idx > 0) and (start_time < batch["word_end_times"][0][word_idx - 1]):
            word_idx -= 1

        while word_idx < num_words and (start_time > batch["word_start_times"][0][word_idx]):
            word_idx += 1

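        # slice the raw audio for this segment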
        new_batch["audio"].append(audio[int(start_time * SAMPLE_RATE): int(end_time * SAMPLE_RATE)])

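        # collect the words whose start times fall before the segment end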
        while word_idx < num_words and batch["word_start_times"][0][word_idx] < end_time:
            words.append(batch["words"][0][word_idx])
            word_start_times.append(batch["word_start_times"][0][word_idx])
            word_idx += 1

        new_batch["lengths"].append(end_time - start_time)
        new_batch["words"].append(words)
        new_batch["speaker"].append(batch["segment_speakers"][0][segment_idx])
        new_batch["word_start_times"].append(word_start_times)

        new_batch["segment_start_times"].append(batch["segment_start_times"][0][segment_idx])

    return new_batch
    
ami = load_dataset("ami", "headset-single")
ami = ami.map(chunk_audio, batched=True, batch_size=1, remove_columns=ami["train"].column_names)
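
Each example now corresponds to a single speaker segment. A quick way to inspect the new columns (an illustrative check, not part of the processing itself):

sample = ami["train"][0]
print(sample["speaker"], round(sample["lengths"], 2), "seconds")
print(" ".join(sample["words"]))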

The segments were then chunked into pieces of at most 20 seconds by:

import math

def chunk_into_max_n_seconds(batch):
    new_batch = {
        "audio": [],
        "text": [],
    }

    sample_length = batch["lengths"][0]
    segment_start = batch["segment_start_times"][0]

    if sample_length > MAX_LENGTH_IN_SECONDS:
        num_chunks_per_sample = math.ceil(sample_length / MAX_LENGTH_IN_SECONDS)
        avg_chunk_length = sample_length / num_chunks_per_sample
        num_words = len(batch["words"][0])

        # cut chunks at word start boundaries so that no word is split across chunks
        start_word_idx = end_word_idx = 0
        chunk_start_time = 0
        for n in range(num_chunks_per_sample):
            if n == num_chunks_per_sample - 1:
                # last chunk: keep all remaining words and audio
                end_word_idx = num_words
                chunk_end_time = len(batch["audio"][0])
            else:
                while (end_word_idx < num_words - 1) and (batch["word_start_times"][0][end_word_idx] < segment_start + (n + 1) * avg_chunk_length):
                    end_word_idx += 1
                chunk_end_time = int((batch["word_start_times"][0][end_word_idx] - segment_start) * SAMPLE_RATE)

            new_batch["audio"].append(batch["audio"][0][chunk_start_time: chunk_end_time])
            new_batch["text"].append(" ".join(batch["words"][0][start_word_idx: end_word_idx]))

            chunk_start_time = chunk_end_time
            start_word_idx = end_word_idx
    else:
        new_batch["audio"].append(batch["audio"][0])
        new_batch["text"].append(" ".join(batch["words"][0]))

    return new_batch
 
ami = ami.map(chunk_into_max_n_seconds, batched=True, batch_size=1, remove_columns=ami["train"].column_names)
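
As a sanity check (not part of the original pipeline), one can verify that no chunk materially exceeds the 20-second limit, for example on a small slice to keep memory use low:

for sample in ami["train"].select(range(min(500, ami["train"].num_rows))):
    # allow a small tolerance, since chunks are cut at word boundaries
    assert len(sample["audio"]) / SAMPLE_RATE <= MAX_LENGTH_IN_SECONDS + 1.0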

Durations were then computed and samples with empty transcripts filtered out by:

def get_duration(batch):
    new_batch = {
        "audio": [],
        "text": [],
        "lengths": []
    }

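    # duration in seconds = number of samples / sampling rate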
    duration_audio = len(batch["audio"][0]) / SAMPLE_RATE

    new_batch["lengths"].append(duration_audio)
    new_batch["audio"].append(batch["audio"][0])
    new_batch["text"].append(batch["text"][0])

    return new_batch
 
ami = ami.map(get_duration, batched=True, batch_size=1, remove_columns=ami["train"].column_names)
ami = ami.filter(lambda x: len(x["text"]) > 0)
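
Before the concatenation step below, it can be useful to count how many samples fall under the 3-second threshold that the merge uses (again, only an illustrative check):

n_short = sum(1 for length in ami["train"]["lengths"] if length < 3)
print(f"{n_short} of {ami['train'].num_rows} train samples are shorter than 3 seconds")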

Finally, samples shorter than 3 seconds were concatenated with their neighbors by:

import numpy as np
import pandas as pd
from datasets import Dataset, DatasetDict

def concat_short_audio(df):
    result_dict = {
        'audio': [],
        'text': [],
        'lengths': []
    }

    i = 0
    j = 1

    while i < df.shape[0]:
        audio_append = df.loc[i, 'audio']
        text_append = df.loc[i, 'text']
        length_append = df.loc[i, 'lengths']

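        # greedily merge following samples until this one is at least 3 seconds long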
        while length_append < 3 and j < df.shape[0]:  
            audio_append = np.append(audio_append, df.loc[j, 'audio'])
            text_append = text_append + " " + df.loc[j, 'text']
            length_append += df.loc[j, 'lengths']
            j += 1
    
        result_dict['audio'].append(audio_append)
        result_dict['text'].append(text_append)
        result_dict['lengths'].append(length_append)
        i = j
        j = i + 1

    data_save = pd.DataFrame(result_dict)
    data_save.drop(columns='lengths', inplace=True)
    return data_save


# convert to pandas
train_df = ami['train'].to_pandas()
valid_df = ami['validation'].to_pandas()
test_df = ami['test'].to_pandas()

# concat short audio
data_train = concat_short_audio(train_df)
data_valid = concat_short_audio(valid_df)
data_test = concat_short_audio(test_df)

# convert back to Huggingface Dataset
dataset_train = Dataset.from_pandas(data_train)
dataset_test = Dataset.from_pandas(data_test)
dataset_valid = Dataset.from_pandas(data_valid)

# convert back to DatasetDict
new_ami_data = DatasetDict({"train": dataset_train, "validation": dataset_valid, "test": dataset_test})
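
Assuming the resulting DatasetDict was pushed to the Hub under this repository name, the processed dataset can then be loaded directly with:

from datasets import load_dataset

ami_processed = load_dataset("ami-wav2vec2/ami_single_headset_segmented_and_chunked_and_concatenated")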
