Spectrogram data generated from the Google/MusicCaps dataset.

  • The dataset viewer of this repository is truncated, so you may want to look at this one instead.

Dataset information

Each example has the following fields (see the inspection snippet below):

  • image : 1025px × 216px grayscale spectrogram
  • caption : text description of the music
  • data_idx : which source clip the segment was generated from
  • number : index of the 5-second segment within the clip
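
A minimal sketch of inspecting one record, assuming the field names listed above and the data 0-200 subset:

from datasets import load_dataset
import numpy as np

# Hedged sketch: load one subset and look at a single record.
ds = load_dataset("mb23/GraySpectrogram", "data 0-200", split="train")
example = ds[0]
print(np.array(example["image"]).shape)  # expected: (1025, 216) grayscale spectrogram
print(example["caption"])                # text description of the music
print(example["data_idx"])               # which source clip the segment was generated from
print(example["number"])                 # index of the 5-second segment within the clip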

How this dataset was made

from PIL import Image
import IPython.display
import librosa
import numpy as np

# 1. Load the wav file
y, sr = librosa.load("path/to/file.wav")

# 2. Apply the short-time Fourier transform to get the frequency components
D = librosa.amplitude_to_db(np.abs(librosa.stft(y)), ref=np.max)  # build the spectrogram with librosa
image = Image.fromarray(np.uint8(D), mode='L')  # 'L' means single-channel grayscale
image.save('spectrogram_{}.png')
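
Each clip is stored as 5-second segments (each 1025 × 216 px, as noted above). The sketch below shows how such slicing could be done; it mirrors the conversion above but is not necessarily the exact script used, and the file path and output names are illustrative.

import librosa
import numpy as np
from PIL import Image

# Hedged sketch: slice a clip into non-overlapping 5-second segments and
# save one grayscale spectrogram per segment (numbering matches the "number" field).
y, sr = librosa.load("path/to/clip.wav")
segment_len = 5 * sr
for number, start in enumerate(range(0, len(y) - segment_len + 1, segment_len)):
    segment = y[start:start + segment_len]
    D = librosa.amplitude_to_db(np.abs(librosa.stft(segment)), ref=np.max)
    Image.fromarray(np.uint8(D), mode='L').save(f"spectrogram_{number}.png")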

Recover music (waveform) from a spectrogram

im = Image.open("path/to/spectrogram.png")  # one of the png files saved above
db_ud = np.uint8(np.array(im))
amp = librosa.db_to_amplitude(db_ud)
print(amp.shape)
# (1025, 861) for a spectrogram made from a 20-second wav file
# (1025, 431) for a spectrogram made from a 10-second wav file
# (1025, 216) for a spectrogram made from a 5-second wav file

y_inv = librosa.griffinlim(amp*200)
display(IPython.display.Audio(y_inv, rate=sr))  # sr comes from the snippet above; run in a notebook
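
The same recovery works directly on an image from this dataset. A minimal sketch, assuming the data 0-200 subset, librosa's default sampling rate of 22050 Hz (the original rate is not stored in the dataset), and the same amp * 200 scaling as above:

import librosa
import numpy as np
from datasets import load_dataset
import IPython.display

# Hedged sketch: take one spectrogram image from the dataset and listen to it.
ds = load_dataset("mb23/GraySpectrogram", "data 0-200", split="train")
db_ud = np.uint8(np.array(ds[0]["image"]))
amp = librosa.db_to_amplitude(db_ud)
y_inv = librosa.griffinlim(amp * 200)
IPython.display.Audio(y_inv, rate=22050)  # assumption: librosa's default sampling rate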

Example: How to use this dataset

  • Subsets data 1300-1600 and data 3400-3600 are not working at the moment, so build subset_name_list with those subsets removed first, as in the code below.

1: Get information about this dataset

  • Copy the code below.
'''
  If you use Google Colab, remove the leading # to install the packages below.
'''
#!pip install datasets
#!pip install huggingface-hub
#!huggingface-cli login
import datasets
from datasets import load_dataset

# make subset_name_list
subset_name_list = [
  'data 0-200',
  'data 200-600',
  'data 600-1000',
  'data 1000-1300',
  'data 1600-2000',
  'data 2000-2200',
  'data 2200-2400',
  'data 2400-2600',
  'data 2600-2800',
  'data 3000-3200',
  'data 3200-3400',
  'data 3600-3800',
  'data 3800-4000',
  'data 4000-4200',
  'data 4200-4400',
  'data 4400-4600',
  'data 4600-4800',
  'data 4800-5000',
  'data 5000-5200',
  'data 5200-5520'
]

# load all subsets and merge them into one DatasetDict
data = load_dataset("mb23/GraySpectrogram", subset_name_list[0])
# subset_name_list above already excludes the broken subsets
for subset in subset_name_list[1:]:  # skip the first subset, which is already loaded
    print(subset)
    new_ds = load_dataset("mb23/GraySpectrogram", subset)
    new_dataset_train = datasets.concatenate_datasets([data["train"], new_ds["train"]])
    new_dataset_test = datasets.concatenate_datasets([data["test"], new_ds["test"]])

    # replace data[split] with the concatenated split
    data["train"] = new_dataset_train
    data["test"] = new_dataset_test

data
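
A quick sanity check on the merged result; the column names follow the field list above and the exact counts depend on how many subsets load successfully:

# data should now be a single DatasetDict covering every subset in subset_name_list
print(len(data["train"]), len(data["test"]))
print(data["train"].column_names)  # expected: ['image', 'caption', 'data_idx', 'number']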

2: Load the dataset and convert it to DataLoaders

  • You can use the code below...
  • ...but (;・∀・) I haven't tried it yet, so I don't know whether it works correctly or efficiently.
import datasets
from datasets import load_dataset, Dataset, DatasetDict
from torchvision import transforms
from torch.utils.data import DataLoader
# BATCH_SIZE = ??? 
# IMG_SIZE = ???
# TRAIN_SIZE = ??? # the number of training data
# TEST_SIZE = ??? # the number of test data

def load_datasets():

    # Define data transforms
    data_transforms = [
        transforms.Resize((IMG_SIZE, IMG_SIZE)),
        transforms.ToTensor(), # Scales data into [0,1]
        transforms.Lambda(lambda t: (t * 2) - 1) # Scale between [-1, 1]
    ]
    data_transform = transforms.Compose(data_transforms)

    data = load_dataset("mb23/GraySpectrogram", subset_name_list[0])
    # subset_name_list (defined above) already excludes the broken subsets
    for subset in subset_name_list[1:]:  # skip the first subset, which is already loaded
        print(subset)
        new_ds = load_dataset("mb23/GraySpectrogram", subset)
        new_dataset_train = datasets.concatenate_datasets([data["train"], new_ds["train"]])
        new_dataset_test = datasets.concatenate_datasets([data["test"], new_ds["test"]])

        # replace data[split] with the concatenated split
        data["train"] = new_dataset_train
        data["test"] = new_dataset_test

    # memo:
    # I don't know a clean way to pull out just the columns I need, so this is brute force.
    # Ideally I would have selected them at load_dataset() time, but that doesn't seem possible.
    # It might be better to rebuild the repository and push_to_hub() again.

    # keep only the image and caption columns and apply the transforms
    new_dataset = dict()
    new_dataset["train"] = Dataset.from_dict({
        "image": [data_transform(img) for img in data["train"]["image"]],
        "caption": data["train"]["caption"]
    })

    new_dataset["test"] = Dataset.from_dict({
        "image": [data_transform(img) for img in data["test"]["image"]],
        "caption": data["test"]["caption"]
    })
    data = datasets.DatasetDict(new_dataset)

    train = data["train"].with_format("torch")  # return torch tensors instead of Python lists
    test = data["test"].with_format("torch")    # return torch tensors instead of Python lists

    # wrap the splits in DataLoaders
    train_loader = DataLoader(train, batch_size=BATCH_SIZE, shuffle=True, drop_last=True)
    test_loader = DataLoader(test, batch_size=BATCH_SIZE, shuffle=True, drop_last=True)
    return train_loader, test_loader
  • Then try this:
train_loader, test_loader = load_datasets()
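
A minimal sanity check on the resulting loaders, assuming BATCH_SIZE and IMG_SIZE were defined above; the expected shape and value range follow from the transforms in load_datasets():

batch = next(iter(train_loader))
images = batch["image"]
print(images.shape)                              # expected: (BATCH_SIZE, 1, IMG_SIZE, IMG_SIZE)
print(images.min().item(), images.max().item())  # values should lie in [-1, 1]
print(batch["caption"][:2])                      # first two captions in the batch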