{ "cells": [ { "cell_type": "markdown", "id": "62c5865f", "metadata": {}, "source": [ "\"Open" ] }, { "cell_type": "code", "execution_count": null, "id": "6c7800a6", "metadata": {}, "outputs": [], "source": [ "try:\n", " # are we running on Google Colab?\n", " import google.colab\n", " !git clone -q https://github.com/teticio/audio-diffusion.git\n", " %cd audio-diffusion\n", " %pip install -q -r requirements.txt\n", "except:\n", " pass" ] }, { "cell_type": "code", "execution_count": null, "id": "b447e2c4", "metadata": {}, "outputs": [], "source": [ "import os\n", "import sys\n", "sys.path.insert(0, os.path.dirname(os.path.abspath(\"\")))" ] }, { "cell_type": "code", "execution_count": null, "id": "c2fc0e7a", "metadata": {}, "outputs": [], "source": [ "import torch\n", "import random\n", "import librosa\n", "import numpy as np\n", "from datasets import load_dataset\n", "from IPython.display import Audio\n", "from audiodiffusion import AudioDiffusion" ] }, { "cell_type": "code", "execution_count": null, "id": "b294a94a", "metadata": {}, "outputs": [], "source": [ "device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n", "generator = torch.Generator(device=device)" ] }, { "cell_type": "markdown", "id": "f3feb265", "metadata": {}, "source": [ "## DDPM (De-noising Diffusion Probabilistic Models)" ] }, { "cell_type": "markdown", "id": "7fd945bb", "metadata": {}, "source": [ "### Select model" ] }, { "cell_type": "code", "execution_count": null, "id": "97f24046", "metadata": {}, "outputs": [], "source": [ "#@markdown teticio/audio-diffusion-256 - trained on my Spotify \"liked\" playlist\n", "\n", "#@markdown teticio/audio-diffusion-breaks-256 - trained on samples used in music\n", "\n", "#@markdown teticio/audio-diffusion-instrumental-hiphop-256 - trained on instrumental hiphop\n", "\n", "model_id = \"teticio/audio-diffusion-256\" #@param [\"teticio/audio-diffusion-256\", \"teticio/audio-diffusion-breaks-256\", \"audio-diffusion-instrumenal-hiphop-256\", \"teticio/audio-diffusion-ddim-256\"]" ] }, { "cell_type": "code", "execution_count": null, "id": "a3d45c36", "metadata": {}, "outputs": [], "source": [ "audio_diffusion = AudioDiffusion(model_id=model_id)\n", "mel = audio_diffusion.pipe.mel" ] }, { "cell_type": "markdown", "id": "011fb5a1", "metadata": {}, "source": [ "### Run model inference to generate mel spectrogram, audios and loops" ] }, { "cell_type": "code", "execution_count": null, "id": "b809fed5", "metadata": {}, "outputs": [], "source": [ "for _ in range(10):\n", " seed = generator.seed()\n", " print(f'Seed = {seed}')\n", " generator.manual_seed(seed)\n", " image, (sample_rate,\n", " audio) = audio_diffusion.generate_spectrogram_and_audio(\n", " generator=generator)\n", " display(image)\n", " display(Audio(audio, rate=sample_rate))\n", " loop = AudioDiffusion.loop_it(audio, sample_rate)\n", " if loop is not None:\n", " display(Audio(loop, rate=sample_rate))\n", " else:\n", " print(\"Unable to determine loop points\")" ] }, { "cell_type": "markdown", "id": "0bb03e33", "metadata": {}, "source": [ "### Generate variations of audios" ] }, { "cell_type": "markdown", "id": "80e5b5fa", "metadata": {}, "source": [ "Try playing around with `start_steps`. Values closer to zero will produce new samples, while values closer to 1,000 will produce samples more faithful to the original." 
] }, { "cell_type": "code", "execution_count": null, "id": "5074ec11", "metadata": {}, "outputs": [], "source": [ "seed = 2391504374279719 #@param {type:\"integer\"}\n", "generator.manual_seed(seed)\n", "image, (sample_rate, audio) = audio_diffusion.generate_spectrogram_and_audio(\n", " generator=generator)\n", "display(image)\n", "display(Audio(audio, rate=sample_rate))" ] }, { "cell_type": "code", "execution_count": null, "id": "a0fefe28", "metadata": { "scrolled": false }, "outputs": [], "source": [ "start_step = 500 #@param {type:\"slider\", min:0, max:1000, step:10}\n", "track = AudioDiffusion.loop_it(audio, sample_rate, loops=1)\n", "for variation in range(12):\n", " image2, (\n", " sample_rate,\n", " audio2) = audio_diffusion.generate_spectrogram_and_audio_from_audio(\n", " raw_audio=audio, start_step=start_step)\n", " display(image2)\n", " display(Audio(audio2, rate=sample_rate))\n", " track = np.concatenate(\n", " [track, AudioDiffusion.loop_it(audio2, sample_rate, loops=1)])\n", "display(Audio(track, rate=sample_rate))" ] }, { "cell_type": "markdown", "id": "58a876c1", "metadata": {}, "source": [ "### Generate continuations (\"out-painting\")" ] }, { "cell_type": "code", "execution_count": null, "id": "b95d5780", "metadata": {}, "outputs": [], "source": [ "overlap_secs = 2 #@param {type:\"integer\"}\n", "start_step = 0 #@param {type:\"slider\", min:0, max:1000, step:10}\n", "overlap_samples = overlap_secs * sample_rate\n", "track = audio\n", "for variation in range(12):\n", " image2, (\n", " sample_rate,\n", " audio2) = audio_diffusion.generate_spectrogram_and_audio_from_audio(\n", " raw_audio=audio[-overlap_samples:],\n", " start_step=start_step,\n", " mask_start_secs=overlap_secs)\n", " display(image2)\n", " display(Audio(audio2, rate=sample_rate))\n", " track = np.concatenate([track, audio2[overlap_samples:]])\n", " audio = audio2\n", "display(Audio(track, rate=sample_rate))" ] }, { "cell_type": "markdown", "id": "b6434d3f", "metadata": {}, "source": [ "### Remix (style transfer)" ] }, { "cell_type": "markdown", "id": "0da030b2", "metadata": {}, "source": [ "Alternatively, you can start from another audio altogether, resulting in a kind of style transfer. Maintaining the same seed during generation fixes the style, while masking helps stitch consecutive segments together more smoothly." 
] }, { "cell_type": "code", "execution_count": null, "id": "fc620a80", "metadata": {}, "outputs": [], "source": [ "try:\n", " # are we running on Google Colab?\n", " from google.colab import files\n", " audio_file = list(files.upload().keys())[0]\n", "except:\n", " audio_file = \"/home/teticio/Music/liked/El Michels Affair - Glaciers Of Ice.mp3\"" ] }, { "cell_type": "code", "execution_count": null, "id": "5a257e69", "metadata": { "scrolled": false }, "outputs": [], "source": [ "start_step = 500 #@param {type:\"slider\", min:0, max:1000, step:10}\n", "overlap_secs = 2 #@param {type:\"integer\"}\n", "track_audio, _ = librosa.load(audio_file, mono=True, sr=mel.get_sample_rate())\n", "overlap_samples = overlap_secs * sample_rate\n", "slice_size = mel.x_res * mel.hop_length\n", "stride = slice_size - overlap_samples\n", "generator = torch.Generator(device=device)\n", "seed = generator.seed()\n", "print(f'Seed = {seed}')\n", "track = np.array([])\n", "not_first = 0\n", "for sample in range(len(track_audio) // stride):\n", " generator.manual_seed(seed)\n", " audio = np.array(track_audio[sample * stride:sample * stride + slice_size])\n", " if not_first:\n", " # Normalize and re-insert generated audio\n", " audio[:overlap_samples] = audio2[-overlap_samples:] * np.max(\n", " audio[:overlap_samples]) / np.max(audio2[-overlap_samples:])\n", " _, (sample_rate,\n", " audio2) = audio_diffusion.generate_spectrogram_and_audio_from_audio(\n", " raw_audio=audio,\n", " start_step=start_step,\n", " generator=generator,\n", " mask_start_secs=overlap_secs * not_first)\n", " track = np.concatenate([track, audio2[overlap_samples * not_first:]])\n", " not_first = 1\n", " display(Audio(track, rate=sample_rate))" ] }, { "cell_type": "markdown", "id": "924ff9d5", "metadata": {}, "source": [ "### Fill the gap (\"in-painting\")" ] }, { "cell_type": "code", "execution_count": null, "id": "0200264c", "metadata": {}, "outputs": [], "source": [ "slice = 3 #@param {type:\"integer\"}\n", "raw_audio = track_audio[sample * stride:sample * stride + slice_size]\n", "_, (sample_rate,\n", " audio2) = audio_diffusion.generate_spectrogram_and_audio_from_audio(\n", " raw_audio=raw_audio,\n", " mask_start_secs=1,\n", " mask_end_secs=1,\n", " step_generator=torch.Generator(device=device))\n", "display(Audio(audio, rate=sample_rate))\n", "display(Audio(audio2, rate=sample_rate))" ] }, { "cell_type": "markdown", "id": "efc32dae", "metadata": {}, "source": [ "## DDIM (De-noising Diffusion Implicit Models)" ] }, { "cell_type": "code", "execution_count": null, "id": "a021f78a", "metadata": {}, "outputs": [], "source": [ "audio_diffusion = AudioDiffusion(model_id='teticio/audio-diffusion-ddim-256')\n", "mel = audio_diffusion.pipe.mel" ] }, { "cell_type": "markdown", "id": "deb23339", "metadata": {}, "source": [ "### Generation can be done in many fewer steps with DDIMs" ] }, { "cell_type": "code", "execution_count": null, "id": "c105a497", "metadata": {}, "outputs": [], "source": [ "for _ in range(10):\n", " seed = generator.seed()\n", " print(f'Seed = {seed}')\n", " generator.manual_seed(seed)\n", " image, (sample_rate,\n", " audio) = audio_diffusion.generate_spectrogram_and_audio(\n", " generator=generator)\n", " display(image)\n", " display(Audio(audio, rate=sample_rate))\n", " loop = AudioDiffusion.loop_it(audio, sample_rate)\n", " if loop is not None:\n", " display(Audio(loop, rate=sample_rate))\n", " else:\n", " print(\"Unable to determine loop points\")" ] }, { "cell_type": "markdown", "id": "cab4692c", "metadata": {}, "source": [ 
"The parameter eta controls the variance:\n", "* 0 - DDIM (deterministic)\n", "* 1 - DDPM (De-noising Diffusion Probabilistic Model)" ] }, { "cell_type": "code", "execution_count": null, "id": "72bdd207", "metadata": {}, "outputs": [], "source": [ "image, (sample_rate, audio) = audio_diffusion.generate_spectrogram_and_audio(\n", " steps=1000, generator=generator, eta=1)\n", "display(image)\n", "display(Audio(audio, rate=sample_rate))" ] }, { "cell_type": "markdown", "id": "b8d5442c", "metadata": {}, "source": [ "### DDIMs can be used as encoders..." ] }, { "cell_type": "code", "execution_count": null, "id": "269ee816", "metadata": {}, "outputs": [], "source": [ "# Doesn't have to be an audio from the train dataset, this is just for convenience\n", "ds = load_dataset('teticio/audio-diffusion-256')" ] }, { "cell_type": "code", "execution_count": null, "id": "278d1d80", "metadata": {}, "outputs": [], "source": [ "image = ds['train'][264]['image']\n", "display(Audio(mel.image_to_audio(image), rate=sample_rate))" ] }, { "cell_type": "code", "execution_count": null, "id": "912b54e4", "metadata": {}, "outputs": [], "source": [ "noise = audio_diffusion.pipe.encode([image])" ] }, { "cell_type": "code", "execution_count": null, "id": "c7b31f97", "metadata": {}, "outputs": [], "source": [ "# Reconstruct original audio from noise\n", "_, (sample_rate, audio) = audio_diffusion.generate_spectrogram_and_audio(\n", " noise=noise, generator=generator)\n", "display(Audio(audio, rate=sample_rate))" ] }, { "cell_type": "markdown", "id": "998c776b", "metadata": {}, "source": [ "### ...or to interpolate between audios" ] }, { "cell_type": "code", "execution_count": null, "id": "33f82367", "metadata": {}, "outputs": [], "source": [ "image2 = ds['train'][15978]['image']\n", "display(Audio(mel.image_to_audio(image2), rate=sample_rate))" ] }, { "cell_type": "code", "execution_count": null, "id": "f93fb6c0", "metadata": {}, "outputs": [], "source": [ "noise2 = audio_diffusion.pipe.encode([image2])" ] }, { "cell_type": "code", "execution_count": null, "id": "a4190563", "metadata": {}, "outputs": [], "source": [ "alpha = 0.5 #@param {type:\"slider\", min:0, max:1, step:0.1}\n", "_, (sample_rate, audio) = audio_diffusion.generate_spectrogram_and_audio(\n", " noise=audio_diffusion.pipe.slerp(noise, noise2, alpha),\n", " generator=generator)\n", "display(Audio(mel.image_to_audio(image), rate=sample_rate))\n", "display(Audio(mel.image_to_audio(image2), rate=sample_rate))\n", "display(Audio(audio, rate=sample_rate))" ] }, { "cell_type": "markdown", "id": "9b244547", "metadata": {}, "source": [ "## Latent Audio Diffusion\n", "Instead of de-noising images directly in the pixel space, we can work in the latent space of a pre-trained VAE (Variational AutoEncoder). This is much faster to train and run inference on, although the quality suffers as there are now three stages involved in encoding / decoding: mel spectrogram, VAE and de-noising." 
] }, { "cell_type": "code", "execution_count": null, "id": "a88b3fbb", "metadata": {}, "outputs": [], "source": [ "model_id = \"teticio/latent-audio-diffusion-ddim-256\" #@param [\"teticio/latent-audio-diffusion-256\", \"teticio/latent-audio-diffusion-ddim-256\"]" ] }, { "cell_type": "code", "execution_count": null, "id": "15e353ee", "metadata": {}, "outputs": [], "source": [ "audio_diffusion = AudioDiffusion(model_id=model_id)\n", "mel = audio_diffusion.pipe.mel" ] }, { "cell_type": "code", "execution_count": null, "id": "fa0f0c8c", "metadata": {}, "outputs": [], "source": [ "seed = 3412253600050855 #@param {type:\"integer\"}\n", "generator.manual_seed(seed)\n", "image, (sample_rate, audio) = audio_diffusion.generate_spectrogram_and_audio(\n", " generator=generator)\n", "display(image)\n", "display(Audio(audio, rate=sample_rate))" ] }, { "cell_type": "code", "execution_count": null, "id": "73dc575d", "metadata": {}, "outputs": [], "source": [ "seed2 = 7016114633369557 #@param {type:\"integer\"}\n", "generator.manual_seed(seed2)\n", "image2, (sample_rate, audio2) = audio_diffusion.generate_spectrogram_and_audio(\n", " generator=generator)\n", "display(image2)\n", "display(Audio(audio2, rate=sample_rate))" ] }, { "cell_type": "markdown", "id": "428d2d67", "metadata": {}, "source": [ "### Interpolation in latent space\n", "As the VAE forces a more compact, lower dimensional representation for the spectrograms, interpolation in latent space can lead to meaningful combinations of audios. In combination with the (deterministic) DDIM from the previous section, the model can be used as an encoder / decoder to a lower dimensional space." ] }, { "cell_type": "code", "execution_count": null, "id": "72211c2b", "metadata": {}, "outputs": [], "source": [ "generator.manual_seed(seed)\n", "latents = torch.randn((1, audio_diffusion.pipe.unet.in_channels,\n", " audio_diffusion.pipe.unet.sample_size[0],\n", " audio_diffusion.pipe.unet.sample_size[1]),\n", " device=device,\n", " generator=generator)\n", "latents.shape" ] }, { "cell_type": "code", "execution_count": null, "id": "6c732dbe", "metadata": {}, "outputs": [], "source": [ "generator.manual_seed(seed2)\n", "latents2 = torch.randn((1, audio_diffusion.pipe.unet.in_channels,\n", " audio_diffusion.pipe.unet.sample_size[0],\n", " audio_diffusion.pipe.unet.sample_size[1]),\n", " device=device,\n", " generator=generator)\n", "latents2.shape" ] }, { "cell_type": "code", "execution_count": null, "id": "159bcfc4", "metadata": {}, "outputs": [], "source": [ "alpha = 0.5 #@param {type:\"slider\", min:0, max:1, step:0.1}\n", "_, (sample_rate, audio3) = audio_diffusion.generate_spectrogram_and_audio(\n", " noise=audio_diffusion.pipe.slerp(latents, latents2, alpha),\n", " generator=generator)\n", "display(Audio(audio, rate=sample_rate))\n", "display(Audio(audio2, rate=sample_rate))\n", "display(Audio(audio3, rate=sample_rate))" ] }, { "cell_type": "code", "execution_count": null, "id": "ce6c9cc1", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "accelerator": "GPU", "colab": { "provenance": [] }, "gpuClass": "standard", "kernelspec": { "display_name": "huggingface", "language": "python", "name": "huggingface" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.6" }, "toc": { "base_numbering": 1, "nav_menu": {}, "number_sections": true, "sideBar": true, "skip_h1_title": false, 
"title_cell": "Table of Contents", "title_sidebar": "Contents", "toc_cell": false, "toc_position": {}, "toc_section_display": true, "toc_window_display": false } }, "nbformat": 4, "nbformat_minor": 5 }