{ "cells": [ { "cell_type": "markdown", "id": "2ada074b", "metadata": {}, "source": [ "# Audio Diffusion\n", "For training scripts and notebooks, visit https://github.com/teticio/audio-diffusion" ] }, { "cell_type": "code", "execution_count": null, "id": "6c7800a6", "metadata": {}, "outputs": [], "source": [ "try:\n", "    # are we running on Google Colab?\n", "    import google.colab\n", "    %pip install -q diffusers torch librosa\n", "except ImportError:\n", "    pass" ] }, { "cell_type": "code", "execution_count": null, "id": "c2fc0e7a", "metadata": {}, "outputs": [], "source": [ "import torch\n", "import random\n", "import librosa\n", "import numpy as np\n", "from datasets import load_dataset\n", "from IPython.display import Audio\n", "from librosa.beat import beat_track\n", "from diffusers import DiffusionPipeline" ] }, { "cell_type": "code", "execution_count": null, "id": "b294a94a", "metadata": {}, "outputs": [], "source": [ "device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n", "generator = torch.Generator(device=device)" ] }, { "cell_type": "markdown", "id": "f3feb265", "metadata": {}, "source": [ "## DDPM (De-noising Diffusion Probabilistic Models)" ] }, { "cell_type": "markdown", "id": "7fd945bb", "metadata": {}, "source": [ "### Select model" ] }, { "cell_type": "code", "execution_count": null, "id": "97f24046", "metadata": {}, "outputs": [], "source": [ "#@markdown teticio/audio-diffusion-256 - trained on my Spotify \"liked\" playlist\n", "\n", "#@markdown teticio/audio-diffusion-breaks-256 - trained on samples used in music\n", "\n", "#@markdown teticio/audio-diffusion-instrumental-hiphop-256 - trained on instrumental hiphop\n", "\n", "model_id = \"teticio/audio-diffusion-256\" #@param [\"teticio/audio-diffusion-256\", \"teticio/audio-diffusion-breaks-256\", \"teticio/audio-diffusion-instrumental-hiphop-256\", \"teticio/audio-diffusion-ddim-256\"]" ] }, { "cell_type": "code", "execution_count": null, "id": "a3d45c36", "metadata": {}, "outputs": [], "source": [ "audio_diffusion = DiffusionPipeline.from_pretrained(model_id).to(device)\n", "mel = audio_diffusion.mel\n", "sample_rate = mel.get_sample_rate()" ] }, { "cell_type": "code", "execution_count": null, "id": "ab0d705c", "metadata": {}, "outputs": [], "source": [ "def loop_it(audio: np.ndarray,\n", "            sample_rate: int,\n", "            loops: int = 12) -> np.ndarray:\n", "    \"\"\"Loop audio\n", "\n", "    Args:\n", "        audio (np.ndarray): audio as numpy array\n", "        sample_rate (int): sample rate of audio\n", "        loops (int): number of times to loop\n", "\n", "    Returns:\n", "        np.ndarray: looped audio, or None if no loop points could be found\n", "    \"\"\"\n", "    _, beats = beat_track(y=audio, sr=sample_rate, units='samples')\n", "    for beats_in_bar in [16, 12, 8, 4]:\n", "        if len(beats) > beats_in_bar:\n", "            return np.tile(audio[beats[0]:beats[beats_in_bar]], loops)\n", "    return None" ] }, { "cell_type": "markdown", "id": "011fb5a1", "metadata": {}, "source": [ "### Run model inference to generate mel spectrogram, audios and loops" ] }, { "cell_type": "code", "execution_count": null, "id": "b809fed5", "metadata": {}, "outputs": [], "source": [ "for _ in range(10):\n", "    seed = generator.seed()\n", "    print(f'Seed = {seed}')\n", "    generator.manual_seed(seed)\n", "    output = audio_diffusion(generator=generator)\n", "    image = output.images[0]\n", "    audio = output.audios[0, 0]\n", "    display(image)\n", "    display(Audio(audio, rate=sample_rate))\n", "    loop = loop_it(audio, sample_rate)\n", "    if loop is not None:\n",
"        display(Audio(loop, rate=sample_rate))\n", "    else:\n", "        print(\"Unable to determine loop points\")" ] }, { "cell_type": "markdown", "id": "0bb03e33", "metadata": {}, "source": [ "### Generate variations of audios" ] }, { "cell_type": "markdown", "id": "80e5b5fa", "metadata": {}, "source": [ "Try playing around with `start_step`. Values closer to zero will produce new samples, while values closer to 1,000 will produce samples more faithful to the original." ] }, { "cell_type": "code", "execution_count": null, "id": "5074ec11", "metadata": {}, "outputs": [], "source": [ "seed = 2391504374279719 #@param {type:\"integer\"}\n", "generator.manual_seed(seed)\n", "output = audio_diffusion(generator=generator)\n", "image = output.images[0]\n", "audio = output.audios[0, 0]\n", "display(image)\n", "display(Audio(audio, rate=sample_rate))" ] }, { "cell_type": "code", "execution_count": null, "id": "a0fefe28", "metadata": { "scrolled": false }, "outputs": [], "source": [ "start_step = 500 #@param {type:\"slider\", min:0, max:1000, step:10}\n", "track = loop_it(audio, sample_rate, loops=1)\n", "for variation in range(12):\n", "    output = audio_diffusion(raw_audio=audio, start_step=start_step)\n", "    image2 = output.images[0]\n", "    audio2 = output.audios[0, 0]\n", "    display(image2)\n", "    display(Audio(audio2, rate=sample_rate))\n", "    track = np.concatenate([track, loop_it(audio2, sample_rate, loops=1)])\n", "display(Audio(track, rate=sample_rate))" ] }, { "cell_type": "markdown", "id": "58a876c1", "metadata": {}, "source": [ "### Generate continuations (\"out-painting\")" ] }, { "cell_type": "code", "execution_count": null, "id": "b95d5780", "metadata": {}, "outputs": [], "source": [ "overlap_secs = 2 #@param {type:\"integer\"}\n", "start_step = 0 #@param {type:\"slider\", min:0, max:1000, step:10}\n", "overlap_samples = overlap_secs * sample_rate\n", "track = audio\n", "for variation in range(12):\n", "    output = audio_diffusion(raw_audio=audio[-overlap_samples:],\n", "                             start_step=start_step,\n", "                             mask_start_secs=overlap_secs)\n", "    image2 = output.images[0]\n", "    audio2 = output.audios[0, 0]\n", "    display(image2)\n", "    display(Audio(audio2, rate=sample_rate))\n", "    track = np.concatenate([track, audio2[overlap_samples:]])\n", "    audio = audio2\n", "display(Audio(track, rate=sample_rate))" ] }, { "cell_type": "markdown", "id": "b6434d3f", "metadata": {}, "source": [ "### Remix (style transfer)" ] }, { "cell_type": "markdown", "id": "0da030b2", "metadata": {}, "source": [ "Alternatively, you can start from another audio altogether, resulting in a kind of style transfer. Maintaining the same seed during generation fixes the style, while masking helps stitch consecutive segments together more smoothly."
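, "\n", "\n", "Before running the remix cells, the short sketch below (an addition for illustration, not part of the original pipeline) shows how a track is windowed into overlapping slices, mirroring the arithmetic of the remix loop further down, where `slice_size` is `mel.x_res * mel.hop_length` samples and the stride between slices is `slice_size - overlap_samples`." ] }, { "cell_type": "code", "execution_count": null, "id": "d41a1c2b", "metadata": {}, "outputs": [], "source": [ "# Illustrative sketch only (an addition, not from the original notebook):\n", "# compute the (start, end) sample indices of consecutive overlapping slices,\n", "# mirroring the indexing used in the remix loop below. The numbers are made up.\n", "def slice_bounds(total_samples: int, slice_size: int, overlap_samples: int):\n", "    \"\"\"Yield (start, end) indices of consecutive overlapping slices.\"\"\"\n", "    stride = slice_size - overlap_samples\n", "    for sample in range(total_samples // stride):\n", "        yield sample * stride, sample * stride + slice_size\n", "\n", "\n", "example_sr = 22050  # assumed sample rate, for illustration only\n", "for start, end in slice_bounds(total_samples=30 * example_sr,\n", "                               slice_size=5 * example_sr,\n", "                               overlap_samples=2 * example_sr):\n", "    print(f'{start / example_sr:5.1f}s - {end / example_sr:5.1f}s')" ] }, { "cell_type": "markdown", "id": "e5b7d2c9", "metadata": {}, "source": [ "First, choose an audio file to remix."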
 ] }, { "cell_type": "code", "execution_count": null, "id": "fc620a80", "metadata": {}, "outputs": [], "source": [ "try:\n", "    # are we running on Google Colab?\n", "    from google.colab import files\n", "    audio_file = list(files.upload().keys())[0]\n", "except ImportError:\n", "    audio_file = \"/home/teticio/Music/liked/El Michels Affair - Glaciers Of Ice.mp3\"" ] }, { "cell_type": "code", "execution_count": null, "id": "5a257e69", "metadata": { "scrolled": false }, "outputs": [], "source": [ "start_step = 500 #@param {type:\"slider\", min:0, max:1000, step:10}\n", "overlap_secs = 2 #@param {type:\"integer\"}\n", "track_audio, _ = librosa.load(audio_file, mono=True, sr=sample_rate)\n", "overlap_samples = overlap_secs * sample_rate\n", "slice_size = mel.x_res * mel.hop_length\n", "stride = slice_size - overlap_samples\n", "generator = torch.Generator(device=device)\n", "seed = generator.seed()\n", "print(f'Seed = {seed}')\n", "track = np.array([])\n", "not_first = 0\n", "for sample in range(len(track_audio) // stride):\n", "    generator.manual_seed(seed)\n", "    audio = np.array(track_audio[sample * stride:sample * stride + slice_size])\n", "    if not_first:\n", "        # Normalize and re-insert generated audio\n", "        audio[:overlap_samples] = audio2[-overlap_samples:] * np.max(\n", "            audio[:overlap_samples]) / np.max(audio2[-overlap_samples:])\n", "    output = audio_diffusion(raw_audio=audio,\n", "                             start_step=start_step,\n", "                             generator=generator,\n", "                             mask_start_secs=overlap_secs * not_first)\n", "    audio2 = output.audios[0, 0]\n", "    track = np.concatenate([track, audio2[overlap_samples * not_first:]])\n", "    not_first = 1\n", "    display(Audio(track, rate=sample_rate))" ] }, { "cell_type": "markdown", "id": "924ff9d5", "metadata": {}, "source": [ "### Fill the gap (\"in-painting\")" ] }, { "cell_type": "code", "execution_count": null, "id": "0200264c", "metadata": {}, "outputs": [], "source": [ "sample = 3 #@param {type:\"integer\"}\n", "raw_audio = track_audio[sample * stride:sample * stride + slice_size]\n", "output = audio_diffusion(raw_audio=raw_audio,\n", "                         mask_start_secs=1,\n", "                         mask_end_secs=1,\n", "                         step_generator=torch.Generator(device=device))\n", "audio2 = output.audios[0, 0]\n", "# Compare the original slice with the in-painted version\n", "display(Audio(raw_audio, rate=sample_rate))\n", "display(Audio(audio2, rate=sample_rate))" ] }, { "cell_type": "markdown", "id": "efc32dae", "metadata": {}, "source": [ "## DDIM (De-noising Diffusion Implicit Models)" ] }, { "cell_type": "code", "execution_count": null, "id": "a021f78a", "metadata": {}, "outputs": [], "source": [ "audio_diffusion = DiffusionPipeline.from_pretrained('teticio/audio-diffusion-ddim-256').to(device)\n", "mel = audio_diffusion.mel\n", "sample_rate = mel.get_sample_rate()" ] }, { "cell_type": "markdown", "id": "deb23339", "metadata": {}, "source": [ "### Generation can be done in many fewer steps with DDIMs" ] }, { "cell_type": "code", "execution_count": null, "id": "c105a497", "metadata": {}, "outputs": [], "source": [ "for _ in range(10):\n", "    seed = generator.seed()\n", "    print(f'Seed = {seed}')\n", "    generator.manual_seed(seed)\n", "    output = audio_diffusion(generator=generator)\n", "    image = output.images[0]\n", "    audio = output.audios[0, 0]\n", "    display(image)\n", "    display(Audio(audio, rate=sample_rate))\n", "    loop = loop_it(audio, sample_rate)\n", "    if loop is not None:\n", "        display(Audio(loop, rate=sample_rate))\n", "    else:\n", "        print(\"Unable to determine loop points\")" ] }, { "cell_type": "markdown", "id": "cab4692c", "metadata": {}, "source": [ "The parameter `eta` controls the variance:\n",
"* 0 - DDIM (deterministic)\n", "* 1 - DDPM (De-noising Diffusion Probabilistic Model)" ] }, { "cell_type": "code", "execution_count": null, "id": "72bdd207", "metadata": {}, "outputs": [], "source": [ "output = audio_diffusion(steps=1000, generator=generator, eta=1)\n", "image = output.images[0]\n", "audio = output.audios[0, 0]\n", "display(image)\n", "display(Audio(audio, rate=sample_rate))" ] }, { "cell_type": "markdown", "id": "b8d5442c", "metadata": {}, "source": [ "### DDIMs can be used as encoders..." ] }, { "cell_type": "code", "execution_count": null, "id": "269ee816", "metadata": {}, "outputs": [], "source": [ "# Doesn't have to be an audio from the train dataset, this is just for convenience\n", "ds = load_dataset('teticio/audio-diffusion-256')" ] }, { "cell_type": "code", "execution_count": null, "id": "278d1d80", "metadata": {}, "outputs": [], "source": [ "image = ds['train'][264]['image']\n", "display(Audio(mel.image_to_audio(image), rate=sample_rate))" ] }, { "cell_type": "code", "execution_count": null, "id": "912b54e4", "metadata": {}, "outputs": [], "source": [ "noise = audio_diffusion.encode([image])" ] }, { "cell_type": "code", "execution_count": null, "id": "c7b31f97", "metadata": {}, "outputs": [], "source": [ "# Reconstruct original audio from noise\n", "output = audio_diffusion(noise=noise, generator=generator)\n", "image = output.images[0]\n", "audio = output.audios[0, 0]\n", "display(Audio(audio, rate=sample_rate))" ] }, { "cell_type": "markdown", "id": "998c776b", "metadata": {}, "source": [ "### ...or to interpolate between audios" ] }, { "cell_type": "code", "execution_count": null, "id": "33f82367", "metadata": {}, "outputs": [], "source": [ "image2 = ds['train'][15978]['image']\n", "display(Audio(mel.image_to_audio(image2), rate=sample_rate))" ] }, { "cell_type": "code", "execution_count": null, "id": "f93fb6c0", "metadata": {}, "outputs": [], "source": [ "noise2 = audio_diffusion.encode([image2])" ] }, { "cell_type": "code", "execution_count": null, "id": "a4190563", "metadata": {}, "outputs": [], "source": [ "alpha = 0.5 #@param {type:\"slider\", min:0, max:1, step:0.1}\n", "output = audio_diffusion(\n", "    noise=audio_diffusion.slerp(noise, noise2, alpha),\n", "    generator=generator)\n", "audio = output.audios[0, 0]\n", "display(Audio(mel.image_to_audio(image), rate=sample_rate))\n", "display(Audio(mel.image_to_audio(image2), rate=sample_rate))\n", "display(Audio(audio, rate=sample_rate))" ] }, { "cell_type": "markdown", "id": "9b244547", "metadata": {}, "source": [ "## Latent Audio Diffusion\n", "Instead of de-noising images directly in the pixel space, we can work in the latent space of a pre-trained VAE (Variational AutoEncoder). This is much faster to train and run inference on, although the quality suffers as there are now three stages involved in encoding / decoding: mel spectrogram, VAE and de-noising."
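, "\n", "\n", "As a rough illustration of the speed-up, the sketch below is an addition (not from the original notebook) that compares how many values the de-noising UNet has to process per spectrogram in pixel space versus latent space; the per-side VAE downsampling factor and latent channel count are assumptions for illustration, not values read from the model." ] }, { "cell_type": "code", "execution_count": null, "id": "f7c3a9d1", "metadata": {}, "outputs": [], "source": [ "# Illustrative sketch only (an addition): compare the size of what the UNet\n", "# de-noises in pixel space vs. latent space. The 256 x 256 spectrogram size\n", "# matches the models above; the VAE downsampling factor and latent channel\n", "# count are assumptions for illustration.\n", "image_size = 256        # mel spectrogram is a 256 x 256 greyscale image\n", "vae_downsample = 8      # assumed per-side downsampling of the VAE\n", "latent_channels = 1     # assumed number of latent channels\n", "\n", "pixel_values = 1 * image_size * image_size\n", "latent_values = latent_channels * (image_size // vae_downsample) ** 2\n", "print(f'pixel space:  {pixel_values} values per spectrogram')\n", "print(f'latent space: {latent_values} values per spectrogram')\n", "print(f'reduction:    {pixel_values // latent_values}x')" ] }, { "cell_type": "markdown", "id": "c8d4e6f2", "metadata": {}, "source": [ "First, load one of the latent models and generate two audios to interpolate between."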
 ] }, { "cell_type": "code", "execution_count": null, "id": "a88b3fbb", "metadata": {}, "outputs": [], "source": [ "model_id = \"teticio/latent-audio-diffusion-ddim-256\" #@param [\"teticio/latent-audio-diffusion-256\", \"teticio/latent-audio-diffusion-ddim-256\"]" ] }, { "cell_type": "code", "execution_count": null, "id": "15e353ee", "metadata": {}, "outputs": [], "source": [ "audio_diffusion = DiffusionPipeline.from_pretrained(model_id).to(device)\n", "mel = audio_diffusion.mel\n", "sample_rate = mel.get_sample_rate()" ] }, { "cell_type": "code", "execution_count": null, "id": "fa0f0c8c", "metadata": {}, "outputs": [], "source": [ "seed = 3412253600050855 #@param {type:\"integer\"}\n", "generator.manual_seed(seed)\n", "output = audio_diffusion(generator=generator)\n", "image = output.images[0]\n", "audio = output.audios[0, 0]\n", "display(image)\n", "display(Audio(audio, rate=sample_rate))" ] }, { "cell_type": "code", "execution_count": null, "id": "73dc575d", "metadata": {}, "outputs": [], "source": [ "seed2 = 7016114633369557 #@param {type:\"integer\"}\n", "generator.manual_seed(seed2)\n", "output = audio_diffusion(generator=generator)\n", "image2 = output.images[0]\n", "audio2 = output.audios[0, 0]\n", "display(image2)\n", "display(Audio(audio2, rate=sample_rate))" ] }, { "cell_type": "markdown", "id": "428d2d67", "metadata": {}, "source": [ "### Interpolation in latent space\n", "As the VAE forces a more compact, lower dimensional representation of the spectrograms, interpolation in latent space can lead to meaningful combinations of audios. In combination with the (deterministic) DDIM from the previous section, the model can be used as an encoder / decoder to a lower dimensional space." ] }, { "cell_type": "code", "execution_count": null, "id": "72211c2b", "metadata": {}, "outputs": [], "source": [ "generator.manual_seed(seed)\n", "latents = torch.randn(\n", "    (1, audio_diffusion.unet.in_channels, audio_diffusion.unet.sample_size[0],\n", "     audio_diffusion.unet.sample_size[1]),\n", "    generator=generator, device=device)\n", "latents.shape" ] }, { "cell_type": "code", "execution_count": null, "id": "6c732dbe", "metadata": {}, "outputs": [], "source": [ "generator.manual_seed(seed2)\n", "latents2 = torch.randn(\n", "    (1, audio_diffusion.unet.in_channels, audio_diffusion.unet.sample_size[0],\n", "     audio_diffusion.unet.sample_size[1]),\n", "    generator=generator,\n", "    device=device)\n", "latents2.shape" ] }, { "cell_type": "code", "execution_count": null, "id": "159bcfc4", "metadata": {}, "outputs": [], "source": [ "alpha = 0.5 #@param {type:\"slider\", min:0, max:1, step:0.1}\n", "output = audio_diffusion(\n", "    noise=audio_diffusion.slerp(latents, latents2, alpha),\n", "    generator=generator)\n", "audio3 = output.audios[0, 0]\n", "display(Audio(audio, rate=sample_rate))\n", "display(Audio(audio2, rate=sample_rate))\n", "display(Audio(audio3, rate=sample_rate))" ] }, { "cell_type": "code", "execution_count": null, "id": "ce6c9cc1", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "accelerator": "GPU", "colab": { "provenance": [] }, "gpuClass": "standard", "kernelspec": { "display_name": "huggingface", "language": "python", "name": "huggingface" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.6" }, "toc": { "base_numbering": 1, "nav_menu": {}, "number_sections": true,
"sideBar": true, "skip_h1_title": false, "title_cell": "Table of Contents", "title_sidebar": "Contents", "toc_cell": false, "toc_position": {}, "toc_section_display": true, "toc_window_display": false } }, "nbformat": 4, "nbformat_minor": 5 }