{
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "_dhTFlyu6b9h"
      },
      "source": [
        "##### Copyright 2021 Google LLC.\n",
        "\n",
        "Licensed under the Apache License, Version 2.0 (the \"License\");\n",
        "you may not use this file except in compliance with the License.\n",
        "You may obtain a copy of the License at\n",
        "\n",
        "    http://www.apache.org/licenses/LICENSE-2.0\n",
        "\n",
        "Unless required by applicable law or agreed to in writing, software\n",
        "distributed under the License is distributed on an \"AS IS\" BASIS,\n",
        "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
        "See the License for the specific language governing permissions and\n",
        "limitations under the License."
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "knTwjafz6fm3"
      },
      "source": [
        "# Yamnet controlled mixing of speech enhanced audio\n",
        "This notebook will mix a variable amount of speech enhanced (cleaned) and original (noisy) audio, using one of two strategies:\n",
        "1. Mix a fixed ratio of cleaned and noisy audio.\n",
        "1. Mix a variable ratio of cleaned and noisy audio, determined by causally running [YAMNet](https://www.tensorflow.org/hub/tutorials/yamnet) model inference. YAMNet is used here to estimate, every 0.480 s, how much the audio is like music instead of speech/silence. Because the speech enhancement model will often remove music, we reduce the fraction of speech enhanced audio that is mixed whenever YAMNet detects music.\n",
        "\n",
        "Inputs:\n",
        "* A directory containing wav files of the original (uncleaned/noisy) audio.\n",
        "* A directory containing wav files of the speech enhanced audio, having the same basename as the corresponding original audio file.\n",
        "\n",
        "Outputs:\n",
        "* A directory containing wav files of the mixed audio, mixed based off of either the fixed or variable strategy.\n",
        "\n",
        "**Note**: YAMNet takes an input window of 0.960 s to make a prediction, and does so every 0.480 s (i.e., windows are overlapped by 50%). Hence, to ensure a causal mixing strategy, we implement a fixed default mix ratio for the first 0.960 s."
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "uY-ud1nvXksk"
      },
      "outputs": [],
      "source": [
        "import os\n",
        "from tensorflow.io import gfile\n",
        "import tensorflow as tf\n",
        "import tensorflow_hub as hub\n",
        "import numpy as np\n",
        "import csv\n",
        "\n",
        "from IPython.display import Audio, display\n",
        "import ipywidgets\n",
        "import matplotlib.pyplot as plt\n",
        "from scipy.io import wavfile\n",
        "from scipy.signal import lfilter\n",
        "from scipy.signal import resample\n",
        "\n",
        "from google.colab import widgets\n",
        "\n",
        "tf.compat.v1.enable_eager_execution()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "Ynx9i5IBSrGr"
      },
      "outputs": [],
      "source": [
        "# @title Mount Google Drive\n",
        "from google.colab import drive\n",
        "ROOT_DIR = '/content/gdrive'\n",
        "drive.mount(ROOT_DIR, force_remount=True)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "cellView": "form",
        "id": "X2PO2zDvULa1"
      },
      "outputs": [],
      "source": [
        "# @title Helper function for playing audio.\n",
        "def PlaySound(samples, sample_rate=16000):\n",
        "  \"\"\"Displays an inline audio player for the given samples.\n",
        "\n",
        "  Args:\n",
        "    samples: Audio samples (array-like) to play.\n",
        "    sample_rate: Int, sample rate of the audio in Hz.\n",
        "  \"\"\"\n",
        "  widget_output = ipywidgets.Output()\n",
        "  with widget_output:\n",
        "    display(Audio(samples, rate=sample_rate))\n",
        "  display(widget_output)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "cellView": "form",
        "id": "pQHN9jlleLDu"
      },
      "outputs": [],
      "source": [
        "#@title File read/write functions.\n",
        "def write_wav(filename, waveform, sample_rate=16000):\n",
        "  \"\"\"Write a audio waveform (float numpy array) as .wav file.\"\"\"\n",
        "  wavfile.write(\n",
        "      filename, sample_rate,\n",
        "      np.round(np.clip(waveform * 2**15, -32768, 32767)).astype(np.int16))\n",
        "\n",
        "def read_wav(wav_path, sample_rate=16000, channel=None):\n",
        "  \"\"\"Read a wav file as numpy array.\n",
        "\n",
        "  Args:\n",
        "    wav_path: String, path to .wav file.\n",
        "    sample_rate: Int, sample rate for audio to be converted to.\n",
        "    channel: Int, option to select a particular channel for stereo audio.\n",
        "\n",
        "  Returns:\n",
        "    Audio as float numpy array.\n",
        "  \"\"\"\n",
        "  sr_read, x = wavfile.read(wav_path)\n",
        "  x = x.astype(np.float32) / (2**15)\n",
        "\n",
        "  if sr_read != sample_rate:\n",
        "    x = resample(x, int(round((float(sample_rate) / sr_read) * len(x))))\n",
        "  if x.ndim \u003e 1 and channel is not None:\n",
        "    return x[:, channel]\n",
        "  return x"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "cellView": "form",
        "id": "cBO0qH6bXxMq"
      },
      "outputs": [],
      "source": [
        "# @title YAMNet inference code.\n",
        "def class_names_from_csv(class_map_csv_text):\n",
        "  \"\"\"Returns the list of class names corresponding to the score vector.\n",
        "\n",
        "  Args:\n",
        "    class_map_csv_text: String, path to the YAMNet class-map CSV file.\n",
        "\n",
        "  Returns:\n",
        "    List of class display names, in score-vector order.\n",
        "  \"\"\"\n",
        "  with tf.io.gfile.GFile(class_map_csv_text) as csvfile:\n",
        "    return [row['display_name'] for row in csv.DictReader(csvfile)]\n",
        "\n",
        "\n",
        "# Load the pretrained YAMNet model from TF-Hub and build the list of class\n",
        "# names from the model's bundled class-map CSV.\n",
        "yamnet_model = hub.load('https://tfhub.dev/google/yamnet/1')\n",
        "class_map_path = yamnet_model.class_map_path().numpy()\n",
        "CLASS_NAMES = class_names_from_csv(class_map_path)\n",
        "# Index ranges of interest within the 521-class score vector.\n",
        "# NOTE(review): the hard-coded slices below assume the class-map ordering in\n",
        "# the CSV linked here stays fixed — verify if the model version changes.\n",
        "# See https://github.com/tensorflow/models/blob/master/research/audioset/yamnet/yamnet_class_map.csv\n",
        "INDEX_SILENCE = np.where(np.array(CLASS_NAMES) == 'Silence')[0][0]\n",
        "INDICES_MUSIC = slice(132, 277)\n",
        "INDICES_SPEECH = slice(0, 67)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "cellView": "form",
        "id": "rlSAT78X0FSd"
      },
      "outputs": [],
      "source": [
        "# @title Code for variable mixing.\n",
        "\n",
        "\n",
        "def get_music_fraction(scores):\n",
        "  \"\"\"For each inference window, convert YAMNet prediction to fraction of music.\n",
        "\n",
        "  While YAMNet outputs predictions on 521 classes, here we only look at the\n",
        "  fraction of music relative to music + speech + silence.\n",
        "\n",
        "  Args:\n",
        "    scores: A (N, 521) shape array of YAMNet predictions in [0, 1.0], for the\n",
        "      521 classes. N is roughly input_duration / 0.480 s.\n",
        "\n",
        "  Returns:\n",
        "    music_normalized: A 1D array of fraction YAMNet predicted music in [0,\n",
        "    1.0].\n",
        "  \"\"\"\n",
        "  # Strongest music-class score per window.\n",
        "  music = np.max(scores[:, INDICES_MUSIC], axis=1)\n",
        "  # Strongest speech-class score per window, plus the silence score.\n",
        "  speech = np.max(scores[:, INDICES_SPEECH], axis=1)\n",
        "  speech_and_silence = speech + scores[:, INDEX_SILENCE]\n",
        "  music_normalized = music / (music + speech_and_silence)\n",
        "  # NOTE(review): `scores` appears to be a TF tensor (callers use\n",
        "  # scores.numpy()); the mixed numpy/TF arithmetic above is assumed to yield\n",
        "  # a tensor, hence the .numpy() here — confirm against yamnet_model output.\n",
        "  return music_normalized.numpy()\n",
        "\n",
        "\n",
        "def map_fraction_music_to_fraction_speech_enhancement_to_mix(\n",
        "    x, threshold=0.2, ceiling=0.4):\n",
        "  \"\"\"A mapping from fraction music detected to non-speech-enhanced audio mixed.\n",
        "\n",
        "  Args:\n",
        "    x: A 1D array of fraction YAMNet predicted music in [0, 1.0].\n",
        "    threshold: Float in [0, 1.0]; values below this are mapped to 0.0.\n",
        "    ceiling: Float, the maximum output value (i.e. the output for 1.0 input).\n",
        "\n",
        "  Returns:\n",
        "    fraction_non_speech_enhanced_audio: A 1D array of fraction of\n",
        "    non-speech-enhanced audio to mix in [0, 1.0].\n",
        "  \"\"\"\n",
        "  fraction_non_speech_enhanced_audio = (x \u003e threshold) * (x**(1 / 5)) * ceiling\n",
        "  return fraction_non_speech_enhanced_audio\n",
        "\n",
        "\n",
        "def get_causal_speech_enhancement_mixing_strategy(\n",
        "    x, default_non_speech_enhanced_mix=0.05, num_periods_to_run_default_mix=2):\n",
        "  \"\"\"Get a causal speech enhancement mixing strategy.\n",
        "\n",
        "  Args:\n",
        "    x: A 1D array of fraction of non-speech-enhanced audio to mix in [0, 1.0].\n",
        "    default_non_speech_enhanced_mix: Float in [0.0, 1.0], the default mix of non\n",
        "      speech enhanced audio to mix.\n",
        "    num_periods_to_run_default_mix: Int, number of periods to run the default\n",
        "      mix for. For the YAMNet model example inference, this should be at least\n",
        "      2, in order to use the YAMNet predictions in a causal way.\n",
        "\n",
        "  Returns:\n",
        "    A 1D array of fraction of non-speech enhanced audio to mix in [0,\n",
        "    1.0], which is smoothed with a weighting function and shifted to be causal\n",
        "    by having a fixed default mix ratio. The length of this array is one larger\n",
        "    than len(x).\n",
        "  \"\"\"\n",
        "  if num_periods_to_run_default_mix \u003c 2:\n",
        "    raise ValueError(\n",
        "        'num_periods_to_run_default_mix=%d would yield non-causal result' %\n",
        "        num_periods_to_run_default_mix)\n",
        "  # We weight the current prediction 60%, the previous prediction 30%, and the\n",
        "  # one before it 10%.\n",
        "  kernel = np.array([.6, .3, .1])\n",
        "\n",
        "  # Prepend default-mix values so the earliest windows use the fixed default\n",
        "  # mix and the smoothing never looks ahead of the current prediction.\n",
        "  x_pad = np.append([default_non_speech_enhanced_mix] *\n",
        "                    (num_periods_to_run_default_mix + len(kernel) - 1), x)\n",
        "  # 'valid' convolution over the padded series, trimmed to len(x) + 1 values.\n",
        "  return np.convolve(x_pad, kernel, 'valid')[:len(x) + 1]\n",
        "\n",
        "\n",
        "def gen_audio_mixing_waveform(mix_strategy_discrete, samples_per_window,\n",
        "                              cross_fade):\n",
        "  \"\"\"Map a discrete mixing strategy to a mix waveform with crossfade.\n",
        "\n",
        "  Args:\n",
        "    mix_strategy_discrete: A 1D array of fraction of non-speech enhanced audio\n",
        "      to mix in [0, 1.0], each element corresponding to a 0.480 s window. This\n",
        "      array should be long enough so that len(mix_strategy_discrete) *\n",
        "      samples_per_window is at least as long as the input audio.\n",
        "    samples_per_window: Int, number of audio samples per window.\n",
        "    cross_fade: Int, number of audio samples over which to crossfade\n",
        "\n",
        "  Returns:\n",
        "    mix_continuous_crossfaded: A 1D numpy array of audio with values in [0,\n",
        "    1.0], representing the fraction of non-speech-enhanced audio to mix. The\n",
        "    output length is an integer multiple of samples_per_window and should be\n",
        "    cropped to match the exact length of input audio.\n",
        "  \"\"\"\n",
        "  mix_continuous = np.repeat(mix_strategy_discrete, samples_per_window)\n",
        "  window = np.hanning(cross_fade)\n",
        "  mix_continuous_crossfaded = lfilter(window / np.sum(window), 1,\n",
        "                                      mix_continuous)\n",
        "  return mix_continuous_crossfaded\n",
        "\n",
        "\n",
        "def run_yamnet_mix_and_save_audio(audio_clip_subpath,\n",
        "                                  input_path,\n",
        "                                  input_enhanced_path,\n",
        "                                  output_path,\n",
        "                                  strategy='variable'):\n",
        "  \"\"\"Runs YAMNet inference, causally mixes speech enhanced and original audio.\n",
        "\n",
        "  Args:\n",
        "    audio_clip_subpath: String, the input .wav filename, of original and\n",
        "      enhanced audio.\n",
        "    input_path: String, path to directory with original audio.\n",
        "    input_enhanced_path: String, path to directory with speech enhanced audio.\n",
        "    output_path: String, path where the mixed audio will be saved.\n",
        "    strategy: String, either 'variable' or 'fixed', for the variable mixing\n",
        "      strategy utilizing YAMNet, or a baseline fixed strategy.\n",
        "\n",
        "  Raises:\n",
        "    ValueError: If the original and enhanced audio differ in shape, or if\n",
        "      `strategy` is not one of 'variable' or 'fixed'.\n",
        "  \"\"\"\n",
        "  original_audio = read_wav(\n",
        "      os.path.join(input_path, audio_clip_subpath), sample_rate=SAMPLE_RATE)\n",
        "  # The enhanced audio may be multi-channel; keep only the first channel.\n",
        "  cleaned_audio = read_wav(\n",
        "      os.path.join(input_enhanced_path, audio_clip_subpath),\n",
        "      sample_rate=SAMPLE_RATE,\n",
        "      channel=0)\n",
        "  if original_audio.shape != cleaned_audio.shape:\n",
        "    raise ValueError('Cleaned audio shape does not match: %s, %s' %\n",
        "                     (original_audio.shape, cleaned_audio.shape))\n",
        "\n",
        "  # YAMNet inference on the original (noisy) audio.\n",
        "  scores, _, spectrogram = yamnet_model(original_audio)\n",
        "\n",
        "  if strategy == 'variable':\n",
        "    # Per-window music fractions -> smoothed, causal per-sample mix waveform.\n",
        "    music_fraction = get_music_fraction(scores)\n",
        "    mix_strategy_discrete = get_causal_speech_enhancement_mixing_strategy(\n",
        "        map_fraction_music_to_fraction_speech_enhancement_to_mix(\n",
        "            music_fraction))\n",
        "    mix_waveform = gen_audio_mixing_waveform(mix_strategy_discrete,\n",
        "                                             SAMPLES_PER_INFERENCE_PERIOD,\n",
        "                                             CROSS_FADE)\n",
        "    # Crop to exactly the input audio length.\n",
        "    mix_waveform = mix_waveform[:original_audio.shape[0]]\n",
        "  elif strategy == 'fixed':\n",
        "    mix_waveform = np.ones((original_audio.shape[0],)) * FIXED_NOISE_FRACTION\n",
        "  else:\n",
        "    raise ValueError('Invalid mixing strategy: %s' % strategy)\n",
        "  # mix_waveform is the per-sample fraction of original (noisy) audio.\n",
        "  mixed_audio = mix_waveform * original_audio + (1 -\n",
        "                                                 mix_waveform) * cleaned_audio\n",
        "\n",
        "  display_audio(original_audio, cleaned_audio, mixed_audio)\n",
        "  visualize_mixing(\n",
        "      scores,\n",
        "      spectrogram,\n",
        "      original_audio,\n",
        "      mix_waveform,\n",
        "      mixed_audio,\n",
        "      output_plot_filename=os.path.splitext(\n",
        "          os.path.join(output_path, audio_clip_subpath))[0] + '.png')\n",
        "\n",
        "  write_wav(\n",
        "      os.path.join(output_path, audio_clip_subpath), mixed_audio, SAMPLE_RATE)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "cellView": "form",
        "id": "eRF8vPLa9314"
      },
      "outputs": [],
      "source": [
        "# @title Code for listening and visualizing mixing.\n",
        "def visualize_mixing(scores,\n",
        "                     spectrogram,\n",
        "                     waveform,\n",
        "                     mix_waveform,\n",
        "                     mixed_waveform=None,\n",
        "                     output_plot_filename=None):\n",
        "  \"\"\"Generates a plot showing the input and mixed audio, and YAMNet predictions.\n",
        "\n",
        "  Args:\n",
        "    scores: A N x 521, array of predictions in [0, 1.0], for the 521 classes. N\n",
        "      is roughly input_duration / 0.480 s.\n",
        "    spectrogram: A 2D array, the spectrogram of input audio, for visualization.\n",
        "    waveform: A 1D array, the input audio.\n",
        "    mix_waveform: A 1D array in [0, 1.0] denoting the fraction noise to mix.\n",
        "    mixed_waveform: Optional 1D array, the mixed output audio. When None, the\n",
        "      mixed-audio panel is omitted.\n",
        "    output_plot_filename: String, output filename. When None, the plot is not\n",
        "      saved.\n",
        "  \"\"\"\n",
        "  duration = len(waveform) / SAMPLE_RATE\n",
        "\n",
        "  scores_np = scores.numpy()\n",
        "  spectrogram_np = spectrogram.numpy()\n",
        "\n",
        "  plt.figure(figsize=(10, 7))\n",
        "\n",
        "  # Plot the waveform.\n",
        "  plt.subplot(5, 1, 1)\n",
        "  plt.plot(np.arange(0, waveform.shape[0]) / SAMPLE_RATE, waveform)\n",
        "  plt.xlim([0, duration])\n",
        "  plt.xlabel('time (s)')\n",
        "  plt.ylabel('input')\n",
        "\n",
        "  # Plot the log-mel spectrogram (returned by the model).\n",
        "  plt.subplot(5, 1, 2)\n",
        "  plt.imshow(\n",
        "      spectrogram_np.T[:, :int(100 * duration)],\n",
        "      aspect='auto',\n",
        "      interpolation='nearest',\n",
        "      origin='lower')\n",
        "  plt.xlabel('spectrogram # (10 ms hop each)')\n",
        "\n",
        "  # Plot and label the model output scores for the top-scoring classes.\n",
        "  mean_scores = np.mean(scores, axis=0)\n",
        "  top_n = 5\n",
        "  top_class_indices = np.argsort(mean_scores)[::-1][:top_n]\n",
        "  plt.subplot(5, 1, 3)\n",
        "  plt.imshow(\n",
        "      scores_np[:, top_class_indices].T,\n",
        "      aspect='auto',\n",
        "      interpolation='nearest',\n",
        "      cmap='gray_r')\n",
        "  # Align the prediction axis with the time axes of the other panels.\n",
        "  plt.xlim([-1.5, (duration / 0.480) - 1.5])\n",
        "\n",
        "  # Label the top_N classes.\n",
        "  yticks = range(0, top_n, 1)\n",
        "  plt.yticks(yticks, [CLASS_NAMES[top_class_indices[x]] for x in yticks])\n",
        "  _ = plt.ylim(-0.5 + np.array([top_n, 0]))\n",
        "  plt.xlabel('prediction # (960 ms input, every 480 ms)')\n",
        "  plt.ylabel('YAMNet pred.')\n",
        "\n",
        "  plt.subplot(5, 1, 4)\n",
        "  plt.plot(np.arange(0, mix_waveform.shape[0]) / SAMPLE_RATE, mix_waveform)\n",
        "  plt.xlim([0, duration])\n",
        "  plt.ylim([-0.01, 1.01])\n",
        "  plt.xlabel('time (s)')\n",
        "  plt.ylabel('strategy')\n",
        "\n",
        "  # mixed_waveform defaults to None; guard its panel to avoid an\n",
        "  # AttributeError when no mixed audio is supplied.\n",
        "  if mixed_waveform is not None:\n",
        "    plt.subplot(5, 1, 5)\n",
        "    plt.plot(\n",
        "        np.arange(0, mixed_waveform.shape[0]) / SAMPLE_RATE, mixed_waveform)\n",
        "    plt.xlim([0, duration])\n",
        "    plt.xlabel('time (s)')\n",
        "    plt.ylabel('mixed')\n",
        "  plt.tight_layout()\n",
        "  if output_plot_filename is not None:\n",
        "    # PNG data is binary, so open the output file in 'wb' (not 'w') mode.\n",
        "    plt.savefig(\n",
        "        gfile.GFile(output_plot_filename, 'wb'), dpi=300, bbox_inches='tight')\n",
        "\n",
        "\n",
        "def display_audio(original_audio, cleaned_audio, mixed_audio):\n",
        "  \"\"\"Shows side-by-side audio players for original, cleaned and mixed audio.\"\"\"\n",
        "  grid = widgets.Grid(1, 3)\n",
        "  labeled_clips = [('original', original_audio), ('cleaned', cleaned_audio),\n",
        "                   ('mixed', mixed_audio)]\n",
        "  for column, (label, clip) in enumerate(labeled_clips):\n",
        "    with grid.output_to(0, column):\n",
        "      print(label)\n",
        "      PlaySound(clip, SAMPLE_RATE)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "zwxWdRi28ATU"
      },
      "outputs": [],
      "source": [
        "# Desired rate, required by YAMNet.\n",
        "SAMPLE_RATE = 16000\n",
        "SAMPLES_PER_INFERENCE_PERIOD = int(\n",
        "    SAMPLE_RATE * 0.480)  # time between YAMNet inference windows\n",
        "\n",
        "# Number of samples (0.100 s) over which to crossfade mix-ratio changes.\n",
        "CROSS_FADE = int(0.100 * SAMPLE_RATE)\n",
        "# Fraction of original (noisy) audio mixed in by the 'fixed' strategy.\n",
        "FIXED_NOISE_FRACTION = 0.05"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "QkZ69ykfZI37"
      },
      "outputs": [],
      "source": [
        "# Specify input paths.\n",
        "PATH_ORIGINAL = 'gdrive/My Drive/cihack_audio'  # E.g. gdrive/My Drive/cihack_audio\n",
        "PATH_SPEECH_ENHANCED = PATH_ORIGINAL + '_enhanced'\n",
        "\n",
        "# Specify output paths.\n",
        "PATH_MIXED_VARIABLE = PATH_SPEECH_ENHANCED + '_mixed_variable'\n",
        "PATH_MIXED_FIXED = PATH_SPEECH_ENHANCED + '_mixed_fixed'\n",
        "\n",
        "# (original, enhanced, output) directory triples. The output directory name\n",
        "# determines the mixing strategy ('variable' or 'fixed') in the run loop.\n",
        "PATH_SETS_TO_MIX = [\n",
        "    (PATH_ORIGINAL, PATH_SPEECH_ENHANCED, PATH_MIXED_VARIABLE),\n",
        "    (PATH_ORIGINAL, PATH_SPEECH_ENHANCED, PATH_MIXED_FIXED),\n",
        "]"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "NaqjspRaZNvo"
      },
      "outputs": [],
      "source": [
        "audio_clip_matcher = '*.wav'  #@param\n",
        "\n",
        "# Mix every matching wav in each (input, enhanced, output) directory triple.\n",
        "for input_path, enhanced_input_path, output_path in PATH_SETS_TO_MIX:\n",
        "  wavs = gfile.glob(os.path.join(input_path, audio_clip_matcher))\n",
        "  gfile.makedirs(output_path)\n",
        "  # The strategy is encoded in the output directory name; determine it once\n",
        "  # per directory, and fail loudly instead of leaving `strategy` unbound\n",
        "  # when the name matches neither keyword.\n",
        "  if 'variable' in output_path:\n",
        "    strategy = 'variable'\n",
        "  elif 'fixed' in output_path:\n",
        "    strategy = 'fixed'\n",
        "  else:\n",
        "    raise ValueError(\n",
        "        'Cannot infer mixing strategy from output path: %s' % output_path)\n",
        "  for wav in wavs:\n",
        "    run_yamnet_mix_and_save_audio(\n",
        "        os.path.basename(wav),\n",
        "        input_path,\n",
        "        enhanced_input_path,\n",
        "        output_path,\n",
        "        strategy=strategy)"
      ]
    }
  ],
  "metadata": {
    "colab": {
      "collapsed_sections": [],
      "name": "YAMNet_controlled_mixing_of_speech_enhanced_audio.ipynb",
      "provenance": [
        {
          "file_id": "1xJkjFAY-XIOtYPCd4ek3cT6Y80Gtpd2z",
          "timestamp": 1614196033651
        }
      ]
    },
    "kernelspec": {
      "display_name": "Python 3",
      "name": "python3"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}
