{
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "cqjmjldEHOFf"
      },
      "source": [
        "##### Copyright 2021 Google LLC.\n",
        "\n",
        "Licensed under the Apache License, Version 2.0 (the \"License\");\n",
        "you may not use this file except in compliance with the License.\n",
        "You may obtain a copy of the License at\n",
        "\n",
        "    http://www.apache.org/licenses/LICENSE-2.0\n",
        "\n",
        "Unless required by applicable law or agreed to in writing, software\n",
        "distributed under the License is distributed on an \"AS IS\" BASIS,\n",
        "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
        "See the License for the specific language governing permissions and\n",
        "limitations under the License."
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "4ZhZSvtlGHF6"
      },
      "outputs": [],
      "source": [
        "import os\n",
        "from tensorflow.io import gfile\n",
        "from scipy import io\n",
        "from scipy import sparse\n",
        "import numpy as np\n",
        "from scipy.io import wavfile\n",
        "from scipy.signal import resample\n",
        "import tempfile"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "t_relnlfZ51f"
      },
      "outputs": [],
      "source": [
        "# @title Mount Google Drive\n",
        "# Colab-only: mounts the user's Google Drive at ROOT_DIR so later cells\n",
        "# can read input wavs and write electrodograms under gdrive/My Drive/.\n",
        "from google.colab import drive\n",
        "ROOT_DIR = '/content/gdrive'\n",
        "drive.mount(ROOT_DIR, force_remount=True)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "cellView": "form",
        "id": "diY9MgnJF2FQ"
      },
      "outputs": [],
      "source": [
        "# @title Code for cochlear implant audio-to-electrodogram processor.\n",
        "\n",
        "from typing import List, Optional, Tuple\n",
        "import dataclasses\n",
        "import numpy as np\n",
        "import scipy.signal as sig\n",
        "\n",
        "# Electrodogram channel order, -1 to convert to base-0 indices.\n",
        "# Within each frame, pulses visit channels in the (1-based) order\n",
        "# 1, 5, 9, 13, 2, 6, ... rather than sequentially.\n",
        "ELGRAM_CHANNEL_ORDER = tuple(\n",
        "    np.array([1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15, 4, 8, 12, 16]) - 1)\n",
        "# Number of electrodogram channels.\n",
        "NUM_CHANNELS = len(ELGRAM_CHANNEL_ORDER)\n",
        "\n",
        "\n",
        "@dataclasses.dataclass\n",
        "class Params:\n",
        "  \"\"\"Processing parameters for the audio-to-electrodogram pipeline.\"\"\"\n",
        "\n",
        "  ## Preprocessing. ------------------------------------------------------------\n",
        "  # Compression ratio applied in preprocessing.\n",
        "  preprocess_compression_ratio: float = 2.0\n",
        "\n",
        "  ## Filterbank. ---------------------------------------------------------------\n",
        "  # Downsample factor between input audio rate and sample rate for envelopes.\n",
        "  hop: int = 16\n",
        "  # Number of frequency channels in the filterbank.\n",
        "  num_channels: int = NUM_CHANNELS\n",
        "  # Center frequency for channel 0.\n",
        "  lowest_cf_hz: float = 350.0\n",
        "  # Number of channels per octave.\n",
        "  channels_per_octave: float = 4.0\n",
        "  # Number of poles in the full gammatone filter.\n",
        "  gtf_order: int = 6\n",
        "  # Q parameter of the gammatone filter.\n",
        "  gtf_q: float = 5.0\n",
        "  # Time constant in seconds for the \"slow\" envelope.\n",
        "  tau_slow: float = 1.0\n",
        "  # Time constant in seconds for the \"medium\" envelope.\n",
        "  tau_medium: float = 0.1\n",
        "  # Time constant in seconds for the \"fast\" envelope.\n",
        "  tau_fast: float = 0.001\n",
        "\n",
        "  ## PCEN compression. ---------------------------------------------------------\n",
        "  # First stage denominator exponent, used on the slow envelope.\n",
        "  pcen_alpha1: float = 0.8\n",
        "  # Second stage denominator exponent, used on the medium envelope.\n",
        "  pcen_alpha2: float = 0.6\n",
        "  # First stage denominator offset, should be tuned proportional to the noise.\n",
        "  pcen_epsilon1: float = 1e-6\n",
        "  # Second stage denominator offset, should be tuned proportional to the noise.\n",
        "  pcen_epsilon2: float = 1e-4\n",
        "  # PCEN zero offset. Energy below this, amplitude above, suppresses noise.\n",
        "  pcen_offset: float = 0.05\n",
        "  # PCEN beta exponent. 1/2 and 1/3 are about equally good.\n",
        "  pcen_beta: float = 1 / 2.0\n",
        "\n",
        "  ## Coring sparsification step. -----------------------------------------------\n",
        "  # Factor for how much of the mean to subtract in the coring sparsifying step.\n",
        "  mean_subtraction_gain: float = 0.25\n",
        "\n",
        "  ## Conversion to electrodogram. ----------------------------------------------\n",
        "  # Log scale factor to compensate for exp(5*EF) in vocoder.\n",
        "  elgram_log_scale: float = 5.0 / 0.4 * 0.5\n",
        "  # Factor for adjusting log offset.\n",
        "  elgram_floor_factor: float = 0.9\n",
        "  # Max electrodogram magnitude, in microamps.\n",
        "  max_microamps: float = 550.0\n",
        "\n",
        "\n",
        "@dataclasses.dataclass\n",
        "class Envelopes:\n",
        "  \"\"\"Energy envelopes of one filterbank channel at three time scales.\n",
        "\n",
        "  Produced by `filterbank`; each array is sampled at the audio sample rate\n",
        "  divided by `Params.hop`.\n",
        "  \"\"\"\n",
        "\n",
        "  # Envelope smoothed with time constant Params.tau_slow.\n",
        "  slow: np.ndarray\n",
        "  # Envelope smoothed with time constant Params.tau_medium.\n",
        "  medium: np.ndarray\n",
        "  # Envelope smoothed with time constant Params.tau_fast.\n",
        "  fast: np.ndarray\n",
        "\n",
        "\n",
        "def desired_audio_sample_rate(params: Optional[Params] = None) -\u003e float:\n",
        "  \"\"\"Computes the desired sample rate for the input audio based on the hop.\n",
        "\n",
        "  NOTE(review): the 18e-6 constant presumably corresponds to an 18 us\n",
        "  electrodogram pulse phase (two phases per pulse) -- confirm against the\n",
        "  vocoder's expected pulse timing.\n",
        "  \"\"\"\n",
        "  p = params if params is not None else Params()\n",
        "  return NUM_CHANNELS / (p.hop * 2 * 18e-6)\n",
        "\n",
        "\n",
        "def audio_to_elgram(audio: np.ndarray,\n",
        "                    sample_rate_hz: float,\n",
        "                    params: Optional[Params] = None) -\u003e np.ndarray:\n",
        "  \"\"\"Converts an audio waveform into an electrodogram.\n",
        "\n",
        "  Convenience wrapper: computes compressed amplitudes first, then maps them\n",
        "  onto the pulse-interleaved electrodogram.\n",
        "\n",
        "  Args:\n",
        "    audio: 1D numpy array of input samples, nominally in [-1, 1].\n",
        "    sample_rate_hz: Audio sample rate in Hz.\n",
        "    params: Optional Params; defaults are used when None.\n",
        "\n",
        "  Returns:\n",
        "    2D numpy array electrodogram with NUM_CHANNELS rows.\n",
        "  \"\"\"\n",
        "  return amplitudes_to_elgram(\n",
        "      audio_to_amplitudes(audio, sample_rate_hz, params), params)\n",
        "\n",
        "\n",
        "def audio_to_amplitudes(audio: np.ndarray,\n",
        "                        sample_rate_hz: float,\n",
        "                        params: Optional[Params] = None):\n",
        "  \"\"\"Processes audio to compressed amplitudes.\n",
        "\n",
        "  Args:\n",
        "    audio: 1D numpy array, input audio waveform. Should be scaled so that\n",
        "      samples have the nominal range [-1, 1].\n",
        "    sample_rate_hz: Sample rate in Hz. Should be\n",
        "      desired_audio_sample_rate(params).\n",
        "    params: Params.\n",
        "\n",
        "  Returns:\n",
        "    2D numpy array with `params.num_channels` rows, normalized so the peak\n",
        "    value is 1.0 (all zeros when the input is silent).\n",
        "  \"\"\"\n",
        "  if params is None:\n",
        "    params = Params()\n",
        "  audio = preprocess(audio, sample_rate_hz, params)\n",
        "  envelopes = filterbank(audio, sample_rate_hz, params)\n",
        "  amplitudes = apply_pcen(envelopes, params)\n",
        "\n",
        "  # Find the mean for each frame.\n",
        "  means = np.mean(amplitudes, axis=0, keepdims=True)\n",
        "  # Enhance and sparsify spectral shape.\n",
        "  amplitudes = np.maximum(0.0,\n",
        "                          amplitudes - means * params.mean_subtraction_gain)\n",
        "  # Normalization for listening tests to use full range without clip.\n",
        "  # Guard against all-zero amplitudes (e.g. silent input), which would\n",
        "  # otherwise divide by zero and fill the result with NaNs.\n",
        "  peak = amplitudes.max()\n",
        "  if peak \u003e 0.0:\n",
        "    amplitudes /= peak\n",
        "  return amplitudes\n",
        "\n",
        "\n",
        "def amplitudes_to_elgram(amplitudes,\n",
        "                         params: Optional[Params] = None) -\u003e np.ndarray:\n",
        "  \"\"\"Converts compressed amplitudes to electrodogram.\n",
        "\n",
        "  Each amplitude sample becomes one biphasic pulse (a positive sample\n",
        "  immediately followed by its negation), with channels time-interleaved\n",
        "  within each frame following ELGRAM_CHANNEL_ORDER.\n",
        "\n",
        "  Args:\n",
        "    amplitudes: 2D numpy array with 16 rows.\n",
        "    params: Params.\n",
        "\n",
        "  Returns:\n",
        "    2D numpy array with 16 rows and 2 * 16 columns per input frame.\n",
        "  \"\"\"\n",
        "  if params is None:\n",
        "    params = Params()\n",
        "\n",
        "  # Log compression, shifted so the maximum maps to 1.0 and floored at 0.\n",
        "  nl_offset = params.elgram_floor_factor * np.exp(-params.elgram_log_scale)\n",
        "  log_amplitudes = np.log(amplitudes + nl_offset) / params.elgram_log_scale\n",
        "  log_amplitudes = np.maximum(0.0, 1.0 + log_amplitudes - log_amplitudes.max())\n",
        "\n",
        "  # Two electrodogram samples (one biphasic pulse) per channel per frame.\n",
        "  ehop = 2 * NUM_CHANNELS\n",
        "  num_frames = log_amplitudes.shape[1]\n",
        "  elgram = np.zeros((NUM_CHANNELS, ehop * num_frames))\n",
        "\n",
        "  for slot, channel in enumerate(ELGRAM_CHANNEL_ORDER):\n",
        "    microamps = params.max_microamps * log_amplitudes[channel]\n",
        "    elgram[channel, 2 * slot::ehop] = microamps\n",
        "    elgram[channel, 2 * slot + 1::ehop] = -microamps\n",
        "\n",
        "  return elgram\n",
        "\n",
        "\n",
        "def preprocess(audio: np.ndarray,\n",
        "               unused_sample_rate_hz: float,\n",
        "               params: Optional[Params] = None) -\u003e np.ndarray:\n",
        "  \"\"\"Preprocesses input audio with preemphasis filtering and compression.\"\"\"\n",
        "  if params is None:\n",
        "    params = Params()\n",
        "\n",
        "  # First-order preemphasis: y[n] = x[n] - 0.8 * x[n-1].\n",
        "  preemphasized = sig.lfilter([1.0, -0.8], [1.0], audio)\n",
        "\n",
        "  # NOTE: We take the mean of the whole signal to simulate the effect of a\n",
        "  # slow-acting compressor. Some input recordings are too short (or start\n",
        "  # with pure silence) to be able to initialize causally.\n",
        "  # mean(x**4)**(1/4) is an RMS-like average that uses the 4th power instead\n",
        "  # of the 2nd so the result is influenced more by the peaks.\n",
        "  peak_power = 1e-12 + np.mean(preemphasized**4)\n",
        "  agc_exponent = -1 / (4 * params.preprocess_compression_ratio)\n",
        "  return preemphasized * peak_power**agc_exponent\n",
        "\n",
        "\n",
        "def design_gammatone_filter_stage(\n",
        "    center_frequency_hz: float,\n",
        "    sample_rate_hz: float,\n",
        "    order: int = 6,\n",
        "    q: float = 5.0) -\u003e Tuple[np.ndarray, np.ndarray]:\n",
        "  \"\"\"Designs complex gammatone filter (GTF).\n",
        "\n",
        "  Args:\n",
        "    center_frequency_hz: Passband center frequency in Hz.\n",
        "    sample_rate_hz: Sample rate in Hz.\n",
        "    order: Integer, the filter order.\n",
        "    q: Filter Q value.\n",
        "\n",
        "  Returns:\n",
        "    (gtf_numer, gtf_denom) filter coefficients for one stage. This stage filter\n",
        "    should be applied `order` times to implement the full GTF filter.\n",
        "  \"\"\"\n",
        "  zeta = np.sqrt(order) / (2.0 * q)\n",
        "  omega = 2.0 * np.pi * center_frequency_hz\n",
        "  T = 1.0 / sample_rate_hz  # pylint: disable=invalid-name\n",
        "  s_pole = -zeta * omega + 1j * omega\n",
        "  z_pole = np.exp(s_pole * T)\n",
        "  # Evaluate abs(1/(z - z_pole)) at z = exp(i omega / sample_rate_hz).\n",
        "  stage_gain = 1.0 / np.abs(np.exp(1j * omega / sample_rate_hz) - z_pole)\n",
        "\n",
        "  gtf_numer = np.array([1.0, 1.0]) / (2 * stage_gain)\n",
        "  gtf_denom = np.array([1.0, -z_pole])\n",
        "  return gtf_numer, gtf_denom\n",
        "\n",
        "\n",
        "def energy_envelope(signal: np.ndarray,\n",
        "                    sample_rate_hz: float,\n",
        "                    time_constant_s: float,\n",
        "                    hop: int = 16,\n",
        "                    hot_initialize: bool = True) -\u003e np.ndarray:\n",
        "  \"\"\"Extracts energy envelope.\n",
        "\n",
        "  Args:\n",
        "    signal: Possibly complex-valued input signal.\n",
        "    sample_rate_hz: Sample rate in Hz.\n",
        "    time_constant_s: Positive float, smoothing time constant is seconds.\n",
        "    hop: Positive integer, downsampling factor.\n",
        "    hot_initialize: If true, \"hot\" initialize smoothing filter based on the\n",
        "      mean(energy**4). If false, use zero initialization.\n",
        "\n",
        "  Returns:\n",
        "    Numpy array, the envelope at `sample_rate_hz / hop`.\n",
        "  \"\"\"\n",
        "  # 2-pole smoother for anti-aliasing before downsampling by hop.\n",
        "  tau_energy = 0.7 * hop  # Plausible corner for downsampling; in samples.\n",
        "  lpf_denom = [1, -2.0 * np.exp(-1.0 / tau_energy), np.exp(-2.0 / tau_energy)]\n",
        "  # For unit gain at DC, null at Nyquist.\n",
        "  lpf_numer = np.ones(2) * 0.5 * np.sum(lpf_denom)\n",
        "  energy = sig.lfilter(lpf_numer, lpf_denom, np.abs(signal)**2)[::hop]\n",
        "\n",
        "  T_hop = hop / sample_rate_hz  # pylint: disable=invalid-name\n",
        "  s_pole = -1.0 / time_constant_s\n",
        "  # Map s = -1/tau to z = exp(s*T).\n",
        "  z_pole = np.exp(s_pole * T_hop)\n",
        "  state = sig.lfilter_zi([1 - z_pole], [1, -z_pole])\n",
        "\n",
        "  if hot_initialize:\n",
        "    state *= (np.mean(energy**4)**0.25)  # Average focused on peaks.\n",
        "  else:\n",
        "    state *= 0.0\n",
        "\n",
        "  envelope, _ = sig.lfilter([1 - z_pole], [1, -z_pole], energy, zi=state)\n",
        "  return envelope\n",
        "\n",
        "\n",
        "def filterbank(audio: np.ndarray,\n",
        "               sample_rate_hz: float,\n",
        "               params: Optional[Params] = None) -\u003e List[Envelopes]:\n",
        "  \"\"\"Runs GTF filterbank on audio, returning a collection of energy envelopes.\n",
        "\n",
        "  Args:\n",
        "    audio: Numpy array, audio to filter.\n",
        "    sample_rate_hz: Sample rate in Hz.\n",
        "    params: Params.\n",
        "\n",
        "  Returns:\n",
        "    A list of `params.num_channels` Envelopes, in which each envelope has\n",
        "    sample rate `sample_rate_hz / params.hop`.\n",
        "  \"\"\"\n",
        "  if params is None:\n",
        "    params = Params()\n",
        "\n",
        "  envelopes = []\n",
        "  for channel in range(params.num_channels):\n",
        "    # Center frequencies are spaced geometrically upward from lowest_cf_hz.\n",
        "    cf_hz = params.lowest_cf_hz * 2**(channel / params.channels_per_octave)\n",
        "    numer, denom = design_gammatone_filter_stage(\n",
        "        center_frequency_hz=cf_hz,\n",
        "        sample_rate_hz=sample_rate_hz,\n",
        "        order=params.gtf_order,\n",
        "        q=params.gtf_q)\n",
        "\n",
        "    # Cascade of identical stages; energy envelopes are tapped after stages\n",
        "    # 1, 2, and gtf_order, each with its own smoothing time constant.\n",
        "    filtered = sig.lfilter(numer, denom, audio)  # Stage 0.\n",
        "    slow_env = energy_envelope(filtered, sample_rate_hz, params.tau_slow,\n",
        "                               params.hop)\n",
        "\n",
        "    filtered = sig.lfilter(numer, denom, filtered)  # Stage 1.\n",
        "    medium_env = energy_envelope(filtered, sample_rate_hz, params.tau_medium,\n",
        "                                 params.hop)\n",
        "\n",
        "    for _ in range(2, params.gtf_order):  # Remaining stages.\n",
        "      filtered = sig.lfilter(numer, denom, filtered)\n",
        "    fast_env = energy_envelope(filtered, sample_rate_hz, params.tau_fast,\n",
        "                               params.hop, hot_initialize=False)\n",
        "\n",
        "    envelopes.append(\n",
        "        Envelopes(slow=slow_env, medium=medium_env, fast=fast_env))\n",
        "\n",
        "  return envelopes\n",
        "\n",
        "\n",
        "def apply_pcen(envelopes: List[Envelopes],\n",
        "               params: Optional[Params] = None) -\u003e np.ndarray:\n",
        "  \"\"\"Applies PCEN-like AGC to fast envelopes to compute compressed amplitudes.\n",
        "\n",
        "  See reference:\n",
        "    Y. Wang, P. Getreuer, T. Hughes, R. F. Lyon, and R. A. Saurous,\n",
        "    \u201cTrainable frontend for robust and far-field keyword spotting,\u201d in Proc.\n",
        "    IEEE ICASSP, 2017.\n",
        "\n",
        "  Args:\n",
        "    envelopes: List of num_channels Envelopes.\n",
        "    params: Params.\n",
        "\n",
        "  Returns:\n",
        "    2D numpy array with num_channels rows.\n",
        "  \"\"\"\n",
        "  if params is None:\n",
        "    params = Params()\n",
        "\n",
        "  num_frames = len(envelopes[0].slow)\n",
        "  amplitudes = np.empty((len(envelopes), num_frames))\n",
        "\n",
        "  for channel, env in enumerate(envelopes):\n",
        "    # Two-stage AGC: the slow envelope sets the first-stage gain; the medium\n",
        "    # envelope (scaled by that gain) sets the second-stage gain.\n",
        "    slow_gain = (env.slow + params.pcen_epsilon1)**(-params.pcen_alpha1)\n",
        "    medium_gain = (\n",
        "        env.medium * slow_gain + params.pcen_epsilon2)**(-params.pcen_alpha2)\n",
        "    # Root compression with a zero offset so that silence maps to zero.\n",
        "    compressed = (env.fast * slow_gain * medium_gain +\n",
        "                  params.pcen_offset)**params.pcen_beta\n",
        "    amplitudes[channel] = compressed - params.pcen_offset**params.pcen_beta\n",
        "\n",
        "  return amplitudes\n",
        "  "
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "cellView": "form",
        "id": "8MHG1cztNwgB"
      },
      "outputs": [],
      "source": [
        "# @title File read/write functions.\n",
        "def write_wav(filename, waveform, sample_rate=16000):\n",
        "  \"\"\"Write a audio waveform (float numpy array) as .wav file.\"\"\"\n",
        "  wavfile.write(\n",
        "      filename, sample_rate,\n",
        "      np.round(np.clip(waveform * 2**15, -32768, 32767)).astype(np.int16))\n",
        "\n",
        "def read_wav(wav_path, sample_rate=16000, channel=None):\n",
        "  \"\"\"Read a wav file as numpy array.\n",
        "\n",
        "  Args:\n",
        "    wav_path: String, path to .wav file.\n",
        "    sample_rate: Int, sample rate for audio to be converted to.\n",
        "    channel: Int, option to select a particular channel for stereo audio.\n",
        "\n",
        "  Returns:\n",
        "    Audio as float numpy array.\n",
        "  \"\"\"\n",
        "  sr_read, x = wavfile.read(wav_path)\n",
        "  x = x.astype(np.float32) / (2**15)\n",
        "\n",
        "  if sr_read != sample_rate:\n",
        "    x = resample(x, int(round((float(sample_rate) / sr_read) * len(x))))\n",
        "  if x.ndim \u003e 1 and channel is not None:\n",
        "    return x[:, channel]\n",
        "  return x\n",
        "\n",
        "\n",
        "def store_elgram(results, elgram_output_dir):\n",
        "  \"\"\"Saves an electrodogram as a sparse matrix in a .mat file.\n",
        "\n",
        "  The output file name is the source wav's base name with a .mat extension,\n",
        "  placed under elgram_output_dir (created if needed).\n",
        "\n",
        "  Args:\n",
        "    results: Dict with 'sourceName' (input wav path) and 'elGram' (2D array).\n",
        "    elgram_output_dir: Directory where the .mat file is stored.\n",
        "  \"\"\"\n",
        "  stem = os.path.splitext(os.path.basename(results['sourceName']))[0]\n",
        "  elgram_output_path = os.path.join(elgram_output_dir, stem + '.mat')\n",
        "  gfile.makedirs(os.path.dirname(elgram_output_path))\n",
        "\n",
        "  # Electrodograms are mostly zeros, so store them sparsely.\n",
        "  sparse_mat = sparse.csr_matrix(results['elGram'])\n",
        "\n",
        "  # savemat writes to a local temp file first; the result is then copied to\n",
        "  # the (possibly remote, e.g. Drive) destination via gfile.\n",
        "  local_temp_path = os.path.join(tempfile.mkdtemp(),\n",
        "                                 os.path.basename(elgram_output_path))\n",
        "  io.savemat(local_temp_path, {'elData': sparse_mat})\n",
        "  gfile.copy(local_temp_path, elgram_output_path, overwrite=True)\n",
        "\n",
        "\n",
        "def ci_process_file_elgram(wav_filename: str):\n",
        "  \"\"\"Computes an electrodogram for a wav file.\n",
        "\n",
        "  Reads the wav at the sample rate implied by the hop, converts it to\n",
        "  compressed amplitudes, then maps those to an electrodogram.\n",
        "\n",
        "  Args:\n",
        "    wav_filename: string name of the wav file\n",
        "\n",
        "  Returns:\n",
        "    elgram: a 2D float array electrodogram with NUM_CHANNELS rows\n",
        "  \"\"\"\n",
        "  params = Params(\n",
        "      hop=16,  # can be 12 or 16 or 24 or various other possibilities; 16 best?\n",
        "  )\n",
        "  fs = desired_audio_sample_rate(params)\n",
        "  audio = read_wav(wav_filename, sample_rate=fs)\n",
        "\n",
        "  amplitudes = audio_to_amplitudes(audio, fs, params)\n",
        "  return amplitudes_to_elgram(amplitudes, params)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "HD-KvsP2NdhD"
      },
      "outputs": [],
      "source": [
        "#@title run audio-to-electrodogram\n",
        "# Batch-processes every wav matching audio_clip_subpath under PATH_AUDIO\n",
        "# and stores each electrodogram as a sparse .mat file under PATH_ELGRAM2.\n",
        "audio_clip_subpath = '*.wav'  #@param\n",
        "\n",
        "# NOTE(review): hardcoded Drive path -- edit to point at your own folder.\n",
        "PATH_AUDIO = 'gdrive/My Drive/cihack_audio_enhanced_mixed_variable'  # E.g. gdrive/My Drive/cihack_audio_enhanced_mixed_variable\n",
        "\n",
        "PATH_ELGRAM2 = PATH_AUDIO + '_elgram2'\n",
        "\n",
        "wavs = gfile.glob(os.path.join(PATH_AUDIO, audio_clip_subpath))\n",
        "for wav in wavs:\n",
        "  elgram = ci_process_file_elgram(wav)\n",
        "  results = {'sourceName': wav, 'elGram': elgram}\n",
        "  store_elgram(results, PATH_ELGRAM2)"
      ]
    }
  ],
  "metadata": {
    "colab": {
      "collapsed_sections": [],
      "name": "audio_to_electrodogram.ipynb",
      "provenance": [
        {
          "file_id": "14kivuO4t4WufSaDsobGEa1rPFQpKwf74",
          "timestamp": 1614195832534
        }
      ]
    },
    "kernelspec": {
      "display_name": "Python 3",
      "name": "python3"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}
