{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "name": "augly_benchmarking.ipynb",
      "provenance": [],
      "collapsed_sections": []
    },
    "kernelspec": {
      "display_name": "Python 3",
      "name": "python3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "cells": [
    {
      "cell_type": "code",
      "metadata": {
        "id": "pgajyYb5NnjD"
      },
      "source": [
        "# Note: restart runtime after these installs before running the augmentations\n",
        "!pip install -U augly[av]\n",
        "!sudo apt-get install python3-magic\n",
        "!pip install pydub\n",
        "!pip install audiomentations\n",
        "!pip install torchaudio\n",
        "!sudo pip install git+https://github.com/okankop/vidaug\n",
        "!pip install moviepy\n",
        "!pip install av\n",
        "!pip install pytorchvideo\n",
        "!pip install -U albumentations\n",
        "!pip install -U torchvision\n",
        "!pip install -U imgaug\n",
        "!pip install torch==1.10.0+cu111 -f https://download.pytorch.org/whl/cu111/torch_stable.html\n",
        "!pip install -U imagecorruptions\n",
        "!pip install numpy requests nlpaug\n",
        "!pip install textattack[tensorflow]\n",
        "!pip install textflint"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "ZBm7XDPvNvIz"
      },
      "source": [
        "import cProfile\n",
        "import inspect\n",
        "import os\n",
        "import pstats\n",
        "import subprocess\n",
        "import tempfile\n",
        "import time\n",
        "import torch\n",
        "import numpy as np\n",
        "import pandas as pd\n",
        "from collections import defaultdict\n",
        "from copy import deepcopy\n",
        "from PIL import Image\n",
        "\n",
        "import augly.audio as audaugs\n",
        "import augly.audio.utils as aud_utils\n",
        "import augly.image as imaugs\n",
        "import augly.text as textaugs\n",
        "import augly.video as vidaugs\n",
        "import augly.video.helpers as vid_helpers\n",
        "\n",
        "from moviepy.editor import VideoFileClip\n",
        "from pydub import AudioSegment\n",
        "from textattack.shared import AttackedText\n",
        "from textflint.input.component.sample.ut_sample import UTSample\n",
        "from torchvision.io import video as vd"
      ],
      "execution_count": 24,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "ORkVWxKZOHbH"
      },
      "source": [
        "# Change this to \"audio\" in order to benchmark the audio augs, etc.\n",
        "modality = \"audio\""
      ],
      "execution_count": 34,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "FQFaE5qVQQOh"
      },
      "source": [
        "modules = {\n",
        "    \"audio\": audaugs,\n",
        "    \"image\": imaugs,\n",
        "    \"text\": textaugs,\n",
        "    \"video\": vidaugs,\n",
        "}\n",
        "\n",
        "lib_names = {\n",
        "    \"audio\": [\"AugLy\", \"pydub\", \"torchaudio\", \"audiomentations\"],\n",
        "    \"image\": [\"AugLy\", \"imgaug\", \"torchvision\", \"albumentations\"],\n",
        "    \"text\": [\"AugLy\", \"nlpaug\", \"textattack\", \"textflint\"],\n",
        "    \"video\": [\"AugLy\", \"moviepy\", \"pytorchvideo\", \"vidaug\"],\n",
        "}"
      ],
      "execution_count": 35,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "xUdmZ1mOnaCE"
      },
      "source": [
        "# Create datapoints to test on\n",
        "num_dp = 10\n",
        "tmpdir = \"/tmp/\"\n",
        "data = defaultdict(list)\n",
        "\n",
        "# Create audio\n",
        "sample_rate = 44100\n",
        "duration_s = 10\n",
        "channels = 2\n",
        "num_samples = int(sample_rate * duration_s)\n",
        "for i in range(num_dp):\n",
        "    dp_np = np.random.standard_normal((channels, num_samples))\n",
        "    filepath = os.path.join(tmpdir, f\"audio_{i}.wav\")\n",
        "    aud_utils.ret_and_save_audio(dp_np, filepath, sample_rate)\n",
        "    data[\"audio\"].append([dp_np, sample_rate])\n",
        "    data[\"audio_pydub\"].append([AudioSegment.from_file(filepath)])\n",
        "    data[\"audio_torchaudio\"].append([torch.Tensor(dp_np), sample_rate])\n",
        "\n",
        "# Create images\n",
        "width, height, channels = 1920, 1080, 3\n",
        "for i in range(num_dp):\n",
        "    dp_np = (\n",
        "        np.random.rand(width, height, channels) * 255\n",
        "    ).astype(\"uint8\")\n",
        "    im = Image.fromarray(dp_np).convert(\"RGBA\")\n",
        "    data[\"image\"].append([im])\n",
        "    data[\"image_imgaug\"].append([dp_np])\n",
        "    data[\"image_albumentations\"].append([dp_np])\n",
        "\n",
        "# Create text\n",
        "data[\"text\"].extend(\n",
        "    [\n",
        "        [\"Hello! How are you today?\"],\n",
        "        [\n",
        "            \"The decision to move the photocopier business was \"\n",
        "            \"done for privacy reasons.\"\n",
        "        ],\n",
        "        [\"I am twenty years old and have two brothers\"],\n",
        "        [\n",
        "            \"The National Weather Service is calling for a string \"\n",
        "            \"of cold, wet storms coming to Northern California for \"\n",
        "            \"the rest of the week.\"\n",
        "        ],\n",
        "        [\"I can not believe he said that!\"],\n",
        "        [\"The victim was less than a quarter century old\"],\n",
        "        [\"She likes to eat a croissant and coffee for breakfast\"],\n",
        "        [\"knock knock who's there\"],\n",
        "        [\"Adam lives with his mother, Amanda, and his brother, Lee\"],\n",
        "        [\n",
        "            \"It almost never rains here in California. I do not even \"\n",
        "            \"remember the last time I saw snow!\"\n",
        "        ],\n",
        "    ],\n",
        ")\n",
        "for dp in data[\"text\"]:\n",
        "    data[\"text_textattack\"].append([AttackedText(dp[0])])\n",
        "    data[\"text_textflint\"].append([UTSample({\"x\": dp[0]})])\n",
        "\n",
        "# used for reading videos using torchvision since we need to supply\n",
        "# length as an arg for read_video\n",
        "def get_length(filename):\n",
        "    result = subprocess.run(\n",
        "        [\n",
        "            \"ffprobe\", \"-v\", \"error\", \"-show_entries\",\n",
        "            \"format=duration\", \"-of\",\n",
        "            \"default=noprint_wrappers=1:nokey=1\", filename\n",
        "        ],\n",
        "        stdout=subprocess.PIPE,\n",
        "        stderr=subprocess.STDOUT)\n",
        "    return float(result.stdout)\n",
        "\n",
        "# Create videos\n",
        "for i in range(num_dp):\n",
        "    filepath = os.path.join(tmpdir, f\"video_{i}.mp4\")\n",
        "    vid_helpers.create_color_video(\n",
        "        filepath,\n",
        "        duration_s,\n",
        "        height,\n",
        "        width,\n",
        "        color=(\n",
        "            np.random.randint(0, 256),\n",
        "            np.random.randint(0, 256),\n",
        "            np.random.randint(0, 256),\n",
        "        ),\n",
        "    )\n",
        "    vidaugs.audio_swap(\n",
        "        filepath,\n",
        "        audio_path=os.path.join(tmpdir, f\"audio_{i}.wav\"),\n",
        "    )\n",
        "    data[\"video\"].append([filepath])\n",
        "    data[\"video_moviepy\"].append([VideoFileClip(filepath)])\n",
        "    length = get_length(filepath)\n",
        "    video_tensor, *_ = vd.read_video(filepath, 0, length, \"sec\")\n",
        "    data[\"video_pytorchvideo\"].append([video_tensor.permute(0, 3, 1, 2)])\n",
        "    data[\"video_vidaug\"].append([video_tensor.detach().cpu().numpy()])"
      ],
      "execution_count": 36,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "ZptN-SZ6NMTf"
      },
      "source": [
        "# For many augmentations we will use the default kwargs when calling\n",
        "# them, but for some we want to override the defaults, so let's define\n",
        "# those here\n",
        "transforms_nondefault_kwargs = {\n",
        "    \"audio\": {\n",
        "        \"ChangeVolume\": {\"volume_db\": 5.0},\n",
        "        \"Clip\": {\"offset_factor\": 0.2, \"duration_factor\": 0.5},\n",
        "        \"InsertInBackground\": {\"offset_factor\": 0.6},\n",
        "    },\n",
        "    \"image\": {\n",
        "        \"Brightness\": {\"factor\": 2.0},\n",
        "        \"ColorJitter\": {\n",
        "            \"brightness_factor\": 0.5,\n",
        "            \"contrast_factor\": 2.0,\n",
        "            \"saturation_factor\": 0.3,\n",
        "        },\n",
        "        \"Contrast\": {\"factor\": 0.6},\n",
        "        \"ConvertColor\": {\"mode\": \"P\"},\n",
        "        \"MaskedComposite\": {\n",
        "            \"transform_function\": imaugs.Brightness(factor=0.2),\n",
        "            \"mask\": None,\n",
        "        },\n",
        "        \"Opacity\": {\"level\": 0.5},\n",
        "        \"OverlayImage\": {\n",
        "            \"overlay\": Image.fromarray(\n",
        "                (\n",
        "                    np.random.rand(width, height, channels) * 255\n",
        "                ).astype(\"uint8\")\n",
        "            ).convert(\"RGB\"),\n",
        "            \"opacity\": 0.7\n",
        "        },\n",
        "        \"OverlayOntoBackgroundImage\": {\n",
        "            \"background_image\": Image.fromarray(\n",
        "                (\n",
        "                    np.random.rand(width, height, channels) * 255\n",
        "                ).astype(\"uint8\")\n",
        "            ).convert(\"RGB\"),\n",
        "            \"overlay_size\": 0.4,\n",
        "        },\n",
        "        \"Pixelization\": {\"ratio\": 0.3},\n",
        "        \"Resize\": {\"width\": 1300, \"height\": 1200},\n",
        "        \"Saturation\": {\"factor\": 3.0},\n",
        "        \"Sharpen\": {\"factor\": 4.0},\n",
        "        \"ShufflePixels\": {\"factor\": 0.5},\n",
        "    },\n",
        "    \"text\": {\n",
        "        \"ChangeCase\": {\"cadence\": 3.0},\n",
        "        \"Contractions\": {\"aug_p\": 1.0},\n",
        "        \"InsertPunctuationChars\": {\"cadence\": 2.5, \"granularity\": \"word\"},\n",
        "        \"InsertWhitespaceChars\": {\"cadence\": 2.5},\n",
        "        \"InsertZeroWidthChars\": {\"cadence\": 2.5},\n",
        "        \"ReplaceBidirectional\": {\"granularity\": \"word\"},\n",
        "        \"ReplaceFunFonts\": {\"granularity\": \"word\", \"vary_fonts\": True},\n",
        "        \"SwapGenderedWords\": {\"aug_word_p\": 1.0},\n",
        "    },\n",
        "    \"video\": {\n",
        "        \"AudioSwap\": {\n",
        "            \"audio_path\": os.path.join(tmpdir, \"audio_0.wav\"),\n",
        "            \"offset\": 0.5,\n",
        "        },\n",
        "        \"AugmentAudio\": {\n",
        "            \"audio_aug_function\": audaugs.normalize,\n",
        "        },\n",
        "        \"BlendVideos\": {\"overlay_path\": data[\"video\"][-1][0]},\n",
        "        \"ChangeVideoSpeed\": {\"factor\": 2.0},\n",
        "        \"ColorJitter\": {\n",
        "            \"brightness_factor\": 0.5,\n",
        "            \"contrast_factor\": -10.0,\n",
        "            \"saturation_factor\": 2.0,\n",
        "        },\n",
        "        \"Concat\": {\n",
        "            \"other_video_paths\": [d[0] for d in data[\"video\"][-2:]],\n",
        "        },\n",
        "        \"HStack\": {\"second_video_path\": data[\"video\"][-1][0]},\n",
        "        \"InsertInBackground\": {\"offset_factor\": 0.3},\n",
        "        \"Loop\": {\"num_loops\": 2},\n",
        "        \"Overlay\": {\"overlay_path\": data[\"video\"][2][0]},\n",
        "        \"OverlayOntoBackgroundVideo\": {\n",
        "            \"background_path\": data[\"video\"][1][0],\n",
        "        },\n",
        "        \"OverlayShapes\": {\"num_shapes\": 3},\n",
        "        \"Pixelization\": {\"ratio\": 0.4},\n",
        "        \"ReplaceWithColorFrames\": {\n",
        "            \"offset_factor\": 0.2, \"duration_factor\": 0.4\n",
        "        },\n",
        "        \"Resize\": {\"height\": 1300, \"width\": 1200},\n",
        "        \"Shift\": {\"x_factor\": 0.1, \"y_factor\": 0.6},\n",
        "        \"TimeCrop\": {\"offset_factor\": 0.2, \"duration_factor\": 0.7},\n",
        "        \"Trim\": {\"start\": 2.0, \"end\": 7.3},\n",
        "        \"VStack\": {\"second_video_path\": data[\"video\"][0][0]},\n",
        "    },\n",
        "}"
      ],
      "execution_count": 37,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# Map the AugLy augmentation names to analogues in other libraries for\n",
        "# the modality we're benchmarking\n",
        "other_lib_funcs = {}\n",
        "if modality == \"audio\":\n",
        "    import audiomentations\n",
        "    import torchaudio.backend\n",
        "    from torchaudio.sox_effects import apply_effects_tensor\n",
        "\n",
        "    other_lib_funcs = {\n",
        "        \"AddBackgroundNoise\": {\n",
        "            \"audiomentations\": audiomentations.AddGaussianNoise(),\n",
        "        },\n",
        "        \"ChangeVolume\": {\n",
        "            \"audiomentations\": audiomentations.Gain(),\n",
        "            \"pydub\": lambda audio_seg: audio_seg.apply_gain,\n",
        "            \"torchaudio\": lambda t, sr: apply_effects_tensor(\n",
        "                t, sr, [[\"vol\", \"5.0\"]]\n",
        "            ),\n",
        "        },\n",
        "        \"Clip\": {\n",
        "            \"audiomentations\": audiomentations.Clip(),\n",
        "        },\n",
        "        \"HighPassFilter\": {\n",
        "            \"pydub\": lambda audio_seg: audio_seg.high_pass_filter,\n",
        "            \"audiomentations\": audiomentations.HighPassFilter(),\n",
        "            \"torchaudio\": lambda t, sr: apply_effects_tensor(\n",
        "                t, sr, [[\"highpass\", \"3000\"]]\n",
        "            ),\n",
        "        },\n",
        "        \"LowPassFilter\": {\n",
        "            \"pydub\": lambda audio_seg: audio_seg.low_pass_filter,\n",
        "            \"audiomentations\": audiomentations.LowPassFilter(),\n",
        "            \"torchaudio\": lambda t, sr: apply_effects_tensor(\n",
        "                t, sr, [[\"lowpass\", \"500\"]]\n",
        "            ),\n",
        "        },\n",
        "        \"Normalize\": {\n",
        "            \"pydub\": lambda audio_seg: audio_seg.normalize,\n",
        "            \"audiomentations\": audiomentations.Normalize(),\n",
        "            \"torchaudio\": lambda t, sr: apply_effects_tensor(\n",
        "                t, sr, [[\"norm\", \"-n\"]]\n",
        "            ),\n",
        "        },\n",
        "        \"PitchShift\": {\n",
        "            \"audiomentations\": audiomentations.PitchShift(),\n",
        "            \"torchaudio\": lambda t, sr: apply_effects_tensor(\n",
        "                t, sr, [[\"pitch\", \"1.0\"]]\n",
        "            ),\n",
        "        },\n",
        "        \"Reverb\": {\n",
        "            \"torchaudio\": lambda t, sr: apply_effects_tensor(\n",
        "                t, sr, [[\"reverb\", \"50.0\"]]\n",
        "            ),\n",
        "        },\n",
        "        \"Speed\": {\n",
        "            \"pydub\": lambda audio_seg: audio_seg.speedup,\n",
        "        },\n",
        "        \"TimeStretch\": {\n",
        "            \"audiomentations\": audiomentations.TimeStretch(),\n",
        "            \"torchaudio\": lambda t, sr: apply_effects_tensor(\n",
        "                t, sr, [[\"stretch\", \"1.5\"]]\n",
        "            ),\n",
        "        },\n",
        "        \"ToMono\": {\n",
        "            \"torchaudio\": lambda t, sr: apply_effects_tensor(\n",
        "                t, sr, [[\"channels\", \"1\"]]\n",
        "            ),\n",
        "        },\n",
        "    }\n",
        "elif modality == \"image\":\n",
        "    from albumentations.augmentations import transforms as alb\n",
        "    from albumentations.augmentations.crops import transforms as alb_crops\n",
        "    from albumentations.augmentations.geometric import (\n",
        "        resize as alb_resize,\n",
        "        rotate as alb_rotate,\n",
        "        transforms as alb_geo,\n",
        "    )\n",
        "    from albumentations.core import transforms_interface as alb_core\n",
        "    from imgaug import augmenters as imgaug\n",
        "    from torchvision import transforms as torchvision\n",
        "\n",
        "    other_lib_funcs = {\n",
        "        \"ApplyLambda\": {\n",
        "            \"albumentations\": alb_core.NoOp(),\n",
        "        },\n",
        "        \"Blur\": {\n",
        "            \"imgaug\": imgaug.blur.GaussianBlur(sigma=2),\n",
        "            \"torchvision\": torchvision.GaussianBlur(kernel_size=1),\n",
        "            \"albumentations\": alb.Blur(p=1.0),\n",
        "        },\n",
        "        \"Brightness\": {\n",
        "            \"torchvision\": torchvision.ColorJitter(brightness=2.0),\n",
        "            \"albumentations\": alb.RandomBrightness(p=1.0),\n",
        "        },\n",
        "        \"ColorJitter\": {\n",
        "            \"imgaug\": imgaug.color.AddToHueAndSaturation(value=20),\n",
        "            \"torchvision\": torchvision.ColorJitter(0.5, 0.2, 0.3),\n",
        "            \"albumentations\": alb.ColorJitter(0.5, 0.2, 0.3),\n",
        "        },\n",
        "        \"Contrast\": {\n",
        "            \"imgaug\": imgaug.contrast.LinearContrast(),\n",
        "            \"torchvision\": torchvision.ColorJitter(contrast=2.0),\n",
        "        },\n",
        "        \"Crop\": {\n",
        "            \"imgaug\": imgaug.size.Crop(percent=0.25),\n",
        "            \"torchvision\": torchvision.CenterCrop((960, 540)),\n",
        "            \"albumentations\": alb_crops.Crop(480, 270),\n",
        "        },\n",
        "        \"EncodingQuality\": {\n",
        "            \"imgaug\": imgaug.arithmetic.JpegCompression(compression=50),\n",
        "            \"albumentations\": alb.Downscale(),\n",
        "        },\n",
        "        \"Grayscale\": {\n",
        "            \"imgaug\": imgaug.color.Grayscale(),\n",
        "            \"torchvision\": torchvision.Grayscale(),\n",
        "            \"albumentations\": alb.ToGray(),\n",
        "        },\n",
        "        \"HFlip\": {\n",
        "            \"imgaug\": imgaug.flip.HorizontalFlip(),\n",
        "            \"torchvision\": torchvision.RandomHorizontalFlip(p=1.0),\n",
        "            \"albumentations\": alb.HorizontalFlip(),\n",
        "        },\n",
        "        \"Pad\": {\n",
        "            \"imgaug\": imgaug.size.Pad(percent=0.25),\n",
        "            \"torchvision\": torchvision.Pad((480, 270)),\n",
        "            \"albumentations\": alb.PadIfNeeded(\n",
        "                min_height=1350, min_width=2400\n",
        "             ),\n",
        "        },\n",
        "        \"PerspectiveTransform\": {\n",
        "            \"imgaug\": imgaug.geometric.PerspectiveTransform(scale=0.05),\n",
        "            \"torchvision\": torchvision.RandomPerspective(p=1.0),\n",
        "            \"albumentations\": alb_geo.Perspective(),\n",
        "        },\n",
        "        \"Pixelization\": {\n",
        "            \"imgaug\": imgaug.imgcorruptlike.Pixelate(severity=3),\n",
        "        },\n",
        "        \"Resize\": {\n",
        "            \"imgaug\": imgaug.size.Resize(size=(1300, 1200)),\n",
        "            \"torchvision\": torchvision.Resize((1300, 1200)),\n",
        "            \"albumentations\": alb_resize.Resize(1200, 1300),\n",
        "        },\n",
        "        \"Rotate\": {\n",
        "            \"imgaug\": imgaug.geometric.Rotate(),\n",
        "            \"torchvision\": torchvision.RandomRotation(degrees=40),\n",
        "            \"albumentations\": alb_rotate.Rotate(),\n",
        "        },\n",
        "        \"Saturation\": {\n",
        "            \"imgaug\": imgaug.imgcorruptlike.Saturate(severity=3),\n",
        "            \"torchvision\": torchvision.ColorJitter(saturation=2.0),\n",
        "            \"albumentations\": alb.HueSaturationValue(),\n",
        "        },\n",
        "        \"Sharpen\": {\n",
        "            \"imgaug\": imgaug.convolutional.Sharpen(\n",
        "                alpha=1.0, lightness=1.0\n",
        "            ),\n",
        "            \"torchvision\": torchvision.RandomAdjustSharpness(2, p=1.0),\n",
        "            \"albumentations\": alb.Sharpen(),\n",
        "        },\n",
        "        \"VFlip\": {\n",
        "            \"imgaug\": imgaug.flip.VerticalFlip(),\n",
        "            \"torchvision\": torchvision.RandomVerticalFlip(p=1.0),\n",
        "            \"albumentations\": alb.VerticalFlip(),\n",
        "        },\n",
        "    }\n",
        "elif modality == \"text\":\n",
        "    from textattack import augmentation as textattack_aug\n",
        "    from textattack.transformations import (\n",
        "        word_swaps as textattack_ws,\n",
        "        word_merges as textattack_wm,\n",
        "    )\n",
        "    from nlpaug.augmenter import char as nlpaug_c, word as nlpaug_w\n",
        "    from textflint.generation.transformation import UT as textflint\n",
        "\n",
        "    other_lib_funcs = {\n",
        "        \"ChangeCase\": {\n",
        "            \"textflint\": textflint.WordCase().transform,\n",
        "        },\n",
        "        \"Contractions\": {\n",
        "            \"textattack\": textattack_ws.WordSwapContract(),\n",
        "            \"textflint\": textflint.Contraction().transform,\n",
        "        },\n",
        "        \"InsertPunctuationChars\": {\n",
        "            \"textattack\": (\n",
        "                textattack_ws.WordSwapRandomCharacterInsertion()\n",
        "            ),\n",
        "            \"textflint\": textflint.Punctuation().transform,\n",
        "        },\n",
        "        \"ReplaceSimilarChars\": {\n",
        "            \"nlpaug\": nlpaug_c.ocr.OcrAug().augment,\n",
        "            \"textattack\": textattack_ws.WordSwapHomoglyphSwap(),\n",
        "            \"textflint\": textflint.Ocr().transform,\n",
        "        },\n",
        "        \"SimulateTypos\": {\n",
        "            \"nlpaug\": nlpaug_w.spelling.SpellingAug().augment,\n",
        "            \"textattack\": textattack_aug.CharSwapAugmenter().augment,\n",
        "            \"textflint\": textflint.SpellingError().transform,\n",
        "        },\n",
        "        \"SplitWords\": {\n",
        "            \"nlpaug\": nlpaug_w.split.SplitAug().augment,\n",
        "        },\n",
        "        \"SwapGenderedWords\": {\n",
        "            \"textflint\": (\n",
        "                textflint.Prejudice(change_type=\"Name\").transform\n",
        "            ),\n",
        "        },\n",
        "    }\n",
        "elif modality == \"video\":\n",
        "    import moviepy.audio.fx.all as moviepy_audio\n",
        "    import moviepy.video.fx.all as moviepy\n",
        "    from pytorchvideo.transforms import augmentations as pytorchvideo\n",
        "    from vidaug import augmentors as vidaug\n",
        "\n",
        "    other_lib_funcs = {\n",
        "        \"AddNoise\": {\n",
        "            \"vidaug\": vidaug.Add(value=10),\n",
        "        },\n",
        "        \"AugmentAudio\": {\n",
        "            \"moviepy\": moviepy_audio.audio_normalize,\n",
        "        },\n",
        "        \"Blur\": {\n",
        "            \"pytorchvideo\": lambda vid: pytorchvideo._adjust_sharpness(\n",
        "                vid, factor=0\n",
        "            ),\n",
        "            \"vidaug\": vidaug.GaussianBlur(sigma=1),\n",
        "        },\n",
        "        \"Brightness\": {\n",
        "            \"pytorchvideo\": lambda vid: pytorchvideo._adjust_brightness(\n",
        "                vid, factor=0.15\n",
        "            ),\n",
        "        },\n",
        "        \"ColorJitter\": {\n",
        "            \"moviepy\": lambda vid: moviepy.colorx(vid, factor=2.0),\n",
        "            \"pytorchvideo\": lambda vid: pytorchvideo._adjust_saturation(\n",
        "              vid, factor=10.0\n",
        "            ),\n",
        "        },\n",
        "        \"Crop\": {\n",
        "            \"moviepy\": lambda vid: moviepy.crop(\n",
        "                vid, 0.25, 0.25, 0.75, 0.75\n",
        "            ),\n",
        "            \"vidaug\": vidaug.CenterCrop(size=(960, 540)),\n",
        "        },\n",
        "        \"ChangeVideoSpeed\": {\n",
        "            \"moviepy\": lambda vid: moviepy.speedx(vid, factor=2.0),\n",
        "            \"vidaug\": vidaug.Upsample(2.0),\n",
        "        },\n",
        "        \"Grayscale\": {\n",
        "            \"moviepy\": moviepy.blackwhite,\n",
        "            \"pytorchvideo\": pytorchvideo._autocontrast,\n",
        "        },\n",
        "        \"HFlip\": {\n",
        "            \"moviepy\": moviepy.mirror_x,\n",
        "            \"pytorchvideo\": lambda vid: pytorchvideo._translate_x(\n",
        "                vid, factor=1, fill=1\n",
        "            ),\n",
        "            \"vidaug\": vidaug.HorizontalFlip(),\n",
        "        },\n",
        "        \"Loop\": {\n",
        "            \"moviepy\": lambda vid: moviepy.loop(vid, n=2),\n",
        "        },\n",
        "        \"Pad\": {\n",
        "            \"moviepy\": lambda vid: moviepy.margin(vid, mar=20),\n",
        "        },\n",
        "        \"Pixelization\": {\n",
        "            \"vidaug\": vidaug.Superpixel(p_replace=0.5, n_segments=10),\n",
        "        },\n",
        "        \"Resize\": {\n",
        "            \"moviepy\": lambda vid: moviepy.resize(\n",
        "                vid, width=1300, height=1200\n",
        "            ),\n",
        "        },\n",
        "        \"Rotate\": {\n",
        "            \"moviepy\": lambda vid: moviepy.rotate(vid, angle=90),\n",
        "            \"pytorchvideo\": lambda vid: pytorchvideo._rotate(\n",
        "                vid, factor=90, fill=1\n",
        "            ),\n",
        "            \"vidaug\": vidaug.RandomRotate(degrees=90),\n",
        "        },\n",
        "        \"Shift\": {\n",
        "            \"vidaug\": vidaug.RandomTranslate(),\n",
        "        },\n",
        "        \"TimeCrop\": {\n",
        "            \"vidaug\": vidaug.TemporalRandomCrop(size=1),\n",
        "        },\n",
        "        \"VFlip\": {\n",
        "            \"moviepy\": moviepy.mirror_y,\n",
        "            \"pytorchvideo\": lambda vid: pytorchvideo._translate_y(\n",
        "                vid, factor=1, fill=1\n",
        "            ),\n",
        "            \"vidaug\": vidaug.VerticalFlip(),\n",
        "        },\n",
        "    }\n",
        "else:\n",
        "    print(f\"Modality {modality} is not supported\")"
      ],
      "metadata": {
        "id": "Xz4shdZQ633B"
      },
      "execution_count": 38,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "Ut4csTktQkcK"
      },
      "source": [
        "module = modules[modality]\n",
        "transforms_kwargs = transforms_nondefault_kwargs[modality]\n",
        "libs = lib_names[modality]"
      ],
      "execution_count": 39,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "I2k7b2T0OVA8"
      },
      "source": [
        "transforms_name_to_callables = {\n",
        "    k: {\n",
        "        \"AugLy\": v(**transforms_kwargs.get(k, {})),\n",
        "        **other_lib_funcs.get(k, {}),\n",
        "    }\n",
        "    for k, v in inspect.getmembers(module)\n",
        "    if not k.endswith(\"_intensity\")\n",
        "    and not k.startswith(\"Random\")\n",
        "    and k[0].isupper()\n",
        "    and k not in {\n",
        "        \"Compose\",\n",
        "        \"OneOf\",\n",
        "    }\n",
        "}\n",
        "\n",
        "num_other_lib_augs = sum(\n",
        "    1 for name, d in other_lib_funcs.items() for lib, f in d.items()\n",
        ")\n",
        "\n",
        "print(\n",
        "    f\"Starting benchmarking on {modality} modality: will run \"\n",
        "    f\"{len(transforms_name_to_callables.keys())} AugLy augmentations & \"\n",
        "    f\"{num_other_lib_augs} other libraries' augmentations on \"\n",
        "    f\"{len(data[modality])} data points\"\n",
        ")"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "epCncDYYoekk"
      },
      "source": [
        "mono_only = [\"HighPassFilter\", \"LowPassFilter\"]\n",
        "metrics = []\n",
        "for tname, transforms in transforms_name_to_callables.items():\n",
        "    print(tname)\n",
        "    num_dp = len(data[modality])\n",
        "    avg_runtimes_s = []\n",
        "    for lib in libs:\n",
        "        transform = transforms.get(lib, None)\n",
        "        if transform is None:\n",
        "            avg_runtimes_s.append(None)\n",
        "            continue\n",
        "        t0 = time.time()\n",
        "        for i, kwargs in enumerate(data[modality]):\n",
        "            args = (\n",
        "                data[f\"{modality}_{lib}\"][i]\n",
        "                if f\"{modality}_{lib}\" in data and not (\n",
        "                    tname == \"SimulateTypos\" and lib == \"textattack\"\n",
        "                )\n",
        "                else kwargs\n",
        "            )\n",
        "            args = list(args)\n",
        "            if modality == \"audio\" and tname in mono_only:\n",
        "                args[0] = args[0][0]\n",
        "                if lib == \"torchaudio\":\n",
        "                    args[0] = args[0].reshape((1, -1))\n",
        "            if modality == \"video\" and lib == \"AugLy\":\n",
        "                args.append(\"/tmp/video_out.mp4\")\n",
        "            if lib in [\"imgaug\", \"albumentations\"]:\n",
        "                transform(image=args[0])\n",
        "            else:\n",
        "                transform(*args)\n",
        "        avg_runtimes_s.append((time.time() - t0) / num_dp)\n",
        "    metrics.append((tname, *avg_runtimes_s))\n",
        "    print(\n",
        "        f\"Average runtime on {num_dp} data points: \"\n",
        "        f\"{list(zip(libs, avg_runtimes_s))}\\n\"\n",
        "    )\n",
        "    print(\"----------\\n\")"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "fHvs-p5ApiZt"
      },
      "source": [
        "metrics.sort(key=lambda t: t[1], reverse=True)\n",
        "pd.DataFrame(metrics, columns=[\"Augmentation\", *libs])"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        ""
      ],
      "metadata": {
        "id": "ByZ_eyO5a0IV"
      },
      "execution_count": null,
      "outputs": []
    }
  ]
}