{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "auuaEk9iaMyg"
   },
   "source": [
    "Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree.\n",
    "\n",
    "# Video Seal Inference\n",
    "\n",
    "[[`arXiv`](https://arxiv.org/abs/2412.09492)]\n",
    "[[`Colab`](https://colab.research.google.com/github/facebookresearch/videoseal/blob/main/notebooks/colab.ipynb)]\n",
    "[[`Demo`](https://aidemos.meta.com/videoseal)]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Installation"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "ktbpIy3maMyi"
   },
   "source": [
    "Clone repository and install dependencies"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "JXXEkRqEaMyi",
    "outputId": "9a5de18f-ebd3-4ba7-ea07-a9c55419de0d"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Cloning into 'videoseal'...\n",
      "remote: Enumerating objects: 209, done.\u001b[K\n",
      "remote: Counting objects: 100% (133/133), done.\u001b[K\n",
      "remote: Compressing objects: 100% (95/95), done.\u001b[K\n",
      "remote: Total 209 (delta 44), reused 82 (delta 37), pack-reused 76 (from 1)\u001b[K\n",
      "Receiving objects: 100% (209/209), 46.30 MiB | 15.69 MiB/s, done.\n",
      "Resolving deltas: 100% (49/49), done.\n",
      "/content/videoseal\n"
     ]
    }
   ],
   "source": [
    "!git clone https://github.com/facebookresearch/videoseal.git\n",
    "%cd videoseal"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "V7HepUuOaMyj"
   },
   "source": [
    "Install dependencies"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 1000
    },
    "id": "VmrcsfF8aMyj",
    "outputId": "7913708a-d98a-4183-aafc-bd9328e326db"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Requirement already satisfied: pandas in /usr/local/lib/python3.10/dist-packages (from -r requirements.txt (line 1)) (2.2.2)\n",
      "Requirement already satisfied: opencv-python in /usr/local/lib/python3.10/dist-packages (from -r requirements.txt (line 2)) (4.10.0.84)\n",
      "Collecting omegaconf (from -r requirements.txt (line 3))\n",
      "  Downloading omegaconf-2.3.0-py3-none-any.whl.metadata (3.9 kB)\n",
      "Requirement already satisfied: einops in /usr/local/lib/python3.10/dist-packages (from -r requirements.txt (line 4)) (0.8.0)\n",
      "Collecting lpips (from -r requirements.txt (line 5))\n",
      "  Downloading lpips-0.1.4-py3-none-any.whl.metadata (10 kB)\n",
      "Collecting timm==0.9.16 (from -r requirements.txt (line 6))\n",
      "  Downloading timm-0.9.16-py3-none-any.whl.metadata (38 kB)\n",
      "Collecting pre-commit (from -r requirements.txt (line 7))\n",
      "  Downloading pre_commit-4.0.1-py2.py3-none-any.whl.metadata (1.3 kB)\n",
      "Requirement already satisfied: ipykernel in /usr/local/lib/python3.10/dist-packages (from -r requirements.txt (line 8)) (5.5.6)\n",
      "Requirement already satisfied: pycocotools in /usr/local/lib/python3.10/dist-packages (from -r requirements.txt (line 9)) (2.0.8)\n",
      "Collecting PyWavelets (from -r requirements.txt (line 11))\n",
      "  Downloading pywavelets-1.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (9.0 kB)\n",
      "Collecting av (from -r requirements.txt (line 12))\n",
      "  Downloading av-14.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (4.5 kB)\n",
      "Collecting pyav (from -r requirements.txt (line 13))\n",
      "  Downloading pyav-14.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (2.1 kB)\n",
      "Requirement already satisfied: scikit-image in /usr/local/lib/python3.10/dist-packages (from -r requirements.txt (line 14)) (0.24.0)\n",
      "Collecting decord (from -r requirements.txt (line 15))\n",
      "  Downloading decord-0.6.0-py3-none-manylinux2010_x86_64.whl.metadata (422 bytes)\n",
      "Requirement already satisfied: tqdm in /usr/local/lib/python3.10/dist-packages (from -r requirements.txt (line 17)) (4.66.6)\n",
      "Collecting pytorch_msssim (from -r requirements.txt (line 18))\n",
      "  Downloading pytorch_msssim-1.0.0-py3-none-any.whl.metadata (8.0 kB)\n",
      "Requirement already satisfied: tensorboard in /usr/local/lib/python3.10/dist-packages (from -r requirements.txt (line 19)) (2.17.1)\n",
      "Collecting calflops (from -r requirements.txt (line 20))\n",
      "  Downloading calflops-0.3.2-py3-none-any.whl.metadata (28 kB)\n",
      "Requirement already satisfied: transformers in /usr/local/lib/python3.10/dist-packages (from -r requirements.txt (line 21)) (4.46.3)\n",
      "Requirement already satisfied: safetensors in /usr/local/lib/python3.10/dist-packages (from -r requirements.txt (line 22)) (0.4.5)\n",
      "Requirement already satisfied: scipy in /usr/local/lib/python3.10/dist-packages (from -r requirements.txt (line 27)) (1.13.1)\n",
      "Collecting ffmpeg-python (from -r requirements.txt (line 28))\n",
      "  Downloading ffmpeg_python-0.2.0-py3-none-any.whl.metadata (1.7 kB)\n",
      "Requirement already satisfied: torch in /usr/local/lib/python3.10/dist-packages (from timm==0.9.16->-r requirements.txt (line 6)) (2.5.1+cu121)\n",
      "Requirement already satisfied: torchvision in /usr/local/lib/python3.10/dist-packages (from timm==0.9.16->-r requirements.txt (line 6)) (0.20.1+cu121)\n",
      "Requirement already satisfied: pyyaml in /usr/local/lib/python3.10/dist-packages (from timm==0.9.16->-r requirements.txt (line 6)) (6.0.2)\n",
      "Requirement already satisfied: huggingface_hub in /usr/local/lib/python3.10/dist-packages (from timm==0.9.16->-r requirements.txt (line 6)) (0.26.5)\n",
      "Requirement already satisfied: numpy>=1.22.4 in /usr/local/lib/python3.10/dist-packages (from pandas->-r requirements.txt (line 1)) (1.26.4)\n",
      "Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.10/dist-packages (from pandas->-r requirements.txt (line 1)) (2.8.2)\n",
      "Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.10/dist-packages (from pandas->-r requirements.txt (line 1)) (2024.2)\n",
      "Requirement already satisfied: tzdata>=2022.7 in /usr/local/lib/python3.10/dist-packages (from pandas->-r requirements.txt (line 1)) (2024.2)\n",
      "Collecting antlr4-python3-runtime==4.9.* (from omegaconf->-r requirements.txt (line 3))\n",
      "  Downloading antlr4-python3-runtime-4.9.3.tar.gz (117 kB)\n",
      "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m117.0/117.0 kB\u001b[0m \u001b[31m6.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
      "\u001b[?25h  Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
      "Collecting cfgv>=2.0.0 (from pre-commit->-r requirements.txt (line 7))\n",
      "  Downloading cfgv-3.4.0-py2.py3-none-any.whl.metadata (8.5 kB)\n",
      "Collecting identify>=1.0.0 (from pre-commit->-r requirements.txt (line 7))\n",
      "  Downloading identify-2.6.3-py2.py3-none-any.whl.metadata (4.4 kB)\n",
      "Collecting nodeenv>=0.11.1 (from pre-commit->-r requirements.txt (line 7))\n",
      "  Downloading nodeenv-1.9.1-py2.py3-none-any.whl.metadata (21 kB)\n",
      "Collecting virtualenv>=20.10.0 (from pre-commit->-r requirements.txt (line 7))\n",
      "  Downloading virtualenv-20.28.0-py3-none-any.whl.metadata (4.4 kB)\n",
      "Requirement already satisfied: ipython-genutils in /usr/local/lib/python3.10/dist-packages (from ipykernel->-r requirements.txt (line 8)) (0.2.0)\n",
      "Requirement already satisfied: ipython>=5.0.0 in /usr/local/lib/python3.10/dist-packages (from ipykernel->-r requirements.txt (line 8)) (7.34.0)\n",
      "Requirement already satisfied: traitlets>=4.1.0 in /usr/local/lib/python3.10/dist-packages (from ipykernel->-r requirements.txt (line 8)) (5.7.1)\n",
      "Requirement already satisfied: jupyter-client in /usr/local/lib/python3.10/dist-packages (from ipykernel->-r requirements.txt (line 8)) (6.1.12)\n",
      "Requirement already satisfied: tornado>=4.2 in /usr/local/lib/python3.10/dist-packages (from ipykernel->-r requirements.txt (line 8)) (6.3.3)\n",
      "Requirement already satisfied: matplotlib>=2.1.0 in /usr/local/lib/python3.10/dist-packages (from pycocotools->-r requirements.txt (line 9)) (3.8.0)\n",
      "Requirement already satisfied: networkx>=2.8 in /usr/local/lib/python3.10/dist-packages (from scikit-image->-r requirements.txt (line 14)) (3.4.2)\n",
      "Requirement already satisfied: pillow>=9.1 in /usr/local/lib/python3.10/dist-packages (from scikit-image->-r requirements.txt (line 14)) (11.0.0)\n",
      "Requirement already satisfied: imageio>=2.33 in /usr/local/lib/python3.10/dist-packages (from scikit-image->-r requirements.txt (line 14)) (2.36.1)\n",
      "Requirement already satisfied: tifffile>=2022.8.12 in /usr/local/lib/python3.10/dist-packages (from scikit-image->-r requirements.txt (line 14)) (2024.9.20)\n",
      "Requirement already satisfied: packaging>=21 in /usr/local/lib/python3.10/dist-packages (from scikit-image->-r requirements.txt (line 14)) (24.2)\n",
      "Requirement already satisfied: lazy-loader>=0.4 in /usr/local/lib/python3.10/dist-packages (from scikit-image->-r requirements.txt (line 14)) (0.4)\n",
      "Requirement already satisfied: absl-py>=0.4 in /usr/local/lib/python3.10/dist-packages (from tensorboard->-r requirements.txt (line 19)) (1.4.0)\n",
      "Requirement already satisfied: grpcio>=1.48.2 in /usr/local/lib/python3.10/dist-packages (from tensorboard->-r requirements.txt (line 19)) (1.68.1)\n",
      "Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.10/dist-packages (from tensorboard->-r requirements.txt (line 19)) (3.7)\n",
      "Requirement already satisfied: protobuf!=4.24.0,>=3.19.6 in /usr/local/lib/python3.10/dist-packages (from tensorboard->-r requirements.txt (line 19)) (4.25.5)\n",
      "Requirement already satisfied: setuptools>=41.0.0 in /usr/local/lib/python3.10/dist-packages (from tensorboard->-r requirements.txt (line 19)) (75.1.0)\n",
      "Requirement already satisfied: six>1.9 in /usr/local/lib/python3.10/dist-packages (from tensorboard->-r requirements.txt (line 19)) (1.17.0)\n",
      "Requirement already satisfied: tensorboard-data-server<0.8.0,>=0.7.0 in /usr/local/lib/python3.10/dist-packages (from tensorboard->-r requirements.txt (line 19)) (0.7.2)\n",
      "Requirement already satisfied: werkzeug>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from tensorboard->-r requirements.txt (line 19)) (3.1.3)\n",
      "Requirement already satisfied: accelerate>=0.22.0 in /usr/local/lib/python3.10/dist-packages (from calflops->-r requirements.txt (line 20)) (1.1.1)\n",
      "Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from transformers->-r requirements.txt (line 21)) (3.16.1)\n",
      "Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.10/dist-packages (from transformers->-r requirements.txt (line 21)) (2024.9.11)\n",
      "Requirement already satisfied: requests in /usr/local/lib/python3.10/dist-packages (from transformers->-r requirements.txt (line 21)) (2.32.3)\n",
      "Requirement already satisfied: tokenizers<0.21,>=0.20 in /usr/local/lib/python3.10/dist-packages (from transformers->-r requirements.txt (line 21)) (0.20.3)\n",
      "Requirement already satisfied: future in /usr/local/lib/python3.10/dist-packages (from ffmpeg-python->-r requirements.txt (line 28)) (1.0.0)\n",
      "Requirement already satisfied: psutil in /usr/local/lib/python3.10/dist-packages (from accelerate>=0.22.0->calflops->-r requirements.txt (line 20)) (5.9.5)\n",
      "Requirement already satisfied: fsspec>=2023.5.0 in /usr/local/lib/python3.10/dist-packages (from huggingface_hub->timm==0.9.16->-r requirements.txt (line 6)) (2024.10.0)\n",
      "Requirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.10/dist-packages (from huggingface_hub->timm==0.9.16->-r requirements.txt (line 6)) (4.12.2)\n",
      "Collecting jedi>=0.16 (from ipython>=5.0.0->ipykernel->-r requirements.txt (line 8))\n",
      "  Downloading jedi-0.19.2-py2.py3-none-any.whl.metadata (22 kB)\n",
      "Requirement already satisfied: decorator in /usr/local/lib/python3.10/dist-packages (from ipython>=5.0.0->ipykernel->-r requirements.txt (line 8)) (4.4.2)\n",
      "Requirement already satisfied: pickleshare in /usr/local/lib/python3.10/dist-packages (from ipython>=5.0.0->ipykernel->-r requirements.txt (line 8)) (0.7.5)\n",
      "Requirement already satisfied: prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from ipython>=5.0.0->ipykernel->-r requirements.txt (line 8)) (3.0.48)\n",
      "Requirement already satisfied: pygments in /usr/local/lib/python3.10/dist-packages (from ipython>=5.0.0->ipykernel->-r requirements.txt (line 8)) (2.18.0)\n",
      "Requirement already satisfied: backcall in /usr/local/lib/python3.10/dist-packages (from ipython>=5.0.0->ipykernel->-r requirements.txt (line 8)) (0.2.0)\n",
      "Requirement already satisfied: matplotlib-inline in /usr/local/lib/python3.10/dist-packages (from ipython>=5.0.0->ipykernel->-r requirements.txt (line 8)) (0.1.7)\n",
      "Requirement already satisfied: pexpect>4.3 in /usr/local/lib/python3.10/dist-packages (from ipython>=5.0.0->ipykernel->-r requirements.txt (line 8)) (4.9.0)\n",
      "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=2.1.0->pycocotools->-r requirements.txt (line 9)) (1.3.1)\n",
      "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=2.1.0->pycocotools->-r requirements.txt (line 9)) (0.12.1)\n",
      "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=2.1.0->pycocotools->-r requirements.txt (line 9)) (4.55.3)\n",
      "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=2.1.0->pycocotools->-r requirements.txt (line 9)) (1.4.7)\n",
      "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib>=2.1.0->pycocotools->-r requirements.txt (line 9)) (3.2.0)\n",
      "Requirement already satisfied: jinja2 in /usr/local/lib/python3.10/dist-packages (from torch->timm==0.9.16->-r requirements.txt (line 6)) (3.1.4)\n",
      "Requirement already satisfied: sympy==1.13.1 in /usr/local/lib/python3.10/dist-packages (from torch->timm==0.9.16->-r requirements.txt (line 6)) (1.13.1)\n",
      "Requirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.10/dist-packages (from sympy==1.13.1->torch->timm==0.9.16->-r requirements.txt (line 6)) (1.3.0)\n",
      "Collecting distlib<1,>=0.3.7 (from virtualenv>=20.10.0->pre-commit->-r requirements.txt (line 7))\n",
      "  Downloading distlib-0.3.9-py2.py3-none-any.whl.metadata (5.2 kB)\n",
      "Requirement already satisfied: platformdirs<5,>=3.9.1 in /usr/local/lib/python3.10/dist-packages (from virtualenv>=20.10.0->pre-commit->-r requirements.txt (line 7)) (4.3.6)\n",
      "Requirement already satisfied: MarkupSafe>=2.1.1 in /usr/local/lib/python3.10/dist-packages (from werkzeug>=1.0.1->tensorboard->-r requirements.txt (line 19)) (3.0.2)\n",
      "Requirement already satisfied: jupyter-core>=4.6.0 in /usr/local/lib/python3.10/dist-packages (from jupyter-client->ipykernel->-r requirements.txt (line 8)) (5.7.2)\n",
      "Requirement already satisfied: pyzmq>=13 in /usr/local/lib/python3.10/dist-packages (from jupyter-client->ipykernel->-r requirements.txt (line 8)) (24.0.1)\n",
      "Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests->transformers->-r requirements.txt (line 21)) (3.4.0)\n",
      "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests->transformers->-r requirements.txt (line 21)) (3.10)\n",
      "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests->transformers->-r requirements.txt (line 21)) (2.2.3)\n",
      "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests->transformers->-r requirements.txt (line 21)) (2024.8.30)\n",
      "Requirement already satisfied: parso<0.9.0,>=0.8.4 in /usr/local/lib/python3.10/dist-packages (from jedi>=0.16->ipython>=5.0.0->ipykernel->-r requirements.txt (line 8)) (0.8.4)\n",
      "Requirement already satisfied: ptyprocess>=0.5 in /usr/local/lib/python3.10/dist-packages (from pexpect>4.3->ipython>=5.0.0->ipykernel->-r requirements.txt (line 8)) (0.7.0)\n",
      "Requirement already satisfied: wcwidth in /usr/local/lib/python3.10/dist-packages (from prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0->ipython>=5.0.0->ipykernel->-r requirements.txt (line 8)) (0.2.13)\n",
      "Downloading timm-0.9.16-py3-none-any.whl (2.2 MB)\n",
      "\u001b[2K   \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.2/2.2 MB\u001b[0m \u001b[31m37.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
      "\u001b[?25hDownloading omegaconf-2.3.0-py3-none-any.whl (79 kB)\n",
      "\u001b[2K   \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m79.5/79.5 kB\u001b[0m \u001b[31m4.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
      "\u001b[?25hDownloading lpips-0.1.4-py3-none-any.whl (53 kB)\n",
      "\u001b[2K   \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m53.8/53.8 kB\u001b[0m \u001b[31m4.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
      "\u001b[?25hDownloading pre_commit-4.0.1-py2.py3-none-any.whl (218 kB)\n",
      "\u001b[2K   \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m218.7/218.7 kB\u001b[0m \u001b[31m14.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
      "\u001b[?25hDownloading pywavelets-1.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (4.5 MB)\n",
      "\u001b[2K   \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m4.5/4.5 MB\u001b[0m \u001b[31m49.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
      "\u001b[?25hDownloading av-14.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (33.0 MB)\n",
      "\u001b[2K   \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m33.0/33.0 MB\u001b[0m \u001b[31m16.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
      "\u001b[?25hDownloading pyav-14.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (29.1 MB)\n",
      "\u001b[2K   \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m29.1/29.1 MB\u001b[0m \u001b[31m44.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
      "\u001b[?25hDownloading decord-0.6.0-py3-none-manylinux2010_x86_64.whl (13.6 MB)\n",
      "\u001b[2K   \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m13.6/13.6 MB\u001b[0m \u001b[31m62.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
      "\u001b[?25hDownloading pytorch_msssim-1.0.0-py3-none-any.whl (7.7 kB)\n",
      "Downloading calflops-0.3.2-py3-none-any.whl (29 kB)\n",
      "Downloading ffmpeg_python-0.2.0-py3-none-any.whl (25 kB)\n",
      "Downloading cfgv-3.4.0-py2.py3-none-any.whl (7.2 kB)\n",
      "Downloading identify-2.6.3-py2.py3-none-any.whl (99 kB)\n",
      "\u001b[2K   \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m99.0/99.0 kB\u001b[0m \u001b[31m7.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
      "\u001b[?25hDownloading nodeenv-1.9.1-py2.py3-none-any.whl (22 kB)\n",
      "Downloading virtualenv-20.28.0-py3-none-any.whl (4.3 MB)\n",
      "\u001b[2K   \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m4.3/4.3 MB\u001b[0m \u001b[31m58.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
      "\u001b[?25hDownloading distlib-0.3.9-py2.py3-none-any.whl (468 kB)\n",
      "\u001b[2K   \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m469.0/469.0 kB\u001b[0m \u001b[31m30.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
      "\u001b[?25hDownloading jedi-0.19.2-py2.py3-none-any.whl (1.6 MB)\n",
      "\u001b[2K   \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.6/1.6 MB\u001b[0m \u001b[31m49.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
      "\u001b[?25hBuilding wheels for collected packages: antlr4-python3-runtime\n",
      "  Building wheel for antlr4-python3-runtime (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
      "  Created wheel for antlr4-python3-runtime: filename=antlr4_python3_runtime-4.9.3-py3-none-any.whl size=144555 sha256=a48c796342a65399ea6321c82981aae7dc85ef316657236f63cd8c121f0c6019\n",
      "  Stored in directory: /root/.cache/pip/wheels/12/93/dd/1f6a127edc45659556564c5730f6d4e300888f4bca2d4c5a88\n",
      "Successfully built antlr4-python3-runtime\n",
      "Installing collected packages: distlib, antlr4-python3-runtime, virtualenv, PyWavelets, pyav, omegaconf, nodeenv, jedi, identify, ffmpeg-python, decord, cfgv, av, pre-commit, pytorch_msssim, timm, lpips, calflops\n",
      "  Attempting uninstall: timm\n",
      "    Found existing installation: timm 1.0.12\n",
      "    Uninstalling timm-1.0.12:\n",
      "      Successfully uninstalled timm-1.0.12\n",
      "Successfully installed PyWavelets-1.8.0 antlr4-python3-runtime-4.9.3 av-14.0.1 calflops-0.3.2 cfgv-3.4.0 decord-0.6.0 distlib-0.3.9 ffmpeg-python-0.2.0 identify-2.6.3 jedi-0.19.2 lpips-0.1.4 nodeenv-1.9.1 omegaconf-2.3.0 pre-commit-4.0.1 pyav-14.0.1 pytorch_msssim-1.0.0 timm-0.9.16 virtualenv-20.28.0\n"
     ]
    },
    {
     "data": {
      "application/vnd.colab-display-data+json": {
       "id": "74ef783e5bb54e2aa650c30d48247e8b",
       "pip_warning": {
        "packages": [
         "pydevd_plugins"
        ]
       }
      }
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "!pip install -r requirements.txt"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "Q16r0X1CaMyk"
   },
   "source": [
    "## Imports and loading"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "WPfZw_FAaMyk",
    "outputId": "403fb560-5d1d-4083-fd34-02e75f92b118"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/content/videoseal\n"
     ]
    }
   ],
   "source": [
    "%cd /content/videoseal"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "id": "ZIicYPSXaMyl"
   },
   "outputs": [],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "from matplotlib.animation import FuncAnimation\n",
    "import logging\n",
    "logging.getLogger(\"matplotlib.image\").setLevel(logging.ERROR)\n",
    "from IPython.display import HTML, display\n",
    "\n",
    "import pandas as pd\n",
    "from tqdm import tqdm\n",
    "import numpy as np\n",
    "import ffmpeg\n",
    "import os\n",
    "import cv2\n",
    "import subprocess\n",
    "\n",
    "import torch\n",
    "\n",
    "from videoseal.evals.metrics import bit_accuracy\n",
    "from videoseal.models import Videoseal\n",
    "from videoseal.utils.cfg import setup_model_from_model_card\n",
    "\n",
    "\n",
    "def get_video_info(input_path):\n",
    "    # Open the video file\n",
    "    video = cv2.VideoCapture(input_path)\n",
    "\n",
    "    # Get video properties\n",
    "    width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))\n",
    "    height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))\n",
    "    fps = video.get(cv2.CAP_PROP_FPS)\n",
    "    codec = int(video.get(cv2.CAP_PROP_FOURCC))\n",
    "    num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))\n",
    "\n",
    "    # Decode codec to human-readable form\n",
    "    codec_str = \"\".join([chr((codec >> 8 * i) & 0xFF) for i in range(4)])\n",
    "\n",
    "    video.release()  # Close the video file\n",
    "\n",
    "    return {\n",
    "        \"width\": width,\n",
    "        \"height\": height,\n",
    "        \"fps\": fps,\n",
    "        \"codec\": codec_str,\n",
    "        \"num_frames\": num_frames\n",
    "    }\n",
    "\n",
    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "4tGZxcKuaMyl"
   },
   "source": [
    "## Load the model\n",
    "\n",
    "The videoseal library provides pretrained models for embedding and extracting watermarks."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "1_8BMj5UaMym",
    "outputId": "cf796b09-9aea-4f56-eeda-bf8496d2b2db"
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_auth.py:94: UserWarning: \n",
      "The secret `HF_TOKEN` does not exist in your Colab secrets.\n",
      "To authenticate with the Hugging Face Hub, create a token in your settings tab (https://huggingface.co/settings/tokens), set it as secret in your Google Colab and restart your session.\n",
      "You will be able to reuse this secret in all of your notebooks.\n",
      "Please note that authentication is recommended but still optional to access public models or datasets.\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model loaded successfully from /root/.cache/huggingface/hub/models--facebook--video_seal/snapshots/8037ef59ba2b2ec8fb8b55298ff37b8ccddd078d/checkpoint.pth with message: <All keys matched successfully>\n"
     ]
    }
   ],
   "source": [
    "# Load the VideoSeal model\n",
    "model = setup_model_from_model_card(\"videoseal\")\n",
    "\n",
    "# Set the model to evaluation mode and move it to the selected device\n",
    "model = model.eval()\n",
    "model = model.to(device)\n",
    "model.compile()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "FDB8D9e6aMym"
   },
   "source": [
    "## Embedding\n",
    "\n",
    "The embedding process is the process of hiding the watermark in the video."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "id": "WeT-7WzXaMym"
   },
   "outputs": [],
   "source": [
    "def embed_video_clip(\n",
    "    model: Videoseal,\n",
    "    clip: np.ndarray,\n",
    "    msgs: torch.Tensor\n",
    ") -> np.ndarray:\n",
    "    clip_tensor = torch.tensor(clip, dtype=torch.float32).permute(0, 3, 1, 2) / 255.0\n",
    "    outputs = model.embed(clip_tensor, msgs=msgs, is_video=True)\n",
    "    processed_clip = outputs[\"imgs_w\"]\n",
    "    processed_clip = (processed_clip * 255.0).byte().permute(0, 2, 3, 1).numpy()\n",
    "    return processed_clip\n",
    "\n",
    "def embed_video(\n",
    "    model: Videoseal,\n",
    "    input_path: str,\n",
    "    output_path: str,\n",
    "    chunk_size: int,\n",
    "    crf: int = 23\n",
    ") -> None:\n",
    "    # Read video dimensions\n",
    "    video_info = get_video_info(input_path)\n",
    "    width = int(video_info['width'])\n",
    "    height = int(video_info['height'])\n",
    "    fps = float(video_info['fps'])\n",
    "    codec = video_info['codec']\n",
    "    num_frames = int(video_info['num_frames'])\n",
    "\n",
    "    # Open the input video\n",
    "    process1 = (\n",
    "        ffmpeg\n",
    "        .input(input_path)\n",
    "        .output('pipe:', format='rawvideo', pix_fmt='rgb24', s='{}x{}'.format(width, height), r=fps)\n",
    "        .run_async(pipe_stdout=True, pipe_stderr=subprocess.PIPE)\n",
    "    )\n",
    "    # Open the output video\n",
    "    process2 = (\n",
    "        ffmpeg\n",
    "        .input('pipe:', format='rawvideo', pix_fmt='rgb24', s='{}x{}'.format(width, height), r=fps)\n",
    "        .output(output_path, vcodec='libx264', pix_fmt='yuv420p', r=fps, crf=crf)\n",
    "        .overwrite_output()\n",
    "        .run_async(pipe_stdin=True, pipe_stderr=subprocess.PIPE)\n",
    "    )\n",
    "\n",
    "    # Create a random message\n",
    "    msgs = model.get_random_msg()\n",
    "    with open(output_path.replace(\".mp4\", \".txt\"), \"w\") as f:\n",
    "        f.write(\"\".join([str(msg.item()) for msg in msgs[0]]))\n",
    "\n",
    "    # Process the video\n",
    "    frame_size = width * height * 3\n",
    "    chunk = np.zeros((chunk_size, height, width, 3), dtype=np.uint8)\n",
    "    frame_count = 0\n",
    "    pbar = tqdm(total=num_frames, desc=\"Watermark embedding\")\n",
    "    while True:\n",
    "        in_bytes = process1.stdout.read(frame_size)\n",
    "        if not in_bytes:\n",
    "            break\n",
    "        frame = np.frombuffer(in_bytes, np.uint8).reshape([height, width, 3])\n",
    "        chunk[frame_count % chunk_size] = frame\n",
    "        frame_count += 1\n",
    "        pbar.update(1)\n",
    "        if frame_count % chunk_size == 0:\n",
    "            processed_frame = embed_video_clip(model, chunk, msgs)\n",
    "            process2.stdin.write(processed_frame.tobytes())\n",
    "    process1.stdout.close()\n",
    "    process2.stdin.close()\n",
    "    process1.wait()\n",
    "    process2.wait()\n",
    "\n",
    "    return msgs"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "gKOJP7GTa1xO"
   },
   "source": [
    "You are free to upload any video and change the `video_path`.\n",
    "\n",
    "You can look at the watermark video output in the folder `outputs`."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "XX1TKDega1Wg",
    "outputId": "47248e63-0b10-41af-bf2b-66cf3ec7cc6f"
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Watermark embedding: 100%|██████████| 256/256 [03:18<00:00,  1.29it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Saved watermarked video to ./outputs/1.mp4\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "# Path to the input video\n",
    "video_path = \"./assets/videos/1.mp4\"\n",
    "\n",
    "# Create the output directory and path\n",
    "output_dir = \"./outputs\"\n",
    "os.makedirs(output_dir, exist_ok=True)\n",
    "output_path = os.path.join(output_dir, os.path.basename(video_path))\n",
    "\n",
    "# Embed the watermark inside the video with a random msg\n",
    "msgs_ori = embed_video(model, video_path, output_path, 16)\n",
    "print(f\"\\nSaved watermarked video to {output_path}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "nJPpKOMDaMym"
   },
   "source": [
    "## Extraction\n",
    "\n",
    "Load the video output from the embedding process and extract the watermark."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "wJxWT4F6aMym",
    "outputId": "89b323a7-919e-4a29-8935-29dbd08fae52"
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Watermark extraction:  12%|█▎        | 32/256 [00:12<01:30,  2.48it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "Binary message extracted with 100.0% bit accuracy\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "def detect_video_clip(\n",
    "    model: Videoseal,\n",
    "    clip: np.ndarray\n",
    ") -> torch.Tensor:\n",
    "    \"\"\"Run the watermark extractor on a chunk of frames and return per-frame bits.\"\"\"\n",
    "    clip_tensor = torch.tensor(clip, dtype=torch.float32).permute(0, 3, 1, 2) / 255.0\n",
    "    outputs = model.detect(clip_tensor, is_video=True)\n",
    "    output_bits = outputs[\"preds\"][:, 1:]  # exclude the first, which may be used for detection\n",
    "    return output_bits\n",
    "\n",
    "def detect_video(\n",
    "    model: Videoseal,\n",
    "    input_path: str,\n",
    "    num_frames_for_extraction: int,\n",
    "    chunk_size: int\n",
    ") -> torch.Tensor:\n",
    "    \"\"\"Extract the watermark message from a video, averaged over the first frames.\"\"\"\n",
    "    # Read video dimensions\n",
    "    video_info = get_video_info(input_path)\n",
    "    width = int(video_info['width'])\n",
    "    height = int(video_info['height'])\n",
    "    num_frames = int(video_info['num_frames'])\n",
    "\n",
    "    # Open the input video as a raw RGB stream\n",
    "    process1 = (\n",
    "        ffmpeg\n",
    "        .input(input_path)\n",
    "        .output('pipe:', format='rawvideo', pix_fmt='rgb24')\n",
    "        .run_async(pipe_stdout=True, pipe_stderr=subprocess.PIPE)\n",
    "    )\n",
    "\n",
    "    # Process the video chunk by chunk\n",
    "    frames_to_read = min(num_frames, num_frames_for_extraction)  # honest progress-bar total\n",
    "    frame_size = width * height * 3\n",
    "    chunk = np.zeros((chunk_size, height, width, 3), dtype=np.uint8)\n",
    "    frame_count = 0\n",
    "    soft_msgs = []\n",
    "    pbar = tqdm(total=frames_to_read, desc=\"Watermark extraction\")\n",
    "    while frame_count < frames_to_read:\n",
    "        in_bytes = process1.stdout.read(frame_size)\n",
    "        if not in_bytes:\n",
    "            break\n",
    "        frame = np.frombuffer(in_bytes, np.uint8).reshape([height, width, 3])\n",
    "        chunk[frame_count % chunk_size] = frame\n",
    "        frame_count += 1\n",
    "        pbar.update(1)\n",
    "        if frame_count % chunk_size == 0:\n",
    "            soft_msgs.append(detect_video_clip(model, chunk))\n",
    "    # Flush the trailing partial chunk so no read frame is dropped\n",
    "    remainder = frame_count % chunk_size\n",
    "    if remainder:\n",
    "        soft_msgs.append(detect_video_clip(model, chunk[:remainder]))\n",
    "    pbar.close()\n",
    "    process1.stdout.close()\n",
    "    process1.wait()\n",
    "\n",
    "    if not soft_msgs:\n",
    "        raise RuntimeError(f\"No frames could be read from {input_path}\")\n",
    "    soft_msgs = torch.cat(soft_msgs, dim=0)\n",
    "    soft_msgs = soft_msgs.mean(dim=0)  # Average the predictions across all frames\n",
    "    return soft_msgs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Extract the watermark from the watermarked video\n",
    "num_frames_for_extraction = 32\n",
    "soft_msgs = detect_video(model, output_path, num_frames_for_extraction, 16)\n",
    "\n",
    "# Compare the decoded bits against the embedded message\n",
    "bit_acc = 100 * bit_accuracy(soft_msgs, msgs_ori).item()\n",
    "print(f\"\\nBinary message extracted with {bit_acc:.1f}% bit accuracy\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Run other baselines"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "To download the checkpoints of other baseline methods, you can run the following commands:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "id": "MsDX_uGqcP5d"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Requirement already satisfied: huggingface_hub in /opt/miniconda3/lib/python3.12/site-packages (0.26.2)\n",
      "Requirement already satisfied: filelock in /opt/miniconda3/lib/python3.12/site-packages (from huggingface_hub) (3.13.1)\n",
      "Requirement already satisfied: fsspec>=2023.5.0 in /opt/miniconda3/lib/python3.12/site-packages (from huggingface_hub) (2024.10.0)\n",
      "Requirement already satisfied: packaging>=20.9 in /opt/miniconda3/lib/python3.12/site-packages (from huggingface_hub) (24.1)\n",
      "Requirement already satisfied: pyyaml>=5.1 in /opt/miniconda3/lib/python3.12/site-packages (from huggingface_hub) (6.0.1)\n",
      "Requirement already satisfied: requests in /opt/miniconda3/lib/python3.12/site-packages (from huggingface_hub) (2.32.3)\n",
      "Requirement already satisfied: tqdm>=4.42.1 in /opt/miniconda3/lib/python3.12/site-packages (from huggingface_hub) (4.66.4)\n",
      "Requirement already satisfied: typing-extensions>=3.7.4.3 in /opt/miniconda3/lib/python3.12/site-packages (from huggingface_hub) (4.12.2)\n",
      "Requirement already satisfied: charset-normalizer<4,>=2 in /opt/miniconda3/lib/python3.12/site-packages (from requests->huggingface_hub) (3.3.2)\n",
      "Requirement already satisfied: idna<4,>=2.5 in /opt/miniconda3/lib/python3.12/site-packages (from requests->huggingface_hub) (3.7)\n",
      "Requirement already satisfied: urllib3<3,>=1.21.1 in /opt/miniconda3/lib/python3.12/site-packages (from requests->huggingface_hub) (2.2.2)\n",
      "Requirement already satisfied: certifi>=2017.4.17 in /opt/miniconda3/lib/python3.12/site-packages (from requests->huggingface_hub) (2024.8.30)\n",
      "Fetching 11 files:   0%|                                 | 0/11 [00:00<?, ?it/s]Downloading '.gitattributes' to '.cache/models--tangtianzhong--img-wm-torchscript/blobs/a6344aac8c09253b3b630fb776ae94478aa0275b.incomplete'\n",
      "Downloading 'cin_nsm_decoder.pt' to '.cache/models--tangtianzhong--img-wm-torchscript/blobs/60cbc933a974f0258d3cfa947404c0e92027a29c3769edba34d90718301f9e8d.incomplete'\n",
      "Downloading 'trustmark_decoder_q.pt' to '.cache/models--tangtianzhong--img-wm-torchscript/blobs/c1e22e4a12c095e6a8f59c0b4fe35f2c09a6a071a62548b1fd6aae37f6fd85ef.incomplete'\n",
      "Downloading 'hidden_encoder_48b.pt' to '.cache/models--tangtianzhong--img-wm-torchscript/blobs/6a7c78241837a455db3d160134fa25e60ec225b548b5d6aab69a30cd3f7b19c3.incomplete'\n",
      "Downloading 'mbrs_256_m256_decoder.pt' to '.cache/models--tangtianzhong--img-wm-torchscript/blobs/9835526c99330b84f4910ab79f64d6da398ac2eaed9731f14b8e8707d775a70d.incomplete'\n",
      "Downloading 'cin_nsm_encoder.pt' to '.cache/models--tangtianzhong--img-wm-torchscript/blobs/5db5bdf2dc77dcf4a811b23185a031e86a6b60d727855dee1d17ba716f1d762b.incomplete'\n",
      "Downloading 'hidden_decoder_48b.pt' to '.cache/models--tangtianzhong--img-wm-torchscript/blobs/fb7571b93edeac06ba6d403d5bdbe3744ca82852cf1e00d2e2b7ec33376f8349.incomplete'\n",
      "Downloading 'mbrs_256_m256_encoder.pt' to '.cache/models--tangtianzhong--img-wm-torchscript/blobs/cabf5317340901e7fcc946428a16c3000f66fbb1a297e8fed114dcf540455bab.incomplete'\n",
      "\n",
      "cin_nsm_decoder.pt:   0%|                            | 0.00/138M [00:00<?, ?B/s]\u001b[A\n",
      "\n",
      "trustmark_decoder_q.pt:   0%|                       | 0.00/95.3M [00:00<?, ?B/s]\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "hidden_encoder_48b.pt:   0%|                         | 0.00/755k [00:00<?, ?B/s]\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "mbrs_256_m256_decoder.pt:   0%|                     | 0.00/81.2M [00:00<?, ?B/s]\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "mbrs_256_m256_encoder.pt:   0%|                     | 0.00/2.38M [00:00<?, ?B/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "hidden_decoder_48b.pt:   0%|                        | 0.00/1.21M [00:00<?, ?B/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "hidden_encoder_48b.pt: 100%|█████████████████| 755k/755k [00:00<00:00, 11.7MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "Download complete. Moving file to .cache/models--tangtianzhong--img-wm-torchscript/blobs/6a7c78241837a455db3d160134fa25e60ec225b548b5d6aab69a30cd3f7b19c3\n",
      "\n",
      "\n",
      "\n",
      ".gitattributes: 100%|██████████████████████| 1.52k/1.52k [00:00<00:00, 28.7MB/s]\u001b[A\u001b[A\u001b[A\n",
      "Download complete. Moving file to .cache/models--tangtianzhong--img-wm-torchscript/blobs/a6344aac8c09253b3b630fb776ae94478aa0275b\n",
      "Fetching 11 files:   9%|██▎                      | 1/11 [00:00<00:02,  4.23it/s]Downloading 'trustmark_encoder_q.pt' to '.cache/models--tangtianzhong--img-wm-torchscript/blobs/9502c4d6d0f0dbbefead5257f84b123238c79b68342143403011bcb8eb435eb9.incomplete'\n",
      "Downloading 'wam_decoder.pt' to '.cache/models--tangtianzhong--img-wm-torchscript/blobs/a245971eeef46f4c27083ee79f5027ca67117b579e9e8ec56105e167f01912fb.incomplete'\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "hidden_decoder_48b.pt: 100%|███████████████| 1.21M/1.21M [00:00<00:00, 5.46MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "hidden_decoder_48b.pt: 100%|███████████████| 1.21M/1.21M [00:00<00:00, 5.44MB/s]\u001b[A\u001b[A\u001b[A\n",
      "Download complete. Moving file to .cache/models--tangtianzhong--img-wm-torchscript/blobs/fb7571b93edeac06ba6d403d5bdbe3744ca82852cf1e00d2e2b7ec33376f8349\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "wam_decoder.pt:   0%|                                | 0.00/373M [00:00<?, ?B/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[ADownloading 'wam_encoder.pt' to '.cache/models--tangtianzhong--img-wm-torchscript/blobs/d5101989c3729529908c4596b5fd78045333639d7ffe4ae45699b28224ada48c.incomplete'\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "wam_encoder.pt:   0%|                               | 0.00/4.57M [00:00<?, ?B/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "mbrs_256_m256_encoder.pt: 100%|████████████| 2.38M/2.38M [00:00<00:00, 4.59MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "Download complete. Moving file to .cache/models--tangtianzhong--img-wm-torchscript/blobs/cabf5317340901e7fcc946428a16c3000f66fbb1a297e8fed114dcf540455bab\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "wam_decoder.pt:   3%|▋                      | 10.5M/373M [00:00<00:16, 21.9MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "wam_decoder.pt:   6%|█▎                     | 21.0M/373M [00:00<00:14, 23.6MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "wam_decoder.pt:   8%|█▉                     | 31.5M/373M [00:01<00:11, 29.7MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "wam_decoder.pt:  11%|██▌                    | 41.9M/373M [00:01<00:10, 32.5MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "wam_encoder.pt: 100%|██████████████████████| 4.57M/4.57M [00:01<00:00, 3.06MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "Download complete. Moving file to .cache/models--tangtianzhong--img-wm-torchscript/blobs/d5101989c3729529908c4596b5fd78045333639d7ffe4ae45699b28224ada48c\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "wam_decoder.pt:  14%|███▏                   | 52.4M/373M [00:01<00:08, 37.0MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "wam_decoder.pt:  17%|███▉                   | 62.9M/373M [00:01<00:07, 40.7MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "trustmark_encoder_q.pt:  30%|████▏         | 10.5M/34.7M [00:02<00:04, 5.08MB/s]\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "wam_decoder.pt:  20%|████▌                  | 73.4M/373M [00:02<00:06, 45.2MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "wam_decoder.pt:  22%|█████▏                 | 83.9M/373M [00:02<00:06, 45.4MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "wam_decoder.pt:  25%|█████▊                 | 94.4M/373M [00:02<00:06, 44.8MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "wam_decoder.pt:  28%|██████▋                 | 105M/373M [00:02<00:05, 45.3MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "wam_decoder.pt:  31%|███████▍                | 115M/373M [00:02<00:05, 47.1MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "wam_decoder.pt:  34%|████████                | 126M/373M [00:03<00:05, 48.0MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "trustmark_decoder_q.pt:  11%|█▌            | 10.5M/95.3M [00:03<00:31, 2.71MB/s]\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "wam_decoder.pt:  37%|████████▊               | 136M/373M [00:03<00:06, 37.8MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "cin_nsm_decoder.pt:   8%|█▍                 | 10.5M/138M [00:03<00:47, 2.71MB/s]\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "wam_decoder.pt:  39%|█████████▍              | 147M/373M [00:03<00:06, 37.3MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "mbrs_256_m256_decoder.pt:  13%|█▌          | 10.5M/81.2M [00:04<00:29, 2.44MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "wam_decoder.pt:  42%|██████████              | 157M/373M [00:04<00:05, 39.6MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "wam_decoder.pt:  45%|██████████▊             | 168M/373M [00:04<00:04, 43.1MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "trustmark_encoder_q.pt:  60%|████████▍     | 21.0M/34.7M [00:04<00:02, 4.63MB/s]\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "wam_decoder.pt:  48%|███████████▍            | 178M/373M [00:04<00:04, 45.2MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "wam_decoder.pt:  51%|████████████▏           | 189M/373M [00:04<00:04, 44.4MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "cin_nsm_encoder.pt:  41%|███████▍          | 10.5M/25.5M [00:05<00:07, 2.02MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "wam_decoder.pt:  53%|████████████▊           | 199M/373M [00:04<00:03, 45.5MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "wam_decoder.pt:  56%|█████████████▍          | 210M/373M [00:05<00:03, 46.8MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "trustmark_decoder_q.pt:  22%|███           | 21.0M/95.3M [00:05<00:18, 3.95MB/s]\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "wam_decoder.pt:  59%|██████████████▏         | 220M/373M [00:05<00:03, 45.8MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "trustmark_encoder_q.pt:  91%|████████████▋ | 31.5M/34.7M [00:05<00:00, 6.04MB/s]\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "mbrs_256_m256_decoder.pt:  26%|███         | 21.0M/81.2M [00:05<00:15, 3.89MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "wam_decoder.pt:  62%|██████████████▊         | 231M/373M [00:05<00:03, 46.0MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "cin_nsm_decoder.pt:  15%|██▉                | 21.0M/138M [00:06<00:32, 3.63MB/s]\u001b[A\n",
      "\n",
      "\n",
      "trustmark_encoder_q.pt: 100%|██████████████| 34.7M/34.7M [00:05<00:00, 5.91MB/s]\u001b[A\u001b[A\u001b[A\n",
      "Download complete. Moving file to .cache/models--tangtianzhong--img-wm-torchscript/blobs/9502c4d6d0f0dbbefead5257f84b123238c79b68342143403011bcb8eb435eb9\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "wam_decoder.pt:  65%|███████████████▌        | 241M/373M [00:05<00:02, 44.6MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "wam_decoder.pt:  67%|████████████████▏       | 252M/373M [00:06<00:03, 37.9MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "wam_decoder.pt:  70%|████████████████▊       | 262M/373M [00:06<00:02, 39.5MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "mbrs_256_m256_decoder.pt:  39%|████▋       | 31.5M/81.2M [00:06<00:09, 5.32MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "wam_decoder.pt:  73%|█████████████████▌      | 273M/373M [00:06<00:02, 41.4MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "wam_decoder.pt:  76%|██████████████████▏     | 283M/373M [00:07<00:02, 33.0MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "trustmark_decoder_q.pt:  33%|████▌         | 31.5M/95.3M [00:07<00:13, 4.62MB/s]\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "wam_decoder.pt:  79%|██████████████████▊     | 294M/373M [00:07<00:02, 32.4MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "wam_decoder.pt:  81%|███████████████████▌    | 304M/373M [00:07<00:02, 33.2MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "wam_decoder.pt:  84%|████████████████████▏   | 315M/373M [00:08<00:01, 35.2MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "mbrs_256_m256_decoder.pt:  52%|██████▏     | 41.9M/81.2M [00:08<00:06, 5.95MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "wam_decoder.pt:  87%|████████████████████▉   | 325M/373M [00:08<00:01, 35.3MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "cin_nsm_encoder.pt:  82%|██████████████▊   | 21.0M/25.5M [00:08<00:01, 2.47MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "wam_decoder.pt:  90%|█████████████████████▌  | 336M/373M [00:08<00:00, 37.9MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "cin_nsm_decoder.pt:  23%|████▎              | 31.5M/138M [00:09<00:30, 3.49MB/s]\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "wam_decoder.pt:  93%|██████████████████████▏ | 346M/373M [00:08<00:00, 35.9MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "wam_decoder.pt:  95%|██████████████████████▉ | 357M/373M [00:09<00:00, 36.8MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "mbrs_256_m256_decoder.pt:  65%|███████▋    | 52.4M/81.2M [00:10<00:04, 5.80MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "wam_decoder.pt:  98%|███████████████████████▌| 367M/373M [00:10<00:00, 23.2MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "wam_decoder.pt: 100%|████████████████████████| 373M/373M [00:10<00:00, 36.6MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "Download complete. Moving file to .cache/models--tangtianzhong--img-wm-torchscript/blobs/a245971eeef46f4c27083ee79f5027ca67117b579e9e8ec56105e167f01912fb\n",
      "\n",
      "\n",
      "trustmark_decoder_q.pt:  44%|██████▏       | 41.9M/95.3M [00:10<00:13, 3.98MB/s]\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "cin_nsm_encoder.pt: 100%|██████████████████| 25.5M/25.5M [00:10<00:00, 2.34MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "Download complete. Moving file to .cache/models--tangtianzhong--img-wm-torchscript/blobs/5db5bdf2dc77dcf4a811b23185a031e86a6b60d727855dee1d17ba716f1d762b\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "mbrs_256_m256_decoder.pt:  77%|█████████▎  | 62.9M/81.2M [00:11<00:02, 7.09MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "cin_nsm_decoder.pt:  30%|█████▊             | 41.9M/138M [00:11<00:24, 3.85MB/s]\u001b[A\n",
      "\n",
      "trustmark_decoder_q.pt:  55%|███████▋      | 52.4M/95.3M [00:11<00:08, 5.28MB/s]\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "mbrs_256_m256_decoder.pt:  90%|██████████▊ | 73.4M/81.2M [00:11<00:00, 9.25MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "\n",
      "\n",
      "\n",
      "mbrs_256_m256_decoder.pt: 100%|████████████| 81.2M/81.2M [00:11<00:00, 6.80MB/s]\u001b[A\u001b[A\u001b[A\u001b[A\n",
      "Download complete. Moving file to .cache/models--tangtianzhong--img-wm-torchscript/blobs/9835526c99330b84f4910ab79f64d6da398ac2eaed9731f14b8e8707d775a70d\n",
      "\n",
      "\n",
      "trustmark_decoder_q.pt:  66%|█████████▏    | 62.9M/95.3M [00:12<00:05, 6.44MB/s]\u001b[A\u001b[A\n",
      "cin_nsm_decoder.pt:  38%|███████▏           | 52.4M/138M [00:12<00:17, 4.82MB/s]\u001b[A\n",
      "\n",
      "trustmark_decoder_q.pt:  77%|██████████▊   | 73.4M/95.3M [00:12<00:02, 8.72MB/s]\u001b[A\u001b[A\n",
      "\n",
      "trustmark_decoder_q.pt:  88%|████████████▎ | 83.9M/95.3M [00:13<00:00, 11.7MB/s]\u001b[A\u001b[A\n",
      "cin_nsm_decoder.pt:  46%|████████▋          | 62.9M/138M [00:13<00:11, 6.59MB/s]\u001b[A\n",
      "\n",
      "trustmark_decoder_q.pt: 100%|██████████████| 95.3M/95.3M [00:13<00:00, 7.18MB/s]\u001b[A\u001b[A\n",
      "Download complete. Moving file to .cache/models--tangtianzhong--img-wm-torchscript/blobs/c1e22e4a12c095e6a8f59c0b4fe35f2c09a6a071a62548b1fd6aae37f6fd85ef\n",
      "\n",
      "cin_nsm_decoder.pt:  53%|██████████         | 73.4M/138M [00:13<00:07, 8.74MB/s]\u001b[A\n",
      "cin_nsm_decoder.pt:  61%|███████████▌       | 83.9M/138M [00:13<00:04, 12.0MB/s]\u001b[A\n",
      "cin_nsm_decoder.pt:  68%|████████████▉      | 94.4M/138M [00:13<00:02, 15.9MB/s]\u001b[A\n",
      "cin_nsm_decoder.pt:  76%|███████████████▏    | 105M/138M [00:14<00:01, 19.8MB/s]\u001b[A\n",
      "cin_nsm_decoder.pt:  84%|████████████████▋   | 115M/138M [00:14<00:00, 23.5MB/s]\u001b[A\n",
      "cin_nsm_decoder.pt:  91%|██████████████████▏ | 126M/138M [00:14<00:00, 27.5MB/s]\u001b[A\n",
      "cin_nsm_decoder.pt: 100%|████████████████████| 138M/138M [00:14<00:00, 9.22MB/s]\u001b[A\n",
      "Download complete. Moving file to .cache/models--tangtianzhong--img-wm-torchscript/blobs/60cbc933a974f0258d3cfa947404c0e92027a29c3769edba34d90718301f9e8d\n",
      "Fetching 11 files: 100%|████████████████████████| 11/11 [00:15<00:00,  1.37s/it]\n",
      ".cache/models--tangtianzhong--img-wm-torchscript/snapshots/845dc751783db2a03a4b14ea600b0a4a9aba89aa\n"
     ]
    }
   ],
   "source": [
    "# Install the Hub client and fetch the TorchScript baseline checkpoints into ./ckpts\n",
    "%pip install -q huggingface_hub\n",
    "!huggingface-cli download tangtianzhong/img-wm-torchscript --cache-dir .cache\n",
    "!mkdir -p ckpts\n",
    "# Glob over snapshots so this keeps working if the repo revision hash changes\n",
    "!cp .cache/models--tangtianzhong--img-wm-torchscript/snapshots/*/*.pt ckpts/\n",
    "!rm -rf .cache"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from videoseal.utils.cfg import setup_model_from_checkpoint\n",
    "\n",
    "# Load a baseline watermarking model and prepare it for inference\n",
    "model = setup_model_from_checkpoint(\"baseline/trustmark\")\n",
    "model = model.eval().to(device)\n",
    "model.compile()\n",
    "\n",
    "model.chunk_size = 32  # embed 32 frames/imgs at a time\n",
    "model.step_size = 4  # propagate the wm to 4 next frame/img\n",
    "# model.blender.scaling_w *= 1.5  # imperceptibility/robustness trade-off"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Embedding"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Re-run the embedding, now with the baseline model\n",
    "video_path = \"./assets/videos/1.mp4\"\n",
    "output_dir = \"./outputs\"\n",
    "\n",
    "# Output keeps the input file name inside the output directory\n",
    "os.makedirs(output_dir, exist_ok=True)\n",
    "output_path = os.path.join(output_dir, os.path.basename(video_path))\n",
    "\n",
    "# Embed the watermark inside the video with a random msg\n",
    "msgs_ori = embed_video(model, video_path, output_path, 16)\n",
    "print(f\"\\nSaved watermarked video to {output_path}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Extraction"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Detect the watermark and score it against the embedded message\n",
    "num_frames_for_extraction = 32\n",
    "soft_msgs = detect_video(model, output_path, num_frames_for_extraction, 16)\n",
    "bit_acc = 100 * bit_accuracy(soft_msgs, msgs_ori).item()\n",
    "print(f\"\\nBinary message extracted with {bit_acc:.1f}% bit accuracy\")"
   ]
  }
 ],
 "metadata": {
  "colab": {
   "provenance": []
  },
  "kernelspec": {
   "display_name": "base",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}
