{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "09369c79",
   "metadata": {},
   "source": [
    "# Loss function test"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "af4ebe29",
   "metadata": {},
   "source": [
    "## Color Saliency Loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "d8bab1b4",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-02-09T11:34:19.295113Z",
     "start_time": "2023-02-09T11:34:19.056460Z"
    }
   },
   "source": [
    "%matplotlib inline"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "8e4cc887",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-02-09T11:34:19.297730Z",
     "start_time": "2023-02-09T11:34:19.296192Z"
    }
   },
   "source": [
    "from typing import Tuple"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "5089130b",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-02-09T12:30:00.592354Z",
     "start_time": "2023-02-09T12:30:00.421465Z"
    }
   },
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torchvision.transforms as T\n",
    "\n",
    "import numpy as np\n",
    "from PIL import Image\n",
    "import matplotlib.pyplot as plt"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "e13860c6",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-02-09T12:31:35.307753Z",
     "start_time": "2023-02-09T12:31:35.303067Z"
    },
    "code_folding": [
     0
    ]
   },
   "source": [
     "def plot(imgs, with_orig=True, row_title=None, fw=15, fh=10, **imshow_kwargs):\n",
     "    \"\"\"Plot a 2D grid of images with matplotlib.\n",
     "\n",
     "    Args:\n",
     "        imgs: list of images, or list of lists of images (one inner list per row)\n",
     "        with_orig: if True, prepend the original image as column 0 of every row.\n",
     "            NOTE(review): relies on a notebook-global ``orig_img`` being defined\n",
     "            before this is called with with_orig=True -- confirm cell run order\n",
     "        row_title: optional list of y-axis labels, one per row\n",
     "        fw, fh: figure width and height in inches\n",
     "        **imshow_kwargs: forwarded to ``Axes.imshow``\n",
     "    \"\"\"\n",
     "    if not isinstance(imgs[0], list):\n",
     "        # Make a 2d grid even if there's just 1 row\n",
     "        imgs = [imgs]\n",
     "\n",
     "    num_rows = len(imgs)\n",
     "    num_cols = len(imgs[0]) + with_orig\n",
     "    fig, axs = plt.subplots(nrows=num_rows, ncols=num_cols, squeeze=False, figsize=(fw, fh))\n",
     "    for row_idx, row in enumerate(imgs):\n",
     "        # prepend the global orig_img when requested\n",
     "        row = [orig_img] + row if with_orig else row\n",
     "        for col_idx, img in enumerate(row):\n",
     "            ax = axs[row_idx, col_idx]\n",
     "            ax.imshow(np.asarray(img), **imshow_kwargs)\n",
     "            # hide axis ticks and labels -- only the images matter\n",
     "            ax.set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])\n",
     "\n",
     "    if with_orig:\n",
     "        axs[0, 0].set(title='Original image')\n",
     "        axs[0, 0].title.set_size(8)\n",
     "    if row_title is not None:\n",
     "        for row_idx in range(num_rows):\n",
     "            axs[row_idx, 0].set(ylabel=row_title[row_idx])\n",
     "\n",
     "    plt.tight_layout()"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 106,
   "id": "45d2f3ef",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2022-12-31T12:58:20.779034Z",
     "start_time": "2022-12-31T12:58:20.774366Z"
    },
    "code_folding": []
   },
   "source": [
    "class SaliencyLoss(nn.Module):\n",
    "    \"\"\"Implementation of the colorfulness metric as the saliency loss.\n",
    "    The smaller the value, the less colorful the image.\n",
    "    Reference: https://infoscience.epfl.ch/record/33994/files/HaslerS03.pdf\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        super(SaliencyLoss, self).__init__()\n",
    "\n",
    "    def forward(self, adv_patch: torch.Tensor) -> torch.Tensor:\n",
    "        \"\"\"\n",
    "        Args:\n",
    "            adv_patch: Float Tensor of shape [C, H, W] where C=3 (R, G, B channels)\n",
    "        \"\"\"\n",
    "        assert adv_patch.shape[0] == 3\n",
    "        r, g, b = adv_patch\n",
    "        rg = r - g\n",
    "        yb = 0.5 * (r + g) - b\n",
    "\n",
    "        mu_rg, sigma_rg = torch.mean(rg), torch.std(rg)\n",
    "        mu_yb, sigma_yb = torch.mean(yb), torch.std(yb)\n",
    "        sl = torch.sqrt(sigma_rg**2 + sigma_yb**2) + (0.3 * torch.sqrt(mu_rg**2 + mu_yb**2))\n",
    "        return sl / torch.numel(adv_patch)"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 107,
   "id": "fa6823b7",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2022-12-31T12:58:21.417405Z",
     "start_time": "2022-12-31T12:58:21.414235Z"
    }
   },
   "source": [
    "x = torch.randn(3, 224, 224)"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 108,
   "id": "de05e775",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2022-12-31T12:58:21.978173Z",
     "start_time": "2022-12-31T12:58:21.973531Z"
    }
   },
   "source": [
    "loss_saliency = SaliencyLoss()\n",
    "loss_saliency(x)"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "523eeedc",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-02-09T11:34:22.678852Z",
     "start_time": "2023-02-09T11:34:22.652809Z"
    }
   },
   "source": [
    "low_color_img = Image.open(\"images/low_color.png\", 'r')\n",
    "high_color_img = Image.open(\"images/high_color.png\", 'r')\n",
    "\n",
    "low_color_img = low_color_img.resize((224, 224))\n",
    "high_color_img = high_color_img.resize((224, 224))\n",
    "low_color_img = np.asarray(low_color_img)[:, :, :3]\n",
    "high_color_img = np.asarray(high_color_img)[:, :, :3]"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "id": "cb03de0d",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2022-12-31T12:03:13.862972Z",
     "start_time": "2022-12-31T12:03:13.626061Z"
    }
   },
   "source": [
    "fig, ax = plt.subplots(1,2, figsize=(12, 10))\n",
    "ax[0].imshow(low_color_img)\n",
    "ax[1].imshow(high_color_img)\n",
    "ax[0].set_title(\"Low Color\")\n",
    "ax[1].set_title(\"High Color\")\n",
    "plt.show()"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "id": "f35e57f8",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2022-12-31T12:07:36.148534Z",
     "start_time": "2022-12-31T12:07:36.140243Z"
    }
   },
   "source": [
    "low_color_tensor = torch.from_numpy(low_color_img.copy()).float().permute((2, 0, 1))\n",
    "low_color_tensor.requires_grad = True\n",
    "\n",
    "high_color_tensor = torch.from_numpy(high_color_img.copy()).float().permute((2, 0, 1))\n",
    "high_color_tensor.requires_grad = True\n",
    "\n",
    "# Note the first value must be smaller than the second value\n",
    "loss_saliency(low_color_tensor), loss_saliency(high_color_tensor)"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "fc42c566",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2022-12-31T11:38:31.465753Z",
     "start_time": "2022-12-31T11:38:31.445898Z"
    }
   },
   "source": [
    "loss_s.backward()  # loss is differentiable as well"
   ],
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "id": "ebb9a5de",
   "metadata": {},
   "source": [
    "## Total Variation Loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "7f043f11",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2022-12-31T11:38:31.476785Z",
     "start_time": "2022-12-31T11:38:31.466554Z"
    }
   },
   "source": [
    "class TotalVariationLoss(nn.Module):\n",
    "    \"\"\"TotalVariationLoss: calculates the total variation of a patch.\n",
    "    Module providing the functionality necessary to calculate the total vatiation (TV) of an adversarial patch.\n",
    "    Reference: https://en.wikipedia.org/wiki/Total_variation\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        super(TotalVariationLoss, self).__init__()\n",
    "\n",
    "    def forward(self, adv_patch: torch.Tensor) -> torch.Tensor:\n",
    "        \"\"\"\n",
    "        Args:\n",
    "            adv_patch: Tensor of shape [C, H, W] \n",
    "        \"\"\"\n",
    "        # calc diff in patch rows\n",
    "        tvcomp_r = torch.sum(torch.abs(adv_patch[:, :, 1:] - adv_patch[:, :, :-1]+0.000001), dim=0)\n",
    "        tvcomp_r = torch.sum(torch.sum(tvcomp_r, dim=0), dim=0)\n",
    "        # calc diff in patch columns\n",
    "        tvcomp_c = torch.sum(torch.abs(adv_patch[:, 1:, :] - adv_patch[:, :-1, :]+0.000001), dim=0)\n",
    "        tvcomp_c = torch.sum(torch.sum(tvcomp_c, dim=0), dim=0)\n",
    "        tv = tvcomp_r + tvcomp_c\n",
    "        return tv / torch.numel(adv_patch)"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "44f41a8f",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2022-12-31T11:39:00.502263Z",
     "start_time": "2022-12-31T11:39:00.498312Z"
    }
   },
   "source": [
    "adv_patch_grey = torch.full((3, 224,224), 0.5)\n",
    "adv_patch_rand = torch.randn(3, 224, 224)"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "122a9a7a",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2022-12-31T11:39:11.718234Z",
     "start_time": "2022-12-31T11:39:11.712302Z"
    }
   },
   "source": [
    "tv_loss = TotalVariationLoss()\n",
    "tv_loss(adv_patch_grey), tv_loss(adv_patch_rand)  # note the grey patch must have a lower tv loss"
   ],
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "id": "d12da7c8",
   "metadata": {},
   "source": [
    "## Non Printability Score Loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 87,
   "id": "a6d286ec",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2022-12-31T12:32:49.274718Z",
     "start_time": "2022-12-31T12:32:49.264065Z"
    },
    "code_folding": [
     0,
     55
    ]
   },
   "source": [
    "class NPSLoss(nn.Module):\n",
    "    \"\"\"NMSLoss: calculates the non-printability-score loss of a patch.\n",
    "    Module providing the functionality necessary to calculate the non-printability score (NMS) of an adversarial patch.\n",
    "    However, a summation of the differences is used instead of the total product to calc the NPSLoss\n",
    "    Reference: https://users.ece.cmu.edu/~lbauer/papers/2016/ccs2016-face-recognition.pdf\n",
    "    \n",
    "    Args: \n",
    "        triplet_scores_fpath: str, path to csv file with RGB triplets sep by commas in newlines\n",
    "        size: Tuple[int, int], Tuple with width, height of the patch\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, triplet_scores_fpath: str, size: Tuple[int, int]):\n",
    "        super(NPSLoss, self).__init__()\n",
    "        self.printability_array = nn.Parameter(self.get_printability_array(triplet_scores_fpath, size), requires_grad=False)\n",
    "\n",
    "    def forward(self, adv_patch):\n",
    "        # calculate euclidean distance between colors in patch and colors in printability_array \n",
    "        # square root of sum of squared difference\n",
    "        color_dist = (adv_patch - self.printability_array + 0.000001)\n",
    "        color_dist = color_dist ** 2\n",
    "        color_dist = torch.sum(color_dist, 1) + 0.000001\n",
    "        color_dist = torch.sqrt(color_dist)\n",
    "        # use the min distance\n",
    "        color_dist_prod = torch.min(color_dist, 0)[0]\n",
    "        # calculate the nps by summing over all pixels\n",
    "        nps_score = torch.sum(color_dist_prod, 0)\n",
    "        nps_score = torch.sum(nps_score, 0)\n",
    "        return nps_score / torch.numel(adv_patch)\n",
    "    \n",
    "    def get_printability_array(self, triplet_scores_fpath: str, size: Tuple[int, int]) -> torch.Tensor:\n",
    "        \"\"\"\n",
    "        Get printability tensor array holding the rgb triplets (range [0,1]) loaded from triplet_scores_fpath\n",
    "        Args: \n",
    "            triplet_scores_fpath: str, path to csv file with RGB triplets sep by commas in newlines\n",
    "            size: Tuple[int, int], Tuple with width, height of the patch\n",
    "        \"\"\"\n",
    "        ref_triplet_list = []\n",
    "        # read in reference printability triplets into a list\n",
    "        with open(triplet_scores_fpath) as f:\n",
    "            for line in f:\n",
    "                ref_triplet_list.append(line.strip().split(\",\"))\n",
    "\n",
    "        w, h = size\n",
    "        printability_array = []\n",
    "        for ref_triplet in ref_triplet_list:\n",
    "            r, g, b = map(float, ref_triplet)\n",
    "            ref_tensor_img = torch.stack([torch.full((h, w), r),\n",
    "                                          torch.full((h, w), g),\n",
    "                                          torch.full((h, w), b)])\n",
    "            printability_array.append(ref_tensor_img.float())\n",
    "        return torch.stack(printability_array)\n",
    "\n",
    "    def get_printability_array_old(self, triplet_scores_fpath: str, patch_side_len: int) -> torch.Tensor:\n",
    "        \"\"\"\n",
    "        Get printability tensor array holding the rgb triplets (range [0,1]) loaded from triplet_scores_fpath\n",
    "        Args: \n",
    "            triplet_scores_fpath: str, path to csv file with RGB triplets sep by commas in newlines\n",
    "            patch_side_len: int, length of the sides of the patch\n",
    "        \"\"\"\n",
    "        side = patch_side_len\n",
    "        ref_triplet_list = []\n",
    "\n",
    "        # read in reference printability triplets and put them in a list\n",
    "        with open(triplet_scores_fpath) as f:\n",
    "            for line in f:\n",
    "                ref_triplet_list.append(line.strip().split(\",\"))\n",
    "\n",
    "        printability_array = []\n",
    "        for ref_triplet in ref_triplet_list:\n",
    "            ref_imgs = []\n",
    "            r, g, b = ref_triplet\n",
    "            ref_imgs.append(np.full((side, side), r))\n",
    "            ref_imgs.append(np.full((side, side), g))\n",
    "            ref_imgs.append(np.full((side, side), b))\n",
    "            printability_array.append(ref_imgs)\n",
    "\n",
    "        printability_array = np.asarray(printability_array)\n",
    "        printability_array = np.float32(printability_array)\n",
    "        pa = torch.from_numpy(printability_array)\n",
    "        print(pa.shape, pa.dtype)\n",
    "        return pa"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 88,
   "id": "820deed7",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2022-12-31T12:32:49.569354Z",
     "start_time": "2022-12-31T12:32:49.562093Z"
    }
   },
   "source": [
    "nps_loss = NPSLoss(\"30_rgb_triplets.csv\", (224, 224))"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 90,
   "id": "6d94dcfa",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2022-12-31T12:33:09.976055Z",
     "start_time": "2022-12-31T12:33:09.961828Z"
    }
   },
   "source": [
     "adv_patch_grey = torch.full((3, 224,224), 0.5)\n",
     "adv_patch_rand = torch.randn(3, 224, 224)\n",
     "\n",
     "# the grey patch must have a lower NPS score since it is more printable than the random-noise patch\n",
     "nps_loss(adv_patch_grey), nps_loss(adv_patch_rand)"
   ],
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "id": "8d79e444",
   "metadata": {},
   "source": [
    "## Noise addition and contrast reduction"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "id": "9e64c2af",
   "metadata": {},
   "source": [
    "h, w, c = high_color_img.shape\n",
    "nprn = np.random.normal\n",
    "rcless = high_color_img * nprn(0.9, 0.1, (h, w, c)) + nprn(0, 0.001, (h, w, c))\n",
    "rcmore = high_color_img * nprn(0.9, 0.1, (h, w, c)) + nprn(0, 50, (h, w, c))\n",
    "\n",
    "fig, ax = plt.subplots(1, 2, figsize=(12, 10))\n",
    "ax[0].imshow(rcless / 255.)\n",
    "ax[1].imshow(rcmore / 255.)\n",
    "ax[0].set_title(\"Low variance\")\n",
    "ax[1].set_title(\"High variance\")\n",
    "plt.show()\n"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "5e579baf",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-02-09T11:48:14.903069Z",
     "start_time": "2023-02-09T11:48:14.872871Z"
    }
   },
   "source": [
    "h, w, c = high_color_img.shape\n",
    "nprn = np.random.normal\n",
    "\n",
    "rc05 = high_color_img * nprn(0.5, 0.1, (h, w, c)) + nprn(0, 0.01, (h, w, c))\n",
    "rc06 = high_color_img * nprn(0.6, 0.1, (h, w, c)) + nprn(0, 0.01, (h, w, c))\n",
    "rc07 = high_color_img * nprn(0.7, 0.1, (h, w, c)) + nprn(0, 0.01, (h, w, c))\n",
    "rc08 = high_color_img * nprn(0.8, 0.1, (h, w, c)) + nprn(0, 0.01, (h, w, c))\n",
    "rc09 = high_color_img * nprn(0.9, 0.1, (h, w, c)) + nprn(0, 0.01, (h, w, c))"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "fa1679ab",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-02-09T11:48:15.151766Z",
     "start_time": "2023-02-09T11:48:15.148924Z"
    }
   },
   "source": [
    "img_list = [high_color_img, rc09, rc08, rc07, rc06, rc05]\n",
    "title_list = [\"High color img\", \"reduced_color 0.9\", \"reduced_color 0.8\", \"reduced_color 0.7\", \"reduced_color 0.6\", \"reduced_color 0.5\"]"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "fa26ca2d",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-02-09T11:48:20.892549Z",
     "start_time": "2023-02-09T11:48:20.470456Z"
    }
   },
   "source": [
    "assert len(img_list) == len(title_list) \n",
    "fig, ax = plt.subplots(1, len(img_list), figsize=(20, 10))\n",
    "for i, (img, title) in enumerate(zip(img_list, title_list)):\n",
    "    ax[i].imshow(img / 255.)\n",
    "    ax[i].set_title(title)\n",
    "plt.show()"
   ],
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "id": "68ebfd1e",
   "metadata": {},
   "source": [
    "## Plot random perspective transforms"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "id": "a3ce8006",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-02-09T12:31:40.664217Z",
     "start_time": "2023-02-09T12:31:40.421150Z"
    }
   },
   "source": [
    "orig_img = Image.fromarray(high_color_img)\n",
    "perspective_transformer = T.RandomPerspective(distortion_scale=0.6, p=1.0)\n",
    "perspective_imgs = [perspective_transformer(orig_img) for _ in range(4)]\n",
    "plot(perspective_imgs)"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 85,
   "id": "66c9ff01",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-02-09T12:49:29.733012Z",
     "start_time": "2023-02-09T12:49:29.729177Z"
    }
   },
   "source": [
    "class PerspectiveTransform(nn.Module):\n",
    "    \"\"\"PerspectiveTransform as a module for differentiation\n",
    "    https://pytorch.org/vision/main/auto_examples/plot_transforms.html#sphx-glr-auto-examples-plot-transforms-py\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        super(PerspectiveTransform, self).__init__()\n",
    "        self.trp = T.RandomPerspective(distortion_scale=0.9, p=1.0)\n",
    "\n",
    "    def forward(self, adv_patch: torch.Tensor) -> torch.Tensor:\n",
    "        \"\"\"\n",
    "        Args:\n",
    "            adv_patch: Float Tensor of shape [B, C, H, W] where C=3 (R, G, B channels)\n",
    "        \"\"\"\n",
    "        trans = self.trp(adv_patch)\n",
    "        return trans"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 92,
   "id": "2031f2da",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-02-09T12:50:45.040985Z",
     "start_time": "2023-02-09T12:50:45.032688Z"
    }
   },
   "source": [
    "trans = PerspectiveTransform()\n",
    "orig_image_tensor = T.ToTensor()(orig_img)  #  tensor(0.) tensor(1.)\n",
    "ic, ih, iw = orig_image_tensor.shape\n",
    "orig_image_tensor = orig_image_tensor.unsqueeze(0).repeat(2, 1, 1, 1)  # torch.Size([2, 3, 224, 224])\n",
    "orig_image_tensor = orig_image_tensor.unsqueeze(0).repeat(2, 1, 1, 1, 1)  # torch.Size([2, 2, 3, 224, 224])\n",
    "\n",
    "orig_image_tensor = orig_image_tensor.reshape(-1, ic, ih, iw)  # torch.Size([4, 3, 224, 224])\n",
    "trans_image_tensor = trans(orig_image_tensor)  # torch.Size([4, 3, 224, 224])"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 93,
   "id": "07a86203",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2023-02-09T12:50:45.699186Z",
     "start_time": "2023-02-09T12:50:45.357045Z"
    }
   },
   "source": [
    "plot([T.ToPILImage()(img) for img in trans_image_tensor])"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b6ccea35",
   "metadata": {},
   "source": [],
   "outputs": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.10"
  },
  "vscode": {
   "interpreter": {
    "hash": "98634befc514324259d6c9d42b6d889d9a27b32bc4075c57570577e494554633"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
