{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0",
   "metadata": {},
   "outputs": [],
   "source": [
    "import tempfile\n",
    "from pathlib import Path\n",
    "from urllib.request import urlretrieve\n",
    "\n",
    "import cv2\n",
    "import matplotlib.pyplot as plt\n",
    "import numpy as np\n",
    "import rp\n",
    "import torch\n",
    "import torchvision.transforms\n",
    "from icecream import ic\n",
    "from PIL import Image, ImageDraw\n",
    "from torchvision.io import read_video\n",
    "from torchvision.models.optical_flow import raft_large\n",
    "from torchvision.utils import flow_to_image\n",
    "from tqdm import tqdm\n",
    "\n",
    "\n",
    "\n",
    "class RaftOpticalFlow:\n",
    "    def __init__(self, device):\n",
    "        self.device = device\n",
    "        self.model = raft_large(pretrained=True, progress=False).to(device)\n",
    "        self.model.eval()\n",
    "\n",
    "    def _preprocess_image(self, image):\n",
    "        assert rp.is_image(image)\n",
    "        \n",
    "        image = rp.as_float_image(rp.as_rgb_image(image))\n",
    "\n",
    "        #Floor height and width to the nearest multpiple of 8\n",
    "        height, width = rp.get_image_dimensions(image)\n",
    "        new_height = (height // 8) * 8\n",
    "        new_width  = (width  // 8) * 8\n",
    "\n",
    "        T = torchvision.transforms\n",
    "        transforms = T.Compose(\n",
    "            [\n",
    "                T.ToTensor(),\n",
    "                T.Normalize(mean=0.5, std=0.5),  # map [0, 1] into [-1, 1]\n",
    "                T.Resize(size=(new_height, new_width)),\n",
    "            ]\n",
    "        )\n",
    "        \n",
    "        output = transforms(image)[None].to(self.device).float()\n",
    "\n",
    "        assert rp.is_torch_tensor(output)\n",
    "        assert output.shape == (1, 3, height, width)\n",
    "\n",
    "        return output\n",
    "    \n",
    "    def get_flow_map(self, from_image, to_image):\n",
    "        assert rp.is_image(from_image)\n",
    "        assert rp.is_image(to_image)\n",
    "        assert rp.get_image_dimensions(from_image) == rp.get_image_dimensions(to_image)\n",
    "        \n",
    "        height, width = rp.get_image_dimensions(from_image)\n",
    "        \n",
    "        with torch.no_grad():\n",
    "            img1 = self._preprocess_image(from_image)\n",
    "            img2 = self._preprocess_image(to_image  )\n",
    "            \n",
    "            list_of_flows = self.model(img1, img2)\n",
    "            output_flow = list_of_flows[-1][0]\n",
    "    \n",
    "            # Resize the predicted flow back to the original image size\n",
    "            resize = torchvision.transforms.Resize((height, width))\n",
    "            output_flow = resize(output_flow[None])[0]\n",
    "\n",
    "        assert rp.is_torch_tensor(output_flow)\n",
    "        assert output_flow.shape == (2, height, width)\n",
    "\n",
    "        return output_flow\n",
    "\n",
    "    def demo_optical_flow(self, from_image, to_image):\n",
    "        predicted_flow = self.get_flow_map(from_image, to_image)\n",
    "        flow_img = flow_to_image(predicted_flow)\n",
    "        \n",
    "        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))\n",
    "        ax1.imshow(from_image)\n",
    "        ax1.set_title(\"Input Image\")\n",
    "        ax1.axis(\"off\")\n",
    "        ax2.imshow(flow_img.permute(1, 2, 0).cpu().numpy())\n",
    "        ax2.set_title(\"Predicted Optical Flow\")\n",
    "        ax2.axis(\"off\")\n",
    "        plt.tight_layout()\n",
    "        plt.show()\n",
    "    \n",
    "    def demo_optical_flow_dots(self, from_image, to_image, num_rows=10, num_cols=10):\n",
    "        predicted_flow = self.get_flow_map(from_image, to_image)\n",
    "        \n",
    "        height, width = rp.get_image_dimensions(from_image)\n",
    "        from_image = rp.as_pil_image(from_image)\n",
    "        to_image = rp.as_pil_image(to_image)\n",
    "        x_step = width // (num_cols + 1)\n",
    "        y_step = height // (num_rows + 1)\n",
    "        \n",
    "        dots_x, dots_y = np.meshgrid(np.arange(x_step, width, x_step), np.arange(y_step, height, y_step))\n",
    "        dots_x = dots_x.flatten()\n",
    "        dots_y = dots_y.flatten()\n",
    "        \n",
    "        # Draw dots on the from_image\n",
    "        from_image_with_dots = from_image.copy()\n",
    "        draw = ImageDraw.Draw(from_image_with_dots)\n",
    "        for x, y in zip(dots_x, dots_y):\n",
    "            draw.ellipse((x-2, y-2, x+2, y+2), fill=\"red\")\n",
    "        \n",
    "        # Warp the dots to the to_image based on the predicted flow\n",
    "        to_image_with_dots = to_image.copy()\n",
    "        draw = ImageDraw.Draw(to_image_with_dots)\n",
    "        for x, y in zip(dots_x, dots_y):\n",
    "            flow_x = predicted_flow[0, y, x].item()\n",
    "            flow_y = predicted_flow[1, y, x].item()\n",
    "            warped_x = x + flow_x\n",
    "            warped_y = y + flow_y\n",
    "            draw.ellipse((warped_x-2, warped_y-2, warped_x+2, warped_y+2), fill=\"blue\")\n",
    "        \n",
    "        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))\n",
    "        ax1.imshow(from_image_with_dots)\n",
    "        ax1.set_title(\"Input Image with Dots\")\n",
    "        ax1.axis(\"off\")\n",
    "        ax2.imshow(to_image_with_dots)\n",
    "        ax2.set_title(\"Output Image with Warped Dots\")\n",
    "        ax2.axis(\"off\")\n",
    "        plt.tight_layout()\n",
    "        plt.show()\n",
    "        \n",
    "    def query_flow_map(self, points, flow_map):\n",
    "\n",
    "        #--------------------------------------------------------------- TODO: AUDIT THIS FUNC ---------------------------------------------------------------\n",
    "        #------------------------------------------------------------ REDO IT WITH GENERTIC MAP SAMPLING FUNC ------------------------------------------------\n",
    "        #------------------------------------------------------------ REDO IT WITH GENERTIC MAP SAMPLING FUNC ------------------------------------------------\n",
    "        # print(\"Coordinates shape:\", points.shape)\n",
    "        # print(\"Flow map shape:\", flow_map.shape)\n",
    "        \n",
    "        # Normalize points to [-1, 1] range\n",
    "        height, width = flow_map.shape[1:]\n",
    "        normalized_coordinates = points.clone()\n",
    "        normalized_coordinates[0] = (points[0] / (width - 1)) * 2 - 1\n",
    "        normalized_coordinates[1] = (points[1] / (height - 1)) * 2 - 1\n",
    "        normalized_coordinates = normalized_coordinates.permute(1, 0).unsqueeze(0).unsqueeze(0)\n",
    "        \n",
    "        # print(\"Normalized points shape:\", normalized_coordinates.shape)\n",
    "        \n",
    "        # Perform bilinear interpolation using grid_sample\n",
    "        flow_map_permuted = flow_map.unsqueeze(0)\n",
    "        # print(\"Flow map permuted shape:\", flow_map_permuted.shape)\n",
    "        \n",
    "        deltas = torch.nn.functional.grid_sample(flow_map_permuted, normalized_coordinates, mode='bilinear', align_corners=True)\n",
    "        # print(\"Deltas shape after grid_sample:\", deltas.shape)\n",
    "        \n",
    "        # deltas = deltas.squeeze(0).permute(1, 0)\n",
    "        deltas = deltas.squeeze(0).squeeze(1).permute(1, 0).T\n",
    "        # print(\"Deltas shape after squeeze and permute:\", deltas.shape)\n",
    "        \n",
    "        return deltas\n",
    "    def add_optical_flow(self, points, flow_map):\n",
    "        deltas = self.query_flow_map(points, flow_map)\n",
    "        return points + deltas\n",
    "\n",
    "    def demo_optical_flow_animation(self, frames, num_rows=100, num_cols=100):\n",
    "        height, width = rp.get_image_dimensions(frames[0])\n",
    "        x_step = width // (num_cols + 1)\n",
    "        y_step = height // (num_rows + 1)\n",
    "        \n",
    "        dots_x, dots_y = np.meshgrid(np.arange(x_step, width, x_step), np.arange(y_step, height, y_step))\n",
    "        dots_x = torch.from_numpy(dots_x.flatten()).float().to(self.device)\n",
    "        dots_y = torch.from_numpy(dots_y.flatten()).float().to(self.device)\n",
    "        \n",
    "        points = torch.stack([dots_x, dots_y])\n",
    "        \n",
    "        animation_frames = []\n",
    "        \n",
    "        for frame_idx in tqdm(range(len(frames) - 1)):\n",
    "            from_image = frames[frame_idx]\n",
    "            to_image = frames[frame_idx + 1]\n",
    "            \n",
    "            predicted_flow = self.get_flow_map(from_image, to_image)\n",
    "            \n",
    "            # Update dot positions by accumulating optical flows\n",
    "            points = self.add_optical_flow(points, predicted_flow)\n",
    "            \n",
    "            # Convert PIL image to OpenCV format\n",
    "            from_image_cv = cv2.cvtColor(np.array(from_image), cv2.COLOR_RGB2BGR)\n",
    "            \n",
    "            # Draw dots on the from_image using OpenCV\n",
    "            dot_positions = points.cpu().numpy().T\n",
    "            dot_positions = dot_positions[~np.isnan(dot_positions).any(axis=1)]  # Remove NaN values\n",
    "            dot_positions = dot_positions.astype(np.int32)\n",
    "            from_image_cv[\n",
    "                np.clip(dot_positions[:, 1], 0, from_image_cv.shape[0]-1),\n",
    "                np.clip(dot_positions[:, 0], 0, from_image_cv.shape[1]-1),\n",
    "            ] = (0, 0, 255)  # Draw red dots\n",
    "            \n",
    "            # Convert the OpenCV image back to PIL format\n",
    "            from_image_with_dots = Image.fromarray(cv2.cvtColor(from_image_cv, cv2.COLOR_BGR2RGB))\n",
    "            \n",
    "            animation_frames.append(from_image_with_dots)\n",
    "        \n",
    "        rp.display_image_slideshow(animation_frames)\n",
    "\n",
    "\n",
    "def demo_flow_anim():\n",
    "    \"\"\"Run the optical-flow demos end-to-end on a sample video.\n",
    "\n",
    "    Uses a local video when available; otherwise downloads a public sample clip.\n",
    "    \"\"\"\n",
    "    with torch.no_grad():\n",
    "\n",
    "        device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n",
    "        optical_flow = RaftOpticalFlow(device)\n",
    "\n",
    "        # NOTE(review): this absolute path is specific to the author's machine;\n",
    "        # when it is missing we fall back to a downloadable sample clip.\n",
    "        video_path = '/root/CleanCode/Projects/deepfloyd_init_test/videos/diffuse_kevin_spin_height1024.mp4'\n",
    "        if not Path(video_path).exists():\n",
    "            # BUGFIX: the sample video used to be downloaded unconditionally and\n",
    "            # then discarded by an unconditional reassignment to the local path.\n",
    "            video_url = \"https://download.pytorch.org/tutorial/pexelscom_pavel_danilyuk_basketball_hd.mp4\"\n",
    "            video_path = Path(tempfile.mkdtemp()) / \"basketball.mp4\"\n",
    "            _ = urlretrieve(video_url, video_path)\n",
    "\n",
    "        frames = rp.load_video(video_path)\n",
    "\n",
    "        # Demo optical flow with dots\n",
    "        optical_flow.demo_optical_flow_dots(frames[100], frames[110], num_rows=20, num_cols=30)\n",
    "\n",
    "        # Demo optical flow animation\n",
    "        optical_flow.demo_optical_flow_animation(frames[100:200], num_rows=400, num_cols=200)\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "# Roadmap notes, kept as a module-level string literal (no runtime effect):\n",
    "\"\"\"\n",
    "TODO:\n",
    "- Bulk operations: getting flows from a video tensor or iterable and returning a generator (lazy) or a tensor, with a show_progress\n",
    "- Einops all the way\n",
    "- Document each func and tensor shapes\n",
    "- backwards / forwards occlusion detection + demo\n",
    "\n",
    "For gaussian swarm we need:\n",
    "def image_to_points(image, mask) --> xy, rgb\n",
    "def points_to_image(xy, rgb, height, width) --> area_image, rgb_sum_image, xy_sum_image\n",
    "\"\"\"\n",
    "\n",
    "# Entry point: executing this cell runs the full demo (downloads/loads video,\n",
    "# runs the RAFT model, and displays the visualizations).\n",
    "demo_flow_anim()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python vaetuner",
   "language": "python",
   "name": "vaetuner"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.14"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
