{
 "cells": [
  {
   "cell_type": "markdown",
   "source": [
    "# Functions"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "3f052650050e9bb5"
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "outputs": [],
   "source": [
    "import sys\n",
    "\n",
    "\n",
    "sys.path.append(\"../\")"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2023-12-13T02:55:16.302188Z",
     "start_time": "2023-12-13T02:55:16.293857Z"
    }
   },
   "id": "aef68bd5f7d34c25"
  },
  {
   "cell_type": "markdown",
   "source": [
    "## MP4 functions"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "6f35a8c66ad3c817"
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "initial_id",
   "metadata": {
    "collapsed": true,
    "ExecuteTime": {
     "end_time": "2023-12-13T02:55:33.541542Z",
     "start_time": "2023-12-13T02:55:33.460844Z"
    }
   },
   "outputs": [],
   "source": [
    "import cv2\n",
    "from IPython.display import clear_output, display, HTML\n",
    "from base64 import b64encode\n",
    "\n",
    "\n",
    "def play_video(video_path):\n",
    "    video = cv2.VideoCapture(video_path)\n",
    "    while True:\n",
    "        ret, frame = video.read()\n",
    "        if not ret:\n",
    "            break\n",
    "        clear_output(wait=True)\n",
    "        _, buffer = cv2.imencode('.jpg', frame)\n",
    "        display(HTML('<img src=\"data:image/jpeg;base64,{}\">'.format(b64encode(buffer).decode())))\n",
    "        # time.sleep(0.1)\n",
    "    video.release()"
   ]
  },
  {
   "cell_type": "markdown",
   "source": [
    "## Data transform functions"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "52cc3acf9ad573d4"
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "outputs": [],
   "source": [
    "import torchvision.transforms.functional as F\n",
    "\n",
    "\n",
    "def process_image(image):\n",
    "    ori_image = image.copy()\n",
    "    h, w = image.shape[:2]\n",
    "    scale = 800 / min(h, w)\n",
    "    if max(h, w) * scale > 1536:\n",
    "        scale = 1536 / max(h, w)\n",
    "    target_h = int(h * scale)\n",
    "    target_w = int(w * scale)\n",
    "    image = cv2.resize(image, (target_w, target_h))\n",
    "    image = F.normalize(F.to_tensor(image), [0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n",
    "    return image, ori_image"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2023-12-13T02:55:42.396579Z",
     "start_time": "2023-12-13T02:55:41.469121Z"
    }
   },
   "id": "1f5b0626c3104d09"
  },
  {
   "cell_type": "markdown",
   "source": [
    "## Tracking functions and classes"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "7c0593605b6fa0fd"
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "outputs": [],
   "source": [
    "import torch\n",
    "from structures.track_instances import TrackInstances\n",
    "\n",
    "\n",
    "def filter_by_score(tracks: TrackInstances, thresh: float = 0.7):\n",
    "    keep = torch.max(tracks.scores, dim=-1).values > thresh\n",
    "    return tracks[keep]\n",
    "\n",
    "def filter_by_area(tracks: TrackInstances, thresh: int = 100):\n",
    "    assert len(tracks.area) == len(tracks.ids), f\"Tracks' 'area' should have the same dim with 'ids'\"\n",
    "    keep = tracks.area > thresh\n",
    "    return tracks[keep]"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2023-12-13T02:55:49.730740Z",
     "start_time": "2023-12-13T02:55:49.714150Z"
    }
   },
   "id": "569b8ba2c44f5424"
  },
  {
   "cell_type": "markdown",
   "source": [
    "## Plot functions"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "1bb3a9aa4538cbc0"
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import cv2\n",
    "\n",
    "\n",
    "def get_color(idx):\n",
    "    idx = idx * 3\n",
    "    color = ((37 * idx) % 255, (17 * idx) % 255, (29 * idx) % 255)\n",
    "\n",
    "    return color\n",
    "\n",
    "def plot_tracking(image, tlwhs, obj_ids, scores=None, frame_id=0, fps=0., ids2=None):\n",
    "    # Thanks to https://github.com/noahcao/OC_SORT\n",
    "    im = np.ascontiguousarray(np.copy(image))\n",
    "    im_h, im_w = im.shape[:2]\n",
    "\n",
    "    top_view = np.zeros([im_w, im_w, 3], dtype=np.uint8) + 255\n",
    "    text_scale = 2\n",
    "    text_thickness = 2\n",
    "    line_thickness = 3\n",
    "\n",
    "    radius = max(5, int(im_w/140.))\n",
    "    cv2.putText(im, 'frame: %d fps: %.2f num: %d' % (frame_id, fps, len(tlwhs)),\n",
    "                (0, int(15 * text_scale)), cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255), thickness=2)\n",
    "\n",
    "    for i, tlwh in enumerate(tlwhs):\n",
    "        x1, y1, w, h = tlwh\n",
    "        intbox = tuple(map(int, (x1, y1, x1 + w, y1 + h)))\n",
    "        obj_id = int(obj_ids[i])\n",
    "        id_text = '{}'.format(int(obj_id))\n",
    "        if ids2 is not None:\n",
    "            id_text = id_text + ', {}'.format(int(ids2[i]))\n",
    "        color = get_color(abs(obj_id))\n",
    "        cv2.rectangle(im, intbox[0:2], intbox[2:4], color=color, thickness=line_thickness)\n",
    "        cv2.putText(im, id_text, (intbox[0], intbox[1]), cv2.FONT_HERSHEY_PLAIN, text_scale, (0, 0, 255),\n",
    "                    thickness=text_thickness)\n",
    "    return im"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2023-12-13T02:55:53.026457Z",
     "start_time": "2023-12-13T02:55:53.002828Z"
    }
   },
   "id": "965b054a753963c6"
  },
  {
   "cell_type": "markdown",
   "source": [
    "## Demo (inference) functions"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "9c5ea92028ff1606"
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "outputs": [],
   "source": [
    "import cv2\n",
    "import time\n",
    "import os.path\n",
    "import sys\n",
    "import numpy as np\n",
    "from models import build_model\n",
    "from models.utils import load_checkpoint\n",
    "from utils.utils import yaml_to_dict\n",
    "\n",
    "import torch\n",
    "from typing import List\n",
    "from models import build_model\n",
    "from models.utils import load_checkpoint, get_model\n",
    "from models.runtime_tracker import RuntimeTracker\n",
    "from utils.utils import yaml_to_dict, is_distributed, distributed_world_size, distributed_rank, inverse_sigmoid\n",
    "from utils.nested_tensor import tensor_list_to_nested_tensor\n",
    "from utils.box_ops import box_cxcywh_to_xyxy\n",
    "from structures.track_instances import TrackInstances\n",
    "\n",
    "\n",
    "def demo_processing(\n",
    "        model_path: str,\n",
    "        config_path: str,\n",
    "        video_path: str,\n",
    "):\n",
    "    config = yaml_to_dict(config_path)\n",
    "    model = build_model(config)\n",
    "    load_checkpoint(\n",
    "        model=model,\n",
    "        path=model_path\n",
    "    )\n",
    "    model.eval()\n",
    "    print(\"Model loaded.\")\n",
    "    current_time = time.localtime()\n",
    "    cap = cv2.VideoCapture(video_path)\n",
    "    width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)  # float\n",
    "    height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)  # float\n",
    "    fps = cap.get(cv2.CAP_PROP_FPS)\n",
    "    timestamp = time.strftime(\"%Y_%m_%d_%H_%M_%S\", current_time)\n",
    "    save_folder = os.path.join(\"./cache/\", timestamp)\n",
    "    save_path = os.path.join(save_folder, \"output.avi\")\n",
    "    os.makedirs(save_folder, exist_ok=True)\n",
    "    vid_writer = cv2.VideoWriter(\n",
    "        save_path, cv2.VideoWriter_fourcc(*\"MJPG\"), fps, (int(width), int(height))\n",
    "    )\n",
    "    print((int(width), int(height)))\n",
    "\n",
    "    result_score_thresh = 0.5\n",
    "\n",
    "    timer = Timer()\n",
    "    frame_id = 0\n",
    "\n",
    "\n",
    "    tracks = [TrackInstances(\n",
    "        hidden_dim=model.hidden_dim,\n",
    "        num_classes=model.num_classes,\n",
    "        use_dab=config[\"USE_DAB\"],\n",
    "    ).to(\"cuda\")]\n",
    "    tracker = RuntimeTracker(\n",
    "        det_score_thresh=0.5, \n",
    "        track_score_thresh=0.5,\n",
    "        miss_tolerance=30,\n",
    "        use_motion=False,\n",
    "        motion_min_length=0, \n",
    "        motion_max_length=0,\n",
    "        visualize=False, \n",
    "        use_dab=config[\"USE_DAB\"],\n",
    "    )\n",
    "\n",
    "    with torch.no_grad():\n",
    "        while True:\n",
    "            if frame_id % 20 == 0:\n",
    "                print('Processing frame {} ({:.2f} fps)'.format(frame_id, 1. / max(1e-5, timer.average_time)))\n",
    "            ret_val, ret_frame = cap.read()\n",
    "            # online_im = ret_frame\n",
    "            if ret_val:\n",
    "                image = process_image(ret_frame)\n",
    "                frame = tensor_list_to_nested_tensor([image[0]]).to(\"cuda\")\n",
    "                timer.tic()\n",
    "                res = model(frame=frame, tracks=tracks)\n",
    "                previous_tracks, new_tracks = tracker.update(\n",
    "                    model_outputs=res,\n",
    "                    tracks=tracks\n",
    "                )\n",
    "                # print(previous_tracks[0])\n",
    "                tracks: List[TrackInstances] = model.postprocess_single_frame(previous_tracks, new_tracks, None)\n",
    "        \n",
    "                tracks_result = tracks[0].to(torch.device(\"cpu\"))\n",
    "                # ori_h, ori_w = ori_image.shape[1], ori_image.shape[2]\n",
    "                ori_h, ori_w = height, width\n",
    "                # box = [x, y, w, h]\n",
    "                tracks_result.area = tracks_result.boxes[:, 2] * ori_w * \\\n",
    "                                     tracks_result.boxes[:, 3] * ori_h\n",
    "                tracks_result = filter_by_score(tracks_result, thresh=result_score_thresh)\n",
    "                tracks_result = filter_by_area(tracks_result)\n",
    "                # to xyxy:\n",
    "                tracks_result.boxes = box_cxcywh_to_xyxy(tracks_result.boxes)\n",
    "                tracks_result.boxes = (tracks_result.boxes * torch.as_tensor([ori_w, ori_h, ori_w, ori_h], dtype=torch.float))\n",
    "                online_tlwhs, online_ids = [], []\n",
    "                for i in range(len(tracks_result)):\n",
    "                    x1, y1, x2, y2 = tracks_result.boxes[i].tolist()\n",
    "                    w, h = x2 - x1, y2 - y1\n",
    "                    online_tlwhs.append([x1, y1, w, h])\n",
    "                    online_ids.append(tracks_result.ids[i].item())\n",
    "                timer.toc()\n",
    "                if len(online_tlwhs) > 0:\n",
    "                    online_im = plot_tracking(\n",
    "                        ret_frame, online_tlwhs, online_ids, frame_id=frame_id + 1, fps=1. / timer.average_time\n",
    "                    )\n",
    "                else:\n",
    "                    online_im = ret_frame\n",
    "                vid_writer.write(online_im)\n",
    "                ch = cv2.waitKey(1)\n",
    "                if ch == 27 or ch == ord(\"q\") or ch == ord(\"Q\"):\n",
    "                    break\n",
    "            else:\n",
    "                break\n",
    "            frame_id += 1\n",
    "    return os.path.join(save_folder, \"output.avi\")"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2023-12-13T02:56:18.720794Z",
     "start_time": "2023-12-13T02:56:18.689511Z"
    }
   },
   "id": "32a65a3645349542"
  },
  {
   "cell_type": "markdown",
   "source": [
    "## Other functions"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "a85ec00acf02136e"
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "outputs": [],
   "source": [
    "class Timer(object):\n",
    "    \"\"\"A simple timer.\"\"\"\n",
    "    def __init__(self):\n",
    "        self.total_time = 0.\n",
    "        self.calls = 0\n",
    "        self.start_time = 0.\n",
    "        self.diff = 0.\n",
    "        self.average_time = 0.\n",
    "\n",
    "        self.duration = 0.\n",
    "\n",
    "    def tic(self):\n",
    "        # using time.time instead of time.clock because time time.clock\n",
    "        # does not normalize for multithreading\n",
    "        self.start_time = time.time()\n",
    "\n",
    "    def toc(self, average=True):\n",
    "        self.diff = time.time() - self.start_time\n",
    "        self.total_time += self.diff\n",
    "        self.calls += 1\n",
    "        self.average_time = self.total_time / self.calls\n",
    "        if average:\n",
    "            self.duration = self.average_time\n",
    "        else:\n",
    "            self.duration = self.diff\n",
    "        return self.duration"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2023-12-13T02:56:23.670264Z",
     "start_time": "2023-12-13T02:56:23.428047Z"
    }
   },
   "id": "a8ca3ad56dec58ac"
  },
  {
   "cell_type": "markdown",
   "source": [
    "# DEMO"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "fbf756a3b9164904"
  },
  {
   "cell_type": "markdown",
   "source": [
    "## Select demo mp4"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "fbfe5dda17f6dcb2"
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "outputs": [
    {
     "data": {
      "text/plain": "<IPython.core.display.HTML object>",
      "text/html": "<img src=\"\">"
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
     "video_path = \"./cache/dancer_demo.mp4\"                                  # path to the input MP4 file\n",
     "config_path = \"../outputs/memotr_dancetrack/train/config.yaml\"          # path to the model's YAML config\n",
     "model_path = \"../outputs/memotr_dancetrack/memotr_dancetrack.pth\"       # path to the model checkpoint\n",
     "\n",
     "play_video(video_path)                                                  # preview the input video inline"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "6faff6cc699c1255"
  },
  {
   "cell_type": "markdown",
   "source": [
    "## Run Demo"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "dfed21dfb102cb17"
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model loaded.\n",
      "(1920, 1080)\n",
      "Processing frame 0 (100000.00 fps)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/gaoruopeng/anaconda3/envs/MeMOTR/lib/python3.10/site-packages/torch/functional.py:504: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at /opt/conda/conda-bld/pytorch_1670525541990/work/aten/src/ATen/native/TensorShape.cpp:3190.)\n",
      "  return _VF.meshgrid(tensors, **kwargs)  # type: ignore[attr-defined]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Processing frame 20 (11.08 fps)\n",
      "Processing frame 40 (12.78 fps)\n",
      "Processing frame 60 (13.50 fps)\n",
      "Processing frame 80 (13.90 fps)\n",
      "Processing frame 100 (14.14 fps)\n",
      "Processing frame 120 (14.24 fps)\n",
      "Processing frame 140 (14.37 fps)\n",
      "Processing frame 160 (14.48 fps)\n",
      "Processing frame 180 (14.56 fps)\n",
      "Processing frame 200 (14.62 fps)\n",
      "Processing frame 220 (14.68 fps)\n",
      "Processing frame 240 (14.72 fps)\n",
      "Processing frame 260 (14.76 fps)\n",
      "Processing frame 280 (14.78 fps)\n",
      "Processing frame 300 (14.81 fps)\n",
      "Processing frame 320 (14.84 fps)\n",
      "Processing frame 340 (14.86 fps)\n",
      "Processing frame 360 (14.88 fps)\n",
      "Processing frame 380 (14.89 fps)\n",
      "Processing frame 400 (14.91 fps)\n",
      "Processing frame 420 (14.93 fps)\n",
      "Processing frame 440 (14.94 fps)\n",
      "Processing frame 460 (14.95 fps)\n",
      "Processing frame 480 (14.96 fps)\n",
      "Processing frame 500 (14.97 fps)\n",
      "Processing frame 520 (14.98 fps)\n",
      "Processing frame 540 (14.99 fps)\n",
      "Processing frame 560 (15.00 fps)\n",
      "Processing frame 580 (15.00 fps)\n",
      "Processing frame 600 (15.01 fps)\n"
     ]
    }
   ],
   "source": [
     "# Run the tracking demo; writes the annotated video under ./cache/<timestamp>/.\n",
     "output_path = demo_processing(\n",
     "    model_path=model_path,\n",
     "    config_path=config_path,\n",
     "    video_path=video_path,\n",
     ")"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2023-12-13T03:00:02.556214Z",
     "start_time": "2023-12-13T02:59:05.790611Z"
    }
   },
   "id": "6b007a0e351f8c38"
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "outputs": [
    {
     "data": {
      "text/plain": "<IPython.core.display.HTML object>",
      "text/html": "<img src=\"\">"
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
     "# Preview the annotated output video inline.\n",
     "play_video(output_path)"
   ],
   "metadata": {
    "collapsed": false
   },
   "id": "b677910bd2d04277"
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [],
   "metadata": {
    "collapsed": false
   },
   "id": "fcf29590b82cf15b"
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
