{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f37774b8",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "e8410fd2",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Short-side / long-side target aspect ratio (9:16); the long side is derived\n",
     "# as resolution / ASPECT_RATIO, floored to a multiple of 64 (see preprocess()).\n",
     "ASPECT_RATIO = 9 / 16\n",
     "from torchvision.datasets.folder import pil_loader\n",
     "from torchvision.transforms.functional import pil_to_tensor, resize, center_crop\n",
     "from torchvision.transforms.functional import to_pil_image\n",
     "import math\n",
     "import torch\n",
     "import numpy as np\n",
     "\n",
     "\n",
     "# Project-local DWPose helpers for pose extraction\n",
     "from dwpose.preprocess import get_video_pose, get_image_pose"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "18b56078",
   "metadata": {},
   "outputs": [],
   "source": [
    "def preprocess(video_path, image_path, resolution=512, sample_stride=40):\n",
    "    \"\"\"preprocess ref image pose and video pose\n",
    "\n",
    "    Args:\n",
    "        video_path (str): input video pose path\n",
    "        image_path (str): reference image path\n",
    "        resolution (int, optional):  Defaults to 576.\n",
    "        sample_stride (int, optional): Defaults to 2.\n",
    "    \"\"\"\n",
    "    image_pixels = pil_loader(image_path)\n",
    "    image_pixels = pil_to_tensor(image_pixels) # (c, h, w)\n",
    "    h, w = image_pixels.shape[-2:]\n",
    "    print(\"h-w:\",h,w)\n",
    "    ############################ compute target h/w according to original aspect ratio ###############################\n",
    "    if h>w:\n",
    "        w_target, h_target = resolution, int(resolution / ASPECT_RATIO // 64) * 64\n",
    "    else:\n",
    "        w_target, h_target = int(resolution / ASPECT_RATIO // 64) * 64, resolution\n",
    "    h_w_ratio = float(h) / float(w)\n",
    "    print(\"hwr:\",h_w_ratio)\n",
    "    if h_w_ratio < h_target / w_target:\n",
    "        h_resize, w_resize = h_target, math.ceil(h_target / h_w_ratio)\n",
    "    else:\n",
    "        h_resize, w_resize = math.ceil(w_target * h_w_ratio), w_target\n",
    "    image_pixels = resize(image_pixels, [h_resize, w_resize], antialias=None)\n",
    "    image_pixels = center_crop(image_pixels, [h_target, w_target])\n",
    "    image_pixels = image_pixels.permute((1, 2, 0)).numpy()\n",
    "    print(\"im:\",image_pixels.shape)\n",
    "    ##################################### get image&video pose value #################################################\n",
    "    image_pose = get_image_pose(image_pixels)\n",
    "    video_pose = get_video_pose(video_path, image_pixels, sample_stride=sample_stride)\n",
    "    pose_pixels = np.concatenate([np.expand_dims(image_pose, 0), video_pose]) # ref_img 的 pose 被追加到video_pose的后面了，和我的想法一致\n",
    "    image_pixels = np.transpose(np.expand_dims(image_pixels, 0), (0, 3, 1, 2))\n",
    "    return torch.from_numpy(pose_pixels.copy()) / 127.5 - 1, torch.from_numpy(image_pixels) / 127.5 - 1\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "4bd748fa",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "h-w: 1920 1080\n",
      "hwr: 1.7777777777777777\n",
      "(896, 512, 3)\n",
      "(896, 512, 3)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001b[0;93m2025-06-16 09:02:03.466296553 [W:onnxruntime:, session_state.cc:1166 VerifyEachNodeIsAssignedToAnEp] Some nodes were not assigned to the preferred execution providers which may or may not have an negative impact on performance. e.g. ORT explicitly assigns shape related ops to CPU to improve perf.\u001b[m\n",
      "\u001b[0;93m2025-06-16 09:02:03.466327234 [W:onnxruntime:, session_state.cc:1168 VerifyEachNodeIsAssignedToAnEp] Rerunning with verbose output on a non-minimal build will show node assignments.\u001b[m\n",
      "DWPose: 100%|██████████| 10/10 [00:00<00:00, 28.07it/s]\n"
     ]
    }
   ],
   "source": [
     "# Run preprocessing on the demo driving video + reference frame (repo-local paths)\n",
     "res = preprocess(\"data/videos/224321346-1-208.mp4\",\"data/frames/frame_00000.jpg\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "1f6346e1",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "ONNX Runtime Version: 1.18.0\n",
      "Available Providers: ['TensorrtExecutionProvider', 'CUDAExecutionProvider', 'AzureExecutionProvider', 'CPUExecutionProvider']\n"
     ]
    }
   ],
   "source": [
     "# Sanity check: report the installed ONNX Runtime version and which execution\n",
     "# providers (CUDA / TensorRT / CPU) are available to the DWPose ONNX models.\n",
     "import onnxruntime as ort\n",
     "print(\"ONNX Runtime Version:\", ort.__version__)\n",
     "print(\"Available Providers:\", ort.get_available_providers())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "92e7ee5d",
   "metadata": {},
   "outputs": [],
   "source": [
    "from PIL import Image\n",
    "from torchvision.utils import save_image\n",
    "import os\n",
    "os.makedirs(\"output_images\", exist_ok=True)\n",
    "\n",
    "# 逐张保存\n",
    "for i in range(res[0].size(0)):\n",
    "    save_image(res[0][i], f\"output_images/image_{i:03d}.png\")\n",
    "save_image(res[1], f\"output_images/aaa_{i:03d}.png\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "48da904f",
   "metadata": {},
   "outputs": [],
   "source": [
    "#=== 提取视频帧\n",
    "import cv2\n",
    "import os\n",
    "\n",
    "def extract_frames(video_path, output_folder, frame_rate=1):\n",
    "    cap = cv2.VideoCapture(video_path)\n",
    "    count = 0\n",
    "    frame_interval = int(cap.get(cv2.CAP_PROP_FPS)) // frame_rate\n",
    "\n",
    "    if not os.path.exists(output_folder):\n",
    "        os.makedirs(output_folder)\n",
    "\n",
    "    while cap.isOpened():\n",
    "        ret, frame = cap.read()\n",
    "        if not ret:\n",
    "            break\n",
    "        if count % frame_interval == 0:\n",
    "            cv2.imwrite(f\"{output_folder}/frame_{count:05d}.jpg\", frame)\n",
    "        count += 1\n",
    "    cap.release()\n",
    "\n",
    "# 使用示例\n",
    "video_path = \"videos/224321346-1-208.mp4\"\n",
    "output_folder = \"frames/\"\n",
    "extract_frames(video_path, output_folder, frame_rate=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "677d9659",
   "metadata": {},
   "outputs": [],
   "source": [
    "from glob import glob\n",
    "import os\n",
    "import sys\n",
    "sys.path.append(\"..\")\n",
    "from DWPose.inference import get_pose\n",
    "from tqdm import tqdm\n",
    "\n",
    "\n",
    "# 输入帧目录 和 输出 pose 图像目录\n",
    "input_frame_dir = \"data/frames/\"\n",
    "output_pose_dir = \"data/poses/\"\n",
    "\n",
    "if not os.path.exists(output_pose_dir):\n",
    "    os.makedirs(output_pose_dir)\n",
    "\n",
    "# 获取所有帧图片\n",
    "image_paths = glob(os.path.join(input_frame_dir, \"*.jpg\"))\n",
    "\n",
    "for img_path in image_paths:\n",
    "    pose_result = get_pose(img_path)  # 返回 pose 图像或关键点\n",
    "    #detected_poses = [dwprocessor(frm) for frm in tqdm(frames, desc=\"DWPose\")]\n",
    "    output_path = os.path.join(output_pose_dir, os.path.basename(img_path))\n",
    "    pose_result.save(output_path)  # 假设返回的是 PIL.Image 对象"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "flowers",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
