{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Loading pretrain weights\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "num_classes mismatch: pretrain weights has 9 classes, but your model has 90 classes\n",
      "reinitializing detection head with 9 classes\n",
      "UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\native\\TensorShape.cpp:3638.)\n"
     ]
    }
   ],
   "source": [
    "import cv2\n",
    "from rfdetr import RFDETRBase\n",
    "import supervision as sv\n",
    "\n",
    "# 1. 加载模型（默认使用RF-DETR-Base）\n",
    "# model = RFDETRBase(pretrain_weights=r\"E:\\A\\rf-detr-main\\output\\train1\\checkpoint_best_ema.pth\")  # 替换为你的模型路径\n",
    "model = RFDETRBase(pretrain_weights=r\"E:\\A\\rf-detr-main\\output\\checkpoint_best_ema.pth\")\n",
    "\n",
    "# 2. 初始化视频流\n",
    "cap = cv2.VideoCapture(r\"E:\\Data\\XCC_data\\xcc\\xcc002.mp4\")  # 输入视频路径\n",
    "output_path = r\"E:\\Data\\XCC_data\\outputs\\rfdetr002.mp4\"  # 输出视频路径\n",
    "\n",
    "# 3. 创建视频写入器\n",
    "video_writer = cv2.VideoWriter(\n",
    "    output_path,\n",
    "    cv2.VideoWriter_fourcc(*'mp4v'),\n",
    "    int(cap.get(cv2.CAP_PROP_FPS)),\n",
    "    (int(cap.get(3)), int(cap.get(4)))  # 保持原视频分辨率\n",
    ")\n",
    "\n",
    "# 4. 逐帧处理\n",
    "while cap.isOpened():\n",
    "    ret, frame = cap.read()\n",
    "    if not ret: break\n",
    "    \n",
    "    # 推理并标注（阈值设为0.5）\n",
    "    detections = model.predict(frame, threshold=0.5)\n",
    "    annotated_frame = sv.BoxAnnotator().annotate(frame.copy(), detections)\n",
    "    \n",
    "    # 写入处理后的帧\n",
    "    video_writer.write(annotated_frame)\n",
    "\n",
    "# 5. 释放资源\n",
    "cap.release()\n",
    "video_writer.release()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Loading pretrain weights\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "num_classes mismatch: pretrain weights has 9 classes, but your model has 90 classes\n",
      "reinitializing detection head with 9 classes\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[统计结果] 视频中共检测到 1107 个独立对象\n"
     ]
    }
   ],
   "source": [
    "import cv2\n",
    "from rfdetr import RFDETRBase\n",
    "import supervision as sv\n",
    "from collections import defaultdict\n",
    "\n",
    "# 1. 初始化模型和跟踪器\n",
    "model = RFDETRBase(pretrain_weights=r\"E:\\A\\rf-detr-main\\output\\checkpoint_best_ema.pth\")\n",
    "tracker = sv.ByteTrack()  # 使用ByteTrack跟踪算法\n",
    "annotator = sv.BoxAnnotator()\n",
    "label_annotator = sv.LabelAnnotator()\n",
    "\n",
    "# 2. 对象生命周期管理\n",
    "class ObjectManager:\n",
    "    def __init__(self):\n",
    "        self.existing_objects = defaultdict(int)  # {track_id: first_seen_frame}\n",
    "        self.current_ids = set()\n",
    "        self.global_count = 0\n",
    "\n",
    "    def update(self, detections):\n",
    "        \"\"\"更新对象状态并分配全局ID\"\"\"\n",
    "        for track_id in detections.tracker_id:\n",
    "            if track_id not in self.existing_objects:\n",
    "                self.global_count += 1\n",
    "                self.existing_objects[track_id] = self.global_count\n",
    "            self.current_ids.add(track_id)\n",
    "\n",
    "# 3. 初始化视频流\n",
    "manager = ObjectManager()\n",
    "cap = cv2.VideoCapture(r\"E:\\Data\\XCC_data\\xcc\\xcc002.mp4\")\n",
    "output_path = r\"E:\\Data\\XCC_data\\outputs\\rfdetr_tracked001.mp4\"\n",
    "\n",
    "# 4. 配置视频写入器\n",
    "video_info = sv.VideoInfo.from_video_path(r\"E:\\Data\\XCC_data\\xcc\\xcc002.mp4\")\n",
    "writer = cv2.VideoWriter(\n",
    "    output_path,\n",
    "    cv2.VideoWriter_fourcc(*'mp4v'),\n",
    "    video_info.fps,\n",
    "    (video_info.width, video_info.height)\n",
    ")\n",
    "\n",
    "# 5. 逐帧处理\n",
    "while cap.isOpened():\n",
    "    ret, frame = cap.read()\n",
    "    if not ret: break\n",
    "    \n",
    "    # 检测与跟踪\n",
    "    detections = model.predict(frame, threshold=0.5)\n",
    "    detections = tracker.update_with_detections(detections)\n",
    "    \n",
    "    # 更新全局计数器\n",
    "    manager.update(detections)\n",
    "    \n",
    "    # 标注显示（显示全局ID）\n",
    "    labels = [f\"ID: {manager.existing_objects[track_id]}\" for track_id in detections.tracker_id]\n",
    "    annotated_frame = annotator.annotate(frame.copy(), detections)\n",
    "    annotated_frame = label_annotator.annotate(annotated_frame, detections, labels)\n",
    "    \n",
    "    writer.write(annotated_frame)\n",
    "\n",
    "# 6. 输出统计结果\n",
    "cap.release()\n",
    "writer.release()\n",
    "print(f\"[统计结果] 视频中共检测到 {manager.global_count} 个独立对象\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Loading pretrain weights\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "num_classes mismatch: pretrain weights has 9 classes, but your model has 90 classes\n",
      "reinitializing detection head with 9 classes\n",
      "SupervisionWarnings: BoundingBoxAnnotator is deprecated: `BoundingBoxAnnotator` is deprecated and has been renamed to `BoxAnnotator`. `BoundingBoxAnnotator` will be removed in supervision-0.26.0.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "===== 全局类别计数 =====\n",
      "pedestrian: 151\n",
      "people: 43\n",
      "bicycle: 0\n",
      "car: 1214\n",
      "van: 342\n",
      "truck: 66\n",
      "tricycle: 4\n",
      "awning-tricycle: 1\n",
      "bus: 12\n",
      "motor: 438\n"
     ]
    },
    {
     "ename": "",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31mThe Kernel crashed while executing code in the current cell or a previous cell. \n",
      "\u001b[1;31mPlease review the code in the cell(s) to identify a possible cause of the failure. \n",
      "\u001b[1;31mClick <a href='https://aka.ms/vscodeJupyterKernelCrash'>here</a> for more info. \n",
      "\u001b[1;31mView Jupyter <a href='command:jupyter.viewOutput'>log</a> for further details."
     ]
    }
   ],
   "source": [
    "import cv2\n",
    "import supervision as sv\n",
    "from rfdetr import RFDETRBase\n",
    "from collections import defaultdict\n",
    "from typing import Dict, Set\n",
    "\n",
    "# 1. 初始化模型与跟踪器\n",
    "model = RFDETRBase(\n",
    "    pretrain_weights=r\"E:\\A\\rf-detr-main\\output\\checkpoint_best_ema.pth\",\n",
    "    resolution=560  # 56的倍数，提升小目标检测精度[7](@ref)\n",
    ")\n",
    "tracker = sv.ByteTrack(\n",
    "    track_activation_threshold=0.25, \n",
    "    lost_track_buffer=120,           \n",
    "    minimum_matching_threshold=0.8   \n",
    ")\n",
    "\n",
    "# 2. VisDrone2019类别定义（官方顺序）[1,4](@ref)\n",
    "VISDRONE_CLASSES = [\n",
    "    'pedestrian', 'people', 'bicycle', 'car', 'van',\n",
    "    'truck', 'tricycle', 'awning-tricycle', 'bus', 'motor'\n",
    "]\n",
    "\n",
    "# 3. 全局类别计数器（跨帧持久化）\n",
    "class CategoryCounter:\n",
    "    def __init__(self):\n",
    "        # {class_name: set(track_ids)}\n",
    "        self.class_tracks: Dict[str, Set[int]] = defaultdict(set)\n",
    "        # {class_name: count}\n",
    "        self.category_counts: Dict[str, int] = defaultdict(int)\n",
    "\n",
    "    def update(self, detections: sv.Detections):\n",
    "        for class_id, track_id in zip(detections.class_id, detections.tracker_id):\n",
    "            class_name = VISDRONE_CLASSES[class_id]\n",
    "            if track_id not in self.class_tracks[class_name]:\n",
    "                self.class_tracks[class_name].add(track_id)\n",
    "                self.category_counts[class_name] += 1\n",
    "\n",
    "# 4. 视频处理主逻辑\n",
    "def process_video(input_path: str, output_path: str):\n",
    "    # 初始化组件\n",
    "    counter = CategoryCounter()\n",
    "    cap = cv2.VideoCapture(input_path)\n",
    "    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n",
    "    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n",
    "    fps = cap.get(cv2.CAP_PROP_FPS)\n",
    "    \n",
    "    # 配置视频写入器（H265编码优化文件大小）\n",
    "    writer = cv2.VideoWriter(\n",
    "        output_path,\n",
    "        cv2.VideoWriter_fourcc(*'mp4v'),  # 或使用'hvc1'（需硬件支持）\n",
    "        fps,\n",
    "        (frame_width, frame_height)\n",
    "    )\n",
    "    \n",
    "    # 可视化工具配置（整合网页2、3、4的最佳实践）\n",
    "    box_annotator = sv.BoundingBoxAnnotator(\n",
    "        thickness=2,\n",
    "        color=sv.Color(r=0, g=255, b=0)  # 绿色边框\n",
    "    )\n",
    "    label_annotator = sv.LabelAnnotator(\n",
    "        text_scale=0.5,  # 替代Font对象设置字号（0.5对应原20像素）\n",
    "        text_thickness=1,\n",
    "        text_color=sv.Color(r=255, g=255, b=255),\n",
    "        # text_background_color=sv.Color(r=0, g=0, b=0)\n",
    "    )\n",
    "    trace_annotator = sv.TraceAnnotator(\n",
    "        trace_length=30,\n",
    "        position=sv.Position.BOTTOM_CENTER\n",
    "    )\n",
    "\n",
    "    # 逐帧处理\n",
    "    while cap.isOpened():\n",
    "        ret, frame = cap.read()\n",
    "        if not ret:\n",
    "            break\n",
    "\n",
    "        # 执行检测与跟踪\n",
    "        detections = model.predict(frame, threshold=0.3)  # 低阈值提升召回率[7](@ref)\n",
    "        detections = tracker.update_with_detections(detections)\n",
    "        \n",
    "        # 更新全局计数器\n",
    "        counter.update(detections)\n",
    "\n",
    "        # 构建标注信息\n",
    "        labels = [\n",
    "            f\"{VISDRONE_CLASSES[cls_id]} {track_id}\"\n",
    "            for cls_id, track_id \n",
    "            in zip(detections.class_id, detections.tracker_id)\n",
    "        ]\n",
    "        \n",
    "        # 绘制检测框与轨迹\n",
    "        annotated_frame = box_annotator.annotate(scene=frame.copy(), detections=detections)\n",
    "        annotated_frame = label_annotator.annotate(scene=annotated_frame, detections=detections, labels=labels)\n",
    "        # annotated_frame = trace_annotator.annotate(scene=annotated_frame, detections=detections)\n",
    "        \n",
    "        # 添加统计面板（右上角）\n",
    "        stats_text = \"\\n\".join([\n",
    "            f\"{cls}: {counter.category_counts[cls]}\" \n",
    "            for cls in VISDRONE_CLASSES\n",
    "        ])\n",
    "        annotated_frame = sv.draw_text(\n",
    "            scene=annotated_frame,\n",
    "            text=stats_text,\n",
    "            text_anchor=sv.Point(x=frame_width-250, y=20),\n",
    "            background_color=sv.Color(r=30, g=30, b=30),\n",
    "            text_color=sv.Color(r=220, g=220, b=220),\n",
    "            text_scale=0.7\n",
    "        )\n",
    "\n",
    "        cv2.imshow(\"Real-time Detection\", annotated_frame)\n",
    "        if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n",
    "            break\n",
    "\n",
    "        writer.write(annotated_frame)\n",
    "\n",
    "    # 释放资源并输出结果\n",
    "    cap.release()\n",
    "    writer.release()\n",
    "    print(\"===== 全局类别计数 =====\")\n",
    "    for cls in VISDRONE_CLASSES:\n",
    "        print(f\"{cls}: {counter.category_counts[cls]}\")\n",
    "\n",
    "# 5. 执行处理（示例路径）\n",
    "process_video(\n",
    "    input_path=r\"E:\\Data\\XCC_data\\xcc\\xcc002.mp4\",\n",
    "    output_path=r\"E:\\Data\\XCC_data\\outputs\\rfdetr_tracked002.mp4\"\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Loading pretrain weights\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "num_classes mismatch: pretrain weights has 9 classes, but your model has 90 classes\n",
      "reinitializing detection head with 9 classes\n",
      "SupervisionWarnings: BoundingBoxAnnotator is deprecated: `BoundingBoxAnnotator` is deprecated and has been renamed to `BoxAnnotator`. `BoundingBoxAnnotator` will be removed in supervision-0.26.0.\n",
      "UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\native\\TensorShape.cpp:3638.)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "===== 全局类别计数 =====\n",
      "pedestrian: 11\n",
      "people: 7\n",
      "bicycle: 0\n",
      "car: 328\n",
      "van: 74\n",
      "truck: 22\n",
      "tricycle: 0\n",
      "awning-tricycle: 5\n",
      "bus: 9\n",
      "motor: 13\n"
     ]
    },
    {
     "ename": "",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31mThe Kernel crashed while executing code in the current cell or a previous cell. \n",
      "\u001b[1;31mPlease review the code in the cell(s) to identify a possible cause of the failure. \n",
      "\u001b[1;31mClick <a href='https://aka.ms/vscodeJupyterKernelCrash'>here</a> for more info. \n",
      "\u001b[1;31mView Jupyter <a href='command:jupyter.viewOutput'>log</a> for further details."
     ]
    }
   ],
   "source": [
    "import cv2\n",
    "import supervision as sv\n",
    "from rfdetr import RFDETRBase\n",
    "from collections import defaultdict\n",
    "from typing import Dict, Set\n",
    "\n",
    "# 1. 初始化模型与跟踪器\n",
    "model = RFDETRBase(\n",
    "    # pretrain_weights=r\"E:\\A\\rf-detr-main\\output\\train2\\checkpoint_best_ema.pth\",\n",
    "    pretrain_weights=r\"E:\\A\\rf-detr-main\\output\\pre-train1\\checkpoint_best_ema.pth\",\n",
    "    resolution=560  # 56的倍数，提升小目标检测精度\n",
    "    # resolution=448\n",
    ")\n",
    "\n",
    "#TODO 最合适的一集\n",
    "tracker = sv.ByteTrack(\n",
    "    track_activation_threshold=0.5,       # 提高激活阈值\n",
    "    lost_track_buffer=120,                 # 缩短丢失缓冲时间（3秒，假设20fps）\n",
    "    minimum_matching_threshold=0.95,       # 降低匹配阈值\n",
    "    minimum_consecutive_frames=2,         # 增加连续帧要求\n",
    "    frame_rate=20                         # 确保与实际帧率一致\n",
    ")\n",
    "\n",
    "# 2. VisDrone2019类别定义\n",
    "VISDRONE_CLASSES = [\n",
    "    'pedestrian', 'people', 'bicycle', 'car', 'van',\n",
    "    'truck', 'tricycle', 'awning-tricycle', 'bus', 'motor'\n",
    "]\n",
    "\n",
    "# 3. 全局类别计数器（跨帧持久化）\n",
    "class CategoryCounter:\n",
    "    def __init__(self):\n",
    "        # {class_name: set(track_ids)}\n",
    "        self.class_tracks: Dict[str, Set[int]] = defaultdict(set)\n",
    "        # {class_name: count}\n",
    "        self.category_counts: Dict[str, int] = defaultdict(int)\n",
    "\n",
    "    def update(self, detections: sv.Detections):\n",
    "        for class_id, track_id in zip(detections.class_id, detections.tracker_id):\n",
    "            class_name = VISDRONE_CLASSES[class_id]\n",
    "            if track_id not in self.class_tracks[class_name]:\n",
    "                self.class_tracks[class_name].add(track_id)\n",
    "                self.category_counts[class_name] += 1\n",
    "\n",
    "# 4. 视频处理主逻辑\n",
    "def process_video(input_path: str, output_path: str):\n",
    "    # 初始化组件\n",
    "    counter = CategoryCounter()\n",
    "    cap = cv2.VideoCapture(input_path)\n",
    "    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n",
    "    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n",
    "    fps = cap.get(cv2.CAP_PROP_FPS)\n",
    "    \n",
    "    # 配置视频写入器（H265编码优化文件大小）\n",
    "    writer = cv2.VideoWriter(\n",
    "        output_path,\n",
    "        cv2.VideoWriter_fourcc(*'mp4v'),  # 或使用'hvc1'（需硬件支持）\n",
    "        fps,\n",
    "        (frame_width, frame_height)\n",
    "    )\n",
    "    \n",
    "    # 可视化工具配置（整合网页2、3、4的最佳实践）\n",
    "    box_annotator = sv.BoundingBoxAnnotator(\n",
    "        thickness=2,\n",
    "        color=sv.Color(r=0, g=255, b=0)  # 绿色边框\n",
    "    )\n",
    "\n",
    "    label_annotator = sv.LabelAnnotator(\n",
    "        text_scale=0.4,  # 替代Font对象设置字号（0.5对应原20像素）\n",
    "        text_thickness=0,\n",
    "        text_color=sv.Color(r=255, g=255, b=255),\n",
    "        text_padding=3\n",
    "        # text_background_color=sv.Color(r=0, g=0, b=0)\n",
    "    )\n",
    "\n",
    "    # 逐帧处理\n",
    "    while cap.isOpened():\n",
    "        ret, frame = cap.read()\n",
    "        if not ret:\n",
    "            break\n",
    "\n",
    "        # 执行检测与跟踪\n",
    "        detections = model.predict(frame, threshold=0.3)  # 低阈值提升召回率\n",
    "        detections = tracker.update_with_detections(detections)\n",
    "        \n",
    "        # 更新全局计数器\n",
    "        counter.update(detections)\n",
    "\n",
    "        # 构建标注信息\n",
    "        labels = [\n",
    "            # f\"{VISDRONE_CLASSES[cls_id]} {track_id}\"\n",
    "            f\"{VISDRONE_CLASSES[cls_id]}\"\n",
    "            for cls_id, track_id \n",
    "            in zip(detections.class_id, detections.tracker_id)\n",
    "        ]\n",
    "        \n",
    "        # 绘制检测框与轨迹\n",
    "        annotated_frame = box_annotator.annotate(scene=frame.copy(), detections=detections)\n",
    "        annotated_frame = label_annotator.annotate(scene=annotated_frame, detections=detections, labels=labels)\n",
    "        # annotated_frame = trace_annotator.annotate(scene=annotated_frame, detections=detections)\n",
    "        \n",
    "        # 添加统计面板（右上角）\n",
    "        stats_text = \" \".join([\n",
    "            f\"{cls}: {counter.category_counts[cls]}\" \n",
    "            for cls in VISDRONE_CLASSES\n",
    "        ])\n",
    "        annotated_frame = sv.draw_text(\n",
    "            scene=annotated_frame,\n",
    "            text=stats_text,\n",
    "            text_anchor=sv.Point(x=frame_width-250, y=20),\n",
    "            background_color=sv.Color(r=30, g=30, b=30),\n",
    "            text_color=sv.Color(r=220, g=220, b=220),\n",
    "            text_scale=0.7\n",
    "        )\n",
    "\n",
    "        cv2.imshow(\"Real-time Detection\", annotated_frame)\n",
    "        if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n",
    "            break\n",
    "\n",
    "        writer.write(annotated_frame)\n",
    "\n",
    "    # 释放资源并输出结果\n",
    "    cap.release()\n",
    "    writer.release()\n",
    "    print(\"===== 全局类别计数 =====\")\n",
    "    for cls in VISDRONE_CLASSES:\n",
    "        print(f\"{cls}: {counter.category_counts[cls]}\")\n",
    "\n",
    "# 5. 执行处理（示例路径）\n",
    "process_video(\n",
    "    input_path=r\"E:\\Data\\XCC_data\\xcc3\\xcc301.mp4\",\n",
    "    # output_path=r\"E:\\Data\\XCC_data\\outputs\\rfdetr_tracked007.mp4\"\n",
    "    output_path=None\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "num_classes mismatch: pretrain weights has 9 classes, but your model has 90 classes\n",
      "reinitializing detection head with 9 classes\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Loading pretrain weights\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "SupervisionWarnings: BoundingBoxAnnotator is deprecated: `BoundingBoxAnnotator` is deprecated and has been renamed to `BoxAnnotator`. `BoundingBoxAnnotator` will be removed in supervision-0.26.0.\n"
     ]
    },
    {
     "ename": "AttributeError",
     "evalue": "'ColorPalette' object has no attribute 'as_rgb'",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mAttributeError\u001b[0m                            Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[11], line 154\u001b[0m\n\u001b[0;32m    151\u001b[0m         \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mcls\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mcounter\u001b[38;5;241m.\u001b[39mcategory_counts[\u001b[38;5;28mcls\u001b[39m]\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m    153\u001b[0m \u001b[38;5;66;03m# 5. 执行处理（示例路径）\u001b[39;00m\n\u001b[1;32m--> 154\u001b[0m \u001b[43mprocess_video\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m    155\u001b[0m \u001b[43m    \u001b[49m\u001b[43minput_path\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43mr\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mE:\u001b[39;49m\u001b[38;5;124;43m\\\u001b[39;49m\u001b[38;5;124;43mData\u001b[39;49m\u001b[38;5;124;43m\\\u001b[39;49m\u001b[38;5;124;43mXCC_data\u001b[39;49m\u001b[38;5;124;43m\\\u001b[39;49m\u001b[38;5;124;43mxcc\u001b[39;49m\u001b[38;5;124;43m\\\u001b[39;49m\u001b[38;5;124;43mxcc002.mp4\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[0;32m    156\u001b[0m \u001b[43m    \u001b[49m\u001b[38;5;66;43;03m# output_path=r\"E:\\Data\\XCC_data\\outputs\\rfdetr_tracked003.mp4\"\u001b[39;49;00m\n\u001b[0;32m    157\u001b[0m \u001b[43m    \u001b[49m\u001b[43moutput_path\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\n\u001b[0;32m    158\u001b[0m \u001b[43m)\u001b[49m\n",
      "Cell \u001b[1;32mIn[11], line 123\u001b[0m, in \u001b[0;36mprocess_video\u001b[1;34m(input_path, output_path)\u001b[0m\n\u001b[0;32m    121\u001b[0m \u001b[38;5;66;03m# 绘制检测框与轨迹\u001b[39;00m\n\u001b[0;32m    122\u001b[0m annotated_frame \u001b[38;5;241m=\u001b[39m box_annotator\u001b[38;5;241m.\u001b[39mannotate(scene\u001b[38;5;241m=\u001b[39mframe\u001b[38;5;241m.\u001b[39mcopy(), detections\u001b[38;5;241m=\u001b[39mdetections)\n\u001b[1;32m--> 123\u001b[0m annotated_frame \u001b[38;5;241m=\u001b[39m \u001b[43mlabel_annotator\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mannotate\u001b[49m\u001b[43m(\u001b[49m\u001b[43mscene\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mannotated_frame\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdetections\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdetections\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mlabels\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mlabels\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    124\u001b[0m \u001b[38;5;66;03m# annotated_frame = trace_annotator.annotate(scene=annotated_frame, detections=detections)\u001b[39;00m\n\u001b[0;32m    125\u001b[0m \n\u001b[0;32m    126\u001b[0m \u001b[38;5;66;03m# 添加统计面板（右上角）\u001b[39;00m\n\u001b[0;32m    127\u001b[0m stats_text \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m \u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;241m.\u001b[39mjoin([\n\u001b[0;32m    128\u001b[0m     \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mcls\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mcounter\u001b[38;5;241m.\u001b[39mcategory_counts[\u001b[38;5;28mcls\u001b[39m]\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m \n\u001b[0;32m    129\u001b[0m     \u001b[38;5;28;01mfor\u001b[39;00m \u001b[38;5;28mcls\u001b[39m \u001b[38;5;129;01min\u001b[39;00m 
VISDRONE_CLASSES\n\u001b[0;32m    130\u001b[0m ])\n",
      "File \u001b[1;32mf:\\anaconda\\envs\\pytorch\\lib\\site-packages\\supervision\\utils\\conversion.py:23\u001b[0m, in \u001b[0;36mensure_cv2_image_for_annotation.<locals>.wrapper\u001b[1;34m(self, scene, *args, **kwargs)\u001b[0m\n\u001b[0;32m     20\u001b[0m \u001b[38;5;129m@wraps\u001b[39m(annotate_func)\n\u001b[0;32m     21\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21mwrapper\u001b[39m(\u001b[38;5;28mself\u001b[39m, scene: ImageType, \u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs):\n\u001b[0;32m     22\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(scene, np\u001b[38;5;241m.\u001b[39mndarray):\n\u001b[1;32m---> 23\u001b[0m         \u001b[38;5;28;01mreturn\u001b[39;00m annotate_func(\u001b[38;5;28mself\u001b[39m, scene, \u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[0;32m     25\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(scene, Image\u001b[38;5;241m.\u001b[39mImage):\n\u001b[0;32m     26\u001b[0m         scene_np \u001b[38;5;241m=\u001b[39m pillow_to_cv2(scene)\n",
      "File \u001b[1;32mf:\\anaconda\\envs\\pytorch\\lib\\site-packages\\supervision\\annotators\\core.py:1155\u001b[0m, in \u001b[0;36mLabelAnnotator.annotate\u001b[1;34m(self, scene, detections, labels, custom_color_lookup)\u001b[0m\n\u001b[0;32m   1141\u001b[0m     text_y \u001b[38;5;241m=\u001b[39m text_background_xyxy[\u001b[38;5;241m1\u001b[39m] \u001b[38;5;241m+\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtext_padding \u001b[38;5;241m+\u001b[39m text_h\n\u001b[0;32m   1143\u001b[0m     \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdraw_rounded_rectangle(\n\u001b[0;32m   1144\u001b[0m         scene\u001b[38;5;241m=\u001b[39mscene,\n\u001b[0;32m   1145\u001b[0m         xyxy\u001b[38;5;241m=\u001b[39mtext_background_xyxy,\n\u001b[0;32m   1146\u001b[0m         color\u001b[38;5;241m=\u001b[39mcolor\u001b[38;5;241m.\u001b[39mas_bgr(),\n\u001b[0;32m   1147\u001b[0m         border_radius\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mborder_radius,\n\u001b[0;32m   1148\u001b[0m     )\n\u001b[0;32m   1149\u001b[0m     cv2\u001b[38;5;241m.\u001b[39mputText(\n\u001b[0;32m   1150\u001b[0m         img\u001b[38;5;241m=\u001b[39mscene,\n\u001b[0;32m   1151\u001b[0m         text\u001b[38;5;241m=\u001b[39mtext,\n\u001b[0;32m   1152\u001b[0m         org\u001b[38;5;241m=\u001b[39m(text_x, text_y),\n\u001b[0;32m   1153\u001b[0m         fontFace\u001b[38;5;241m=\u001b[39mfont,\n\u001b[0;32m   1154\u001b[0m         fontScale\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtext_scale,\n\u001b[1;32m-> 1155\u001b[0m         color\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtext_color\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mas_rgb\u001b[49m(),\n\u001b[0;32m   1156\u001b[0m         thickness\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtext_thickness,\n\u001b[0;32m   
1157\u001b[0m         lineType\u001b[38;5;241m=\u001b[39mcv2\u001b[38;5;241m.\u001b[39mLINE_AA,\n\u001b[0;32m   1158\u001b[0m     )\n\u001b[0;32m   1159\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m scene\n",
      "\u001b[1;31mAttributeError\u001b[0m: 'ColorPalette' object has no attribute 'as_rgb'"
     ]
    }
   ],
   "source": [
    "import cv2\n",
    "import supervision as sv\n",
    "from supervision import Color\n",
    "from rfdetr import RFDETRBase\n",
    "from collections import defaultdict\n",
    "from typing import Dict, Set\n",
    "from PIL import ImageFont\n",
    "font = ImageFont.truetype(\"msyh.ttc\", size=20)\n",
    "\n",
    "# 1. 初始化模型与跟踪器\n",
    "model = RFDETRBase(\n",
    "    pretrain_weights=r\"E:\\A\\rf-detr-main\\output\\train2\\checkpoint_best_ema.pth\",\n",
    "    resolution=560  # 56的倍数，提升小目标检测精度[7](@ref)\n",
    ")\n",
    "tracker = sv.ByteTrack(\n",
    "    track_activation_threshold=0.25, \n",
    "    lost_track_buffer=120,           \n",
    "    minimum_matching_threshold=0.8   \n",
    ")\n",
    "\n",
    "# 2. VisDrone2019类别定义（官方顺序）[1,4](@ref)\n",
    "VISDRONE_CLASSES = [\n",
    "    'pedestrian', 'people', 'bicycle', 'car', 'van',\n",
    "    'truck', 'tricycle', 'awning-tricycle', 'bus', 'motor'\n",
    "]\n",
    "\n",
    "# 3. 全局类别计数器（跨帧持久化）\n",
    "class CategoryCounter:\n",
    "    def __init__(self):\n",
    "        # {class_name: set(track_ids)}\n",
    "        self.class_tracks: Dict[str, Set[int]] = defaultdict(set)\n",
    "        # {class_name: count}\n",
    "        self.category_counts: Dict[str, int] = defaultdict(int)\n",
    "\n",
    "    def update(self, detections: sv.Detections):\n",
    "        for class_id, track_id in zip(detections.class_id, detections.tracker_id):\n",
    "            class_name = VISDRONE_CLASSES[class_id]\n",
    "            if track_id not in self.class_tracks[class_name]:\n",
    "                self.class_tracks[class_name].add(track_id)\n",
    "                self.category_counts[class_name] += 1\n",
    "\n",
    "# 4. 视频处理主逻辑\n",
    "def process_video(input_path: str, output_path: str):\n",
    "    # 初始化组件\n",
    "    counter = CategoryCounter()\n",
    "    cap = cv2.VideoCapture(input_path)\n",
    "    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n",
    "    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n",
    "    fps = cap.get(cv2.CAP_PROP_FPS)\n",
    "    \n",
    "    # 配置视频写入器（H265编码优化文件大小）\n",
    "    writer = cv2.VideoWriter(\n",
    "        output_path,\n",
    "        cv2.VideoWriter_fourcc(*'mp4v'),  # 或使用'hvc1'（需硬件支持）\n",
    "        fps,\n",
    "        (frame_width, frame_height)\n",
    "    )\n",
    "\n",
    "    # 新增部分：中文映射与颜色配置\n",
    "    class_name_mapping = {\n",
    "        'pedestrian': '行人',\n",
    "        'people': '人群',\n",
    "        'bicycle': '自行车',\n",
    "        'car': '轿车',\n",
    "        'van': '面包车',\n",
    "        'truck': '卡车',\n",
    "        'tricycle': '三轮车',\n",
    "        'awning-tricycle': '篷式三轮车',\n",
    "        'bus': '公交车',\n",
    "        'motor': '摩托车'\n",
    "    }\n",
    "\n",
    "    color_palette = sv.ColorPalette(\n",
    "        colors=[\n",
    "            Color(r=71, g=0, b=36),    # pedestrian\n",
    "            Color(r=0, g=255, b=0),     # people\n",
    "            Color(r=0, g=49, b=83),    # bicycle\n",
    "            Color(r=0, g=47, b=167),    # car\n",
    "            Color(r=128, g=0, b=128),   # van\n",
    "            Color(r=212, g=72, b=72),   # truck\n",
    "            Color(r=83, g=0, b=0),      # tricycle\n",
    "            Color(r=251, g=220, b=106), # awning-tricycle\n",
    "            Color(r=73, g=45, b=34),    # bus\n",
    "            Color(r=1, g=132, b=127)    # motor\n",
    "        ]\n",
    "    )\n",
    "    \n",
    "    # 修改可视化工具配置\n",
    "    box_annotator = sv.BoundingBoxAnnotator(\n",
    "        thickness=2,\n",
    "        color=color_palette  # 自动按class_id分配颜色\n",
    "    )\n",
    "    \n",
    "    label_annotator = sv.LabelAnnotator(\n",
    "        text_scale=0.4,\n",
    "        text_thickness=1,\n",
    "        text_color=color_palette,  # 同步颜色分配逻辑\n",
    "        # text_font=font,  # 显式指定中文字体（网页3关键配置）\n",
    "        text_padding=3\n",
    "    )\n",
    "\n",
    "    # 逐帧处理\n",
    "    while cap.isOpened():\n",
    "        ret, frame = cap.read()\n",
    "        if not ret:\n",
    "            break\n",
    "\n",
    "        # 执行检测与跟踪\n",
    "        detections = model.predict(frame, threshold=0.3)  # 低阈值提升召回率[7](@ref)\n",
    "        detections = tracker.update_with_detections(detections)\n",
    "        \n",
    "        # 更新全局计数器\n",
    "        counter.update(detections)\n",
    "\n",
    "        # 构建标注信息\n",
    "        labels = [\n",
    "            class_name_mapping[VISDRONE_CLASSES[cls_id]]  # 使用中文类别名\n",
    "            for cls_id in detections.class_id\n",
    "        ]\n",
    "        \n",
    "        # 绘制检测框与轨迹\n",
    "        annotated_frame = box_annotator.annotate(scene=frame.copy(), detections=detections)\n",
    "        annotated_frame = label_annotator.annotate(scene=annotated_frame, detections=detections, labels=labels)\n",
    "        # annotated_frame = trace_annotator.annotate(scene=annotated_frame, detections=detections)\n",
    "        \n",
    "        # 添加统计面板（右上角）\n",
    "        stats_text = \" \".join([\n",
    "            f\"{cls}: {counter.category_counts[cls]}\" \n",
    "            for cls in VISDRONE_CLASSES\n",
    "        ])\n",
    "        annotated_frame = sv.draw_text(\n",
    "            scene=annotated_frame,\n",
    "            text=stats_text,\n",
    "            text_anchor=sv.Point(x=frame_width-250, y=20),\n",
    "            background_color=sv.Color(r=30, g=30, b=30),\n",
    "            text_color=sv.Color(r=220, g=220, b=220),\n",
    "            text_scale=0.7\n",
    "        )\n",
    "\n",
    "        cv2.imshow(\"Real-time Detection\", annotated_frame)\n",
    "        if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n",
    "            break\n",
    "\n",
    "        writer.write(annotated_frame)\n",
    "\n",
    "    # 释放资源并输出结果\n",
    "    cap.release()\n",
    "    writer.release()\n",
    "    print(\"===== 全局类别计数 =====\")\n",
    "    for cls in VISDRONE_CLASSES:\n",
    "        print(f\"{cls}: {counter.category_counts[cls]}\")\n",
    "\n",
    "# 5. 执行处理（示例路径）\n",
    "process_video(\n",
    "    input_path=r\"E:\\Data\\XCC_data\\xcc\\xcc002.mp4\",\n",
    "    # output_path=r\"E:\\Data\\XCC_data\\outputs\\rfdetr_tracked003.mp4\"\n",
    "    output_path=None\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "num_classes mismatch: pretrain weights has 9 classes, but your model has 90 classes\n",
      "reinitializing detection head with 9 classes\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Loading pretrain weights\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "SupervisionWarnings: BoundingBoxAnnotator is deprecated: `BoundingBoxAnnotator` is deprecated and has been renamed to `BoxAnnotator`. `BoundingBoxAnnotator` will be removed in supervision-0.26.0.\n",
      "UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\native\\TensorShape.cpp:3638.)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "===== 全局类别计数 =====\n",
      "行人: 5\n",
      "人群: 7\n",
      "自行车: 0\n",
      "汽车: 187\n",
      "面包车: 47\n",
      "卡车: 8\n",
      "三轮车: 0\n",
      "遮阳三轮车: 0\n",
      "公交车: 8\n",
      "摩托车: 10\n"
     ]
    },
    {
     "ename": "",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31mThe Kernel crashed while executing code in the current cell or a previous cell. \n",
      "\u001b[1;31mPlease review the code in the cell(s) to identify a possible cause of the failure. \n",
      "\u001b[1;31mClick <a href='https://aka.ms/vscodeJupyterKernelCrash'>here</a> for more info. \n",
      "\u001b[1;31mView Jupyter <a href='command:jupyter.viewOutput'>log</a> for further details."
     ]
    }
   ],
   "source": [
    "import cv2\n",
    "import supervision as sv\n",
    "from rfdetr import RFDETRBase\n",
    "from collections import defaultdict\n",
    "from typing import Dict, Optional, Set\n",
    "from PIL import Image, ImageDraw, ImageFont\n",
    "import numpy as np\n",
    "\n",
    "# 1. Initialize detector and tracker\n",
    "model = RFDETRBase(\n",
    "    # pretrain_weights=r\"E:\\A\\rf-detr-main\\output\\train2\\checkpoint_best_ema.pth\",\n",
    "    pretrain_weights=r\"E:\\A\\rf-detr-main\\output\\pre-train1\\checkpoint_best_ema.pth\",\n",
    "    resolution=560  # multiple of 56; larger input helps small-object recall\n",
    "    # resolution=448\n",
    ")\n",
    "\n",
    "tracker = sv.ByteTrack(\n",
    "    track_activation_threshold=0.5,   # confidence required to activate a new track\n",
    "    lost_track_buffer=120,            # keep lost tracks alive for 120 frames (~6 s at 20 fps)\n",
    "    minimum_matching_threshold=0.95,  # strict association threshold between frames\n",
    "    minimum_consecutive_frames=2,     # frames required before a track is confirmed\n",
    "    frame_rate=20                     # should match the real video frame rate\n",
    ")\n",
    "\n",
    "# 2. VisDrone2019 class names (Chinese)\n",
    "VISDRONE_CLASSES = [\n",
    "    '行人', '人群', '自行车', '汽车', '面包车',\n",
    "    '卡车', '三轮车', '遮阳三轮车', '公交车', '摩托车'\n",
    "]\n",
    "\n",
    "# 3. Global per-category counter (persists across frames)\n",
    "class CategoryCounter:\n",
    "    \"\"\"Counts distinct tracker ids per category over the whole video.\"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        # {class_name: set(track_ids)} - ids already credited to the class\n",
    "        self.class_tracks: Dict[str, Set[int]] = defaultdict(set)\n",
    "        # {class_name: count} - number of distinct tracks per class\n",
    "        self.category_counts: Dict[str, int] = defaultdict(int)\n",
    "\n",
    "    def update(self, detections: sv.Detections):\n",
    "        # Detections that never went through the tracker carry no ids to count.\n",
    "        if detections.tracker_id is None:\n",
    "            return\n",
    "        for class_id, track_id in zip(detections.class_id, detections.tracker_id):\n",
    "            class_name = VISDRONE_CLASSES[class_id]\n",
    "            if track_id not in self.class_tracks[class_name]:\n",
    "                self.class_tracks[class_name].add(track_id)\n",
    "                self.category_counts[class_name] += 1\n",
    "\n",
    "# 4. Main video-processing loop\n",
    "def process_video(input_path: str, output_path: Optional[str]):\n",
    "    \"\"\"Detect, track and annotate a video; write it out when output_path is set.\"\"\"\n",
    "    counter = CategoryCounter()\n",
    "    cap = cv2.VideoCapture(input_path)\n",
    "    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n",
    "    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n",
    "    fps = cap.get(cv2.CAP_PROP_FPS)\n",
    "\n",
    "    # Only build a writer when an output path was actually requested.\n",
    "    writer = None\n",
    "    if output_path:\n",
    "        writer = cv2.VideoWriter(\n",
    "            output_path,\n",
    "            cv2.VideoWriter_fourcc(*'mp4v'),\n",
    "            fps,\n",
    "            (frame_width, frame_height)\n",
    "        )\n",
    "\n",
    "    # BoxAnnotator replaces the deprecated BoundingBoxAnnotator.\n",
    "    box_annotator = sv.BoxAnnotator(\n",
    "        thickness=2,\n",
    "        color=sv.Color(r=0, g=255, b=0)  # green boxes\n",
    "    )\n",
    "\n",
    "    # Font with CJK glyphs so the Chinese labels render correctly.\n",
    "    font_path = r'C:\\Windows\\Fonts\\SimHei.ttf'  # must exist on this machine\n",
    "    font = ImageFont.truetype(font_path, 15)\n",
    "\n",
    "    while cap.isOpened():\n",
    "        ret, frame = cap.read()\n",
    "        if not ret:\n",
    "            break\n",
    "\n",
    "        # Detect, then track (low threshold boosts recall).\n",
    "        detections = model.predict(frame, threshold=0.3)\n",
    "        detections = tracker.update_with_detections(detections)\n",
    "\n",
    "        # Update the global counter with this frame's tracks.\n",
    "        counter.update(detections)\n",
    "\n",
    "        labels = [VISDRONE_CLASSES[cls_id] for cls_id in detections.class_id]\n",
    "\n",
    "        annotated_frame = box_annotator.annotate(scene=frame.copy(), detections=detections)\n",
    "\n",
    "        # Convert to PIL once per frame, draw every label, convert back once\n",
    "        # (the old per-detection round trip copied the whole image N times).\n",
    "        img_pil = Image.fromarray(annotated_frame)\n",
    "        draw = ImageDraw.Draw(img_pil)\n",
    "        for xyxy, label in zip(detections.xyxy, labels):\n",
    "            x1, y1, _, _ = map(int, xyxy)\n",
    "            draw.text((x1, y1 - 20), label, font=font, fill=(255, 255, 255))\n",
    "\n",
    "        # Statistics panel (top-right corner).\n",
    "        stats_text = \" \".join([\n",
    "            f\"{cls}: {counter.category_counts[cls]}\"\n",
    "            for cls in VISDRONE_CLASSES\n",
    "        ])\n",
    "        draw.text((frame_width - 250, 20), stats_text, font=font, fill=(220, 220, 220))\n",
    "        annotated_frame = np.array(img_pil)\n",
    "\n",
    "        cv2.imshow(\"实时检测\", annotated_frame)\n",
    "        if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n",
    "            break\n",
    "\n",
    "        if writer:\n",
    "            writer.write(annotated_frame)\n",
    "\n",
    "    # Release resources and report totals.\n",
    "    cap.release()\n",
    "    if writer:\n",
    "        writer.release()\n",
    "    cv2.destroyAllWindows()\n",
    "    print(\"===== 全局类别计数 =====\")\n",
    "    for cls in VISDRONE_CLASSES:\n",
    "        print(f\"{cls}: {counter.category_counts[cls]}\")\n",
    "\n",
    "# 5. Run (example paths)\n",
    "process_video(\n",
    "    input_path=r\"E:\\Data\\XCC_data\\xcc3\\xcc301.mp4\",\n",
    "    # output_path=r\"E:\\Data\\XCC_data\\outputs\\rfdetr_tracked007.mp4\"\n",
    "    output_path=None\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Loading pretrain weights\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "UserWarning: Error fetching version info <urlopen error timed out>\n",
      "num_classes mismatch: pretrain weights has 9 classes, but your model has 90 classes\n",
      "reinitializing detection head with 9 classes\n",
      "UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\native\\TensorShape.cpp:3638.)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "===== 全局类别计数 =====\n",
      "行人: 11\n",
      "人群: 7\n",
      "自行车: 0\n",
      "汽车: 328\n",
      "面包车: 74\n",
      "卡车: 22\n",
      "三轮车: 0\n",
      "遮阳三轮车: 5\n",
      "公交车: 9\n",
      "摩托车: 13\n"
     ]
    }
   ],
   "source": [
    "import cv2\n",
    "import supervision as sv\n",
    "from rfdetr import RFDETRBase\n",
    "from collections import defaultdict\n",
    "from typing import Dict, Set\n",
    "from PIL import Image, ImageDraw, ImageFont\n",
    "import threading\n",
    "import queue\n",
    "import numpy as np\n",
    "\n",
    "# 1. Initialize detector and tracker\n",
    "model = RFDETRBase(\n",
    "    # pretrain_weights=r\"E:\\A\\rf-detr-main\\output\\train2\\checkpoint_best_ema.pth\",\n",
    "    pretrain_weights=r\"E:\\A\\rf-detr-main\\output\\pre-train1\\checkpoint_best_ema.pth\",\n",
    "    resolution=560\n",
    "    # resolution=448\n",
    ")\n",
    "\n",
    "tracker = sv.ByteTrack(\n",
    "    track_activation_threshold=0.5,   # confidence required to activate a new track\n",
    "    lost_track_buffer=120,            # keep lost tracks alive for 120 frames (~6 s at 20 fps)\n",
    "    minimum_matching_threshold=0.95,  # strict association threshold between frames\n",
    "    minimum_consecutive_frames=2,     # frames required before a track is confirmed\n",
    "    frame_rate=20                     # should match the real video frame rate\n",
    ")\n",
    "\n",
    "# 2. VisDrone2019 class names (Chinese)\n",
    "VISDRONE_CLASSES = [\n",
    "    '行人', '人群', '自行车', '汽车', '面包车',\n",
    "    '卡车', '三轮车', '遮阳三轮车', '公交车', '摩托车'\n",
    "]\n",
    "\n",
    "# 3. Global per-category counter (persists across frames)\n",
    "class CategoryCounter:\n",
    "    \"\"\"Counts distinct tracker ids per category over the whole video.\"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        # {class_name: set(track_ids)}\n",
    "        self.class_tracks: Dict[str, Set[int]] = defaultdict(set)\n",
    "        # {class_name: count}\n",
    "        self.category_counts: Dict[str, int] = defaultdict(int)\n",
    "\n",
    "    def update(self, detections: sv.Detections):\n",
    "        # Detections that never went through the tracker carry no ids to count.\n",
    "        if detections.tracker_id is None:\n",
    "            return\n",
    "        for class_id, track_id in zip(detections.class_id, detections.tracker_id):\n",
    "            class_name = VISDRONE_CLASSES[class_id]\n",
    "            if track_id not in self.class_tracks[class_name]:\n",
    "                self.class_tracks[class_name].add(track_id)\n",
    "                self.category_counts[class_name] += 1\n",
    "\n",
    "# Producer thread: detection + tracking\n",
    "def video_processing(input_path, output_queue):\n",
    "    \"\"\"Read frames, run detection/tracking and push results onto the queue.\"\"\"\n",
    "    cap = cv2.VideoCapture(input_path)\n",
    "    counter = CategoryCounter()\n",
    "    while cap.isOpened():\n",
    "        ret, frame = cap.read()\n",
    "        if not ret:\n",
    "            break\n",
    "        detections = model.predict(frame, threshold=0.3)  # low threshold boosts recall\n",
    "        detections = tracker.update_with_detections(detections)\n",
    "        counter.update(detections)\n",
    "        # Snapshot the counts so the consumer sees a stable dict.\n",
    "        output_queue.put((frame, detections, counter.category_counts.copy()))\n",
    "\n",
    "    print(\"===== 全局类别计数 =====\")\n",
    "    for cls in VISDRONE_CLASSES:\n",
    "        print(f\"{cls}: {counter.category_counts[cls]}\")\n",
    "\n",
    "    cap.release()\n",
    "    output_queue.put(None)  # sentinel: processing finished\n",
    "\n",
    "# Consumer thread: drawing, display and (optional) writing\n",
    "def drawing_and_displaying(output_queue, output_path):\n",
    "    # NOTE(review): cv2.imshow/waitKey from a non-main thread is unsupported on\n",
    "    # some platforms and can crash the process - confirm on the target OS.\n",
    "    font_path = r\"C:\\Windows\\Fonts\\SimHei.ttf\"\n",
    "    font = ImageFont.truetype(font_path, 15)\n",
    "    first_frame = True\n",
    "    writer = None\n",
    "    quit_early = False\n",
    "    # One annotator for the whole video instead of a new one per frame.\n",
    "    box_annotator = sv.BoxAnnotator(\n",
    "        thickness=2,\n",
    "        color=sv.Color(r=0, g=255, b=0)  # green boxes\n",
    "    )\n",
    "    while True:\n",
    "        data = output_queue.get()\n",
    "        if data is None:\n",
    "            break\n",
    "        frame, detections, category_counts = data\n",
    "        if first_frame:\n",
    "            frame_width = frame.shape[1]\n",
    "            frame_height = frame.shape[0]\n",
    "            fps = 20  # assumed frame rate - TODO read it from the capture instead\n",
    "            # Only build a writer when an output path was actually requested.\n",
    "            if output_path:\n",
    "                writer = cv2.VideoWriter(\n",
    "                    output_path,\n",
    "                    cv2.VideoWriter_fourcc(*'mp4v'),\n",
    "                    fps,\n",
    "                    (frame_width, frame_height)\n",
    "                )\n",
    "            first_frame = False\n",
    "        annotated_frame = box_annotator.annotate(scene=frame.copy(), detections=detections)\n",
    "        labels = [VISDRONE_CLASSES[cls_id] for cls_id in detections.class_id]\n",
    "        # Draw CJK labels with PIL (cv2.putText cannot render them).\n",
    "        img_pil = Image.fromarray(annotated_frame)\n",
    "        draw = ImageDraw.Draw(img_pil)\n",
    "        for xyxy, label in zip(detections.xyxy, labels):\n",
    "            x1, y1, _, _ = map(int, xyxy)\n",
    "            draw.text((x1, y1 - 20), label, font=font, fill=(255, 255, 255))\n",
    "        # Statistics panel (top-right corner).\n",
    "        stats_text = \"\\n\".join([\n",
    "            f\"{cls}: {category_counts[cls]}\"\n",
    "            for cls in VISDRONE_CLASSES\n",
    "        ])\n",
    "        draw.text((frame_width - 250, 20), stats_text, font=font, fill=(220, 220, 220))\n",
    "        annotated_frame = np.array(img_pil)\n",
    "        cv2.imshow(\"实时检测\", annotated_frame)\n",
    "        if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n",
    "            quit_early = True\n",
    "            break\n",
    "        if writer:\n",
    "            writer.write(annotated_frame)\n",
    "    if quit_early:\n",
    "        # Keep draining until the producer's sentinel so queued frames\n",
    "        # do not pile up in memory after the window is closed.\n",
    "        while output_queue.get() is not None:\n",
    "            pass\n",
    "    if writer:\n",
    "        writer.release()\n",
    "    cv2.destroyAllWindows()\n",
    "\n",
    "# 4. Orchestrate the two threads\n",
    "def process_video(input_path: str, output_path: str):\n",
    "    \"\"\"Run detection (producer) and display (consumer) on separate threads.\"\"\"\n",
    "    output_queue = queue.Queue()\n",
    "    processing_thread = threading.Thread(target=video_processing, args=(input_path, output_queue))\n",
    "    processing_thread.start()\n",
    "    display_thread = threading.Thread(target=drawing_and_displaying, args=(output_queue, output_path))\n",
    "    display_thread.start()\n",
    "    processing_thread.join()\n",
    "    display_thread.join()\n",
    "\n",
    "# 5. Run\n",
    "process_video(\n",
    "    input_path=r\"E:\\Data\\XCC_data\\xcc3\\xcc301.mp4\",\n",
    "    output_path=r\"E:\\Data\\XCC_data\\outputs\\rfdetr_tracked005.mp4\"\n",
    "    # output_path=None\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Loading pretrain weights\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "num_classes mismatch: pretrain weights has 9 classes, but your model has 90 classes\n",
      "reinitializing detection head with 9 classes\n",
      "UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at C:\\actions-runner\\_work\\pytorch\\pytorch\\pytorch\\aten\\src\\ATen\\native\\TensorShape.cpp:3638.)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "===== 全局类别计数 =====\n",
      "行人: 4\n",
      "小汽车: 79\n",
      "卡车: 7\n",
      "公交车: 3\n",
      "摩托车: 9\n"
     ]
    }
   ],
   "source": [
    "import cv2\n",
    "import supervision as sv\n",
    "from rfdetr import RFDETRBase\n",
    "from collections import defaultdict\n",
    "from typing import Dict, Set\n",
    "from PIL import Image, ImageDraw, ImageFont  # PIL renders CJK text that cv2.putText cannot\n",
    "import numpy as np  # image format conversion\n",
    "\n",
    "# 1. Initialize detector and tracker\n",
    "model = RFDETRBase(\n",
    "    pretrain_weights=r\"E:\\A\\rf-detr-main\\output\\train-4\\checkpoint_best_ema.pth\",\n",
    "    # pretrain_weights=r\"E:\\A\\rf-detr-main\\output\\pre-train1\\checkpoint_best_ema.pth\",\n",
    "    resolution=560  # multiple of 56; larger input helps small-object recall\n",
    "    # resolution=448\n",
    ")\n",
    "\n",
    "tracker = sv.ByteTrack(\n",
    "    track_activation_threshold=0.5,   # confidence required to activate a new track\n",
    "    lost_track_buffer=120,            # keep lost tracks alive for 120 frames (~6 s at 20 fps)\n",
    "    minimum_matching_threshold=0.95,  # strict association threshold between frames\n",
    "    minimum_consecutive_frames=2,     # frames required before a track is confirmed\n",
    "    frame_rate=20                     # should match the real video frame rate\n",
    ")\n",
    "\n",
    "# 2. VisDrone2019 class names (English, used for indexing)\n",
    "VISDRONE_CLASSES = [\n",
    "    'pedestrian', 'people', 'bicycle', 'car', 'van',\n",
    "    'truck', 'tricycle', 'awning-tricycle', 'bus', 'motor'\n",
    "]\n",
    "\n",
    "# 2.1 Chinese display names for the labels drawn on screen\n",
    "VISDRONE_CLASSES_CHINESE = {\n",
    "    'pedestrian': '行人', 'people': '人', 'bicycle': '自行车', 'car': '小汽车', 'van': '面包车',\n",
    "    'truck': '卡车', 'tricycle': '三轮车', 'awning-tricycle': '带篷三轮车', 'bus': '公交车', 'motor': '摩托车'\n",
    "}\n",
    "\n",
    "# Class ids dropped before counting/drawing: people, van, tricycle, awning-tricycle\n",
    "IGNORE_CLASS_IDS = {1, 4, 6, 7}\n",
    "\n",
    "# 2.2 Path to a font with CJK glyphs (adjust to the local system)\n",
    "FONT_PATH = \"C:/Windows/Fonts/simhei.ttf\"\n",
    "FONT_SIZE = 15\n",
    "\n",
    "# Load the font, falling back to PIL's default (which lacks CJK glyphs).\n",
    "try:\n",
    "    font = ImageFont.truetype(FONT_PATH, FONT_SIZE)\n",
    "except IOError:\n",
    "    print(f\"错误：无法加载字体 {FONT_PATH}。请确保路径正确且文件存在。\")\n",
    "    font = ImageFont.load_default()\n",
    "\n",
    "# 3. Global per-category counter (persists across frames)\n",
    "class CategoryCounter:\n",
    "    \"\"\"Counts distinct tracker ids per category over the whole video.\"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        # {class_name: set(track_ids)}\n",
    "        self.class_tracks: Dict[str, Set[int]] = defaultdict(set)\n",
    "        # {class_name: count}\n",
    "        self.category_counts: Dict[str, int] = defaultdict(int)\n",
    "\n",
    "    def update(self, detections: sv.Detections):\n",
    "        # When tracking was never applied, tracker_id is None (the attribute,\n",
    "        # not an element); the old `tracker_id != None` mask evaluated to a\n",
    "        # scalar False in that case and wrongly indexed row 0.\n",
    "        if detections.tracker_id is None:\n",
    "            return\n",
    "        for class_id, track_id in zip(detections.class_id, detections.tracker_id):\n",
    "            class_name = VISDRONE_CLASSES[class_id]\n",
    "            if track_id not in self.class_tracks[class_name]:\n",
    "                self.class_tracks[class_name].add(track_id)\n",
    "                self.category_counts[class_name] += 1\n",
    "\n",
    "# 4. Main video-processing loop\n",
    "def process_video(input_path: str, output_path: str):\n",
    "    \"\"\"Detect, track, draw Chinese labels and (optionally) save the video.\"\"\"\n",
    "    counter = CategoryCounter()\n",
    "    cap = cv2.VideoCapture(input_path)\n",
    "    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n",
    "    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n",
    "    fps = cap.get(cv2.CAP_PROP_FPS)\n",
    "\n",
    "    # Only build a writer when an output path was requested.\n",
    "    writer = None\n",
    "    if output_path:\n",
    "        writer = cv2.VideoWriter(\n",
    "            output_path,\n",
    "            cv2.VideoWriter_fourcc(*'mp4v'),\n",
    "            fps,\n",
    "            (frame_width, frame_height)\n",
    "        )\n",
    "\n",
    "    box_annotator = sv.BoxAnnotator(\n",
    "        thickness=2,\n",
    "        color=sv.Color(r=0, g=255, b=0)  # green boxes\n",
    "    )\n",
    "\n",
    "    while cap.isOpened():\n",
    "        ret, frame = cap.read()\n",
    "        if not ret:\n",
    "            break\n",
    "\n",
    "        detections = model.predict(frame, threshold=0.3)  # low threshold boosts recall\n",
    "        detections = tracker.update_with_detections(detections)\n",
    "\n",
    "        # Drop the ignored classes before counting or drawing.\n",
    "        if len(detections) > 0:\n",
    "            mask = np.array([class_id not in IGNORE_CLASS_IDS for class_id in detections.class_id], dtype=bool)\n",
    "            detections = detections[mask]\n",
    "\n",
    "        counter.update(detections)\n",
    "\n",
    "        annotated_frame = box_annotator.annotate(scene=frame.copy(), detections=detections)\n",
    "\n",
    "        # --- Draw CJK labels with PIL (BGR -> RGB so colors stay correct) ---\n",
    "        pil_image = Image.fromarray(cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB))\n",
    "        draw = ImageDraw.Draw(pil_image)\n",
    "\n",
    "        # Only tracked detections get labels; tracker_id is None as an\n",
    "        # attribute when tracking was never applied to these detections.\n",
    "        if detections.tracker_id is not None:\n",
    "            for box, class_id in zip(detections.xyxy, detections.class_id):\n",
    "                x1, y1, x2, y2 = box\n",
    "                english_label = VISDRONE_CLASSES[class_id]\n",
    "                # Fall back to the English name if a mapping is missing.\n",
    "                chinese_label = VISDRONE_CLASSES_CHINESE.get(english_label, english_label)\n",
    "                text_to_draw = f\"{chinese_label}\"  # append track_id here if desired\n",
    "                text_color = (255, 255, 255)  # white (RGB)\n",
    "                text_x = int(x1)\n",
    "                text_y = int(y1) - FONT_SIZE - 2  # just above the box\n",
    "                if text_y < 0:\n",
    "                    text_y = int(y1) + 2  # inside the box when too close to the top edge\n",
    "                draw.text((text_x, text_y), text_to_draw, font=font, fill=text_color)\n",
    "\n",
    "        # --- Statistics panel (top-right); only classes seen so far ---\n",
    "        stats_text_lines = [\n",
    "            f\"{VISDRONE_CLASSES_CHINESE.get(cls, cls)}: {counter.category_counts[cls]}\"\n",
    "            for cls in VISDRONE_CLASSES if counter.category_counts[cls] > 0\n",
    "        ]\n",
    "        stats_start_x = frame_width - 200  # 200 px from the right edge\n",
    "        stats_start_y = 10\n",
    "        line_height = FONT_SIZE + 5\n",
    "        stats_text_color = (255, 255, 255)\n",
    "        for i, line in enumerate(stats_text_lines):\n",
    "            draw.text((stats_start_x, stats_start_y + i * line_height), line, font=font, fill=stats_text_color)\n",
    "\n",
    "        # All PIL drawing done - convert back to BGR for OpenCV.\n",
    "        annotated_frame = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)\n",
    "\n",
    "        cv2.imshow(\"Real-time Detection\", annotated_frame)\n",
    "        if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n",
    "            break\n",
    "\n",
    "        if writer:\n",
    "            writer.write(annotated_frame)\n",
    "\n",
    "    # Release resources and report totals.\n",
    "    cap.release()\n",
    "    if writer:\n",
    "        writer.release()\n",
    "    cv2.destroyAllWindows()\n",
    "\n",
    "    print(\"===== 全局类别计数 =====\")\n",
    "    for cls in VISDRONE_CLASSES:\n",
    "        if counter.category_counts[cls] > 0:\n",
    "            print(f\"{VISDRONE_CLASSES_CHINESE.get(cls, cls)}: {counter.category_counts[cls]}\")\n",
    "\n",
    "# 5. Run (example paths)\n",
    "process_video(\n",
    "    input_path=r\"E:\\Data\\XCC_data\\xcc3\\xcc301.mp4\",\n",
    "    # output_path=r\"E:\\Data\\XCC_data\\outputs\\rfdetr_tracked_chinese_labels.mp4\"\n",
    "    output_path=None  # set a path to save the annotated video\n",
    ")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "pytorch",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.16"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
