{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 启动"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "# if using Apple MPS, fall back to CPU for unsupported ops\n",
    "os.environ[\"PYTORCH_ENABLE_MPS_FALLBACK\"] = \"1\"\n",
    "import numpy as np\n",
    "import torch\n",
    "import matplotlib.pyplot as plt\n",
    "import matplotlib.image as mpimg\n",
    "from PIL import Image"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Select the computation device: prefer CUDA, then Apple MPS, then CPU.\n",
     "if torch.cuda.is_available():\n",
     "    device = torch.device(\"cuda\")\n",
     "elif torch.backends.mps.is_available():\n",
     "    device = torch.device(\"mps\")\n",
     "else:\n",
     "    device = torch.device(\"cpu\")\n",
     "print(f\"using device: {device}\")\n",
     "\n",
     "if device.type == \"cuda\":\n",
     "    # use bfloat16 for the entire notebook\n",
     "    # NOTE(review): __enter__() is called without a matching __exit__, so the\n",
     "    # autocast context stays open for the life of the kernel — deliberate, but\n",
     "    # fragile; confirm this matches the upstream SAM 2 example notebooks.\n",
     "    torch.autocast(\"cuda\", dtype=torch.bfloat16).__enter__()\n",
     "    # turn on tfloat32 for Ampere GPUs (https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices)\n",
     "    if torch.cuda.get_device_properties(0).major >= 8:\n",
     "        torch.backends.cuda.matmul.allow_tf32 = True\n",
     "        torch.backends.cudnn.allow_tf32 = True\n",
     "elif device.type == \"mps\":\n",
     "    print(\n",
     "        \"\\nSupport for MPS devices is preliminary. SAM 2 is trained with CUDA and might \"\n",
     "        \"give numerically different outputs and sometimes degraded performance on MPS. \"\n",
     "        \"See e.g. https://github.com/pytorch/pytorch/issues/84936 for a discussion.\"\n",
     "    )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "np.random.seed(3)\n",
    "\n",
    "def show_mask(mask, ax, random_color=False, borders = True):\n",
    "    if random_color:\n",
    "        color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0)\n",
    "    else:\n",
    "        color = np.array([30/255, 144/255, 255/255, 0.6])\n",
    "    h, w = mask.shape[-2:]\n",
    "    mask = mask.astype(np.uint8)\n",
    "    mask_image =  mask.reshape(h, w, 1) * color.reshape(1, 1, -1)\n",
    "    if borders:\n",
    "        import cv2\n",
    "        contours, _ = cv2.findContours(mask,cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) \n",
    "        # Try to smooth contours\n",
    "        contours = [cv2.approxPolyDP(contour, epsilon=0.01, closed=True) for contour in contours]\n",
    "        mask_image = cv2.drawContours(mask_image, contours, -1, (1, 1, 1, 0.5), thickness=2) \n",
    "    ax.imshow(mask_image)\n",
    "\n",
    "def show_points(coords, labels, ax, marker_size=375):\n",
    "    pos_points = coords[labels==1]\n",
    "    neg_points = coords[labels==0]\n",
    "    ax.scatter(pos_points[:, 0], pos_points[:, 1], color='green', marker='*', s=marker_size, edgecolor='white', linewidth=1.25)\n",
    "    ax.scatter(neg_points[:, 0], neg_points[:, 1], color='red', marker='*', s=marker_size, edgecolor='white', linewidth=1.25)   \n",
    "\n",
    "def show_box(box, ax):\n",
    "    x0, y0 = box[0], box[1]\n",
    "    w, h = box[2] - box[0], box[3] - box[1]\n",
    "    ax.add_patch(plt.Rectangle((x0, y0), w, h, edgecolor='green', facecolor=(0, 0, 0, 0), lw=2))    \n",
    "\n",
    "def show_masks(image, masks, scores, point_coords=None, box_coords=None, input_labels=None, borders=True):\n",
    "    for i, (mask, score) in enumerate(zip(masks, scores)):\n",
    "        plt.figure(figsize=(10, 10))\n",
    "        plt.imshow(image)\n",
    "        show_mask(mask, plt.gca(), borders=borders)\n",
    "        if point_coords is not None:\n",
    "            assert input_labels is not None\n",
    "            show_points(point_coords, input_labels, plt.gca())\n",
    "        if box_coords is not None:\n",
    "            # boxes\n",
    "            show_box(box_coords, plt.gca())\n",
    "        if len(scores) > 1:\n",
    "            plt.title(f\"Mask {i+1}, Score: {score:.3f}\", fontsize=18)\n",
    "        plt.axis('off')\n",
    "        plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import numpy as np\n",
    "from PIL import Image\n",
    "import torch\n",
    "from ultralytics import YOLO\n",
    "from torch.utils.data import Dataset\n",
    "\n",
    "class KITTI3DObjectDataset(Dataset):\n",
    "    def __init__(self, base_path, split='training', transform=None):\n",
    "        \"\"\"\n",
    "        Args:\n",
    "            base_path: 数据集根目录（需包含image_2, velodyne等子目录）\n",
    "            split: 数据集划分（training/testing）\n",
    "            transform: 图像预处理组合\n",
    "        \"\"\"\n",
    "\n",
    "        self.yolomodel = YOLO(\"yolov8n.pt\")\n",
    "\n",
    "        # 初始化路径配置\n",
    "        self.image_dir = os.path.join(base_path, f'data_object_image_2/{split}/image_2')\n",
    "        self.lidar_dir = os.path.join(base_path, f'data_object_velodyne/{split}/velodyne')\n",
    "        self.calib_dir = os.path.join(base_path, f'data_object_calib/{split}/calib')\n",
    "        self.label_dir = os.path.join(base_path, f'data_object_label_2/{split}/label_2')\n",
    "        \n",
    "        # 获取所有样本索引（基于图像文件遍历）\n",
    "        self.sample_ids = self._get_sample_ids()  # 核心索引生成方法\n",
    "        self.dataX = []\n",
    "        self.dataY = []\n",
    "\n",
    "        self._get_sample()\n",
    "        \n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.sample_ids)\n",
    "\n",
    "    def __getitem__(self, idx):\n",
    "        pass\n",
    "    \n",
    "    def _get_sample(self):\n",
    "        # 遍历所有样本ID，获取对应数据\n",
    "        for idx in self.sample_ids:\n",
    "            idx = '%06d'%idx # 6 digit zeropadding\n",
    "\n",
    "            # 获取对应路径\n",
    "            img_path = f'../KITTI_dataset/data_object_image_2/training/image_2/{idx}.png'\n",
    "            binary_path = f'../KITTI_dataset/data_object_velodyne/training/velodyne/{idx}.bin'\n",
    "            calib_path = f'../KITTI_dataset/data_object_calib/training/calib/{idx}.txt'\n",
    "            label_path = f'../KITTI_dataset/data_object_label_2/training/label_2/{idx}.txt'\n",
    "\n",
    "            # 读取图片\n",
    "            image = Image.open(img_path)\n",
    "            image = np.array(image.convert(\"RGB\"))\n",
    "\n",
    "            boxes_by_yolo, subject_names_by_yolo, subject_imgs_by_yolo = self._detect_by_yolo(image)  \n",
    "            objects = self._read_kitti_label(label_path)\n",
    "\n",
    "    # 使用YOLOv8检测\n",
    "    def _detect_by_yolo(self, image):\n",
    "        boxs = []\n",
    "        subject_names = []\n",
    "        sub_images = []  # 新增子图存储列表\n",
    "        \n",
    "        results = self.yolomodel.predict(image, conf=0.5, classes=[0, 2])\n",
    "        \n",
    "        # 获取原始图像尺寸用于边界检查\n",
    "        h, w = image.shape[:2] if isinstance(image, np.ndarray) else image.size[::-1]\n",
    "        \n",
    "        for detection in results[0].boxes.data.cpu().numpy():\n",
    "            x1, y1, x2, y2, conf, cls_id = detection\n",
    "            \n",
    "            # 坐标边界保护\n",
    "            x1 = max(0, min(int(x1), w-1))\n",
    "            y1 = max(0, min(int(y1), h-1))\n",
    "            x2 = max(0, min(int(x2), w-1))\n",
    "            y2 = max(0, min(int(y2), h-1))\n",
    "            \n",
    "            # 截取子图（支持OpenCV/PIL格式）\n",
    "            if isinstance(image, np.ndarray):  # OpenCV格式\n",
    "                sub_img = image[y1:y2, x1:x2]\n",
    "            else:  # PIL格式\n",
    "                sub_img = image.crop((x1, y1, x2, y2))\n",
    "            \n",
    "            subject_name = \"Person\" if int(cls_id) == 0 else \"Car\"\n",
    "            \n",
    "            boxs.append([x1, y1, x2, y2])\n",
    "            subject_names.append(subject_name)\n",
    "            sub_images.append(sub_img)  \n",
    "            \n",
    "        return boxs, subject_names, sub_images  \n",
    "\n",
    "    def _get_sample_ids(self):\n",
    "        \"\"\"通过遍历图像文件夹获取所有有效样本ID（网页6、7的实现优化）\"\"\"\n",
    "        # 获取所有.png文件名（排除非6位数字命名的文件）\n",
    "        all_images = [f for f in os.listdir(self.image_dir) \n",
    "                    if f.endswith('.png') and len(f.split('.')[0]) == 6]\n",
    "        \n",
    "        # 提取数字ID并排序（确保各模态文件顺序对齐）\n",
    "        sample_ids = sorted([int(f.split('.')[0]) for f in all_images])\n",
    "        \n",
    "        # 验证多模态数据完整性\n",
    "        valid_ids = []\n",
    "        for sid in sample_ids:\n",
    "            if (os.path.exists(self._lidar_path(sid)) and \n",
    "                os.path.exists(self._calib_path(sid)) and \n",
    "                os.path.exists(self._label_path(sid))):\n",
    "                valid_ids.append(sid)\n",
    "        \n",
    "        return valid_ids\n",
    "\n",
    "    def _lidar_path(self, sid):\n",
    "        return os.path.join(self.lidar_dir, f\"{sid:06d}.bin\")\n",
    "\n",
    "    def _calib_path(self, sid):\n",
    "        return os.path.join(self.calib_dir, f\"{sid:06d}.txt\")\n",
    "\n",
    "    def _label_path(self, sid):\n",
    "        return os.path.join(self.label_dir, f\"{sid:06d}.txt\")\n",
    "    \n",
    "    def _read_kitti_label(self, label_path):\n",
    "        \"\"\"\n",
    "        读取KITTI标注文件(.txt)\n",
    "        参数:\n",
    "            label_path: 标注文件路径\n",
    "        返回:\n",
    "            list of dicts，每个字典包含一个对象的标注信息\n",
    "        \"\"\"\n",
    "        objects = []\n",
    "        with open(label_path, 'r') as f:\n",
    "            for line in f.readlines():\n",
    "                data = line.strip().split(' ')\n",
    "                # print(data)\n",
    "\n",
    "                # 跳过DontCare对象\n",
    "                if data[0] == 'DontCare':\n",
    "                    continue\n",
    "                \n",
    "                # 解析基础信息\n",
    "                obj = {\n",
    "                    'type': data[0],         # 类别（Car/Pedestrian/Cyclist等）\n",
    "                    'truncated': float(data[1]),  # 截断程度[0,1]\n",
    "                    'occluded': int(data[2]),     # 遮挡状态(0=完全可见，1=部分遮挡，2=大部分遮挡，3=未知)\n",
    "                    'alpha': float(data[3]),      # 观察角度[-pi, pi]\n",
    "                    \n",
    "                    # 2D边界框（图像坐标系）\n",
    "                    'bbox': [float(x) for x in data[4:8]],  # [left, top, right, bottom]\n",
    "                    \n",
    "                    # 3D尺寸（相机坐标系）\n",
    "                    'dimensions': [float(data[10]), float(data[9]), float(data[8])],  # [length, width, height]\n",
    "                    \n",
    "                    # 3D位置（相机坐标系）\n",
    "                    'location': [float(data[11]), float(data[12]), float(data[13])],  # [x,y,z]\n",
    "                    \n",
    "                    # 旋转角度（绕Y轴）\n",
    "                    'rotation_y': float(data[14])\n",
    "                }\n",
    "\n",
    "                objects.append(obj)\n",
    "                \n",
    "        return objects"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Hungarian-algorithm matching (requires scipy)\n",
    "from scipy.optimize import linear_sum_assignment\n",
    "\n",
    "# 计算中心点特征\n",
    "def get_center(box):\n",
    "    x1, y1, x2, y2 = box\n",
    "    return ((x1+x2)/2, (y1+y2)/2)\n",
    "\n",
    "# 修改后的sort函数\n",
    "def sort(label_boxes, detect_boxes, center_3d_by_sam, extent_3d_by_sam):\n",
    "    # 计算实际数量差异\n",
    "    num_labels = len(label_boxes)\n",
    "    num_detects = len(detect_boxes)\n",
    "    max_dim = max(num_labels, num_detects)\n",
    "    \n",
    "    # 生成动态扩展的成本矩阵\n",
    "    distance_matrix = np.full((max_dim, max_dim), 1e5)  # 初始化填充高成本值[1](@ref)\n",
    "    \n",
    "    # 填充实际计算区域（原逻辑保留）\n",
    "    label_features = [get_center(box) for box in label_boxes]\n",
    "    detect_features = [get_center(box) for box in detect_boxes]\n",
    "    for i in range(num_labels):\n",
    "        for j in range(num_detects):\n",
    "            distance_matrix[i,j] = np.sqrt((label_features[i][0]-detect_features[j][0])**2 + \n",
    "                                          (label_features[i][1]-detect_features[j][1])**2)\n",
    "    \n",
    "    # 执行匈牙利匹配\n",
    "    row_ind, col_ind = linear_sum_assignment(distance_matrix)\n",
    "    \n",
    "    # 构建映射字典（过滤虚拟匹配）\n",
    "    valid_mapping = {}\n",
    "    for r, c in zip(row_ind, col_ind):\n",
    "        if r < num_labels and c < num_detects:  # 排除虚拟节点[2](@ref)\n",
    "            valid_mapping[r] = c\n",
    "    \n",
    "    # 按标签顺序重组检测结果（未匹配标签保留原始数据）\n",
    "    sorted_box_2d = []\n",
    "    sorted_center_3d = []\n",
    "    sorted_extent_3d = []\n",
    "    for label_idx in range(num_labels):\n",
    "        if label_idx in valid_mapping:\n",
    "            detect_idx = valid_mapping[label_idx]\n",
    "            sorted_box_2d.append(detect_boxes[detect_idx])\n",
    "            sorted_center_3d.append(center_3d_by_sam[detect_idx])\n",
    "            sorted_extent_3d.append(extent_3d_by_sam[detect_idx])\n",
    "        else:\n",
    "            # 未匹配标签保留原始数据（根据需求选择填充逻辑）\n",
    "            sorted_box_2d.append(label_boxes[label_idx])  # 或填充默认值\n",
    "            sorted_center_3d.append(center_3d_label[label_idx])\n",
    "            sorted_extent_3d.append(extent_3d_label[label_idx])\n",
    "    \n",
    "    return sorted_box_2d, sorted_center_3d, sorted_extent_3d"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 模型定义"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import numpy as np"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from torch_geometric.nn import GCNConv\n",
    "\n",
    "class Radar3DNet(nn.Module):\n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "        # 原特征提取层保持不变\n",
    "\n",
    "        # 替代 GeometricAttention()\n",
    "        gat_layer = GATConv(in_channels=64, out_channels=64, heads=3)  # 多头注意力\n",
    "\n",
    "        # 替代 DGCNN 的动态图构建\n",
    "        edge_conv = DynamicEdgeConv(nn=MLP([2*emb_dims, 64]), k=20)  # 动态邻域聚合\n",
    "\n",
    "        self.encoder = nn.Sequential(\n",
    "            GCNConv(in_channels=3, out_channels=128),\n",
    "            GeometricAttention()\n",
    "        )\n",
    "        \n",
    "        # 中心与尺寸回归头保持不变\n",
    "        self.center_head = nn.Sequential(\n",
    "            nn.Linear(128, 64),\n",
    "            nn.BatchNorm1d(64),\n",
    "            nn.Linear(64, 3)\n",
    "        )\n",
    "        \n",
    "        self.dim_head = nn.Sequential(\n",
    "            nn.Linear(128, 64),\n",
    "            nn.BatchNorm1d(64),\n",
    "            nn.Linear(64, 3),\n",
    "            nn.Sigmoid()\n",
    "        )\n",
    "        \n",
    "        # 航向角头修改：输出sinθ和cosθ，并添加角度约束\n",
    "        self.angle_head = nn.Sequential(\n",
    "            nn.Linear(128, 64),\n",
    "            nn.BatchNorm1d(64),\n",
    "            nn.Linear(64, 2),  # 输出[sinθ, cosθ]\n",
    "            nn.Tanh()          # 约束输出在[-1,1]\n",
    "        )\n",
    "\n",
    "    def forward(self, x):\n",
    "        # 特征提取\n",
    "        feat = self.encoder(x)  # 输入shape: [B, N, 3]\n",
    "        \n",
    "        # 各分支回归\n",
    "        center = self.center_head(feat.mean(1))          # [B,3]\n",
    "        dimensions = self.dim_head(feat.max(1)[0])       # [B,3]\n",
    "        angle_sincos = self.angle_head(feat.max(1)[0])   # [B,2]\n",
    "        \n",
    "        # 将sin/cos转换为角度（核心修改）\n",
    "        angle_rad = torch.atan2(angle_sincos[:,0], angle_sincos[:,1])  # [-π, π]\n",
    "        \n",
    "        # 合并输出\n",
    "        return torch.cat([center, dimensions, angle_rad.unsqueeze(1)], dim=1)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "SAM2",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.16"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
