{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Environment setup for an OpenMMLab (mmdetection3d-based) experiment notebook:\n",
    "# standard-library / numeric imports, a working-directory change, then the\n",
    "# OpenMMLab stack.\n",
    "import os\n",
    "import os.path as osp\n",
    "import time\n",
    "import math\n",
    "import torch\n",
    "import numpy as np\n",
    "\n",
    "# Move one directory up — presumably to the repository root — so that\n",
    "# project-relative paths (e.g. 'data/', 'ckpts/') and local packages such as\n",
    "# 'my_projects' resolve. NOTE(review): assumes the kernel starts in the\n",
    "# notebook's own subdirectory; re-running this cell a second time would chdir\n",
    "# again — confirm before Restart & Run All.\n",
    "os.chdir('../')\n",
    "\n",
    "# OpenMMLab stack: config/registry/runner utilities plus detection helpers.\n",
    "import mmcv\n",
    "from mmengine.config import Config, DictAction\n",
    "from mmengine.logging import print_log\n",
    "from mmengine.registry import RUNNERS\n",
    "from mmengine.runner import Runner\n",
    "from mmengine import fileio\n",
    "from mmdet3d.utils import replace_ceph_backend\n",
    "from mmengine.structures import InstanceData\n",
    "from mmdet.models.layers import inverse_sigmoid\n",
    "# Kept for reference: project-local backbone import (disabled; the model is\n",
    "# instead built via the config's custom_imports mechanism, per the cell-2 log).\n",
    "# from my_projects.UniMT.unimt.models.backbones_2d import ConvNeXtV2Backbone"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Jupyter environment detected. Enabling Open3D WebVisualizer.\n",
      "[Open3D INFO] WebRTC GUI backend enabled.\n",
      "[Open3D INFO] WebRTCWindowSystem: HTTP handshake server disabled.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/hello/anaconda3/envs/python310/lib/python3.10/site-packages/timm/models/layers/__init__.py:48: FutureWarning: Importing from timm.models.layers is deprecated, please import via timm.layers\n",
      "  warnings.warn(f\"Importing from {__name__} is deprecated, please import via timm.layers\", FutureWarning)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "09/04 17:33:56 - mmengine - \u001b[4m\u001b[97mINFO\u001b[0m - \n",
      "------------------------------------------------------------\n",
      "System environment:\n",
      "    sys.platform: linux\n",
      "    Python: 3.10.16 (main, Dec 11 2024, 16:24:50) [GCC 11.2.0]\n",
      "    CUDA available: True\n",
      "    MUSA available: False\n",
      "    numpy_random_seed: 593637846\n",
      "    GPU 0: NVIDIA GeForce RTX 3060\n",
      "    CUDA_HOME: /usr/local/cuda-12.2\n",
      "    NVCC: Cuda compilation tools, release 12.2, V12.2.140\n",
      "    GCC: gcc (Ubuntu 9.4.0-1ubuntu1~20.04.2) 9.4.0\n",
      "    PyTorch: 2.1.2+cu121\n",
      "    PyTorch compiling details: PyTorch built with:\n",
      "  - GCC 9.3\n",
      "  - C++ Version: 201703\n",
      "  - Intel(R) oneAPI Math Kernel Library Version 2022.2-Product Build 20220804 for Intel(R) 64 architecture applications\n",
      "  - Intel(R) MKL-DNN v3.1.1 (Git Hash 64f6bcbcbab628e96f33a62c3e975f8535a7bde4)\n",
      "  - OpenMP 201511 (a.k.a. OpenMP 4.5)\n",
      "  - LAPACK is enabled (usually provided by MKL)\n",
      "  - NNPACK is enabled\n",
      "  - CPU capability usage: AVX2\n",
      "  - CUDA Runtime 12.1\n",
      "  - NVCC architecture flags: -gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86;-gencode;arch=compute_90,code=sm_90\n",
      "  - CuDNN 8.9.2\n",
      "  - Magma 2.6.1\n",
      "  - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=12.1, CUDNN_VERSION=8.9.2, CXX_COMPILER=/opt/rh/devtoolset-9/root/usr/bin/c++, CXX_FLAGS= -D_GLIBCXX_USE_CXX11_ABI=0 -fabi-version=11 -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -DNDEBUG -DUSE_KINETO -DLIBKINETO_NOROCTRACER -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -O2 -fPIC -Wall -Wextra -Werror=return-type -Werror=non-virtual-dtor -Werror=bool-operation -Wnarrowing -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-strict-overflow -Wno-strict-aliasing -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=old-style-cast -Wno-invalid-partial-specialization -Wno-unused-private-field -Wno-aligned-allocation-unavailable -Wno-missing-braces -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Werror=cast-function-type -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_DISABLE_GPU_ASSERTS=ON, TORCH_VERSION=2.1.2, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=ON, USE_MPI=OFF, USE_NCCL=1, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF, \n",
      "\n",
      "    TorchVision: 0.16.2+cu121\n",
      "    OpenCV: 4.11.0\n",
      "    MMEngine: 0.10.7\n",
      "\n",
      "Runtime environment:\n",
      "    cudnn_benchmark: False\n",
      "    mp_cfg: {'mp_start_method': 'fork', 'opencv_num_threads': 0}\n",
      "    dist_cfg: {'backend': 'nccl'}\n",
      "    seed: 593637846\n",
      "    Distributed launcher: none\n",
      "    Distributed training: False\n",
      "    GPU number: 1\n",
      "------------------------------------------------------------\n",
      "\n",
      "09/04 17:33:57 - mmengine - \u001b[4m\u001b[97mINFO\u001b[0m - Config:\n",
      "auto_scale_lr = dict(base_batch_size=16, enable=False)\n",
      "backend_args = None\n",
      "class_names = [\n",
      "    'Car',\n",
      "    'Pedestrian',\n",
      "    'Cyclist',\n",
      "]\n",
      "custom_imports = dict(\n",
      "    allow_failed_imports=False,\n",
      "    imports=[\n",
      "        'my_projects.UniMT.unimt',\n",
      "        'my_projects.datasets',\n",
      "    ])\n",
      "data_prefix = dict(img='', pts='', sweeps='')\n",
      "data_root = 'data/CODA/'\n",
      "dataset_type = 'CodaDataset'\n",
      "db_sampler = dict(\n",
      "    classes=[\n",
      "        'Car',\n",
      "        'Pedestrian',\n",
      "        'Cyclist',\n",
      "    ],\n",
      "    data_root='data/CODA/',\n",
      "    info_path='data/CODA/coda_dbinfos_train.pkl',\n",
      "    points_loader=dict(\n",
      "        coord_type='LIDAR',\n",
      "        load_dim=4,\n",
      "        type='LoadPointsFromFile',\n",
      "        use_dim=[\n",
      "            0,\n",
      "            1,\n",
      "            2,\n",
      "            3,\n",
      "        ]),\n",
      "    prepare=dict(\n",
      "        filter_by_difficulty=[\n",
      "            -1,\n",
      "        ],\n",
      "        filter_by_min_points=dict(Car=5, Cyclist=5, Pedestrian=5)),\n",
      "    rate=1.0,\n",
      "    sample_groups=dict(Car=3, Cyclist=4, Pedestrian=2),\n",
      "    type='UnifiedDataBaseSampler')\n",
      "default_hooks = dict(\n",
      "    changestrategy=dict(\n",
      "        change_args=[\n",
      "            None,\n",
      "            None,\n",
      "            None,\n",
      "        ],\n",
      "        change_epoch=[\n",
      "            -1,\n",
      "            -1,\n",
      "            -1,\n",
      "        ],\n",
      "        change_strategy=[\n",
      "            'remove_GTSample',\n",
      "            'remove_DN',\n",
      "            'change_layers_loss_weight',\n",
      "        ],\n",
      "        type='ChangeStrategyHook'),\n",
      "    checkpoint=dict(interval=1, type='CheckpointHook'),\n",
      "    logger=dict(interval=50, type='LoggerHook'),\n",
      "    param_scheduler=dict(type='ParamSchedulerHook'),\n",
      "    sampler_seed=dict(type='DistSamplerSeedHook'),\n",
      "    timer=dict(type='IterTimerHook'),\n",
      "    visualization=dict(type='Det3DVisualizationHook'))\n",
      "default_scope = 'mmdet3d'\n",
      "env_cfg = dict(\n",
      "    cudnn_benchmark=False,\n",
      "    dist_cfg=dict(backend='nccl'),\n",
      "    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0))\n",
      "ida_aug_conf = dict(\n",
      "    H=1024,\n",
      "    W=1224,\n",
      "    bot_pct_lim=(\n",
      "        0.0,\n",
      "        0.0,\n",
      "    ),\n",
      "    final_dim=(\n",
      "        640,\n",
      "        768,\n",
      "    ),\n",
      "    rand_flip=True,\n",
      "    resize_lim=(\n",
      "        0.5,\n",
      "        0.625,\n",
      "    ),\n",
      "    rot_lim=(\n",
      "        0.0,\n",
      "        0.0,\n",
      "    ))\n",
      "img_norm_cfg = dict(\n",
      "    mean=[\n",
      "        103.53,\n",
      "        116.28,\n",
      "        123.675,\n",
      "    ],\n",
      "    std=[\n",
      "        57.375,\n",
      "        57.12,\n",
      "        58.395,\n",
      "    ],\n",
      "    to_rgb=False)\n",
      "input_modality = dict(use_camera=True, use_lidar=True)\n",
      "load_from = None\n",
      "log_level = 'INFO'\n",
      "log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50)\n",
      "lr = 3.5e-05\n",
      "metainfo = dict(classes=[\n",
      "    'Car',\n",
      "    'Pedestrian',\n",
      "    'Cyclist',\n",
      "])\n",
      "model = dict(\n",
      "    data_preprocessor=dict(bgr_to_rgb=False, type='Det3DDataPreprocessor'),\n",
      "    img_backbone=dict(\n",
      "        depths=[\n",
      "            2,\n",
      "            2,\n",
      "            8,\n",
      "            2,\n",
      "        ],\n",
      "        dims=[\n",
      "            80,\n",
      "            160,\n",
      "            320,\n",
      "            640,\n",
      "        ],\n",
      "        drop_path_rate=0.0,\n",
      "        freeze_module=True,\n",
      "        in_chans=3,\n",
      "        pretrain_ckpt='./ckpts/convnextv2_nano.pth',\n",
      "        type='ConvNeXtV2Backbone'),\n",
      "    img_neck=dict(\n",
      "        depths=[\n",
      "            2,\n",
      "            2,\n",
      "            8,\n",
      "            2,\n",
      "        ],\n",
      "        dims=[\n",
      "            80,\n",
      "            160,\n",
      "            320,\n",
      "            640,\n",
      "        ],\n",
      "        out_dim=128,\n",
      "        type='ConvNeXtV2FPN'),\n",
      "    pts_backbone=dict(\n",
      "        conv_cfg=dict(bias=False, type='Conv2d'),\n",
      "        in_channels=256,\n",
      "        layer_nums=[\n",
      "            1,\n",
      "            2,\n",
      "            2,\n",
      "        ],\n",
      "        layer_strides=[\n",
      "            1,\n",
      "            2,\n",
      "            2,\n",
      "        ],\n",
      "        norm_cfg=dict(eps=0.001, momentum=0.01, type='BN'),\n",
      "        out_channels=[\n",
      "            128,\n",
      "            128,\n",
      "            256,\n",
      "        ],\n",
      "        type='SECOND'),\n",
      "    pts_bbox_head=dict(\n",
      "        bbox_coder=dict(\n",
      "            max_num=100,\n",
      "            nms_radius=None,\n",
      "            num_classes=3,\n",
      "            pc_range=[\n",
      "                -21.0,\n",
      "                -21.0,\n",
      "                -1.8,\n",
      "                21.0,\n",
      "                21.0,\n",
      "                5.4,\n",
      "            ],\n",
      "            post_center_range=[\n",
      "                -30.0,\n",
      "                -30.0,\n",
      "                -5.0,\n",
      "                30.0,\n",
      "                30.0,\n",
      "                10.0,\n",
      "            ],\n",
      "            score_threshold=0.1,\n",
      "            type='NMSFreeBBoxCoder',\n",
      "            voxel_size=[\n",
      "                0.075,\n",
      "                0.075,\n",
      "                0.2,\n",
      "            ]),\n",
      "        common_heads=dict(\n",
      "            center=(\n",
      "                2,\n",
      "                2,\n",
      "            ), dim=(\n",
      "                3,\n",
      "                2,\n",
      "            ), height=(\n",
      "                1,\n",
      "                2,\n",
      "            ), rot=(\n",
      "                2,\n",
      "                2,\n",
      "            )),\n",
      "        downsample_scale=8,\n",
      "        hidden_dim=128,\n",
      "        in_channels=384,\n",
      "        loss_bbox=dict(\n",
      "            loss_weight=0.25, reduction='mean', type='mmdet.L1Loss'),\n",
      "        loss_cls=dict(\n",
      "            alpha=0.25,\n",
      "            gamma=2,\n",
      "            loss_weight=2.0,\n",
      "            reduction='mean',\n",
      "            type='mmdet.FocalLoss',\n",
      "            use_sigmoid=True),\n",
      "        loss_heatmap=dict(\n",
      "            loss_weight=1.0, reduction='mean', type='mmdet.GaussianFocalLoss'),\n",
      "        num_query=500,\n",
      "        separate_head=dict(\n",
      "            final_kernel=1, init_bias=-2.19, type='SeparateTaskHead'),\n",
      "        task=dict(class_names=[\n",
      "            'Car',\n",
      "            'Pedestrian',\n",
      "            'Cyclist',\n",
      "        ], num_class=3),\n",
      "        transformer_decoder=dict(\n",
      "            num_layers=6,\n",
      "            return_intermediate=True,\n",
      "            transformerlayers=dict(\n",
      "                attn_cfgs=[\n",
      "                    dict(\n",
      "                        dropout=0.1,\n",
      "                        embed_dims=128,\n",
      "                        num_heads=8,\n",
      "                        type='MultiheadAttention'),\n",
      "                    dict(\n",
      "                        dropout=0.1,\n",
      "                        embed_dims=128,\n",
      "                        num_heads=8,\n",
      "                        type='DeformableAttention2MultiModality'),\n",
      "                ],\n",
      "                batch_first=True,\n",
      "                ffn_cfgs=dict(\n",
      "                    act_cfg=dict(inplace=True, type='ReLU'),\n",
      "                    embed_dims=128,\n",
      "                    feedforward_channels=512,\n",
      "                    ffn_drop=0.0,\n",
      "                    num_fcs=2,\n",
      "                    type='FFN'),\n",
      "                operation_order=(\n",
      "                    'self_attn',\n",
      "                    'norm',\n",
      "                    'cross_attn',\n",
      "                    'norm',\n",
      "                    'ffn',\n",
      "                    'norm',\n",
      "                ),\n",
      "                type='UniMTTransformerDecoderLayer',\n",
      "                with_cp=False),\n",
      "            type='UniMTTransformerDecoder'),\n",
      "        type='UniMTHead'),\n",
      "    pts_middle_encoder=dict(\n",
      "        diff_scale=0.2,\n",
      "        diffusion=True,\n",
      "        direction=[\n",
      "            'x',\n",
      "            'y',\n",
      "        ],\n",
      "        encoder_channels=(\n",
      "            (\n",
      "                16,\n",
      "                16,\n",
      "                32,\n",
      "            ),\n",
      "            (\n",
      "                32,\n",
      "                32,\n",
      "                64,\n",
      "            ),\n",
      "            (\n",
      "                64,\n",
      "                64,\n",
      "            ),\n",
      "        ),\n",
      "        encoder_paddings=(\n",
      "            (\n",
      "                0,\n",
      "                0,\n",
      "                (\n",
      "                    0,\n",
      "                    1,\n",
      "                    1,\n",
      "                ),\n",
      "            ),\n",
      "            (\n",
      "                0,\n",
      "                0,\n",
      "                (\n",
      "                    0,\n",
      "                    1,\n",
      "                    1,\n",
      "                ),\n",
      "            ),\n",
      "            (\n",
      "                0,\n",
      "                0,\n",
      "            ),\n",
      "        ),\n",
      "        image2lidar=dict(\n",
      "            depths=[\n",
      "                2,\n",
      "            ],\n",
      "            group_size=[\n",
      "                512,\n",
      "            ],\n",
      "            layer_down_scales=[\n",
      "                [\n",
      "                    [\n",
      "                        2,\n",
      "                        2,\n",
      "                        2,\n",
      "                    ],\n",
      "                    [\n",
      "                        2,\n",
      "                        2,\n",
      "                        2,\n",
      "                    ],\n",
      "                ],\n",
      "            ],\n",
      "            window_shape=[\n",
      "                [\n",
      "                    13,\n",
      "                    13,\n",
      "                    4,\n",
      "                ],\n",
      "            ]),\n",
      "        img_in_channels=128,\n",
      "        layer_dim=128,\n",
      "        lidar2image=dict(\n",
      "            depths=[\n",
      "                2,\n",
      "            ],\n",
      "            group_size=[\n",
      "                1024,\n",
      "            ],\n",
      "            layer_down_scales=[\n",
      "                0,\n",
      "            ],\n",
      "            window_shape=[\n",
      "                [\n",
      "                    13,\n",
      "                    13,\n",
      "                    1,\n",
      "                ],\n",
      "            ]),\n",
      "        lidar_in_channels=4,\n",
      "        mamba_cfg=dict(d_conv=4, d_state=16, drop_path=0.2, expand=2),\n",
      "        patch_size=[\n",
      "            40,\n",
      "            48,\n",
      "        ],\n",
      "        pc_range=[\n",
      "            -21.0,\n",
      "            -21.0,\n",
      "            -1.8,\n",
      "            21.0,\n",
      "            21.0,\n",
      "            5.4,\n",
      "        ],\n",
      "        shift=True,\n",
      "        sparse_shape=[\n",
      "            36,\n",
      "            560,\n",
      "            560,\n",
      "        ],\n",
      "        type='UniMTFusionBackbone'),\n",
      "    pts_neck=dict(\n",
      "        in_channels=[\n",
      "            128,\n",
      "            128,\n",
      "            256,\n",
      "        ],\n",
      "        norm_cfg=dict(eps=0.001, momentum=0.01, type='BN'),\n",
      "        out_channels=[\n",
      "            128,\n",
      "            128,\n",
      "            128,\n",
      "        ],\n",
      "        type='SECONDFPN',\n",
      "        upsample_cfg=dict(bias=False, type='deconv'),\n",
      "        upsample_strides=[\n",
      "            0.5,\n",
      "            1,\n",
      "            2,\n",
      "        ],\n",
      "        use_conv_for_no_stride=True),\n",
      "    pts_voxel_encoder=dict(num_features=4, type='HardSimpleVFE'),\n",
      "    pts_voxel_layer=dict(\n",
      "        max_num_points=10,\n",
      "        max_voxels=(\n",
      "            120000,\n",
      "            160000,\n",
      "        ),\n",
      "        num_point_features=4,\n",
      "        point_cloud_range=[\n",
      "            -21.0,\n",
      "            -21.0,\n",
      "            -1.8,\n",
      "            21.0,\n",
      "            21.0,\n",
      "            5.4,\n",
      "        ],\n",
      "        voxel_size=[\n",
      "            0.075,\n",
      "            0.075,\n",
      "            0.2,\n",
      "        ]),\n",
      "    train_cfg=dict(\n",
      "        pts=dict(\n",
      "            assigner=dict(\n",
      "                cls_cost=dict(type='FocalLossCost', weight=2.0),\n",
      "                code_weights=[\n",
      "                    2.0,\n",
      "                    2.0,\n",
      "                    1.0,\n",
      "                    1.0,\n",
      "                    1.0,\n",
      "                    1.0,\n",
      "                    1.0,\n",
      "                    1.0,\n",
      "                ],\n",
      "                iou_cost=dict(type='IoUCost', weight=0.0),\n",
      "                pc_range=[\n",
      "                    -21.0,\n",
      "                    -21.0,\n",
      "                    -1.8,\n",
      "                    21.0,\n",
      "                    21.0,\n",
      "                    5.4,\n",
      "                ],\n",
      "                reg_cost=dict(type='BBox3DL1Cost', weight=0.25),\n",
      "                type='HungarianAssigner3D'),\n",
      "            code_weights=[\n",
      "                2.0,\n",
      "                2.0,\n",
      "                1.0,\n",
      "                1.0,\n",
      "                1.0,\n",
      "                1.0,\n",
      "                1.0,\n",
      "                1.0,\n",
      "            ],\n",
      "            grid_size=[\n",
      "                560,\n",
      "                560,\n",
      "                36,\n",
      "            ])),\n",
      "    type='UniMTDetector',\n",
      "    use_grid_mask=True)\n",
      "optim_wrapper = dict(\n",
      "    clip_grad=dict(max_norm=35, norm_type=2),\n",
      "    optimizer=dict(lr=3.5e-05, type='AdamW', weight_decay=0.01),\n",
      "    type='OptimWrapper')\n",
      "param_scheduler = [\n",
      "    dict(\n",
      "        T_max=4,\n",
      "        begin=0,\n",
      "        by_epoch=True,\n",
      "        convert_to_iter_based=True,\n",
      "        end=4,\n",
      "        eta_min=0.00020999999999999998,\n",
      "        type='CosineAnnealingLR'),\n",
      "    dict(\n",
      "        T_max=26,\n",
      "        begin=4,\n",
      "        by_epoch=True,\n",
      "        convert_to_iter_based=True,\n",
      "        end=30,\n",
      "        eta_min=3.4999999999999996e-08,\n",
      "        type='CosineAnnealingLR'),\n",
      "    dict(\n",
      "        T_max=4,\n",
      "        begin=0,\n",
      "        by_epoch=True,\n",
      "        convert_to_iter_based=True,\n",
      "        end=4,\n",
      "        eta_min=0.8947368421052632,\n",
      "        type='CosineAnnealingMomentum'),\n",
      "    dict(\n",
      "        T_max=26,\n",
      "        begin=4,\n",
      "        by_epoch=True,\n",
      "        convert_to_iter_based=True,\n",
      "        end=30,\n",
      "        eta_min=1,\n",
      "        type='CosineAnnealingMomentum'),\n",
      "]\n",
      "point_cloud_range = [\n",
      "    -21.0,\n",
      "    -21.0,\n",
      "    -1.8,\n",
      "    21.0,\n",
      "    21.0,\n",
      "    5.4,\n",
      "]\n",
      "resume = False\n",
      "test_cfg = dict()\n",
      "test_dataloader = dict(\n",
      "    batch_size=1,\n",
      "    dataset=dict(\n",
      "        ann_file='coda_infos_test.pkl',\n",
      "        backend_args=None,\n",
      "        box_type_3d='LiDAR',\n",
      "        data_prefix=dict(img='', pts='', sweeps=''),\n",
      "        data_root='data/CODA/',\n",
      "        metainfo=dict(classes=[\n",
      "            'Car',\n",
      "            'Pedestrian',\n",
      "            'Cyclist',\n",
      "        ]),\n",
      "        modality=dict(use_camera=True, use_lidar=True),\n",
      "        pipeline=[\n",
      "            dict(\n",
      "                coord_type='LIDAR',\n",
      "                load_dim=4,\n",
      "                type='LoadPointsFromFile',\n",
      "                use_dim=[\n",
      "                    0,\n",
      "                    1,\n",
      "                    2,\n",
      "                    3,\n",
      "                ]),\n",
      "            dict(\n",
      "                backend_args=None,\n",
      "                color_type='color',\n",
      "                num_views=2,\n",
      "                to_float32=True,\n",
      "                type='LoadMultiViewImageFromFilesNus'),\n",
      "            dict(\n",
      "                point_cloud_range=[\n",
      "                    -21.0,\n",
      "                    -21.0,\n",
      "                    -1.8,\n",
      "                    21.0,\n",
      "                    21.0,\n",
      "                    5.4,\n",
      "                ],\n",
      "                type='PointsRangeFilter'),\n",
      "            dict(\n",
      "                flip=False,\n",
      "                img_scale=(\n",
      "                    1333,\n",
      "                    800,\n",
      "                ),\n",
      "                pts_scale_ratio=1,\n",
      "                transforms=[\n",
      "                    dict(\n",
      "                        rot_range=[\n",
      "                            0,\n",
      "                            0,\n",
      "                        ],\n",
      "                        scale_ratio_range=[\n",
      "                            1.0,\n",
      "                            1.0,\n",
      "                        ],\n",
      "                        translation_std=[\n",
      "                            0,\n",
      "                            0,\n",
      "                            0,\n",
      "                        ],\n",
      "                        type='GlobalRotScaleTrans'),\n",
      "                    dict(\n",
      "                        data_aug_conf=dict(\n",
      "                            H=1024,\n",
      "                            W=1224,\n",
      "                            bot_pct_lim=(\n",
      "                                0.0,\n",
      "                                0.0,\n",
      "                            ),\n",
      "                            final_dim=(\n",
      "                                640,\n",
      "                                768,\n",
      "                            ),\n",
      "                            rand_flip=True,\n",
      "                            resize_lim=(\n",
      "                                0.5,\n",
      "                                0.625,\n",
      "                            ),\n",
      "                            rot_lim=(\n",
      "                                0.0,\n",
      "                                0.0,\n",
      "                            )),\n",
      "                        training=False,\n",
      "                        type='ResizeCropFlipImage'),\n",
      "                    dict(\n",
      "                        mean=[\n",
      "                            103.53,\n",
      "                            116.28,\n",
      "                            123.675,\n",
      "                        ],\n",
      "                        std=[\n",
      "                            57.375,\n",
      "                            57.12,\n",
      "                            58.395,\n",
      "                        ],\n",
      "                        to_rgb=False,\n",
      "                        type='NormalizeMultiviewImage'),\n",
      "                    dict(size_divisor=32, type='PadMultiViewImage'),\n",
      "                ],\n",
      "                type='MultiScaleFlipAug3D'),\n",
      "            dict(keys=[\n",
      "                'points',\n",
      "                'img',\n",
      "            ], type='Pack3DDetInputs'),\n",
      "        ],\n",
      "        test_mode=True,\n",
      "        type='CodaDataset'),\n",
      "    drop_last=False,\n",
      "    num_workers=4,\n",
      "    persistent_workers=True,\n",
      "    sampler=dict(shuffle=False, type='DefaultSampler'))\n",
      "test_evaluator = dict(\n",
      "    ann_file='data/CODA/coda_infos_test.pkl',\n",
      "    backend_args=None,\n",
      "    metric='bbox',\n",
      "    type='CodaMetric')\n",
      "test_pipeline = [\n",
      "    dict(\n",
      "        coord_type='LIDAR',\n",
      "        load_dim=4,\n",
      "        type='LoadPointsFromFile',\n",
      "        use_dim=[\n",
      "            0,\n",
      "            1,\n",
      "            2,\n",
      "            3,\n",
      "        ]),\n",
      "    dict(\n",
      "        backend_args=None,\n",
      "        color_type='color',\n",
      "        num_views=2,\n",
      "        to_float32=True,\n",
      "        type='LoadMultiViewImageFromFilesNus'),\n",
      "    dict(\n",
      "        point_cloud_range=[\n",
      "            -21.0,\n",
      "            -21.0,\n",
      "            -1.8,\n",
      "            21.0,\n",
      "            21.0,\n",
      "            5.4,\n",
      "        ],\n",
      "        type='PointsRangeFilter'),\n",
      "    dict(\n",
      "        flip=False,\n",
      "        img_scale=(\n",
      "            1333,\n",
      "            800,\n",
      "        ),\n",
      "        pts_scale_ratio=1,\n",
      "        transforms=[\n",
      "            dict(\n",
      "                rot_range=[\n",
      "                    0,\n",
      "                    0,\n",
      "                ],\n",
      "                scale_ratio_range=[\n",
      "                    1.0,\n",
      "                    1.0,\n",
      "                ],\n",
      "                translation_std=[\n",
      "                    0,\n",
      "                    0,\n",
      "                    0,\n",
      "                ],\n",
      "                type='GlobalRotScaleTrans'),\n",
      "            dict(\n",
      "                data_aug_conf=dict(\n",
      "                    H=1024,\n",
      "                    W=1224,\n",
      "                    bot_pct_lim=(\n",
      "                        0.0,\n",
      "                        0.0,\n",
      "                    ),\n",
      "                    final_dim=(\n",
      "                        640,\n",
      "                        768,\n",
      "                    ),\n",
      "                    rand_flip=True,\n",
      "                    resize_lim=(\n",
      "                        0.5,\n",
      "                        0.625,\n",
      "                    ),\n",
      "                    rot_lim=(\n",
      "                        0.0,\n",
      "                        0.0,\n",
      "                    )),\n",
      "                training=False,\n",
      "                type='ResizeCropFlipImage'),\n",
      "            dict(\n",
      "                mean=[\n",
      "                    103.53,\n",
      "                    116.28,\n",
      "                    123.675,\n",
      "                ],\n",
      "                std=[\n",
      "                    57.375,\n",
      "                    57.12,\n",
      "                    58.395,\n",
      "                ],\n",
      "                to_rgb=False,\n",
      "                type='NormalizeMultiviewImage'),\n",
      "            dict(size_divisor=32, type='PadMultiViewImage'),\n",
      "        ],\n",
      "        type='MultiScaleFlipAug3D'),\n",
      "    dict(keys=[\n",
      "        'points',\n",
      "        'img',\n",
      "    ], type='Pack3DDetInputs'),\n",
      "]\n",
      "train_cfg = dict(by_epoch=True, max_epochs=30, val_interval=1)\n",
      "train_dataloader = dict(\n",
      "    batch_size=4,\n",
      "    dataset=dict(\n",
      "        dataset=dict(\n",
      "            ann_file='coda_infos_train.pkl',\n",
      "            box_type_3d='LiDAR',\n",
      "            data_prefix=dict(img='', pts='', sweeps=''),\n",
      "            data_root='data/CODA/',\n",
      "            metainfo=dict(classes=[\n",
      "                'Car',\n",
      "                'Pedestrian',\n",
      "                'Cyclist',\n",
      "            ]),\n",
      "            modality=dict(use_camera=True, use_lidar=True),\n",
      "            pipeline=[\n",
      "                dict(\n",
      "                    coord_type='LIDAR',\n",
      "                    load_dim=4,\n",
      "                    type='LoadPointsFromFile',\n",
      "                    use_dim=[\n",
      "                        0,\n",
      "                        1,\n",
      "                        2,\n",
      "                        3,\n",
      "                    ]),\n",
      "                dict(\n",
      "                    backend_args=None,\n",
      "                    color_type='color',\n",
      "                    num_views=2,\n",
      "                    to_float32=True,\n",
      "                    type='LoadMultiViewImageFromFilesNus'),\n",
      "                dict(\n",
      "                    type='LoadAnnotations3D',\n",
      "                    with_bbox_3d=True,\n",
      "                    with_label_3d=True),\n",
      "                dict(\n",
      "                    rot_range=[\n",
      "                        -0.78539816,\n",
      "                        0.78539816,\n",
      "                    ],\n",
      "                    scale_ratio_range=[\n",
      "                        0.9,\n",
      "                        1.1,\n",
      "                    ],\n",
      "                    translation_std=[\n",
      "                        0.5,\n",
      "                        0.5,\n",
      "                        0.5,\n",
      "                    ],\n",
      "                    type='GlobalRotScaleTransAll'),\n",
      "                dict(\n",
      "                    flip_ratio_bev_horizontal=0.5,\n",
      "                    flip_ratio_bev_vertical=0.5,\n",
      "                    type='CustomRandomFlip3D'),\n",
      "                dict(\n",
      "                    point_cloud_range=[\n",
      "                        -21.0,\n",
      "                        -21.0,\n",
      "                        -1.8,\n",
      "                        21.0,\n",
      "                        21.0,\n",
      "                        5.4,\n",
      "                    ],\n",
      "                    type='PointsRangeFilter'),\n",
      "                dict(\n",
      "                    point_cloud_range=[\n",
      "                        -21.0,\n",
      "                        -21.0,\n",
      "                        -1.8,\n",
      "                        21.0,\n",
      "                        21.0,\n",
      "                        5.4,\n",
      "                    ],\n",
      "                    type='ObjectRangeFilter'),\n",
      "                dict(\n",
      "                    classes=[\n",
      "                        'Car',\n",
      "                        'Pedestrian',\n",
      "                        'Cyclist',\n",
      "                    ],\n",
      "                    type='ObjectNameFilter'),\n",
      "                dict(type='PointShuffle'),\n",
      "                dict(\n",
      "                    data_aug_conf=dict(\n",
      "                        H=1024,\n",
      "                        W=1224,\n",
      "                        bot_pct_lim=(\n",
      "                            0.0,\n",
      "                            0.0,\n",
      "                        ),\n",
      "                        final_dim=(\n",
      "                            640,\n",
      "                            768,\n",
      "                        ),\n",
      "                        rand_flip=True,\n",
      "                        resize_lim=(\n",
      "                            0.5,\n",
      "                            0.625,\n",
      "                        ),\n",
      "                        rot_lim=(\n",
      "                            0.0,\n",
      "                            0.0,\n",
      "                        )),\n",
      "                    training=True,\n",
      "                    type='ResizeCropFlipImage'),\n",
      "                dict(\n",
      "                    mean=[\n",
      "                        103.53,\n",
      "                        116.28,\n",
      "                        123.675,\n",
      "                    ],\n",
      "                    std=[\n",
      "                        57.375,\n",
      "                        57.12,\n",
      "                        58.395,\n",
      "                    ],\n",
      "                    to_rgb=False,\n",
      "                    type='NormalizeMultiviewImage'),\n",
      "                dict(size_divisor=32, type='PadMultiViewImage'),\n",
      "                dict(\n",
      "                    keys=[\n",
      "                        'points',\n",
      "                        'img',\n",
      "                        'gt_bboxes_3d',\n",
      "                        'gt_labels_3d',\n",
      "                    ],\n",
      "                    meta_keys=(\n",
      "                        'filename',\n",
      "                        'ori_shape',\n",
      "                        'img_shape',\n",
      "                        'lidar2img',\n",
      "                        'depth2img',\n",
      "                        'cam2img',\n",
      "                        'pad_shape',\n",
      "                        'scale_factor',\n",
      "                        'flip',\n",
      "                        'pcd_horizontal_flip',\n",
      "                        'pcd_vertical_flip',\n",
      "                        'box_mode_3d',\n",
      "                        'box_type_3d',\n",
      "                        'img_norm_cfg',\n",
      "                        'pcd_trans',\n",
      "                        'sample_idx',\n",
      "                        'pcd_scale_factor',\n",
      "                        'pcd_rotation',\n",
      "                        'pts_filename',\n",
      "                        'transformation_3d_flow',\n",
      "                        'rot_degree',\n",
      "                        'gt_bboxes_3d',\n",
      "                        'gt_labels_3d',\n",
      "                    ),\n",
      "                    type='Pack3DDetInputs'),\n",
      "            ],\n",
      "            test_mode=False,\n",
      "            type='CodaDataset',\n",
      "            use_valid_flag=True),\n",
      "        type='CBGSDataset'),\n",
      "    num_workers=4,\n",
      "    persistent_workers=True,\n",
      "    sampler=dict(shuffle=True, type='DefaultSampler'))\n",
      "train_pipeline = [\n",
      "    dict(\n",
      "        coord_type='LIDAR',\n",
      "        load_dim=4,\n",
      "        type='LoadPointsFromFile',\n",
      "        use_dim=[\n",
      "            0,\n",
      "            1,\n",
      "            2,\n",
      "            3,\n",
      "        ]),\n",
      "    dict(\n",
      "        backend_args=None,\n",
      "        color_type='color',\n",
      "        num_views=2,\n",
      "        to_float32=True,\n",
      "        type='LoadMultiViewImageFromFilesNus'),\n",
      "    dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True),\n",
      "    dict(\n",
      "        rot_range=[\n",
      "            -0.78539816,\n",
      "            0.78539816,\n",
      "        ],\n",
      "        scale_ratio_range=[\n",
      "            0.9,\n",
      "            1.1,\n",
      "        ],\n",
      "        translation_std=[\n",
      "            0.5,\n",
      "            0.5,\n",
      "            0.5,\n",
      "        ],\n",
      "        type='GlobalRotScaleTransAll'),\n",
      "    dict(\n",
      "        flip_ratio_bev_horizontal=0.5,\n",
      "        flip_ratio_bev_vertical=0.5,\n",
      "        type='CustomRandomFlip3D'),\n",
      "    dict(\n",
      "        point_cloud_range=[\n",
      "            -21.0,\n",
      "            -21.0,\n",
      "            -1.8,\n",
      "            21.0,\n",
      "            21.0,\n",
      "            5.4,\n",
      "        ],\n",
      "        type='PointsRangeFilter'),\n",
      "    dict(\n",
      "        point_cloud_range=[\n",
      "            -21.0,\n",
      "            -21.0,\n",
      "            -1.8,\n",
      "            21.0,\n",
      "            21.0,\n",
      "            5.4,\n",
      "        ],\n",
      "        type='ObjectRangeFilter'),\n",
      "    dict(classes=[\n",
      "        'Car',\n",
      "        'Pedestrian',\n",
      "        'Cyclist',\n",
      "    ], type='ObjectNameFilter'),\n",
      "    dict(type='PointShuffle'),\n",
      "    dict(\n",
      "        data_aug_conf=dict(\n",
      "            H=1024,\n",
      "            W=1224,\n",
      "            bot_pct_lim=(\n",
      "                0.0,\n",
      "                0.0,\n",
      "            ),\n",
      "            final_dim=(\n",
      "                640,\n",
      "                768,\n",
      "            ),\n",
      "            rand_flip=True,\n",
      "            resize_lim=(\n",
      "                0.5,\n",
      "                0.625,\n",
      "            ),\n",
      "            rot_lim=(\n",
      "                0.0,\n",
      "                0.0,\n",
      "            )),\n",
      "        training=True,\n",
      "        type='ResizeCropFlipImage'),\n",
      "    dict(\n",
      "        mean=[\n",
      "            103.53,\n",
      "            116.28,\n",
      "            123.675,\n",
      "        ],\n",
      "        std=[\n",
      "            57.375,\n",
      "            57.12,\n",
      "            58.395,\n",
      "        ],\n",
      "        to_rgb=False,\n",
      "        type='NormalizeMultiviewImage'),\n",
      "    dict(size_divisor=32, type='PadMultiViewImage'),\n",
      "    dict(\n",
      "        keys=[\n",
      "            'points',\n",
      "            'img',\n",
      "            'gt_bboxes_3d',\n",
      "            'gt_labels_3d',\n",
      "        ],\n",
      "        meta_keys=(\n",
      "            'filename',\n",
      "            'ori_shape',\n",
      "            'img_shape',\n",
      "            'lidar2img',\n",
      "            'depth2img',\n",
      "            'cam2img',\n",
      "            'pad_shape',\n",
      "            'scale_factor',\n",
      "            'flip',\n",
      "            'pcd_horizontal_flip',\n",
      "            'pcd_vertical_flip',\n",
      "            'box_mode_3d',\n",
      "            'box_type_3d',\n",
      "            'img_norm_cfg',\n",
      "            'pcd_trans',\n",
      "            'sample_idx',\n",
      "            'pcd_scale_factor',\n",
      "            'pcd_rotation',\n",
      "            'pts_filename',\n",
      "            'transformation_3d_flow',\n",
      "            'rot_degree',\n",
      "            'gt_bboxes_3d',\n",
      "            'gt_labels_3d',\n",
      "        ),\n",
      "        type='Pack3DDetInputs'),\n",
      "]\n",
      "val_cfg = dict()\n",
      "val_dataloader = dict(\n",
      "    batch_size=4,\n",
      "    dataset=dict(\n",
      "        ann_file='coda_infos_val.pkl',\n",
      "        backend_args=None,\n",
      "        box_type_3d='LiDAR',\n",
      "        data_prefix=dict(img='', pts='', sweeps=''),\n",
      "        data_root='data/CODA/',\n",
      "        metainfo=dict(classes=[\n",
      "            'Car',\n",
      "            'Pedestrian',\n",
      "            'Cyclist',\n",
      "        ]),\n",
      "        modality=dict(use_camera=True, use_lidar=True),\n",
      "        pipeline=[\n",
      "            dict(\n",
      "                coord_type='LIDAR',\n",
      "                load_dim=4,\n",
      "                type='LoadPointsFromFile',\n",
      "                use_dim=[\n",
      "                    0,\n",
      "                    1,\n",
      "                    2,\n",
      "                    3,\n",
      "                ]),\n",
      "            dict(\n",
      "                backend_args=None,\n",
      "                color_type='color',\n",
      "                num_views=2,\n",
      "                to_float32=True,\n",
      "                type='LoadMultiViewImageFromFilesNus'),\n",
      "            dict(\n",
      "                point_cloud_range=[\n",
      "                    -21.0,\n",
      "                    -21.0,\n",
      "                    -1.8,\n",
      "                    21.0,\n",
      "                    21.0,\n",
      "                    5.4,\n",
      "                ],\n",
      "                type='PointsRangeFilter'),\n",
      "            dict(\n",
      "                flip=False,\n",
      "                img_scale=(\n",
      "                    1333,\n",
      "                    800,\n",
      "                ),\n",
      "                pts_scale_ratio=1,\n",
      "                transforms=[\n",
      "                    dict(\n",
      "                        rot_range=[\n",
      "                            0,\n",
      "                            0,\n",
      "                        ],\n",
      "                        scale_ratio_range=[\n",
      "                            1.0,\n",
      "                            1.0,\n",
      "                        ],\n",
      "                        translation_std=[\n",
      "                            0,\n",
      "                            0,\n",
      "                            0,\n",
      "                        ],\n",
      "                        type='GlobalRotScaleTrans'),\n",
      "                    dict(\n",
      "                        data_aug_conf=dict(\n",
      "                            H=1024,\n",
      "                            W=1224,\n",
      "                            bot_pct_lim=(\n",
      "                                0.0,\n",
      "                                0.0,\n",
      "                            ),\n",
      "                            final_dim=(\n",
      "                                640,\n",
      "                                768,\n",
      "                            ),\n",
      "                            rand_flip=True,\n",
      "                            resize_lim=(\n",
      "                                0.5,\n",
      "                                0.625,\n",
      "                            ),\n",
      "                            rot_lim=(\n",
      "                                0.0,\n",
      "                                0.0,\n",
      "                            )),\n",
      "                        training=False,\n",
      "                        type='ResizeCropFlipImage'),\n",
      "                    dict(\n",
      "                        mean=[\n",
      "                            103.53,\n",
      "                            116.28,\n",
      "                            123.675,\n",
      "                        ],\n",
      "                        std=[\n",
      "                            57.375,\n",
      "                            57.12,\n",
      "                            58.395,\n",
      "                        ],\n",
      "                        to_rgb=False,\n",
      "                        type='NormalizeMultiviewImage'),\n",
      "                    dict(size_divisor=32, type='PadMultiViewImage'),\n",
      "                ],\n",
      "                type='MultiScaleFlipAug3D'),\n",
      "            dict(keys=[\n",
      "                'points',\n",
      "                'img',\n",
      "            ], type='Pack3DDetInputs'),\n",
      "        ],\n",
      "        test_mode=True,\n",
      "        type='CodaDataset'),\n",
      "    drop_last=False,\n",
      "    num_workers=4,\n",
      "    persistent_workers=True,\n",
      "    sampler=dict(shuffle=False, type='DefaultSampler'))\n",
      "val_evaluator = dict(\n",
      "    ann_file='data/CODA/coda_infos_val.pkl',\n",
      "    backend_args=None,\n",
      "    metric='bbox',\n",
      "    type='CodaMetric')\n",
      "vis_backends = [\n",
      "    dict(type='LocalVisBackend'),\n",
      "]\n",
      "visualizer = dict(\n",
      "    name='visualizer',\n",
      "    type='Det3DLocalVisualizer',\n",
      "    vis_backends=[\n",
      "        dict(type='LocalVisBackend'),\n",
      "    ])\n",
      "voxel_size = [\n",
      "    0.075,\n",
      "    0.075,\n",
      "    0.2,\n",
      "]\n",
      "work_dir = '/media/hello/新加卷/now_study_code/3D_Perception/detection/test/work_dirs'\n",
      "\n",
      "Load pretrain_ckpt from ./ckpts/convnextv2_nano.pth\n",
      "09/04 17:33:59 - mmengine - \u001b[4m\u001b[97mINFO\u001b[0m - Distributed training is not used, all SyncBatchNorm (SyncBN) layers in the model will be automatically reverted to BatchNormXd layers if they are used.\n",
      "09/04 17:33:59 - mmengine - \u001b[4m\u001b[97mINFO\u001b[0m - Hooks will be executed in the following order:\n",
      "before_run:\n",
      "(VERY_HIGH   ) RuntimeInfoHook                    \n",
      "(BELOW_NORMAL) LoggerHook                         \n",
      " -------------------- \n",
      "before_train:\n",
      "(VERY_HIGH   ) RuntimeInfoHook                    \n",
      "(NORMAL      ) IterTimerHook                      \n",
      "(VERY_LOW    ) CheckpointHook                     \n",
      " -------------------- \n",
      "before_train_epoch:\n",
      "(VERY_HIGH   ) RuntimeInfoHook                    \n",
      "(NORMAL      ) IterTimerHook                      \n",
      "(NORMAL      ) DistSamplerSeedHook                \n",
      "(NORMAL      ) ChangeStrategyHook                 \n",
      " -------------------- \n",
      "before_train_iter:\n",
      "(VERY_HIGH   ) RuntimeInfoHook                    \n",
      "(NORMAL      ) IterTimerHook                      \n",
      " -------------------- \n",
      "after_train_iter:\n",
      "(VERY_HIGH   ) RuntimeInfoHook                    \n",
      "(NORMAL      ) IterTimerHook                      \n",
      "(BELOW_NORMAL) LoggerHook                         \n",
      "(LOW         ) ParamSchedulerHook                 \n",
      "(VERY_LOW    ) CheckpointHook                     \n",
      " -------------------- \n",
      "after_train_epoch:\n",
      "(NORMAL      ) IterTimerHook                      \n",
      "(LOW         ) ParamSchedulerHook                 \n",
      "(VERY_LOW    ) CheckpointHook                     \n",
      " -------------------- \n",
      "before_val:\n",
      "(VERY_HIGH   ) RuntimeInfoHook                    \n",
      " -------------------- \n",
      "before_val_epoch:\n",
      "(NORMAL      ) IterTimerHook                      \n",
      " -------------------- \n",
      "before_val_iter:\n",
      "(NORMAL      ) IterTimerHook                      \n",
      " -------------------- \n",
      "after_val_iter:\n",
      "(NORMAL      ) IterTimerHook                      \n",
      "(NORMAL      ) Det3DVisualizationHook             \n",
      "(BELOW_NORMAL) LoggerHook                         \n",
      " -------------------- \n",
      "after_val_epoch:\n",
      "(VERY_HIGH   ) RuntimeInfoHook                    \n",
      "(NORMAL      ) IterTimerHook                      \n",
      "(BELOW_NORMAL) LoggerHook                         \n",
      "(LOW         ) ParamSchedulerHook                 \n",
      "(VERY_LOW    ) CheckpointHook                     \n",
      " -------------------- \n",
      "after_val:\n",
      "(VERY_HIGH   ) RuntimeInfoHook                    \n",
      " -------------------- \n",
      "after_train:\n",
      "(VERY_HIGH   ) RuntimeInfoHook                    \n",
      "(VERY_LOW    ) CheckpointHook                     \n",
      " -------------------- \n",
      "before_test:\n",
      "(VERY_HIGH   ) RuntimeInfoHook                    \n",
      " -------------------- \n",
      "before_test_epoch:\n",
      "(NORMAL      ) IterTimerHook                      \n",
      " -------------------- \n",
      "before_test_iter:\n",
      "(NORMAL      ) IterTimerHook                      \n",
      " -------------------- \n",
      "after_test_iter:\n",
      "(NORMAL      ) IterTimerHook                      \n",
      "(NORMAL      ) Det3DVisualizationHook             \n",
      "(BELOW_NORMAL) LoggerHook                         \n",
      " -------------------- \n",
      "after_test_epoch:\n",
      "(VERY_HIGH   ) RuntimeInfoHook                    \n",
      "(NORMAL      ) IterTimerHook                      \n",
      "(BELOW_NORMAL) LoggerHook                         \n",
      " -------------------- \n",
      "after_test:\n",
      "(VERY_HIGH   ) RuntimeInfoHook                    \n",
      " -------------------- \n",
      "after_run:\n",
      "(BELOW_NORMAL) LoggerHook                         \n",
      " -------------------- \n"
     ]
    }
   ],
   "source": [
    "cfg = Config.fromfile('my_projects/UniMT/configs/unimt_coda.py')\n",
    "# cfg = Config.fromfile('my_projects/UniMT/configs/unimt_wheelchair.py')\n",
    "# cfg = Config.fromfile('my_projects/CMT/configs/cmt_nus.py')\n",
    "# cfg = Config.fromfile('my_projects/CMDT/configs/cmdt_coda.py')\n",
    "# cfg = Config.fromfile('my_projects/CMDT/configs/cmdt_coda_16lines.py')\n",
    "# cfg = Config.fromfile('my_projects/CMDT/configs/cmdt_wheelchair.py')\n",
    "# cfg = Config.fromfile('my_projects/CMT/configs/cmt_coda.py')\n",
    "# cfg = Config.fromfile('my_projects/CMT/configs/cmt_coda2.py')\n",
    "# cfg = Config.fromfile('my_projects/MVXNet/configs/mvxnet_coda.py')\n",
    "# cfg = Config.fromfile('my_projects/BEVFusion/configs/bevfusion_lidar_cam_coda.py')\n",
    "# cfg = Config.fromfile('my_projects/CenterPoint/configs/centerpoint_coda.py')\n",
    "cfg.work_dir = osp.abspath('./test/work_dirs')\n",
    "runner = Runner.from_cfg(cfg)\n",
    "# runner.load_checkpoint('ckpts/CMT/epoch_29.pth')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "27129720\n",
      "27123960\n"
     ]
    }
   ],
   "source": [
    "print(sum(p.numel() for p in runner.model.parameters()))\n",
    "print(sum(p.numel() for p in runner.model.parameters() if p.requires_grad))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "model = ConvNeXtV2Backbone()\n",
    "ckpt = torch.load('./ckpts/convnextv2_nano_22k_384_ema.pt')\n",
    "model.load_state_dict(ckpt['model'], strict=False)\n",
    "torch.save(model.state_dict(), './ckpts/convnextv2_nano.pth')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[[0.0044, 0.0084, 0.0091, 0.0417, 0.0326, 0.0111, 0.0079],\n",
       "         [0.0050, 0.0096, 0.0139, 0.0510, 0.0389, 0.0126, 0.0096],\n",
       "         [0.0233, 0.0299, 0.0411, 0.0934, 0.0723, 0.0302, 0.0210],\n",
       "         [0.0318, 0.0385, 0.0462, 0.0809, 0.0862, 0.0479, 0.0387],\n",
       "         [0.0343, 0.0408, 0.0568, 0.1028, 0.0889, 0.0439, 0.0331],\n",
       "         [0.0053, 0.0083, 0.0138, 0.0517, 0.0393, 0.0106, 0.0067],\n",
       "         [0.0076, 0.0057, 0.0057, 0.0400, 0.0285, 0.0065, 0.0045]]],\n",
       "       grad_fn=<SelectBackward0>)"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model.stages[0][0].dwconv.weight[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[[0.0044, 0.0084, 0.0091, 0.0417, 0.0326, 0.0111, 0.0079],\n",
       "         [0.0050, 0.0096, 0.0139, 0.0510, 0.0389, 0.0126, 0.0096],\n",
       "         [0.0233, 0.0299, 0.0411, 0.0934, 0.0723, 0.0302, 0.0210],\n",
       "         [0.0318, 0.0385, 0.0462, 0.0809, 0.0862, 0.0479, 0.0387],\n",
       "         [0.0343, 0.0408, 0.0568, 0.1028, 0.0889, 0.0439, 0.0331],\n",
       "         [0.0053, 0.0083, 0.0138, 0.0517, 0.0393, 0.0106, 0.0067],\n",
       "         [0.0076, 0.0057, 0.0057, 0.0400, 0.0285, 0.0065, 0.0045]]],\n",
       "       device='cuda:0', grad_fn=<SelectBackward0>)"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "runner.model.img_backbone.stages[0][0].dwconv.weight[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "runner.model.eval()\n",
    "with torch.no_grad():\n",
    "    for data_batch in runner.val_dataloader:\n",
    "        data_batch = runner.model.data_preprocessor(data_batch, training=False)\n",
    "        if isinstance(data_batch, dict):\n",
    "            outputs = runner.model(**data_batch, mode='predict')\n",
    "        elif isinstance(data_batch, (list, tuple)):\n",
    "            outputs = runner.model(**data_batch, mode='predict')\n",
    "        else:\n",
    "            raise TypeError()\n",
    "        runner.val_evaluator.process(data_samples=outputs, data_batch=data_batch)\n",
    "        \n",
    "    metrics = runner.val_evaluator.evaluate(len(runner.val_dataloader.dataset))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "runner.model.eval()\n",
    "for data_batch in runner.test_dataloader:\n",
    "    break\n",
    "data_batch = runner.model.data_preprocessor(data_batch, training=False)\n",
    "batch_inputs_dict = data_batch['inputs']\n",
    "batch_data_samples = data_batch['data_samples']\n",
    "imgs = batch_inputs_dict.get('imgs', None)\n",
    "points = batch_inputs_dict.get('points', None)\n",
    "img_metas = [item.metainfo for item in batch_data_samples]\n",
    "gt_bboxes_3d = [item.get('eval_ann_info')['gt_bboxes_3d'] for item in batch_data_samples]\n",
    "gt_labels_3d = [item.get('eval_ann_info')['gt_labels_3d'] for item in batch_data_samples]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 训练过程\n",
    "runner.model.train()\n",
    "for data_batch in runner.train_dataloader:\n",
    "    data_batch = runner.model.data_preprocessor(data_batch, training=True)\n",
    "    if isinstance(data_batch, dict):\n",
    "        losses = runner.model(**data_batch, mode='loss')\n",
    "    elif isinstance(data_batch, (list, tuple)):\n",
    "        losses = runner.model(*data_batch, mode='loss')\n",
    "    else:\n",
    "        raise TypeError()\n",
    "    break"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "average infer time: 0.14785617040404192\n"
     ]
    }
   ],
   "source": [
    "# 验证过程\n",
    "runner.model.cuda().eval()\n",
    "infer_time = 0\n",
    "with torch.no_grad():\n",
    "    for data_batch in runner.test_dataloader:\n",
    "        data_batch = runner.model.data_preprocessor(data_batch, training=False)\n",
    "        start = time.time()\n",
    "        if isinstance(data_batch, dict):\n",
    "            outputs = runner.model(**data_batch, mode='predict')\n",
    "        elif isinstance(data_batch, (list, tuple)):\n",
    "            outputs = runner.model(**data_batch, mode='predict')\n",
    "        else:\n",
    "            raise TypeError()\n",
    "        infer_time += time.time() - start\n",
    "        runner.test_evaluator.process(data_samples=outputs, data_batch=data_batch)\n",
    "        # break\n",
    "print(f'average infer time: {infer_time/len(runner.test_dataloader)}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "09/04 17:34:00 - mmengine - \u001b[4m\u001b[97mINFO\u001b[0m - ------------------------------\n",
      "09/04 17:34:00 - mmengine - \u001b[4m\u001b[97mINFO\u001b[0m - The length of test dataset: 282\n",
      "09/04 17:34:00 - mmengine - \u001b[4m\u001b[97mINFO\u001b[0m - The number of instances per category in the dataset:\n",
      "+------------+--------+\n",
      "| category   | number |\n",
      "+------------+--------+\n",
      "| Car        | 557    |\n",
      "| Pedestrian | 1925   |\n",
      "| Cyclist    | 29     |\n",
      "+------------+--------+\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "UniMTDetector(\n",
       "  (data_preprocessor): Det3DDataPreprocessor()\n",
       "  (pts_voxel_encoder): HardSimpleVFE()\n",
       "  (pts_middle_encoder): UniMTFusionBackbone(\n",
       "    (img_in): Sequential(\n",
       "      (0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "      (1): BatchNorm2d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
       "      (2): ReLU()\n",
       "    )\n",
       "    (img_out): Sequential(\n",
       "      (0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "      (1): BatchNorm2d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
       "      (2): ReLU()\n",
       "    )\n",
       "    (lidar_in): SparseSequential(\n",
       "      (0): SubMConv3d(4, 16, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[0, 0, 0], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)\n",
       "    )\n",
       "    (encoder_layers): SparseSequential(\n",
       "      (encoder_layer1): SparseSequential(\n",
       "        (0): SparseBasicBlock(\n",
       "          (conv1): SubMConv3d(16, 16, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[1, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)\n",
       "          (bn1): BatchNorm1d(16, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
       "          (conv2): SubMConv3d(16, 16, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[1, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)\n",
       "          (bn2): BatchNorm1d(16, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
       "          (relu): ReLU(inplace=True)\n",
       "        )\n",
       "        (1): SparseBasicBlock(\n",
       "          (conv1): SubMConv3d(16, 16, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[1, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)\n",
       "          (bn1): BatchNorm1d(16, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
       "          (conv2): SubMConv3d(16, 16, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[1, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)\n",
       "          (bn2): BatchNorm1d(16, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
       "          (relu): ReLU(inplace=True)\n",
       "        )\n",
       "        (2): SparseSequential(\n",
       "          (0): SparseConv3d(16, 32, kernel_size=[3, 3, 3], stride=[2, 2, 2], padding=[0, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)\n",
       "          (1): BatchNorm1d(32, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
       "          (2): ReLU(inplace=True)\n",
       "        )\n",
       "      )\n",
       "      (encoder_layer2): SparseSequential(\n",
       "        (0): SparseBasicBlock(\n",
       "          (conv1): SubMConv3d(32, 32, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[1, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)\n",
       "          (bn1): BatchNorm1d(32, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
       "          (conv2): SubMConv3d(32, 32, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[1, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)\n",
       "          (bn2): BatchNorm1d(32, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
       "          (relu): ReLU(inplace=True)\n",
       "        )\n",
       "        (1): SparseBasicBlock(\n",
       "          (conv1): SubMConv3d(32, 32, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[1, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)\n",
       "          (bn1): BatchNorm1d(32, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
       "          (conv2): SubMConv3d(32, 32, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[1, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)\n",
       "          (bn2): BatchNorm1d(32, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
       "          (relu): ReLU(inplace=True)\n",
       "        )\n",
       "        (2): SparseSequential(\n",
       "          (0): SparseConv3d(32, 64, kernel_size=[3, 3, 3], stride=[2, 2, 2], padding=[0, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)\n",
       "          (1): BatchNorm1d(64, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
       "          (2): ReLU(inplace=True)\n",
       "        )\n",
       "      )\n",
       "      (encoder_layer3): SparseSequential(\n",
       "        (0): SparseBasicBlock(\n",
       "          (conv1): SubMConv3d(64, 64, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[1, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)\n",
       "          (bn1): BatchNorm1d(64, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
       "          (conv2): SubMConv3d(64, 64, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[1, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)\n",
       "          (bn2): BatchNorm1d(64, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
       "          (relu): ReLU(inplace=True)\n",
       "        )\n",
       "        (1): SparseBasicBlock(\n",
       "          (conv1): SubMConv3d(64, 64, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[1, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)\n",
       "          (bn1): BatchNorm1d(64, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
       "          (conv2): SubMConv3d(64, 64, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[1, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)\n",
       "          (bn2): BatchNorm1d(64, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
       "          (relu): ReLU(inplace=True)\n",
       "        )\n",
       "      )\n",
       "    )\n",
       "    (lidar_out): SparseSequential(\n",
       "      (0): SubMConv3d(64, 128, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[0, 0, 0], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)\n",
       "    )\n",
       "    (patch_down1): PatchMerging3D(\n",
       "      (sub_conv): SparseSequential(\n",
       "        (0): SubMConv3d(128, 128, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[0, 0, 0], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)\n",
       "        (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "        (2): GELU(approximate='none')\n",
       "      )\n",
       "      (norm): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "      (sigmoid): Sigmoid()\n",
       "    )\n",
       "    (neighbor_pos_embed): PositionEmbeddingLearned(\n",
       "      (position_embedding_head): Sequential(\n",
       "        (0): Linear(in_features=2, out_features=128, bias=True)\n",
       "        (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (2): ReLU(inplace=True)\n",
       "        (3): Linear(in_features=128, out_features=128, bias=True)\n",
       "      )\n",
       "    )\n",
       "    (linear_i2l): LIONBlock(\n",
       "      (encoder): ModuleList(\n",
       "        (0-1): 2 x LIONLayer(\n",
       "          (blocks): ModuleList(\n",
       "            (0-1): 2 x Block(\n",
       "              (mamba): Mamba(\n",
       "                (in_proj): Linear(in_features=128, out_features=512, bias=False)\n",
       "                (conv1d): Conv1d(256, 256, kernel_size=(4,), stride=(1,), padding=(3,), groups=256)\n",
       "                (act): SiLU()\n",
       "                (x_proj): Linear(in_features=256, out_features=40, bias=False)\n",
       "                (dt_proj): Linear(in_features=8, out_features=256, bias=True)\n",
       "                (conv1d_b): Conv1d(256, 256, kernel_size=(4,), stride=(1,), padding=(3,), groups=256)\n",
       "                (x_proj_b): Linear(in_features=256, out_features=40, bias=False)\n",
       "                (dt_proj_b): Linear(in_features=8, out_features=256, bias=True)\n",
       "                (out_proj): Linear(in_features=256, out_features=128, bias=False)\n",
       "              )\n",
       "              (norm): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "              (drop_path): DropPath(drop_prob=0.200)\n",
       "            )\n",
       "          )\n",
       "          (window_partition): FlattenedWindowMapping()\n",
       "        )\n",
       "      )\n",
       "      (downsample_list): ModuleList(\n",
       "        (0-1): 2 x PatchMerging3D(\n",
       "          (sub_conv): SparseSequential(\n",
       "            (0): SubMConv3d(128, 128, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[0, 0, 0], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)\n",
       "            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "            (2): GELU(approximate='none')\n",
       "          )\n",
       "          (norm): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "          (sigmoid): Sigmoid()\n",
       "        )\n",
       "      )\n",
       "      (pos_emb_list): ModuleList(\n",
       "        (0-1): 2 x PositionEmbeddingLearned(\n",
       "          (position_embedding_head): Sequential(\n",
       "            (0): Linear(in_features=3, out_features=128, bias=True)\n",
       "            (1): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "            (2): ReLU(inplace=True)\n",
       "            (3): Linear(in_features=128, out_features=128, bias=True)\n",
       "          )\n",
       "        )\n",
       "      )\n",
       "      (decoder): ModuleList(\n",
       "        (0-1): 2 x LIONLayer(\n",
       "          (blocks): ModuleList(\n",
       "            (0-1): 2 x Block(\n",
       "              (mamba): Mamba(\n",
       "                (in_proj): Linear(in_features=128, out_features=512, bias=False)\n",
       "                (conv1d): Conv1d(256, 256, kernel_size=(4,), stride=(1,), padding=(3,), groups=256)\n",
       "                (act): SiLU()\n",
       "                (x_proj): Linear(in_features=256, out_features=40, bias=False)\n",
       "                (dt_proj): Linear(in_features=8, out_features=256, bias=True)\n",
       "                (conv1d_b): Conv1d(256, 256, kernel_size=(4,), stride=(1,), padding=(3,), groups=256)\n",
       "                (x_proj_b): Linear(in_features=256, out_features=40, bias=False)\n",
       "                (dt_proj_b): Linear(in_features=8, out_features=256, bias=True)\n",
       "                (out_proj): Linear(in_features=256, out_features=128, bias=False)\n",
       "              )\n",
       "              (norm): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "              (drop_path): DropPath(drop_prob=0.200)\n",
       "            )\n",
       "          )\n",
       "          (window_partition): FlattenedWindowMapping()\n",
       "        )\n",
       "      )\n",
       "      (decoder_norm): ModuleList(\n",
       "        (0-1): 2 x LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "      )\n",
       "      (upsample_list): ModuleList(\n",
       "        (0-1): 2 x PatchExpanding3D()\n",
       "      )\n",
       "    )\n",
       "    (patch_down2): PatchMerging3D(\n",
       "      (sub_conv): SparseSequential(\n",
       "        (0): SubMConv3d(128, 128, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[0, 0, 0], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)\n",
       "        (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "        (2): GELU(approximate='none')\n",
       "      )\n",
       "      (norm): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "      (sigmoid): Sigmoid()\n",
       "    )\n",
       "    (linear_out): LIONLayer(\n",
       "      (blocks): ModuleList(\n",
       "        (0-1): 2 x Block(\n",
       "          (mamba): Mamba(\n",
       "            (in_proj): Linear(in_features=128, out_features=512, bias=False)\n",
       "            (conv1d): Conv1d(256, 256, kernel_size=(4,), stride=(1,), padding=(3,), groups=256)\n",
       "            (act): SiLU()\n",
       "            (x_proj): Linear(in_features=256, out_features=40, bias=False)\n",
       "            (dt_proj): Linear(in_features=8, out_features=256, bias=True)\n",
       "            (conv1d_b): Conv1d(256, 256, kernel_size=(4,), stride=(1,), padding=(3,), groups=256)\n",
       "            (x_proj_b): Linear(in_features=256, out_features=40, bias=False)\n",
       "            (dt_proj_b): Linear(in_features=8, out_features=256, bias=True)\n",
       "            (out_proj): Linear(in_features=256, out_features=128, bias=False)\n",
       "          )\n",
       "          (norm): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "          (drop_path): DropPath(drop_prob=0.200)\n",
       "        )\n",
       "      )\n",
       "      (window_partition): FlattenedWindowMapping()\n",
       "    )\n",
       "  )\n",
       "  (pts_backbone): SECOND(\n",
       "    (blocks): ModuleList(\n",
       "      (0): Sequential(\n",
       "        (0): Conv2d(256, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (1): BatchNorm2d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
       "        (2): ReLU(inplace=True)\n",
       "        (3): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (4): BatchNorm2d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
       "        (5): ReLU(inplace=True)\n",
       "      )\n",
       "      (1): Sequential(\n",
       "        (0): Conv2d(128, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
       "        (1): BatchNorm2d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
       "        (2): ReLU(inplace=True)\n",
       "        (3): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (4): BatchNorm2d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
       "        (5): ReLU(inplace=True)\n",
       "        (6): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (7): BatchNorm2d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
       "        (8): ReLU(inplace=True)\n",
       "      )\n",
       "      (2): Sequential(\n",
       "        (0): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
       "        (1): BatchNorm2d(256, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
       "        (2): ReLU(inplace=True)\n",
       "        (3): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (4): BatchNorm2d(256, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
       "        (5): ReLU(inplace=True)\n",
       "        (6): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (7): BatchNorm2d(256, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
       "        (8): ReLU(inplace=True)\n",
       "      )\n",
       "    )\n",
       "  )\n",
       "  init_cfg={'type': 'Kaiming', 'layer': 'Conv2d'}\n",
       "  (pts_neck): SECONDFPN(\n",
       "    (deblocks): ModuleList(\n",
       "      (0): Sequential(\n",
       "        (0): Conv2d(128, 128, kernel_size=(2, 2), stride=(2, 2), bias=False)\n",
       "        (1): BatchNorm2d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
       "        (2): ReLU(inplace=True)\n",
       "      )\n",
       "      (1): Sequential(\n",
       "        (0): Conv2d(128, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (1): BatchNorm2d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
       "        (2): ReLU(inplace=True)\n",
       "      )\n",
       "      (2): Sequential(\n",
       "        (0): ConvTranspose2d(256, 128, kernel_size=(2, 2), stride=(2, 2), bias=False)\n",
       "        (1): BatchNorm2d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
       "        (2): ReLU(inplace=True)\n",
       "      )\n",
       "    )\n",
       "  )\n",
       "  init_cfg=[{'type': 'Kaiming', 'layer': 'ConvTranspose2d'}, {'type': 'Constant', 'layer': 'NaiveSyncBatchNorm2d', 'val': 1.0}]\n",
       "  (pts_bbox_head): UniMTHead(\n",
       "    (loss_cls): FocalLoss()\n",
       "    (loss_bbox): L1Loss()\n",
       "    (loss_heatmap): GaussianFocalLoss()\n",
       "    (shared_conv): ConvModule(\n",
       "      (conv): Conv2d(384, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (activate): ReLU(inplace=True)\n",
       "    )\n",
       "    (transformer_decoder): UniMTTransformerDecoder(\n",
       "      (layers): ModuleList(\n",
       "        (0-5): 6 x UniMTTransformerDecoderLayer(\n",
       "          (attentions): ModuleList(\n",
       "            (0): MultiheadAttention(\n",
       "              (attn): MultiheadAttention(\n",
       "                (out_proj): NonDynamicallyQuantizableLinear(in_features=128, out_features=128, bias=True)\n",
       "              )\n",
       "              (proj_drop): Dropout(p=0.0, inplace=False)\n",
       "              (dropout_layer): Dropout(p=0.1, inplace=False)\n",
       "            )\n",
       "            (1): DeformableAttention2MultiModality(\n",
       "              (sampling_offsets): Linear(in_features=128, out_features=39, bias=True)\n",
       "              (cam_embedding): Sequential(\n",
       "                (0): Linear(in_features=12, out_features=64, bias=True)\n",
       "                (1): ReLU(inplace=True)\n",
       "                (2): Linear(in_features=64, out_features=128, bias=True)\n",
       "                (3): ReLU(inplace=True)\n",
       "                (4): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "              )\n",
       "              (pts_attention_weights): Linear(in_features=128, out_features=104, bias=True)\n",
       "              (img_attention_weights): Linear(in_features=128, out_features=312, bias=True)\n",
       "              (pts_proj): Linear(in_features=128, out_features=128, bias=True)\n",
       "              (img_proj): Linear(in_features=128, out_features=128, bias=True)\n",
       "              (output_proj): Linear(in_features=256, out_features=128, bias=True)\n",
       "              (dropout): Dropout(p=0.1, inplace=False)\n",
       "            )\n",
       "          )\n",
       "          (ffns): ModuleList(\n",
       "            (0): FFN(\n",
       "              (layers): Sequential(\n",
       "                (0): Sequential(\n",
       "                  (0): Linear(in_features=128, out_features=512, bias=True)\n",
       "                  (1): ReLU(inplace=True)\n",
       "                  (2): Dropout(p=0.0, inplace=False)\n",
       "                )\n",
       "                (1): Linear(in_features=512, out_features=128, bias=True)\n",
       "                (2): Dropout(p=0.0, inplace=False)\n",
       "              )\n",
       "              (dropout_layer): Identity()\n",
       "              (gamma2): Identity()\n",
       "            )\n",
       "          )\n",
       "          (norms): ModuleList(\n",
       "            (0-2): 3 x LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "          )\n",
       "        )\n",
       "      )\n",
       "      (post_norm): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "    )\n",
       "    (reference_points): Embedding(500, 3)\n",
       "    (bev_embedding): Sequential(\n",
       "      (0): Linear(in_features=256, out_features=128, bias=True)\n",
       "      (1): ReLU(inplace=True)\n",
       "      (2): Linear(in_features=128, out_features=128, bias=True)\n",
       "    )\n",
       "    (rv_embedding): Sequential(\n",
       "      (0): Linear(in_features=192, out_features=512, bias=True)\n",
       "      (1): ReLU(inplace=True)\n",
       "      (2): Linear(in_features=512, out_features=128, bias=True)\n",
       "    )\n",
       "    (task_head): SeparateTaskHead(\n",
       "      (center): Sequential(\n",
       "        (0): Conv1d(768, 384, kernel_size=(1,), stride=(1,), groups=6, bias=False)\n",
       "        (1): GroupLayerNorm1d()\n",
       "        (2): ReLU(inplace=True)\n",
       "        (3): Conv1d(384, 12, kernel_size=(1,), stride=(1,), groups=6)\n",
       "      )\n",
       "      (height): Sequential(\n",
       "        (0): Conv1d(768, 384, kernel_size=(1,), stride=(1,), groups=6, bias=False)\n",
       "        (1): GroupLayerNorm1d()\n",
       "        (2): ReLU(inplace=True)\n",
       "        (3): Conv1d(384, 6, kernel_size=(1,), stride=(1,), groups=6)\n",
       "      )\n",
       "      (dim): Sequential(\n",
       "        (0): Conv1d(768, 384, kernel_size=(1,), stride=(1,), groups=6, bias=False)\n",
       "        (1): GroupLayerNorm1d()\n",
       "        (2): ReLU(inplace=True)\n",
       "        (3): Conv1d(384, 18, kernel_size=(1,), stride=(1,), groups=6)\n",
       "      )\n",
       "      (rot): Sequential(\n",
       "        (0): Conv1d(768, 384, kernel_size=(1,), stride=(1,), groups=6, bias=False)\n",
       "        (1): GroupLayerNorm1d()\n",
       "        (2): ReLU(inplace=True)\n",
       "        (3): Conv1d(384, 12, kernel_size=(1,), stride=(1,), groups=6)\n",
       "      )\n",
       "      (cls_logits): Sequential(\n",
       "        (0): Conv1d(768, 384, kernel_size=(1,), stride=(1,), groups=6, bias=False)\n",
       "        (1): GroupLayerNorm1d()\n",
       "        (2): ReLU(inplace=True)\n",
       "        (3): Conv1d(384, 18, kernel_size=(1,), stride=(1,), groups=6)\n",
       "      )\n",
       "    )\n",
       "    init_cfg={'type': 'Kaiming', 'layer': 'Conv1d'}\n",
       "  )\n",
       "  (img_backbone): ConvNeXtV2Backbone(\n",
       "    (downsample_layers): ModuleList(\n",
       "      (0): Sequential(\n",
       "        (0): Conv2d(3, 80, kernel_size=(4, 4), stride=(4, 4))\n",
       "        (1): LayerNorm()\n",
       "      )\n",
       "      (1): Sequential(\n",
       "        (0): LayerNorm()\n",
       "        (1): Conv2d(80, 160, kernel_size=(2, 2), stride=(2, 2))\n",
       "      )\n",
       "      (2): Sequential(\n",
       "        (0): LayerNorm()\n",
       "        (1): Conv2d(160, 320, kernel_size=(2, 2), stride=(2, 2))\n",
       "      )\n",
       "      (3): Sequential(\n",
       "        (0): LayerNorm()\n",
       "        (1): Conv2d(320, 640, kernel_size=(2, 2), stride=(2, 2))\n",
       "      )\n",
       "    )\n",
       "    (stages): ModuleList(\n",
       "      (0): Sequential(\n",
       "        (0): Block(\n",
       "          (dwconv): Conv2d(80, 80, kernel_size=(7, 7), stride=(1, 1), padding=(3, 3), groups=80)\n",
       "          (norm): LayerNorm()\n",
       "          (pwconv1): Linear(in_features=80, out_features=320, bias=True)\n",
       "          (act): GELU(approximate='none')\n",
       "          (grn): GRN()\n",
       "          (pwconv2): Linear(in_features=320, out_features=80, bias=True)\n",
       "          (drop_path): Identity()\n",
       "        )\n",
       "        (1): Block(\n",
       "          (dwconv): Conv2d(80, 80, kernel_size=(7, 7), stride=(1, 1), padding=(3, 3), groups=80)\n",
       "          (norm): LayerNorm()\n",
       "          (pwconv1): Linear(in_features=80, out_features=320, bias=True)\n",
       "          (act): GELU(approximate='none')\n",
       "          (grn): GRN()\n",
       "          (pwconv2): Linear(in_features=320, out_features=80, bias=True)\n",
       "          (drop_path): Identity()\n",
       "        )\n",
       "      )\n",
       "      (1): Sequential(\n",
       "        (0): Block(\n",
       "          (dwconv): Conv2d(160, 160, kernel_size=(7, 7), stride=(1, 1), padding=(3, 3), groups=160)\n",
       "          (norm): LayerNorm()\n",
       "          (pwconv1): Linear(in_features=160, out_features=640, bias=True)\n",
       "          (act): GELU(approximate='none')\n",
       "          (grn): GRN()\n",
       "          (pwconv2): Linear(in_features=640, out_features=160, bias=True)\n",
       "          (drop_path): Identity()\n",
       "        )\n",
       "        (1): Block(\n",
       "          (dwconv): Conv2d(160, 160, kernel_size=(7, 7), stride=(1, 1), padding=(3, 3), groups=160)\n",
       "          (norm): LayerNorm()\n",
       "          (pwconv1): Linear(in_features=160, out_features=640, bias=True)\n",
       "          (act): GELU(approximate='none')\n",
       "          (grn): GRN()\n",
       "          (pwconv2): Linear(in_features=640, out_features=160, bias=True)\n",
       "          (drop_path): Identity()\n",
       "        )\n",
       "      )\n",
       "      (2): Sequential(\n",
       "        (0): Block(\n",
       "          (dwconv): Conv2d(320, 320, kernel_size=(7, 7), stride=(1, 1), padding=(3, 3), groups=320)\n",
       "          (norm): LayerNorm()\n",
       "          (pwconv1): Linear(in_features=320, out_features=1280, bias=True)\n",
       "          (act): GELU(approximate='none')\n",
       "          (grn): GRN()\n",
       "          (pwconv2): Linear(in_features=1280, out_features=320, bias=True)\n",
       "          (drop_path): Identity()\n",
       "        )\n",
       "        (1): Block(\n",
       "          (dwconv): Conv2d(320, 320, kernel_size=(7, 7), stride=(1, 1), padding=(3, 3), groups=320)\n",
       "          (norm): LayerNorm()\n",
       "          (pwconv1): Linear(in_features=320, out_features=1280, bias=True)\n",
       "          (act): GELU(approximate='none')\n",
       "          (grn): GRN()\n",
       "          (pwconv2): Linear(in_features=1280, out_features=320, bias=True)\n",
       "          (drop_path): Identity()\n",
       "        )\n",
       "        (2): Block(\n",
       "          (dwconv): Conv2d(320, 320, kernel_size=(7, 7), stride=(1, 1), padding=(3, 3), groups=320)\n",
       "          (norm): LayerNorm()\n",
       "          (pwconv1): Linear(in_features=320, out_features=1280, bias=True)\n",
       "          (act): GELU(approximate='none')\n",
       "          (grn): GRN()\n",
       "          (pwconv2): Linear(in_features=1280, out_features=320, bias=True)\n",
       "          (drop_path): Identity()\n",
       "        )\n",
       "        (3): Block(\n",
       "          (dwconv): Conv2d(320, 320, kernel_size=(7, 7), stride=(1, 1), padding=(3, 3), groups=320)\n",
       "          (norm): LayerNorm()\n",
       "          (pwconv1): Linear(in_features=320, out_features=1280, bias=True)\n",
       "          (act): GELU(approximate='none')\n",
       "          (grn): GRN()\n",
       "          (pwconv2): Linear(in_features=1280, out_features=320, bias=True)\n",
       "          (drop_path): Identity()\n",
       "        )\n",
       "        (4): Block(\n",
       "          (dwconv): Conv2d(320, 320, kernel_size=(7, 7), stride=(1, 1), padding=(3, 3), groups=320)\n",
       "          (norm): LayerNorm()\n",
       "          (pwconv1): Linear(in_features=320, out_features=1280, bias=True)\n",
       "          (act): GELU(approximate='none')\n",
       "          (grn): GRN()\n",
       "          (pwconv2): Linear(in_features=1280, out_features=320, bias=True)\n",
       "          (drop_path): Identity()\n",
       "        )\n",
       "        (5): Block(\n",
       "          (dwconv): Conv2d(320, 320, kernel_size=(7, 7), stride=(1, 1), padding=(3, 3), groups=320)\n",
       "          (norm): LayerNorm()\n",
       "          (pwconv1): Linear(in_features=320, out_features=1280, bias=True)\n",
       "          (act): GELU(approximate='none')\n",
       "          (grn): GRN()\n",
       "          (pwconv2): Linear(in_features=1280, out_features=320, bias=True)\n",
       "          (drop_path): Identity()\n",
       "        )\n",
       "        (6): Block(\n",
       "          (dwconv): Conv2d(320, 320, kernel_size=(7, 7), stride=(1, 1), padding=(3, 3), groups=320)\n",
       "          (norm): LayerNorm()\n",
       "          (pwconv1): Linear(in_features=320, out_features=1280, bias=True)\n",
       "          (act): GELU(approximate='none')\n",
       "          (grn): GRN()\n",
       "          (pwconv2): Linear(in_features=1280, out_features=320, bias=True)\n",
       "          (drop_path): Identity()\n",
       "        )\n",
       "        (7): Block(\n",
       "          (dwconv): Conv2d(320, 320, kernel_size=(7, 7), stride=(1, 1), padding=(3, 3), groups=320)\n",
       "          (norm): LayerNorm()\n",
       "          (pwconv1): Linear(in_features=320, out_features=1280, bias=True)\n",
       "          (act): GELU(approximate='none')\n",
       "          (grn): GRN()\n",
       "          (pwconv2): Linear(in_features=1280, out_features=320, bias=True)\n",
       "          (drop_path): Identity()\n",
       "        )\n",
       "      )\n",
       "      (3): Sequential(\n",
       "        (0): Block(\n",
       "          (dwconv): Conv2d(640, 640, kernel_size=(7, 7), stride=(1, 1), padding=(3, 3), groups=640)\n",
       "          (norm): LayerNorm()\n",
       "          (pwconv1): Linear(in_features=640, out_features=2560, bias=True)\n",
       "          (act): GELU(approximate='none')\n",
       "          (grn): GRN()\n",
       "          (pwconv2): Linear(in_features=2560, out_features=640, bias=True)\n",
       "          (drop_path): Identity()\n",
       "        )\n",
       "        (1): Block(\n",
       "          (dwconv): Conv2d(640, 640, kernel_size=(7, 7), stride=(1, 1), padding=(3, 3), groups=640)\n",
       "          (norm): LayerNorm()\n",
       "          (pwconv1): Linear(in_features=640, out_features=2560, bias=True)\n",
       "          (act): GELU(approximate='none')\n",
       "          (grn): GRN()\n",
       "          (pwconv2): Linear(in_features=2560, out_features=640, bias=True)\n",
       "          (drop_path): Identity()\n",
       "        )\n",
       "      )\n",
       "    )\n",
       "  )\n",
       "  (img_neck): ConvNeXtV2FPN(\n",
       "    (upsample_layers): ModuleList(\n",
       "      (0): Sequential(\n",
       "        (0): ConvTranspose2d(640, 320, kernel_size=(2, 2), stride=(2, 2))\n",
       "        (1): BatchNorm2d(320, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
       "        (2): ReLU()\n",
       "      )\n",
       "      (1): Sequential(\n",
       "        (0): ConvTranspose2d(320, 160, kernel_size=(2, 2), stride=(2, 2))\n",
       "        (1): BatchNorm2d(160, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
       "        (2): ReLU()\n",
       "      )\n",
       "    )\n",
       "    (out_layers): ModuleList(\n",
       "      (0): Sequential(\n",
       "        (0): Conv2d(640, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "        (1): BatchNorm2d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
       "        (2): ReLU()\n",
       "      )\n",
       "      (1): Sequential(\n",
       "        (0): Conv2d(320, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "        (1): BatchNorm2d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
       "        (2): ReLU()\n",
       "      )\n",
       "      (2): Sequential(\n",
       "        (0): Conv2d(160, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "        (1): BatchNorm2d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
       "        (2): ReLU()\n",
       "      )\n",
       "    )\n",
       "  )\n",
       "  (grid_mask): GridMask()\n",
       "  (pts_voxel_layer): SPConvVoxelization(voxel_size=[0.075 0.075 0.2  ], point_cloud_range=[-21.  -21.   -1.8  21.   21.    5.4], max_num_points=10, max_voxels=(120000, 160000), num_point_features=4)\n",
       ")"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
    "source": [
     "# Fresh iterator over the test dataloader; move the model to GPU in eval mode.\n",
     "# (The bare last expression displays the full model repr above.)\n",
     "test_iter = iter(runner.test_dataloader)\n",
     "runner.model.cuda().eval()"
    ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "average infer time: 0.140768620967865\n"
     ]
    }
   ],
   "source": [
    "runner.model.cuda().eval()\n",
    "with torch.no_grad():\n",
    "    data_batch = next(test_iter)\n",
    "    data_batch = runner.model.data_preprocessor(data_batch, training=False)\n",
    "    start = time.time()\n",
    "    for i in range(100):\n",
    "        outputs = runner.model(**data_batch, mode='predict')\n",
    "    print(f'average infer time: {(time.time() - start)/100}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.0052607059478759766 0.04562997817993164 0.053598880767822266 0.0010895729064941406 0.0220491886138916\n"
     ]
    }
   ],
   "source": [
    "# Per-stage latency breakdown of the detector forward pass.\n",
    "# NOTE: torch.cuda.synchronize() is required before every timestamp because\n",
    "# CUDA kernels execute asynchronously; without it each stage only measures\n",
    "# kernel-launch time and the real cost leaks into the next stage.\n",
    "# runner.model.cuda().eval()\n",
    "with torch.no_grad():\n",
    "    data_batch = next(test_iter)\n",
    "    data_batch = runner.model.data_preprocessor(data_batch, training=False)\n",
    "    imgs = data_batch['inputs'].get('imgs', None)\n",
    "    points = data_batch['inputs'].get('points', None)\n",
    "    img_metas = [item.metainfo for item in data_batch['data_samples']]\n",
    "\n",
    "    torch.cuda.synchronize()\n",
    "    time1 = time.time()\n",
    "    # Stage 1: 2D image feature extraction.\n",
    "    img_feats = runner.model.extract_img_feat(imgs, img_metas)\n",
    "\n",
    "    torch.cuda.synchronize()\n",
    "    time2 = time.time()\n",
    "    # Stage 2: point-cloud voxelization + voxel feature encoding.\n",
    "    voxels, num_points, coors = runner.model.voxelize(points)\n",
    "    voxel_features = runner.model.pts_voxel_encoder(voxels, num_points, coors)\n",
    "    batch_size = coors[-1, 0] + 1\n",
    "\n",
    "    torch.cuda.synchronize()\n",
    "    time3 = time.time()\n",
    "    # Stage 3: sparse middle encoder (this variant also consumes/returns\n",
    "    # image features).\n",
    "    pts_feats, img_feats = runner.model.pts_middle_encoder(voxel_features, coors, batch_size, img_feats, img_metas)\n",
    "    # pts_feats = runner.model.pts_middle_encoder(voxel_features, coors, batch_size)\n",
    "\n",
    "    torch.cuda.synchronize()\n",
    "    time4 = time.time()\n",
    "    # Stage 4: BEV backbone (+ optional neck).\n",
    "    pts_feats = runner.model.pts_backbone(pts_feats)\n",
    "    if runner.model.with_pts_neck:\n",
    "        pts_feats = runner.model.pts_neck(pts_feats)\n",
    "\n",
    "    torch.cuda.synchronize()\n",
    "    time5 = time.time()\n",
    "    # Stage 5: detection head (test-time path).\n",
    "    if (pts_feats or img_feats) and runner.model.with_pts_bbox:\n",
    "        bbox_pts = runner.model.pts_bbox_head_test(pts_feats, img_feats, img_metas)\n",
    "\n",
    "    torch.cuda.synchronize()\n",
    "    time6 = time.time()\n",
    "    # Stage 6: pack predictions back into data samples (CPU-side).\n",
    "    detsamples = runner.model.add_pred_to_datasample(data_batch['data_samples'],\n",
    "                                                data_instances_3d = bbox_pts,\n",
    "                                                data_instances_2d = None)\n",
    "\n",
    "print(time2 - time1, time3 - time2, time4 - time3, time5 - time4, time6 - time5)\n",
    "# print(time2 - time1, time3 - time2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Saved per-stage timings from an earlier run, kept here for comparison.\n",
    "# (As bare space-separated numbers this cell was a SyntaxError if executed,\n",
    "# so it is preserved as a comment instead.)\n",
    "# 0.08369183540344238 0.11413288116455078 0.0013115406036376953 0.026012659072875977"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Inspect the shapes of the predicted instances from the last predict call.\n",
    "pred = outputs[0].get('pred_instances_3d')\n",
    "tuple(pred[key].shape for key in ('scores_3d', 'labels_3d', 'bboxes_3d'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Run one training-mode forward pass (gradients disabled) on a single train\n",
    "# batch to inspect the raw head outputs (e.g. the DN mask dict below).\n",
    "runner.model.train()\n",
    "with torch.no_grad():\n",
    "    data_batch = next(iter(runner.train_dataloader))\n",
    "    data_batch = runner.model.data_preprocessor(data_batch, training=True)\n",
    "    batch_inputs_dict = data_batch['inputs']\n",
    "    batch_data_samples = data_batch['data_samples']\n",
    "    imgs = batch_inputs_dict.get('imgs', None)\n",
    "    points = batch_inputs_dict.get('points', None)\n",
    "    img_metas = [item.metainfo for item in batch_data_samples]\n",
    "    # Ground truth from the training annotations of each sample (unused\n",
    "    # below in this cell; kept for manual inspection).\n",
    "    gt_bboxes_3d = [item.get('gt_instances_3d')['bboxes_3d'] for item in batch_data_samples]\n",
    "    gt_labels_3d = [item.get('gt_instances_3d')['labels_3d'] for item in batch_data_samples]\n",
    "    \n",
    "    img_feats, pts_feats = runner.model.extract_feat(points, imgs=imgs, img_metas=img_metas)\n",
    "    outs = runner.model.pts_bbox_head(pts_feats, img_feats, img_metas)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Display the denoising (DN) mask dict produced by the training-mode head.\n",
    "outs['dn_mask_dict']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Same forward pass as above but in eval mode on a validation batch; GT here\n",
    "# comes from 'eval_ann_info' rather than 'gt_instances_3d'.\n",
    "runner.model.eval()\n",
    "with torch.no_grad():\n",
    "    data_batch = next(iter(runner.val_dataloader))\n",
    "    data_batch = runner.model.data_preprocessor(data_batch, training=False)\n",
    "    batch_inputs_dict = data_batch['inputs']\n",
    "    batch_data_samples = data_batch['data_samples']\n",
    "    imgs = batch_inputs_dict.get('imgs', None)\n",
    "    points = batch_inputs_dict.get('points', None)\n",
    "    img_metas = [item.metainfo for item in batch_data_samples]\n",
    "    # Evaluation annotations (unused below in this cell; for inspection).\n",
    "    gt_bboxes_3d = [item.get('eval_ann_info')['gt_bboxes_3d'] for item in batch_data_samples]\n",
    "    gt_labels_3d = [item.get('eval_ann_info')['gt_labels_3d'] for item in batch_data_samples]\n",
    "    \n",
    "    img_feats, pts_feats = runner.model.extract_feat(points, imgs=imgs, img_metas=img_metas)\n",
    "    outs = runner.model.pts_bbox_head(pts_feats, img_feats, img_metas)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Accumulators for AP computation: one slot per class, and per distance\n",
    "# threshold for is_tp_list.\n",
    "# BUG FIX: the original used [[]] * n, which replicates references to ONE\n",
    "# shared inner list, so appending to any slot mutated every class/threshold.\n",
    "# Comprehensions create independent lists (consistent with the corrected\n",
    "# initialization in the evaluation loop cell below).\n",
    "class_names = ['Car', 'Pedestrian', 'Cyclist']\n",
    "dist_thresh = [0.15, 0.25, 0.5, 1.0]\n",
    "pred_scores_list = [[] for _ in class_names]\n",
    "is_tp_list = [[[] for _ in dist_thresh] for _ in class_names]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Compute mAP and AR over the whole test set using center-distance matching\n",
    "# (nuScenes-style: a prediction is a TP if its center lies within\n",
    "# dist_thresh meters of a not-yet-matched GT box of the same class).\n",
    "class_names = ['Car', 'Pedestrian', 'Cyclist']\n",
    "dist_thresh = [0.15, 0.25, 0.5, 1.0]\n",
    "pred_scores_list = [[] for _ in range(len(class_names))]\n",
    "is_tp_list = [[[] for _ in range(len(dist_thresh))] for _ in range(len(class_names))]\n",
    "num_gt = [0 for _ in range(len(class_names))]\n",
    "count = 0\n",
    "runner.model.eval()\n",
    "\n",
    "with torch.no_grad():\n",
    "    for data_batch in runner.test_dataloader:\n",
    "        data_batch = runner.model.data_preprocessor(data_batch, training=False)\n",
    "        # NOTE(review): both isinstance branches are identical, and\n",
    "        # '**data_batch' would raise for a list/tuple batch anyway —\n",
    "        # confirm whether the second branch was meant to unpack positionally.\n",
    "        if isinstance(data_batch, dict):\n",
    "            outputs = runner.model(**data_batch, mode='predict')\n",
    "        elif isinstance(data_batch, (list, tuple)):\n",
    "            outputs = runner.model(**data_batch, mode='predict')\n",
    "        else:\n",
    "            raise TypeError()\n",
    "        for i in range(len(outputs)):\n",
    "            pred_bboxes = outputs[i].get('pred_instances_3d')['bboxes_3d'].tensor\n",
    "            pred_labels = outputs[i].get('pred_instances_3d')['labels_3d']\n",
    "            pred_scores = outputs[i].get('pred_instances_3d')['scores_3d']\n",
    "            gt_bboxes = data_batch['data_samples'][i].get('eval_ann_info')['gt_bboxes_3d'].tensor\n",
    "            gt_labels = data_batch['data_samples'][i].get('eval_ann_info')['gt_labels_3d']\n",
    "            for c in range(len(class_names)):\n",
    "                gt_bboxes_c = gt_bboxes[gt_labels == c]\n",
    "                # Samples with no GT of this class are skipped entirely, so\n",
    "                # their predictions contribute neither TPs nor FPs.\n",
    "                if len(gt_bboxes_c) == 0:\n",
    "                    continue\n",
    "                pred_bboxes_c = pred_bboxes[pred_labels == c]\n",
    "                pred_scores_c = pred_scores[pred_labels == c]\n",
    "                # One independent copy of the (pred x gt) center-distance\n",
    "                # matrix per threshold, since matching mutates it below.\n",
    "                dist_matrix = torch.cdist(pred_bboxes_c[:, :3], gt_bboxes_c[:, :3]).repeat(len(dist_thresh), 1).reshape(len(dist_thresh), len(pred_bboxes_c), len(gt_bboxes_c))\n",
    "                is_tp = torch.zeros(len(dist_thresh), len(pred_bboxes_c))\n",
    "                # Greedy matching: each prediction claims its nearest GT and\n",
    "                # the GT's column is marked taken (distance set to 1000).\n",
    "                # NOTE(review): greedy order follows prediction index, not\n",
    "                # descending score — confirm this matches the intended metric.\n",
    "                for j in range(len(dist_thresh)):\n",
    "                    for k in range(len(pred_bboxes_c)):\n",
    "                        if dist_matrix[j][k].min() < dist_thresh[j]:\n",
    "                            is_tp[j][k] = 1\n",
    "                            dist_matrix[j][:, dist_matrix[j][k].argmin()] = 1000\n",
    "                for j in range(len(dist_thresh)):\n",
    "                    is_tp_list[c][j].append(is_tp[j])\n",
    "                pred_scores_list[c].append(pred_scores_c)\n",
    "                num_gt[c] += len(gt_bboxes_c)\n",
    "        count += 1\n",
    "        # Lightweight progress indicator (one line per 100 batches).\n",
    "        if count % 100 == 0:\n",
    "            print(count)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Turn the accumulated TP flags into AP per (class, distance threshold).\n",
    "all_class_ap = []\n",
    "all_class_cum_precision = []\n",
    "all_class_cum_recall = []\n",
    "for c in range(len(class_names)):\n",
    "    pred_scores = torch.cat(pred_scores_list[c], dim=0)\n",
    "    cum_precision_list = []\n",
    "    cum_recall_list = []\n",
    "    ap_list = []\n",
    "    # Global ordering of all predictions of this class by descending score;\n",
    "    # the same ordering is applied to each threshold's TP flags below.\n",
    "    _, sort_idx = pred_scores.sort(descending=True)\n",
    "    print('sum of preds:', len(pred_scores))\n",
    "\n",
    "    for i in range(len(dist_thresh)):\n",
    "        is_tp = torch.cat(is_tp_list[c][i], dim=0)\n",
    "        is_tp = is_tp[sort_idx]\n",
    "        cum_tp = torch.cumsum(is_tp, dim=0)\n",
    "        # Precision/recall after keeping the top-k predictions, for every k.\n",
    "        cum_precision = cum_tp / torch.arange(1, len(pred_scores) + 1)\n",
    "        cum_recall = cum_tp / num_gt[c]\n",
    "        \n",
    "        # Monotone non-increasing precision envelope (right-to-left running\n",
    "        # max), as in standard interpolated AP.\n",
    "        max_precision = torch.zeros_like(cum_precision)\n",
    "        max_precision[-1] = cum_precision[-1]\n",
    "        for j in range(len(pred_scores) - 2, -1, -1):\n",
    "            max_precision[j] = torch.max(max_precision[j+1], cum_precision[j])\n",
    "        \n",
    "        # Evaluate only at indices where the score changes, so tied scores\n",
    "        # are grouped into one operating point.\n",
    "        # NOTE(review): pred_scores here is unsorted while is_tp was\n",
    "        # reordered by sort_idx — confirm the tie-grouping is intended to\n",
    "        # use the sorted scores (pred_scores[sort_idx]).\n",
    "        div = []\n",
    "        for j in range(len(pred_scores)):\n",
    "            if j == 0 or pred_scores[j] != pred_scores[j-1]:\n",
    "                div.append(j)\n",
    "\n",
    "        # Integrate precision over recall, skipping operating points with\n",
    "        # recall <= 0.1 (nuScenes-style minimum-recall cutoff).\n",
    "        ap = 0\n",
    "        for j in div:\n",
    "            if cum_recall[j] > 0.1:\n",
    "                if j == 0:\n",
    "                    ap += max_precision[j] * cum_recall[j]\n",
    "                else:\n",
    "                    ap += max_precision[j] * (cum_recall[j] - cum_recall[j-1])\n",
    "\n",
    "        ap_list.append(ap)\n",
    "        cum_precision_list.append(cum_precision)\n",
    "        cum_recall_list.append(cum_recall)\n",
    "        print('class:', class_names[c], 'dist_thresh:', dist_thresh[i], 'ap:', ap)\n",
    "        \n",
    "    all_class_ap.append(ap_list)\n",
    "    all_class_cum_precision.append(cum_precision_list)\n",
    "    all_class_cum_recall.append(cum_recall_list)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "python310",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.16"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
