{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/pytorch/lib/python3.8/site-packages/pytorch_lightning/core/datamodule.py:175: LightningDeprecationWarning: DataModule property `dims` was deprecated in v1.5 and will be removed in v1.7.\n",
      "  rank_zero_deprecation(\"DataModule property `dims` was deprecated in v1.5 and will be removed in v1.7.\")\n",
      "/opt/conda/envs/pytorch/lib/python3.8/site-packages/pytorch_lightning/core/datamodule.py:152: LightningDeprecationWarning: DataModule property `test_transforms` was deprecated in v1.5 and will be removed in v1.7.\n",
      "  rank_zero_deprecation(\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "from torch import nn\n",
    "from ddmimv4 import DDMIMV4\n",
    "from pytorch_lightning.callbacks import ModelCheckpoint\n",
    "from pl_bolts.datamodules import ImagenetDataModule\n",
    "from config.option import parse_args\n",
    "\n",
    "# Restore the trained model and switch to inference mode so dropout /\n",
    "# batch-norm behave deterministically during evaluation.\n",
    "model = DDMIMV4.load_from_checkpoint('log/seed1/version_203/checkpoints/last.ckpt')\n",
    "model.eval()\n",
    "datamodule = ImagenetDataModule(\"/root/study/imagenet2012\", num_workers=2, batch_size=4, pin_memory=False)\n",
    "\n",
    "test_loader = datamodule.test_dataloader()\n",
    "# no_grad: we only need forward passes here; without it every test_step\n",
    "# builds an autograd graph and memory grows with the loop.\n",
    "with torch.no_grad():\n",
    "    for index, batch in enumerate(test_loader):\n",
    "        loss = model.test_step(batch, index)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "ename": "SystemExit",
     "evalue": "2",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mSystemExit\u001b[0m                                Traceback (most recent call last)",
      "Cell \u001b[0;32mIn[1], line 7\u001b[0m\n\u001b[1;32m      5\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mpl_bolts\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mdatamodules\u001b[39;00m \u001b[39mimport\u001b[39;00m ImagenetDataModule\n\u001b[1;32m      6\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39moption\u001b[39;00m \u001b[39mimport\u001b[39;00m parse_args\n\u001b[0;32m----> 7\u001b[0m args\u001b[39m=\u001b[39m parse_args()\n\u001b[1;32m      9\u001b[0m model\u001b[39m=\u001b[39mDDMIMV4\u001b[39m.\u001b[39mload_from_checkpoint(\u001b[39m'\u001b[39m\u001b[39mlog/seed1/version_203/checkpoints/last.ckpt\u001b[39m\u001b[39m'\u001b[39m)\n\u001b[1;32m     10\u001b[0m model    \n",
      "File \u001b[0;32m~/study/DDMIM/option.py:36\u001b[0m, in \u001b[0;36mparse_args\u001b[0;34m()\u001b[0m\n\u001b[1;32m     31\u001b[0m parser\u001b[39m.\u001b[39madd_argument(\u001b[39m'\u001b[39m\u001b[39m--gpu\u001b[39m\u001b[39m'\u001b[39m, default\u001b[39m=\u001b[39m\u001b[39m1\u001b[39m, \u001b[39mtype\u001b[39m\u001b[39m=\u001b[39m\u001b[39mint\u001b[39m,\n\u001b[1;32m     32\u001b[0m                 help\u001b[39m=\u001b[39m\u001b[39m'\u001b[39m\u001b[39mGPU id to use.\u001b[39m\u001b[39m'\u001b[39m)\n\u001b[1;32m     33\u001b[0m parser\u001b[39m.\u001b[39madd_argument(\u001b[39m'\u001b[39m\u001b[39m--log_dir\u001b[39m\u001b[39m'\u001b[39m, metavar\u001b[39m=\u001b[39m\u001b[39m'\u001b[39m\u001b[39mDIR\u001b[39m\u001b[39m'\u001b[39m, nargs\u001b[39m=\u001b[39m\u001b[39m'\u001b[39m\u001b[39m?\u001b[39m\u001b[39m'\u001b[39m, default\u001b[39m=\u001b[39m\u001b[39m'\u001b[39m\u001b[39mDDMIM/log\u001b[39m\u001b[39m'\u001b[39m,\n\u001b[1;32m     34\u001b[0m                 help\u001b[39m=\u001b[39m\u001b[39m'\u001b[39m\u001b[39mpath to log (default: DDMIM/log)\u001b[39m\u001b[39m'\u001b[39m)\n\u001b[0;32m---> 36\u001b[0m \u001b[39mreturn\u001b[39;00m parser\u001b[39m.\u001b[39;49mparse_args()\n",
      "File \u001b[0;32m/opt/conda/envs/pytorch/lib/python3.8/argparse.py:1771\u001b[0m, in \u001b[0;36mArgumentParser.parse_args\u001b[0;34m(self, args, namespace)\u001b[0m\n\u001b[1;32m   1769\u001b[0m \u001b[39mif\u001b[39;00m argv:\n\u001b[1;32m   1770\u001b[0m     msg \u001b[39m=\u001b[39m _(\u001b[39m'\u001b[39m\u001b[39munrecognized arguments: \u001b[39m\u001b[39m%s\u001b[39;00m\u001b[39m'\u001b[39m)\n\u001b[0;32m-> 1771\u001b[0m     \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49merror(msg \u001b[39m%\u001b[39;49m \u001b[39m'\u001b[39;49m\u001b[39m \u001b[39;49m\u001b[39m'\u001b[39;49m\u001b[39m.\u001b[39;49mjoin(argv))\n\u001b[1;32m   1772\u001b[0m \u001b[39mreturn\u001b[39;00m args\n",
      "File \u001b[0;32m/opt/conda/envs/pytorch/lib/python3.8/argparse.py:2521\u001b[0m, in \u001b[0;36mArgumentParser.error\u001b[0;34m(self, message)\u001b[0m\n\u001b[1;32m   2519\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mprint_usage(_sys\u001b[39m.\u001b[39mstderr)\n\u001b[1;32m   2520\u001b[0m args \u001b[39m=\u001b[39m {\u001b[39m'\u001b[39m\u001b[39mprog\u001b[39m\u001b[39m'\u001b[39m: \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mprog, \u001b[39m'\u001b[39m\u001b[39mmessage\u001b[39m\u001b[39m'\u001b[39m: message}\n\u001b[0;32m-> 2521\u001b[0m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mexit(\u001b[39m2\u001b[39;49m, _(\u001b[39m'\u001b[39;49m\u001b[39m%(prog)s\u001b[39;49;00m\u001b[39m: error: \u001b[39;49m\u001b[39m%(message)s\u001b[39;49;00m\u001b[39m\\n\u001b[39;49;00m\u001b[39m'\u001b[39;49m) \u001b[39m%\u001b[39;49m args)\n",
      "File \u001b[0;32m/opt/conda/envs/pytorch/lib/python3.8/argparse.py:2508\u001b[0m, in \u001b[0;36mArgumentParser.exit\u001b[0;34m(self, status, message)\u001b[0m\n\u001b[1;32m   2506\u001b[0m \u001b[39mif\u001b[39;00m message:\n\u001b[1;32m   2507\u001b[0m     \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_print_message(message, _sys\u001b[39m.\u001b[39mstderr)\n\u001b[0;32m-> 2508\u001b[0m _sys\u001b[39m.\u001b[39;49mexit(status)\n",
      "\u001b[0;31mSystemExit\u001b[0m: 2"
     ]
    }
   ],
   "source": [
    "# Re-display the full traceback of the most recent exception\n",
    "# (the argparse SystemExit: 2 raised in the cell above).\n",
    "%tb"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(2, 1, 1, 1, 1, 1)\n",
      "(1, 1)\n"
     ]
    }
   ],
   "source": [
    "# Sanity check of tuple repetition and concatenation:\n",
    "# (2,) + (1,)*5 -> a 6-tuple; (1,) + (1,) -> a pair.\n",
    "head = (2,)\n",
    "tail = (1,) * 5\n",
    "print(head + tail)\n",
    "print((1,) + (1,))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "True\n",
      "True\n",
      "False\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "from timm.models.layers import DropPath\n",
    "\n",
    "# train/eval mode propagates parent -> children only, never sideways:\n",
    "# switching one child to eval leaves the parent and its sibling untouched.\n",
    "root = torch.nn.Module()\n",
    "root.b = DropPath(0.5)\n",
    "root.c = torch.nn.Module()\n",
    "with torch.no_grad():\n",
    "    root.c.eval()  # equivalent to root.c.train(False)\n",
    "    print(root.training)\n",
    "    print(root.b.training)\n",
    "    print(root.c.training)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[0., 1., 1., 0.],\n",
      "        [1., 1., 0., 1.],\n",
      "        [0., 0., 0., 1.]])\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "\n",
    "# Fixed seed so the Bernoulli draw below is reproducible.\n",
    "torch.manual_seed(1)\n",
    "template = torch.zeros([3, 4])\n",
    "\n",
    "# Draw a 0/1 mask with probability 0.5 per element; new_empty keeps the\n",
    "# dtype/device of the template tensor.\n",
    "mask = template.new_empty(template.shape).bernoulli_(0.5)\n",
    "print(mask)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "model.encoder.patch_embed.proj.weight 96\n",
      "model.encoder.patch_embed.proj.bias 96\n",
      "model.encoder.patch_embed.norm.weight 96\n",
      "model.encoder.patch_embed.norm.bias 96\n",
      "model.encoder.layers.0.blocks.0.norm1.weight 96\n",
      "model.encoder.layers.0.blocks.0.norm1.bias 96\n",
      "model.encoder.layers.0.blocks.0.attn.logit_scale 3\n",
      "model.encoder.layers.0.blocks.0.attn.q_bias 96\n",
      "model.encoder.layers.0.blocks.0.attn.v_bias 96\n",
      "model.encoder.layers.0.blocks.0.attn.relative_coords_table 1\n",
      "model.encoder.layers.0.blocks.0.attn.relative_position_index 49\n",
      "model.encoder.layers.0.blocks.0.attn.cpb_mlp.0.weight 512\n",
      "model.encoder.layers.0.blocks.0.attn.cpb_mlp.0.bias 512\n",
      "model.encoder.layers.0.blocks.0.attn.cpb_mlp.2.weight 3\n",
      "model.encoder.layers.0.blocks.0.attn.qkv.weight 288\n",
      "model.encoder.layers.0.blocks.0.attn.proj.weight 96\n",
      "model.encoder.layers.0.blocks.0.attn.proj.bias 96\n",
      "model.encoder.layers.0.blocks.0.norm2.weight 96\n",
      "model.encoder.layers.0.blocks.0.norm2.bias 96\n",
      "model.encoder.layers.0.blocks.0.mlp.fc1.weight 384\n",
      "model.encoder.layers.0.blocks.0.mlp.fc1.bias 384\n",
      "model.encoder.layers.0.blocks.0.mlp.fc2.weight 96\n",
      "model.encoder.layers.0.blocks.0.mlp.fc2.bias 96\n",
      "model.encoder.layers.0.blocks.1.attn_mask 64\n",
      "model.encoder.layers.0.blocks.1.norm1.weight 96\n",
      "model.encoder.layers.0.blocks.1.norm1.bias 96\n",
      "model.encoder.layers.0.blocks.1.attn.logit_scale 3\n",
      "model.encoder.layers.0.blocks.1.attn.q_bias 96\n",
      "model.encoder.layers.0.blocks.1.attn.v_bias 96\n",
      "model.encoder.layers.0.blocks.1.attn.relative_coords_table 1\n",
      "model.encoder.layers.0.blocks.1.attn.relative_position_index 49\n",
      "model.encoder.layers.0.blocks.1.attn.cpb_mlp.0.weight 512\n",
      "model.encoder.layers.0.blocks.1.attn.cpb_mlp.0.bias 512\n",
      "model.encoder.layers.0.blocks.1.attn.cpb_mlp.2.weight 3\n",
      "model.encoder.layers.0.blocks.1.attn.qkv.weight 288\n",
      "model.encoder.layers.0.blocks.1.attn.proj.weight 96\n",
      "model.encoder.layers.0.blocks.1.attn.proj.bias 96\n",
      "model.encoder.layers.0.blocks.1.norm2.weight 96\n",
      "model.encoder.layers.0.blocks.1.norm2.bias 96\n",
      "model.encoder.layers.0.blocks.1.mlp.fc1.weight 384\n",
      "model.encoder.layers.0.blocks.1.mlp.fc1.bias 384\n",
      "model.encoder.layers.0.blocks.1.mlp.fc2.weight 96\n",
      "model.encoder.layers.0.blocks.1.mlp.fc2.bias 96\n",
      "model.encoder.layers.0.downsample.reduction.weight 192\n",
      "model.encoder.layers.0.downsample.norm.weight 192\n",
      "model.encoder.layers.0.downsample.norm.bias 192\n",
      "model.encoder.layers.1.blocks.0.norm1.weight 192\n",
      "model.encoder.layers.1.blocks.0.norm1.bias 192\n",
      "model.encoder.layers.1.blocks.0.attn.logit_scale 6\n",
      "model.encoder.layers.1.blocks.0.attn.q_bias 192\n",
      "model.encoder.layers.1.blocks.0.attn.v_bias 192\n",
      "model.encoder.layers.1.blocks.0.attn.relative_coords_table 1\n",
      "model.encoder.layers.1.blocks.0.attn.relative_position_index 49\n",
      "model.encoder.layers.1.blocks.0.attn.cpb_mlp.0.weight 512\n",
      "model.encoder.layers.1.blocks.0.attn.cpb_mlp.0.bias 512\n",
      "model.encoder.layers.1.blocks.0.attn.cpb_mlp.2.weight 6\n",
      "model.encoder.layers.1.blocks.0.attn.qkv.weight 576\n",
      "model.encoder.layers.1.blocks.0.attn.proj.weight 192\n",
      "model.encoder.layers.1.blocks.0.attn.proj.bias 192\n",
      "model.encoder.layers.1.blocks.0.norm2.weight 192\n",
      "model.encoder.layers.1.blocks.0.norm2.bias 192\n",
      "model.encoder.layers.1.blocks.0.mlp.fc1.weight 768\n",
      "model.encoder.layers.1.blocks.0.mlp.fc1.bias 768\n",
      "model.encoder.layers.1.blocks.0.mlp.fc2.weight 192\n",
      "model.encoder.layers.1.blocks.0.mlp.fc2.bias 192\n",
      "model.encoder.layers.1.blocks.1.attn_mask 16\n",
      "model.encoder.layers.1.blocks.1.norm1.weight 192\n",
      "model.encoder.layers.1.blocks.1.norm1.bias 192\n",
      "model.encoder.layers.1.blocks.1.attn.logit_scale 6\n",
      "model.encoder.layers.1.blocks.1.attn.q_bias 192\n",
      "model.encoder.layers.1.blocks.1.attn.v_bias 192\n",
      "model.encoder.layers.1.blocks.1.attn.relative_coords_table 1\n",
      "model.encoder.layers.1.blocks.1.attn.relative_position_index 49\n",
      "model.encoder.layers.1.blocks.1.attn.cpb_mlp.0.weight 512\n",
      "model.encoder.layers.1.blocks.1.attn.cpb_mlp.0.bias 512\n",
      "model.encoder.layers.1.blocks.1.attn.cpb_mlp.2.weight 6\n",
      "model.encoder.layers.1.blocks.1.attn.qkv.weight 576\n",
      "model.encoder.layers.1.blocks.1.attn.proj.weight 192\n",
      "model.encoder.layers.1.blocks.1.attn.proj.bias 192\n",
      "model.encoder.layers.1.blocks.1.norm2.weight 192\n",
      "model.encoder.layers.1.blocks.1.norm2.bias 192\n",
      "model.encoder.layers.1.blocks.1.mlp.fc1.weight 768\n",
      "model.encoder.layers.1.blocks.1.mlp.fc1.bias 768\n",
      "model.encoder.layers.1.blocks.1.mlp.fc2.weight 192\n",
      "model.encoder.layers.1.blocks.1.mlp.fc2.bias 192\n",
      "model.encoder.layers.1.downsample.reduction.weight 384\n",
      "model.encoder.layers.1.downsample.norm.weight 384\n",
      "model.encoder.layers.1.downsample.norm.bias 384\n",
      "model.encoder.layers.2.blocks.0.norm1.weight 384\n",
      "model.encoder.layers.2.blocks.0.norm1.bias 384\n",
      "model.encoder.layers.2.blocks.0.attn.logit_scale 12\n",
      "model.encoder.layers.2.blocks.0.attn.q_bias 384\n",
      "model.encoder.layers.2.blocks.0.attn.v_bias 384\n",
      "model.encoder.layers.2.blocks.0.attn.relative_coords_table 1\n",
      "model.encoder.layers.2.blocks.0.attn.relative_position_index 49\n",
      "model.encoder.layers.2.blocks.0.attn.cpb_mlp.0.weight 512\n",
      "model.encoder.layers.2.blocks.0.attn.cpb_mlp.0.bias 512\n",
      "model.encoder.layers.2.blocks.0.attn.cpb_mlp.2.weight 12\n",
      "model.encoder.layers.2.blocks.0.attn.qkv.weight 1152\n",
      "model.encoder.layers.2.blocks.0.attn.proj.weight 384\n",
      "model.encoder.layers.2.blocks.0.attn.proj.bias 384\n",
      "model.encoder.layers.2.blocks.0.norm2.weight 384\n",
      "model.encoder.layers.2.blocks.0.norm2.bias 384\n",
      "model.encoder.layers.2.blocks.0.mlp.fc1.weight 1536\n",
      "model.encoder.layers.2.blocks.0.mlp.fc1.bias 1536\n",
      "model.encoder.layers.2.blocks.0.mlp.fc2.weight 384\n",
      "model.encoder.layers.2.blocks.0.mlp.fc2.bias 384\n",
      "model.encoder.layers.2.blocks.1.attn_mask 4\n",
      "model.encoder.layers.2.blocks.1.norm1.weight 384\n",
      "model.encoder.layers.2.blocks.1.norm1.bias 384\n",
      "model.encoder.layers.2.blocks.1.attn.logit_scale 12\n",
      "model.encoder.layers.2.blocks.1.attn.q_bias 384\n",
      "model.encoder.layers.2.blocks.1.attn.v_bias 384\n",
      "model.encoder.layers.2.blocks.1.attn.relative_coords_table 1\n",
      "model.encoder.layers.2.blocks.1.attn.relative_position_index 49\n",
      "model.encoder.layers.2.blocks.1.attn.cpb_mlp.0.weight 512\n",
      "model.encoder.layers.2.blocks.1.attn.cpb_mlp.0.bias 512\n",
      "model.encoder.layers.2.blocks.1.attn.cpb_mlp.2.weight 12\n",
      "model.encoder.layers.2.blocks.1.attn.qkv.weight 1152\n",
      "model.encoder.layers.2.blocks.1.attn.proj.weight 384\n",
      "model.encoder.layers.2.blocks.1.attn.proj.bias 384\n",
      "model.encoder.layers.2.blocks.1.norm2.weight 384\n",
      "model.encoder.layers.2.blocks.1.norm2.bias 384\n",
      "model.encoder.layers.2.blocks.1.mlp.fc1.weight 1536\n",
      "model.encoder.layers.2.blocks.1.mlp.fc1.bias 1536\n",
      "model.encoder.layers.2.blocks.1.mlp.fc2.weight 384\n",
      "model.encoder.layers.2.blocks.1.mlp.fc2.bias 384\n",
      "model.encoder.layers.2.blocks.2.norm1.weight 384\n",
      "model.encoder.layers.2.blocks.2.norm1.bias 384\n",
      "model.encoder.layers.2.blocks.2.attn.logit_scale 12\n",
      "model.encoder.layers.2.blocks.2.attn.q_bias 384\n",
      "model.encoder.layers.2.blocks.2.attn.v_bias 384\n",
      "model.encoder.layers.2.blocks.2.attn.relative_coords_table 1\n",
      "model.encoder.layers.2.blocks.2.attn.relative_position_index 49\n",
      "model.encoder.layers.2.blocks.2.attn.cpb_mlp.0.weight 512\n",
      "model.encoder.layers.2.blocks.2.attn.cpb_mlp.0.bias 512\n",
      "model.encoder.layers.2.blocks.2.attn.cpb_mlp.2.weight 12\n",
      "model.encoder.layers.2.blocks.2.attn.qkv.weight 1152\n",
      "model.encoder.layers.2.blocks.2.attn.proj.weight 384\n",
      "model.encoder.layers.2.blocks.2.attn.proj.bias 384\n",
      "model.encoder.layers.2.blocks.2.norm2.weight 384\n",
      "model.encoder.layers.2.blocks.2.norm2.bias 384\n",
      "model.encoder.layers.2.blocks.2.mlp.fc1.weight 1536\n",
      "model.encoder.layers.2.blocks.2.mlp.fc1.bias 1536\n",
      "model.encoder.layers.2.blocks.2.mlp.fc2.weight 384\n",
      "model.encoder.layers.2.blocks.2.mlp.fc2.bias 384\n",
      "model.encoder.layers.2.blocks.3.attn_mask 4\n",
      "model.encoder.layers.2.blocks.3.norm1.weight 384\n",
      "model.encoder.layers.2.blocks.3.norm1.bias 384\n",
      "model.encoder.layers.2.blocks.3.attn.logit_scale 12\n",
      "model.encoder.layers.2.blocks.3.attn.q_bias 384\n",
      "model.encoder.layers.2.blocks.3.attn.v_bias 384\n",
      "model.encoder.layers.2.blocks.3.attn.relative_coords_table 1\n",
      "model.encoder.layers.2.blocks.3.attn.relative_position_index 49\n",
      "model.encoder.layers.2.blocks.3.attn.cpb_mlp.0.weight 512\n",
      "model.encoder.layers.2.blocks.3.attn.cpb_mlp.0.bias 512\n",
      "model.encoder.layers.2.blocks.3.attn.cpb_mlp.2.weight 12\n",
      "model.encoder.layers.2.blocks.3.attn.qkv.weight 1152\n",
      "model.encoder.layers.2.blocks.3.attn.proj.weight 384\n",
      "model.encoder.layers.2.blocks.3.attn.proj.bias 384\n",
      "model.encoder.layers.2.blocks.3.norm2.weight 384\n",
      "model.encoder.layers.2.blocks.3.norm2.bias 384\n",
      "model.encoder.layers.2.blocks.3.mlp.fc1.weight 1536\n",
      "model.encoder.layers.2.blocks.3.mlp.fc1.bias 1536\n",
      "model.encoder.layers.2.blocks.3.mlp.fc2.weight 384\n",
      "model.encoder.layers.2.blocks.3.mlp.fc2.bias 384\n",
      "model.encoder.layers.2.blocks.4.norm1.weight 384\n",
      "model.encoder.layers.2.blocks.4.norm1.bias 384\n",
      "model.encoder.layers.2.blocks.4.attn.logit_scale 12\n",
      "model.encoder.layers.2.blocks.4.attn.q_bias 384\n",
      "model.encoder.layers.2.blocks.4.attn.v_bias 384\n",
      "model.encoder.layers.2.blocks.4.attn.relative_coords_table 1\n",
      "model.encoder.layers.2.blocks.4.attn.relative_position_index 49\n",
      "model.encoder.layers.2.blocks.4.attn.cpb_mlp.0.weight 512\n",
      "model.encoder.layers.2.blocks.4.attn.cpb_mlp.0.bias 512\n",
      "model.encoder.layers.2.blocks.4.attn.cpb_mlp.2.weight 12\n",
      "model.encoder.layers.2.blocks.4.attn.qkv.weight 1152\n",
      "model.encoder.layers.2.blocks.4.attn.proj.weight 384\n",
      "model.encoder.layers.2.blocks.4.attn.proj.bias 384\n",
      "model.encoder.layers.2.blocks.4.norm2.weight 384\n",
      "model.encoder.layers.2.blocks.4.norm2.bias 384\n",
      "model.encoder.layers.2.blocks.4.mlp.fc1.weight 1536\n",
      "model.encoder.layers.2.blocks.4.mlp.fc1.bias 1536\n",
      "model.encoder.layers.2.blocks.4.mlp.fc2.weight 384\n",
      "model.encoder.layers.2.blocks.4.mlp.fc2.bias 384\n",
      "model.encoder.layers.2.blocks.5.attn_mask 4\n",
      "model.encoder.layers.2.blocks.5.norm1.weight 384\n",
      "model.encoder.layers.2.blocks.5.norm1.bias 384\n",
      "model.encoder.layers.2.blocks.5.attn.logit_scale 12\n",
      "model.encoder.layers.2.blocks.5.attn.q_bias 384\n",
      "model.encoder.layers.2.blocks.5.attn.v_bias 384\n",
      "model.encoder.layers.2.blocks.5.attn.relative_coords_table 1\n",
      "model.encoder.layers.2.blocks.5.attn.relative_position_index 49\n",
      "model.encoder.layers.2.blocks.5.attn.cpb_mlp.0.weight 512\n",
      "model.encoder.layers.2.blocks.5.attn.cpb_mlp.0.bias 512\n",
      "model.encoder.layers.2.blocks.5.attn.cpb_mlp.2.weight 12\n",
      "model.encoder.layers.2.blocks.5.attn.qkv.weight 1152\n",
      "model.encoder.layers.2.blocks.5.attn.proj.weight 384\n",
      "model.encoder.layers.2.blocks.5.attn.proj.bias 384\n",
      "model.encoder.layers.2.blocks.5.norm2.weight 384\n",
      "model.encoder.layers.2.blocks.5.norm2.bias 384\n",
      "model.encoder.layers.2.blocks.5.mlp.fc1.weight 1536\n",
      "model.encoder.layers.2.blocks.5.mlp.fc1.bias 1536\n",
      "model.encoder.layers.2.blocks.5.mlp.fc2.weight 384\n",
      "model.encoder.layers.2.blocks.5.mlp.fc2.bias 384\n",
      "model.encoder.layers.2.downsample.reduction.weight 768\n",
      "model.encoder.layers.2.downsample.norm.weight 768\n",
      "model.encoder.layers.2.downsample.norm.bias 768\n",
      "model.encoder.layers.3.blocks.0.norm1.weight 768\n",
      "model.encoder.layers.3.blocks.0.norm1.bias 768\n",
      "model.encoder.layers.3.blocks.0.attn.logit_scale 24\n",
      "model.encoder.layers.3.blocks.0.attn.q_bias 768\n",
      "model.encoder.layers.3.blocks.0.attn.v_bias 768\n",
      "model.encoder.layers.3.blocks.0.attn.relative_coords_table 1\n",
      "model.encoder.layers.3.blocks.0.attn.relative_position_index 49\n",
      "model.encoder.layers.3.blocks.0.attn.cpb_mlp.0.weight 512\n",
      "model.encoder.layers.3.blocks.0.attn.cpb_mlp.0.bias 512\n",
      "model.encoder.layers.3.blocks.0.attn.cpb_mlp.2.weight 24\n",
      "model.encoder.layers.3.blocks.0.attn.qkv.weight 2304\n",
      "model.encoder.layers.3.blocks.0.attn.proj.weight 768\n",
      "model.encoder.layers.3.blocks.0.attn.proj.bias 768\n",
      "model.encoder.layers.3.blocks.0.norm2.weight 768\n",
      "model.encoder.layers.3.blocks.0.norm2.bias 768\n",
      "model.encoder.layers.3.blocks.0.mlp.fc1.weight 3072\n",
      "model.encoder.layers.3.blocks.0.mlp.fc1.bias 3072\n",
      "model.encoder.layers.3.blocks.0.mlp.fc2.weight 768\n",
      "model.encoder.layers.3.blocks.0.mlp.fc2.bias 768\n",
      "model.encoder.layers.3.blocks.1.norm1.weight 768\n",
      "model.encoder.layers.3.blocks.1.norm1.bias 768\n",
      "model.encoder.layers.3.blocks.1.attn.logit_scale 24\n",
      "model.encoder.layers.3.blocks.1.attn.q_bias 768\n",
      "model.encoder.layers.3.blocks.1.attn.v_bias 768\n",
      "model.encoder.layers.3.blocks.1.attn.relative_coords_table 1\n",
      "model.encoder.layers.3.blocks.1.attn.relative_position_index 49\n",
      "model.encoder.layers.3.blocks.1.attn.cpb_mlp.0.weight 512\n",
      "model.encoder.layers.3.blocks.1.attn.cpb_mlp.0.bias 512\n",
      "model.encoder.layers.3.blocks.1.attn.cpb_mlp.2.weight 24\n",
      "model.encoder.layers.3.blocks.1.attn.qkv.weight 2304\n",
      "model.encoder.layers.3.blocks.1.attn.proj.weight 768\n",
      "model.encoder.layers.3.blocks.1.attn.proj.bias 768\n",
      "model.encoder.layers.3.blocks.1.norm2.weight 768\n",
      "model.encoder.layers.3.blocks.1.norm2.bias 768\n",
      "model.encoder.layers.3.blocks.1.mlp.fc1.weight 3072\n",
      "model.encoder.layers.3.blocks.1.mlp.fc1.bias 3072\n",
      "model.encoder.layers.3.blocks.1.mlp.fc2.weight 768\n",
      "model.encoder.layers.3.blocks.1.mlp.fc2.bias 768\n",
      "model.encoder.norm.weight 768\n",
      "model.encoder.norm.bias 768\n",
      "model.encoder.head.0.weight 768\n",
      "model.encoder.head.0.bias 768\n",
      "model.encoder.head.0.running_mean 768\n",
      "model.encoder.head.0.running_var 768\n"
     ]
    },
    {
     "ename": "TypeError",
     "evalue": "len() of a 0-d tensor",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mTypeError\u001b[0m                                 Traceback (most recent call last)",
      "Cell \u001b[0;32mIn[3], line 27\u001b[0m\n\u001b[1;32m     25\u001b[0m \u001b[38;5;66;03m# model=DDMIMV4.load_from_checkpoint(ckpt_path)\u001b[39;00m\n\u001b[1;32m     26\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m key,v \u001b[38;5;129;01min\u001b[39;00m ckpt_dict[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mstate_dict\u001b[39m\u001b[38;5;124m'\u001b[39m]\u001b[38;5;241m.\u001b[39mitems():\n\u001b[0;32m---> 27\u001b[0m     \u001b[38;5;28mprint\u001b[39m(key,\u001b[38;5;28;43mlen\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mv\u001b[49m\u001b[43m)\u001b[49m)\n",
      "File \u001b[0;32m/opt/conda/envs/pytorch/lib/python3.8/site-packages/torch/tensor.py:573\u001b[0m, in \u001b[0;36mTensor.__len__\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m    571\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m handle_torch_function(Tensor\u001b[38;5;241m.\u001b[39m\u001b[38;5;21m__len__\u001b[39m, relevant_args, \u001b[38;5;28mself\u001b[39m)\n\u001b[1;32m    572\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdim() \u001b[38;5;241m==\u001b[39m \u001b[38;5;241m0\u001b[39m:\n\u001b[0;32m--> 573\u001b[0m     \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mTypeError\u001b[39;00m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mlen() of a 0-d tensor\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m    574\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mshape[\u001b[38;5;241m0\u001b[39m]\n",
      "\u001b[0;31mTypeError\u001b[0m: len() of a 0-d tensor"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "import config.option\n",
    "import pytorch_lightning as pl\n",
    "from models.ddmimv6 import DDMIMV6\n",
    "from models.ddmimv4 import DDMIMV4\n",
    "import models\n",
    "import classifier\n",
    "from pytorch_lightning.plugins.io import TorchCheckpointIO as tcio\n",
    "\n",
    "# Inspect the parameter/buffer names and shapes stored in a Lightning\n",
    "# checkpoint without instantiating the model.\n",
    "ckpt_path = \"log/seed1/version_236/checkpoints/epoch=41_step=849999_val_loss=0.84_val_acc1=78.50.ckpt\"\n",
    "\n",
    "tc = tcio()\n",
    "ckpt_dict = tc.load_checkpoint(path=ckpt_path)\n",
    "\n",
    "# len(v) raises TypeError on 0-d tensors (scalar buffers such as\n",
    "# BatchNorm's num_batches_tracked), so report the full shape instead.\n",
    "for key, v in ckpt_dict['state_dict'].items():\n",
    "    print(key, tuple(v.shape))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "ename": "RuntimeError",
     "evalue": "Error(s) in loading state_dict for Classifierv10:\n\tsize mismatch for mlp_head.1.weight: copying a param with shape torch.Size([1024, 1024]) from checkpoint, the shape in current model is torch.Size([1001, 1024]).\n\tsize mismatch for mlp_head.1.bias: copying a param with shape torch.Size([1024]) from checkpoint, the shape in current model is torch.Size([1001]).\n\tsize mismatch for mlp_head.3.weight: copying a param with shape torch.Size([1001, 1024]) from checkpoint, the shape in current model is torch.Size([1001, 1001]).",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mRuntimeError\u001b[0m                              Traceback (most recent call last)",
      "Cell \u001b[0;32mIn[2], line 65\u001b[0m\n\u001b[1;32m     47\u001b[0m ckpt_path \u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mlog/seed3407/version_35/checkpoints/epoch=217_step=4329043_val_loss=0.71_val_acc1=81.92.ckpt\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m     48\u001b[0m \u001b[38;5;66;03m# ckpt_path =\"log/seed3407/version_21/checkpoints/epoch=88_val_loss=0.30_Lightning_mae.ckpt\"\u001b[39;00m\n\u001b[1;32m     49\u001b[0m \u001b[38;5;66;03m# ckpt_path =\"log/seed3407/version_4/checkpoints/DDMIMV7_epoch=51_val_loss=2.06.ckpt\"\u001b[39;00m\n\u001b[1;32m     50\u001b[0m \u001b[38;5;66;03m# trainer = pl.Trainer(resume_from_checkpoint=ckpt_path)\u001b[39;00m\n\u001b[0;32m   (...)\u001b[0m\n\u001b[1;32m     63\u001b[0m         \u001b[38;5;66;03m# resume=\"ddmim/log/seed3407/version_21/checkpoints/epoch=88_val_loss=0.30_Lightning_mae.ckpt\",\u001b[39;00m\n\u001b[1;32m     64\u001b[0m \u001b[38;5;66;03m# model=Lightning_mae.load_from_checkpoint(ckpt_path)\u001b[39;00m\n\u001b[0;32m---> 65\u001b[0m model\u001b[38;5;241m=\u001b[39m\u001b[43mClassifierv10\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m     66\u001b[0m \u001b[43m    \u001b[49m\u001b[43mmodel\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mDDMIMV10\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m     67\u001b[0m \u001b[43m    \u001b[49m\u001b[38;5;66;43;03m# resume=\"log/seed1/version_234/checkpoints/last.ckpt\",\u001b[39;49;00m\n\u001b[1;32m     68\u001b[0m \u001b[43m    \u001b[49m\u001b[43mresume\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mlog/seed3407/version_34/checkpoints/epoch=7_val_loss=0.20_DDMIMV10.ckpt\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\n\u001b[1;32m     69\u001b[0m \u001b[43m    \u001b[49m\u001b[38;5;66;43;03m# resume=\"log/seed1/version_254/checkpoints/epoch=41-step=844999.ckpt\",\u001b[39;49;00m\n\u001b[1;32m     70\u001b[0m \u001b[43m    
\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mload_from_checkpoint\u001b[49m\u001b[43m(\u001b[49m\u001b[43mckpt_path\u001b[49m\u001b[43m,\u001b[49m\u001b[43mresume\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mlog/seed3407/version_34/checkpoints/epoch=7_val_loss=0.20_DDMIMV10.ckpt\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;66;03m#,resume=\"log/seed1/version_234/checkpoints/last.ckpt\",)\u001b[39;00m\n\u001b[1;32m     71\u001b[0m \u001b[38;5;28mprint\u001b[39m(torchinfo\u001b[38;5;241m.\u001b[39msummary(model,depth\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m5\u001b[39m,input_size\u001b[38;5;241m=\u001b[39m[\u001b[38;5;241m1\u001b[39m,\u001b[38;5;241m3\u001b[39m,\u001b[38;5;241m224\u001b[39m,\u001b[38;5;241m224\u001b[39m],device\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcpu\u001b[39m\u001b[38;5;124m\"\u001b[39m))\n",
      "File \u001b[0;32m/opt/conda/envs/pytorch/lib/python3.8/site-packages/pytorch_lightning/core/saving.py:156\u001b[0m, in \u001b[0;36mModelIO.load_from_checkpoint\u001b[0;34m(cls, checkpoint_path, map_location, hparams_file, strict, **kwargs)\u001b[0m\n\u001b[1;32m    153\u001b[0m \u001b[38;5;66;03m# override the hparams with values that were passed in\u001b[39;00m\n\u001b[1;32m    154\u001b[0m checkpoint[\u001b[38;5;28mcls\u001b[39m\u001b[38;5;241m.\u001b[39mCHECKPOINT_HYPER_PARAMS_KEY]\u001b[38;5;241m.\u001b[39mupdate(kwargs)\n\u001b[0;32m--> 156\u001b[0m model \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mcls\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_load_model_state\u001b[49m\u001b[43m(\u001b[49m\u001b[43mcheckpoint\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mstrict\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstrict\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    157\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m model\n",
      "File \u001b[0;32m/opt/conda/envs/pytorch/lib/python3.8/site-packages/pytorch_lightning/core/saving.py:204\u001b[0m, in \u001b[0;36mModelIO._load_model_state\u001b[0;34m(cls, checkpoint, strict, **cls_kwargs_new)\u001b[0m\n\u001b[1;32m    201\u001b[0m model\u001b[38;5;241m.\u001b[39mon_load_checkpoint(checkpoint)\n\u001b[1;32m    203\u001b[0m \u001b[38;5;66;03m# load the state_dict on the model automatically\u001b[39;00m\n\u001b[0;32m--> 204\u001b[0m keys \u001b[38;5;241m=\u001b[39m \u001b[43mmodel\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mload_state_dict\u001b[49m\u001b[43m(\u001b[49m\u001b[43mcheckpoint\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mstate_dict\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mstrict\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstrict\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    206\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m strict:\n\u001b[1;32m    207\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m keys\u001b[38;5;241m.\u001b[39mmissing_keys:\n",
      "File \u001b[0;32m/opt/conda/envs/pytorch/lib/python3.8/site-packages/torch/nn/modules/module.py:1051\u001b[0m, in \u001b[0;36mModule.load_state_dict\u001b[0;34m(self, state_dict, strict)\u001b[0m\n\u001b[1;32m   1046\u001b[0m         error_msgs\u001b[38;5;241m.\u001b[39minsert(\n\u001b[1;32m   1047\u001b[0m             \u001b[38;5;241m0\u001b[39m, \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mMissing key(s) in state_dict: \u001b[39m\u001b[38;5;132;01m{}\u001b[39;00m\u001b[38;5;124m. \u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;241m.\u001b[39mformat(\n\u001b[1;32m   1048\u001b[0m                 \u001b[38;5;124m'\u001b[39m\u001b[38;5;124m, \u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;241m.\u001b[39mjoin(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;132;01m{}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;241m.\u001b[39mformat(k) \u001b[38;5;28;01mfor\u001b[39;00m k \u001b[38;5;129;01min\u001b[39;00m missing_keys)))\n\u001b[1;32m   1050\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(error_msgs) \u001b[38;5;241m>\u001b[39m \u001b[38;5;241m0\u001b[39m:\n\u001b[0;32m-> 1051\u001b[0m     \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mRuntimeError\u001b[39;00m(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mError(s) in loading state_dict for \u001b[39m\u001b[38;5;132;01m{}\u001b[39;00m\u001b[38;5;124m:\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;130;01m\\t\u001b[39;00m\u001b[38;5;132;01m{}\u001b[39;00m\u001b[38;5;124m'\u001b[39m\u001b[38;5;241m.\u001b[39mformat(\n\u001b[1;32m   1052\u001b[0m                        \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__class__\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;130;01m\\t\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;241m.\u001b[39mjoin(error_msgs)))\n\u001b[1;32m   1053\u001b[0m 
\u001b[38;5;28;01mreturn\u001b[39;00m _IncompatibleKeys(missing_keys, unexpected_keys)\n",
      "\u001b[0;31mRuntimeError\u001b[0m: Error(s) in loading state_dict for Classifierv10:\n\tsize mismatch for mlp_head.1.weight: copying a param with shape torch.Size([1024, 1024]) from checkpoint, the shape in current model is torch.Size([1001, 1024]).\n\tsize mismatch for mlp_head.1.bias: copying a param with shape torch.Size([1024]) from checkpoint, the shape in current model is torch.Size([1001]).\n\tsize mismatch for mlp_head.3.weight: copying a param with shape torch.Size([1001, 1024]) from checkpoint, the shape in current model is torch.Size([1001, 1001])."
     ]
    }
   ],
   "source": [
    "from typing import Any\n",
    "import torch\n",
    "import torchinfo\n",
    "import config.option\n",
    "import pytorch_lightning as pl\n",
    "from models.ddmimv9 import DDMIMV9\n",
    "from models.ddmimv2 import DDMIMV2\n",
    "from models.ddmim import DDMIM\n",
    "from models.ddmimv3 import DDMIMV3\n",
    "from models.ddmimv4 import DDMIMV4\n",
    "from models.ddmimv6 import DDMIMV6\n",
    "from models.ddmimv10 import DDMIMV10\n",
    "from models.lightning_mae import Lightning_mae\n",
    "from models.swinTransformerV2 import SwinTransformerV2\n",
    "from classifierv10 import Classifierv10\n",
    "from classifier_simmim import Classifier_simmim\n",
    "\n",
    "from pytorch_lightning import LightningModule\n",
    "class swint(LightningModule):\n",
    "    def __init__(self) -> None:\n",
    "        super().__init__()\n",
    "        \n",
    "        self.model=SwinTransformerV2(\n",
    "            \n",
    "        )\n",
    "    def forward(self,x):\n",
    "        return self.model(x)\n",
    "    \n",
    "\n",
    "\n",
    "# class classf()\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "# model=pl.LightningModule.load_from_checkpoint(\"log/seed3407/version_13/checkpoints/last.ckpt\")\n",
    "# cktp=torch.load(\"log/seed3407/version_13/checkpoints/last.ckpt\")\n",
    "from pytorch_lightning.plugins.io import TorchCheckpointIO as tcio\n",
    "# 实例化自己的model\n",
    "# nn_model = A()\n",
    "# ckpt_path = \"log/seed1/version_122/checkpoints/last.ckpt\"\n",
    "# ckpt_path = \"log/seed3407/version_35/checkpoints/epoch=217_step=4329043_val_loss=0.71_val_acc1=81.92.ckpt\"\n",
    "# ckpt_path =\"log/seed1/version_224/checkpoints/epoch=19-step=779999.ckpt\"\n",
    "# ckpt_path = \"log/seed1/version_199/checkpoints/epoch=35-step=719999.ckpt\"\n",
    "# ckpt_path = \"log/seed1/version_236/checkpoints/last.ckpt\"\n",
    "# ckpt_path =\"log/seed1/version_256/checkpoints/epoch=71_step=1444999_val_loss=0.88_val_acc1=78.67.ckpt\"\n",
    "ckpt_path =\"log/seed3407/version_35/checkpoints/epoch=217_step=4329043_val_loss=0.71_val_acc1=81.92.ckpt\"\n",
    "# ckpt_path =\"log/seed3407/version_21/checkpoints/epoch=88_val_loss=0.30_Lightning_mae.ckpt\"\n",
    "# ckpt_path =\"log/seed3407/version_4/checkpoints/DDMIMV7_epoch=51_val_loss=2.06.ckpt\"\n",
    "# trainer = pl.Trainer(resume_from_checkpoint=ckpt_path)\n",
    "# 实例化函数\n",
    "# tc = tcio()\n",
    "# ckpt_dict = tc.load_checkpoint(path=ckpt_path)\n",
    "# ckpt=torch.load(ckpt_dict)\n",
    "\n",
    "# model=Classifierv10.load_from_checkpoint(ckpt_path,resume=\"log/seed3407/version_34/checkpoints/epoch=7_val_loss=0.20_DDMIMV10.ckpt\",)\n",
    "# model=Classifierv10(\n",
    "#     model=DDMIMV2,\n",
    "#     resume=\"log/seed1/version_132/checkpoints/epoch=6-step=304999.ckpt\"\n",
    "#     ).load_from_checkpoint(ckpt_path)\n",
    "        # model=Lightning_mae,\n",
    "        # #resume=\"ddmim/log/seed1/version_199/checkpoints/epoch=35-step=719999.ckpt\",\n",
    "        # resume=\"ddmim/log/seed3407/version_21/checkpoints/epoch=88_val_loss=0.30_Lightning_mae.ckpt\",\n",
    "# model=Lightning_mae.load_from_checkpoint(ckpt_path)\n",
    "model=Classifierv10(\n",
    "    model=DDMIMV10,\n",
    "    # resume=\"log/seed1/version_234/checkpoints/last.ckpt\",\n",
    "    resume=\"log/seed3407/version_34/checkpoints/epoch=7_val_loss=0.20_DDMIMV10.ckpt\"\n",
    "    # resume=\"log/seed1/version_254/checkpoints/epoch=41-step=844999.ckpt\",\n",
    "    ).load_from_checkpoint(ckpt_path,resume=\"log/seed3407/version_34/checkpoints/epoch=7_val_loss=0.20_DDMIMV10.ckpt\",)#,resume=\"log/seed1/version_234/checkpoints/last.ckpt\",)\n",
    "print(torchinfo.summary(model,depth=5,input_size=[1,3,224,224],device=\"cpu\"))\n",
    "\n",
    "# for key,v in ckpt_dict['state_dict'].items():\n",
    "#     print(key,len(v))\n",
    "# print(len(ckpt_dict['state_dict']['projection_head.2.weight']))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "==============================================================================================================\n",
      "Layer (type:depth-idx)                                       Output Shape              Param #\n",
      "==============================================================================================================\n",
      "Classifierv10                                                [1, 1001]                 --\n",
      "├─DDMIMV10: 1-1                                              [1, 1024]                 2,362,368\n",
      "│    └─SwinTransformerV2: 2-1                                --                        --\n",
      "│    │    └─PatchEmbed: 3-1                                  [1, 3136, 96]             --\n",
      "│    │    │    └─Conv2d: 4-1                                 [1, 96, 56, 56]           4,704\n",
      "│    │    │    └─LayerNorm: 4-2                              [1, 3136, 96]             192\n",
      "│    │    └─Dropout: 3-2                                     [1, 3136, 96]             --\n",
      "│    │    └─ModuleList: 3-3                                  --                        --\n",
      "│    │    │    └─BasicLayer: 4-3                             [1, 784, 192]             --\n",
      "│    │    │    │    └─ModuleList: 5-1                        --                        229,638\n",
      "│    │    │    │    └─PatchMerging: 5-2                      [1, 784, 192]             74,112\n",
      "│    │    │    └─BasicLayer: 4-4                             [1, 196, 384]             --\n",
      "│    │    │    │    └─ModuleList: 5-3                        --                        898,572\n",
      "│    │    │    │    └─PatchMerging: 5-4                      [1, 196, 384]             295,680\n",
      "│    │    │    └─BasicLayer: 4-5                             [1, 49, 768]              --\n",
      "│    │    │    │    └─ModuleList: 5-5                        --                        10,690,632\n",
      "│    │    │    │    └─PatchMerging: 5-6                      [1, 49, 768]              1,181,184\n",
      "│    │    │    └─BasicLayer: 4-6                             [1, 49, 768]              --\n",
      "│    │    │    │    └─ModuleList: 5-7                        --                        14,201,904\n",
      "│    │    └─LayerNorm: 3-4                                   [1, 49, 768]              1,536\n",
      "│    │    └─AdaptiveAvgPool1d: 3-5                           [1, 768, 1]               --\n",
      "│    │    └─Linear: 3-6                                      [1, 1024]                 787,456\n",
      "├─Sequential: 1-2                                            [1, 1001]                 --\n",
      "│    └─BatchNorm1d: 2-2                                      [1, 1024]                 2,048\n",
      "│    └─Linear: 2-3                                           [1, 1024]                 1,049,600\n",
      "│    └─GELU: 2-4                                             [1, 1024]                 --\n",
      "│    └─Linear: 2-5                                           [1, 1001]                 1,026,025\n",
      "==============================================================================================================\n",
      "Total params: 32,805,651\n",
      "Trainable params: 32,805,651\n",
      "Non-trainable params: 0\n",
      "Total mult-adds (M): 43.66\n",
      "==============================================================================================================\n",
      "Input size (MB): 0.60\n",
      "Forward/backward pass size (MB): 109.38\n",
      "Params size (MB): 95.86\n",
      "Estimated Total Size (MB): 205.84\n",
      "==============================================================================================================\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "import torchinfo\n",
    "import config.option\n",
    "import pytorch_lightning as pl\n",
    "from models.ddmimv2 import DDMIMV2\n",
    "from models.ddmimv10 import DDMIMV10\n",
    "from classifierv10 import Classifierv10\n",
    "\n",
    "# model=pl.LightningModule.load_from_checkpoint(\"log/seed3407/version_13/checkpoints/last.ckpt\")\n",
    "# cktp=torch.load(\"log/seed3407/version_13/checkpoints/last.ckpt\")\n",
    "from pytorch_lightning.plugins.io import TorchCheckpointIO as tcio\n",
    "# 实例化自己的model\n",
    "# nn_model = A()\n",
    "# ckpt_path = \"log/seed1/version_122/checkpoints/last.ckpt\"\n",
    "ckpt_path = \"log/seed3407/version_35/checkpoints/epoch=217_step=4329043_val_loss=0.71_val_acc1=81.92.ckpt\"\n",
    "# trainer = pl.Trainer(resume_from_checkpoint=ckpt_path)\n",
    "# 实例化函数\n",
    "# tc = tcio()\n",
    "# ckpt_dict = tc.load_checkpoint(path=ckpt_path)\n",
    "\n",
    "model=Classifierv10.load_from_checkpoint(ckpt_path,resume=\"log/seed3407/version_34/checkpoints/epoch=7_val_loss=0.20_DDMIMV10.ckpt\")\n",
    "print(torchinfo.summary(model,depth=5,input_size=[1,3,800,800],device=\"cpu\"))\n",
    "\n",
    "# for key,v in ckpt_dict['state_dict'].items():\n",
    "#     print(key,len(v))\n",
    "# print(len(ckpt_dict['state_dict']['projection_head.2.weight']))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "=========================================================================================================\n",
      "Layer (type:depth-idx)                                  Output Shape              Param #\n",
      "=========================================================================================================\n",
      "DDMIMV2                                                 [1, 197, 768]             744,960\n",
      "├─Rearrange: 1-1                                        [1, 196, 768]             --\n",
      "├─Linear: 1-2                                           [1, 196, 768]             590,592\n",
      "├─ModuleList: 1-3                                       --                        --\n",
      "│    └─Transformer: 2-1                                 [1, 197, 768]             --\n",
      "│    │    └─ModuleList: 3-1                             --                        --\n",
      "│    │    │    └─ModuleList: 4-1                        --                        --\n",
      "│    │    │    │    └─PreNorm: 5-1                      [1, 197, 768]             --\n",
      "│    │    │    │    │    └─LayerNorm: 6-1               [1, 197, 768]             1,536\n",
      "│    │    │    │    │    └─Attention: 6-2               [1, 197, 768]             2,360,064\n",
      "│    │    │    │    └─PreNorm: 5-2                      [1, 197, 768]             --\n",
      "│    │    │    │    │    └─LayerNorm: 6-3               [1, 197, 768]             1,536\n",
      "│    │    │    │    │    └─FeedForward: 6-4             [1, 197, 768]             4,759,320\n",
      "│    │    │    └─ModuleList: 4-2                        --                        --\n",
      "│    │    │    │    └─PreNorm: 5-3                      [1, 197, 768]             --\n",
      "│    │    │    │    │    └─LayerNorm: 6-5               [1, 197, 768]             1,536\n",
      "│    │    │    │    │    └─Attention: 6-6               [1, 197, 768]             2,360,064\n",
      "│    │    │    │    └─PreNorm: 5-4                      [1, 197, 768]             --\n",
      "│    │    │    │    │    └─LayerNorm: 6-7               [1, 197, 768]             1,536\n",
      "│    │    │    │    │    └─FeedForward: 6-8             [1, 197, 768]             4,759,320\n",
      "│    └─Transformer: 2-2                                 [1, 197, 768]             --\n",
      "│    │    └─ModuleList: 3-2                             --                        --\n",
      "│    │    │    └─ModuleList: 4-3                        --                        --\n",
      "│    │    │    │    └─PreNorm: 5-5                      [1, 197, 768]             --\n",
      "│    │    │    │    │    └─LayerNorm: 6-9               [1, 197, 768]             1,536\n",
      "│    │    │    │    │    └─Attention: 6-10              [1, 197, 768]             2,360,064\n",
      "│    │    │    │    └─PreNorm: 5-6                      [1, 197, 768]             --\n",
      "│    │    │    │    │    └─LayerNorm: 6-11              [1, 197, 768]             1,536\n",
      "│    │    │    │    │    └─FeedForward: 6-12            [1, 197, 768]             4,759,320\n",
      "│    │    │    └─ModuleList: 4-4                        --                        --\n",
      "│    │    │    │    └─PreNorm: 5-7                      [1, 197, 768]             --\n",
      "│    │    │    │    │    └─LayerNorm: 6-13              [1, 197, 768]             1,536\n",
      "│    │    │    │    │    └─Attention: 6-14              [1, 197, 768]             2,360,064\n",
      "│    │    │    │    └─PreNorm: 5-8                      [1, 197, 768]             --\n",
      "│    │    │    │    │    └─LayerNorm: 6-15              [1, 197, 768]             1,536\n",
      "│    │    │    │    │    └─FeedForward: 6-16            [1, 197, 768]             4,759,320\n",
      "│    └─Transformer: 2-3                                 [1, 197, 768]             --\n",
      "│    │    └─ModuleList: 3-3                             --                        --\n",
      "│    │    │    └─ModuleList: 4-5                        --                        --\n",
      "│    │    │    │    └─PreNorm: 5-9                      [1, 197, 768]             --\n",
      "│    │    │    │    │    └─LayerNorm: 6-17              [1, 197, 768]             1,536\n",
      "│    │    │    │    │    └─Attention: 6-18              [1, 197, 768]             2,360,064\n",
      "│    │    │    │    └─PreNorm: 5-10                     [1, 197, 768]             --\n",
      "│    │    │    │    │    └─LayerNorm: 6-19              [1, 197, 768]             1,536\n",
      "│    │    │    │    │    └─FeedForward: 6-20            [1, 197, 768]             4,759,320\n",
      "│    │    │    └─ModuleList: 4-6                        --                        --\n",
      "│    │    │    │    └─PreNorm: 5-11                     [1, 197, 768]             --\n",
      "│    │    │    │    │    └─LayerNorm: 6-21              [1, 197, 768]             1,536\n",
      "│    │    │    │    │    └─Attention: 6-22              [1, 197, 768]             2,360,064\n",
      "│    │    │    │    └─PreNorm: 5-12                     [1, 197, 768]             --\n",
      "│    │    │    │    │    └─LayerNorm: 6-23              [1, 197, 768]             1,536\n",
      "│    │    │    │    │    └─FeedForward: 6-24            [1, 197, 768]             4,759,320\n",
      "│    │    │    └─ModuleList: 4-7                        --                        --\n",
      "│    │    │    │    └─PreNorm: 5-13                     [1, 197, 768]             --\n",
      "│    │    │    │    │    └─LayerNorm: 6-25              [1, 197, 768]             1,536\n",
      "│    │    │    │    │    └─Attention: 6-26              [1, 197, 768]             2,360,064\n",
      "│    │    │    │    └─PreNorm: 5-14                     [1, 197, 768]             --\n",
      "│    │    │    │    │    └─LayerNorm: 6-27              [1, 197, 768]             1,536\n",
      "│    │    │    │    │    └─FeedForward: 6-28            [1, 197, 768]             4,759,320\n",
      "│    │    │    └─ModuleList: 4-8                        --                        --\n",
      "│    │    │    │    └─PreNorm: 5-15                     [1, 197, 768]             --\n",
      "│    │    │    │    │    └─LayerNorm: 6-29              [1, 197, 768]             1,536\n",
      "│    │    │    │    │    └─Attention: 6-30              [1, 197, 768]             2,360,064\n",
      "│    │    │    │    └─PreNorm: 5-16                     [1, 197, 768]             --\n",
      "│    │    │    │    │    └─LayerNorm: 6-31              [1, 197, 768]             1,536\n",
      "│    │    │    │    │    └─FeedForward: 6-32            [1, 197, 768]             4,759,320\n",
      "│    │    │    └─ModuleList: 4-9                        --                        --\n",
      "│    │    │    │    └─PreNorm: 5-17                     [1, 197, 768]             --\n",
      "│    │    │    │    │    └─LayerNorm: 6-33              [1, 197, 768]             1,536\n",
      "│    │    │    │    │    └─Attention: 6-34              [1, 197, 768]             2,360,064\n",
      "│    │    │    │    └─PreNorm: 5-18                     [1, 197, 768]             --\n",
      "│    │    │    │    │    └─LayerNorm: 6-35              [1, 197, 768]             1,536\n",
      "│    │    │    │    │    └─FeedForward: 6-36            [1, 197, 768]             4,759,320\n",
      "│    │    │    └─ModuleList: 4-10                       --                        --\n",
      "│    │    │    │    └─PreNorm: 5-19                     [1, 197, 768]             --\n",
      "│    │    │    │    │    └─LayerNorm: 6-37              [1, 197, 768]             1,536\n",
      "│    │    │    │    │    └─Attention: 6-38              [1, 197, 768]             2,360,064\n",
      "│    │    │    │    └─PreNorm: 5-20                     [1, 197, 768]             --\n",
      "│    │    │    │    │    └─LayerNorm: 6-39              [1, 197, 768]             1,536\n",
      "│    │    │    │    │    └─FeedForward: 6-40            [1, 197, 768]             4,759,320\n",
      "│    └─Transformer: 2-4                                 [1, 197, 768]             --\n",
      "│    │    └─ModuleList: 3-4                             --                        --\n",
      "│    │    │    └─ModuleList: 4-11                       --                        --\n",
      "│    │    │    │    └─PreNorm: 5-21                     [1, 197, 768]             --\n",
      "│    │    │    │    │    └─LayerNorm: 6-41              [1, 197, 768]             1,536\n",
      "│    │    │    │    │    └─Attention: 6-42              [1, 197, 768]             2,360,064\n",
      "│    │    │    │    └─PreNorm: 5-22                     [1, 197, 768]             --\n",
      "│    │    │    │    │    └─LayerNorm: 6-43              [1, 197, 768]             1,536\n",
      "│    │    │    │    │    └─FeedForward: 6-44            [1, 197, 768]             4,759,320\n",
      "│    │    │    └─ModuleList: 4-12                       --                        --\n",
      "│    │    │    │    └─PreNorm: 5-23                     [1, 197, 768]             --\n",
      "│    │    │    │    │    └─LayerNorm: 6-45              [1, 197, 768]             1,536\n",
      "│    │    │    │    │    └─Attention: 6-46              [1, 197, 768]             2,360,064\n",
      "│    │    │    │    └─PreNorm: 5-24                     [1, 197, 768]             --\n",
      "│    │    │    │    │    └─LayerNorm: 6-47              [1, 197, 768]             1,536\n",
      "│    │    │    │    │    └─FeedForward: 6-48            [1, 197, 768]             4,759,320\n",
      "=========================================================================================================\n",
      "Total params: 86,805,024\n",
      "Trainable params: 86,805,024\n",
      "Non-trainable params: 0\n",
      "Total mult-adds (M): 86.06\n",
      "=========================================================================================================\n",
      "Input size (MB): 0.60\n",
      "Forward/backward pass size (MB): 161.43\n",
      "Params size (MB): 344.24\n",
      "Estimated Total Size (MB): 506.27\n",
      "=========================================================================================================\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "import torchinfo\n",
    "import config.option\n",
    "import pytorch_lightning as pl\n",
    "from models.ddmimv2 import DDMIMV2\n",
    "\n",
    "# model=pl.LightningModule.load_from_checkpoint(\"log/seed3407/version_13/checkpoints/last.ckpt\")\n",
    "# cktp=torch.load(\"log/seed3407/version_13/checkpoints/last.ckpt\")\n",
    "from pytorch_lightning.plugins.io import TorchCheckpointIO as tcio\n",
    "# 实例化自己的model\n",
    "# nn_model = A()\n",
    "ckpt_path = \"log/seed1/version_132/checkpoints/last.ckpt\"\n",
    "# trainer = pl.Trainer(resume_from_checkpoint=ckpt_path)\n",
    "# 实例化函数\n",
    "tc = tcio()\n",
    "ckpt_dict = tc.load_checkpoint(path=ckpt_path)\n",
    "\n",
    "model=DDMIMV2.load_from_checkpoint(ckpt_path,dim=768)\n",
    "print(torchinfo.summary(model,depth=6,input_size=[1,3,224,224],device=\"cpu\"))\n",
    "\n",
    "# for key,v in ckpt_dict['state_dict'].items():\n",
    "#     print(key,len(v))\n",
    "# print(len(ckpt_dict['state_dict']['projection_head.2.weight']))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "pytorch",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.16"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
