{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "20409c81-55ee-4631-b433-ce49c0fcb3da",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:root:IFNode(\n",
      "  v_threshold=1.0, v_reset=0.0, detach_reset=False, step_mode=s, backend=torch\n",
      "  (surrogate_function): Sigmoid(alpha=4.0, spiking=True)\n",
      ") supports for step_mode == 's', which should not be contained by MultiStepContainer!\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "y_seq: tensor([[[[[0., 0., 0., 0., 0.],\n",
      "           [0., 0., 0., 0., 0.],\n",
      "           [0., 0., 0., 0., 0.],\n",
      "           [0., 0., 0., 0., 0.],\n",
      "           [0., 0., 0., 0., 0.]],\n",
      "\n",
      "          [[0., 0., 0., 0., 0.],\n",
      "           [0., 0., 0., 0., 0.],\n",
      "           [0., 0., 0., 0., 0.],\n",
      "           [0., 0., 0., 0., 0.],\n",
      "           [0., 0., 0., 0., 0.]],\n",
      "\n",
      "          [[0., 0., 0., 0., 0.],\n",
      "           [0., 0., 0., 0., 0.],\n",
      "           [0., 0., 0., 0., 0.],\n",
      "           [0., 0., 0., 0., 0.],\n",
      "           [0., 0., 0., 0., 0.]]]],\n",
      "\n",
      "\n",
      "\n",
      "        [[[[0., 1., 1., 1., 0.],\n",
      "           [0., 0., 1., 0., 0.],\n",
      "           [0., 0., 1., 1., 0.],\n",
      "           [1., 0., 1., 1., 1.],\n",
      "           [1., 1., 0., 0., 1.]],\n",
      "\n",
      "          [[1., 0., 1., 0., 0.],\n",
      "           [1., 0., 1., 1., 0.],\n",
      "           [1., 0., 0., 1., 0.],\n",
      "           [0., 0., 1., 0., 1.],\n",
      "           [1., 0., 1., 1., 1.]],\n",
      "\n",
      "          [[1., 0., 1., 1., 1.],\n",
      "           [0., 1., 0., 1., 1.],\n",
      "           [0., 0., 0., 1., 1.],\n",
      "           [0., 0., 0., 1., 1.],\n",
      "           [0., 0., 0., 1., 0.]]]]])\n",
      "z_seq: tensor([[[[[0., 0., 0., 0., 0.],\n",
      "           [0., 0., 0., 0., 0.],\n",
      "           [0., 0., 0., 0., 0.],\n",
      "           [0., 0., 0., 0., 0.],\n",
      "           [0., 0., 0., 0., 0.]],\n",
      "\n",
      "          [[0., 0., 0., 0., 0.],\n",
      "           [0., 0., 0., 0., 0.],\n",
      "           [0., 0., 0., 0., 0.],\n",
      "           [0., 0., 0., 0., 0.],\n",
      "           [0., 0., 0., 0., 0.]],\n",
      "\n",
      "          [[0., 0., 0., 0., 0.],\n",
      "           [0., 0., 0., 0., 0.],\n",
      "           [0., 0., 0., 0., 0.],\n",
      "           [0., 0., 0., 0., 0.],\n",
      "           [0., 0., 0., 0., 0.]]]],\n",
      "\n",
      "\n",
      "\n",
      "        [[[[0., 1., 1., 1., 0.],\n",
      "           [0., 0., 1., 0., 0.],\n",
      "           [0., 0., 1., 1., 0.],\n",
      "           [1., 0., 1., 1., 1.],\n",
      "           [1., 1., 0., 0., 1.]],\n",
      "\n",
      "          [[1., 0., 1., 0., 0.],\n",
      "           [1., 0., 1., 1., 0.],\n",
      "           [1., 0., 0., 1., 0.],\n",
      "           [0., 0., 1., 0., 1.],\n",
      "           [1., 0., 1., 1., 1.]],\n",
      "\n",
      "          [[1., 0., 1., 1., 1.],\n",
      "           [0., 1., 0., 1., 1.],\n",
      "           [0., 0., 0., 1., 1.],\n",
      "           [0., 0., 0., 1., 1.],\n",
      "           [0., 0., 0., 1., 0.]]]]])\n",
      "tensor(True)\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "from spikingjelly.activation_based import neuron, functional, layer\n",
    "\n",
    "# A single-step IFNode: each forward call consumes one timestep of data.\n",
    "net_s = neuron.IFNode(step_mode='s')\n",
    "T = 2\n",
    "N = 1\n",
    "C = 3\n",
    "H = 5\n",
    "W = 5\n",
    "# NOTE: no manual seed is set, so x_seq (and every printed tensor below)\n",
    "# changes on each re-run. x_seq and C are reused by later cells.\n",
    "x_seq = torch.rand([T, N, C, H, W])\n",
    "# Functional style: loop the single-step module over the T dimension.\n",
    "y_seq = functional.multi_step_forward(x_seq, net_s)\n",
    "# y_seq.shape = [T, N, C, H, W]\n",
    "print(\"y_seq:\", y_seq) \n",
    "\n",
    "# Reset the membrane state before reusing the same neuron, otherwise the\n",
    "# second run would start from the potentials left over by the first one.\n",
    "net_s.reset()\n",
    "# Module style: MultiStepContainer performs the same per-timestep loop.\n",
    "# IFNode already supports step_mode='m' natively, which is why wrapping it\n",
    "# triggers the warning shown on stderr above.\n",
    "net_m = layer.MultiStepContainer(net_s)\n",
    "z_seq = net_m(x_seq)\n",
    "# z_seq.shape = [T, N, C, H, W]\n",
    "\n",
    "# z_seq is identical to y_seq\n",
    "print(\"z_seq:\", z_seq) \n",
    "print((z_seq == y_seq).all())"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "814fa1fb-8a1f-4180-b80b-90287dd1e214",
   "metadata": {},
   "source": [
    "对于无状态的ANN网络层，例如 torch.nn.Conv2d，其本身要求输入数据的 shape = [N, *]，若用于多步模式，则可以用多步的包装器进行包装："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "4481dccf-d63e-418f-b2c6-8657d6dd0bd9",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor(True)\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "tensor([[[[[ 0.8300,  0.3288,  0.6911, -0.0896,  0.6402],\n",
       "           [ 0.0439, -1.6546, -0.7304,  0.8632, -1.1816],\n",
       "           [-0.9756,  2.7853,  0.6467, -0.0362,  1.0011],\n",
       "           [ 1.7994, -0.0506, -0.2328,  0.0851, -0.3796],\n",
       "           [-1.3141, -1.1158, -0.4447, -0.7539, -0.7553]],\n",
       "\n",
       "          [[ 0.4977,  1.5432,  0.0356,  0.3352,  1.4618],\n",
       "           [ 0.3611, -1.0213, -2.3380,  0.5638, -0.4641],\n",
       "           [-0.6976,  1.4139,  1.5400, -0.7997,  0.6420],\n",
       "           [ 1.7276, -0.0069, -0.5504, -0.3439, -0.4013],\n",
       "           [ 0.0524, -0.5705, -1.2030, -0.6555, -1.1223]],\n",
       "\n",
       "          [[-0.1446,  0.9292,  0.9548,  1.6822,  0.6979],\n",
       "           [-1.0995, -0.8103, -0.6111, -0.6579,  0.8098],\n",
       "           [-1.6292,  0.5951,  0.4130,  0.1627, -0.4830],\n",
       "           [ 0.3504,  2.4021,  1.0523,  0.2918,  0.3520],\n",
       "           [-1.2284, -0.9520, -1.4366, -0.6304, -1.0104]],\n",
       "\n",
       "          [[-0.8962, -1.0553,  1.0172, -0.5346, -0.0084],\n",
       "           [-0.1340,  0.0502, -1.0004,  1.1565,  1.4167],\n",
       "           [-1.1181, -0.7740, -1.4220,  1.2582,  2.6132],\n",
       "           [-0.4199,  0.0032, -0.7540,  0.1977,  1.4918],\n",
       "           [-0.4973, -1.1426, -0.2371,  0.0956,  0.6937]],\n",
       "\n",
       "          [[-0.3879,  1.9131,  0.7115,  0.0554,  0.8967],\n",
       "           [ 0.5479,  0.3539, -0.2427,  1.0983, -0.3573],\n",
       "           [-1.1404, -0.9853,  0.0167,  1.4746,  1.0650],\n",
       "           [-0.0487,  0.2527, -0.0441, -1.0125,  1.1812],\n",
       "           [ 0.5103, -1.2940, -0.8486, -1.4014, -2.3144]],\n",
       "\n",
       "          [[ 1.1952, -0.3690, -0.3742,  2.0248,  1.1049],\n",
       "           [ 0.8001, -2.1507,  0.4044,  0.4204,  0.0255],\n",
       "           [ 1.0078,  0.3186, -0.0876, -0.0720, -0.4043],\n",
       "           [ 0.9571, -2.5048, -0.3536,  0.8538,  0.1059],\n",
       "           [-0.9177, -0.5287, -0.3463, -1.3144,  0.2051]],\n",
       "\n",
       "          [[ 1.6200,  0.1678,  2.3654,  0.6517,  0.8797],\n",
       "           [-1.3854, -1.1363, -1.2694, -0.2192,  0.9284],\n",
       "           [-0.1410, -0.2628, -1.1300, -0.7584, -0.9187],\n",
       "           [-0.0398,  1.9687,  0.6840, -1.2615,  0.1459],\n",
       "           [-0.0439, -0.4355, -0.8774,  0.2635,  0.2040]],\n",
       "\n",
       "          [[ 2.1509,  2.0231, -0.5971, -0.1105, -1.1768],\n",
       "           [-1.2354,  0.2256,  0.5254,  0.8303,  0.3389],\n",
       "           [-0.2618,  0.6051, -0.6751, -1.0894, -1.1085],\n",
       "           [-0.2645, -0.4105, -0.7749,  0.4993, -1.7439],\n",
       "           [-0.2867,  0.5068, -0.5118,  1.8159,  0.7254]]]],\n",
       "\n",
       "\n",
       "\n",
       "        [[[[ 0.5297, -0.4255,  2.0793,  0.6079,  0.0177],\n",
       "           [ 1.6326,  1.3693, -0.6566,  1.4489,  0.8256],\n",
       "           [ 0.1727, -0.3884, -1.3775,  0.7979, -0.6197],\n",
       "           [-1.5736, -0.5109, -0.2000, -0.8634, -0.1339],\n",
       "           [ 0.4024, -0.8725, -2.1125,  0.0813, -0.2309]],\n",
       "\n",
       "          [[ 0.2036, -0.8632,  1.3526,  0.2648,  0.7805],\n",
       "           [ 0.1487,  1.8192, -0.1601, -0.0812,  1.2624],\n",
       "           [ 2.1728, -0.8270, -1.8444, -0.7298, -0.1057],\n",
       "           [ 0.5850,  0.1781, -0.5079, -0.5232, -0.5892],\n",
       "           [ 0.7515,  0.6106, -1.2733, -1.0593, -1.5655]],\n",
       "\n",
       "          [[-0.2686,  0.4740, -0.4002, -0.2143, -0.1933],\n",
       "           [ 0.1373,  1.7319,  1.1730,  0.5427,  0.3558],\n",
       "           [-0.0462,  1.3330,  1.3450,  1.8906,  0.8199],\n",
       "           [-0.0637, -0.2397, -0.2433, -0.2624, -0.9281],\n",
       "           [-1.8695, -0.7451, -1.8985, -1.4852, -0.9449]],\n",
       "\n",
       "          [[-0.6338, -1.8010,  0.0469,  0.1111,  0.2264],\n",
       "           [-1.2446, -0.8035,  0.4513,  1.0078,  1.7801],\n",
       "           [-0.7817,  1.2562,  0.1869, -0.6170,  1.7548],\n",
       "           [-0.8532, -1.0484,  0.3128, -0.3340,  1.7495],\n",
       "           [-0.2950, -0.8602, -0.8420, -0.2867,  1.5172]],\n",
       "\n",
       "          [[-0.2487, -0.3528,  1.2023,  1.1828, -0.1388],\n",
       "           [-0.6445, -1.3372,  0.3818,  0.9225,  0.5458],\n",
       "           [ 0.8472,  1.0614, -1.2497,  1.2517, -0.8637],\n",
       "           [ 0.3701,  0.6226,  0.5189,  1.9783, -0.1520],\n",
       "           [-1.0428, -1.1262, -1.9286, -0.5156, -1.2847]],\n",
       "\n",
       "          [[ 0.4047,  0.7925,  0.2528, -0.2233,  0.6455],\n",
       "           [ 0.7815,  0.0632,  0.1831, -0.4464,  1.0455],\n",
       "           [ 0.3790, -2.5573, -0.1454,  1.3520,  0.3050],\n",
       "           [ 0.0238,  0.2303, -0.3602, -2.2324,  1.5464],\n",
       "           [ 0.0426, -1.8637,  0.1830, -1.1892,  0.7871]],\n",
       "\n",
       "          [[ 0.1026,  0.3632,  1.0801,  0.3205,  1.0681],\n",
       "           [ 1.0481,  1.6116, -0.0588,  0.5093, -0.1743],\n",
       "           [-0.6701,  0.9962,  1.6834,  0.5615,  1.1513],\n",
       "           [-0.4541, -2.1182, -0.7681, -1.4765, -1.3099],\n",
       "           [-0.5032, -0.0219, -0.7919, -1.3639, -0.7852]],\n",
       "\n",
       "          [[ 0.0324,  0.2270,  0.8891,  0.2754,  0.5275],\n",
       "           [ 0.3108,  1.5288, -1.8204, -0.9164, -1.1729],\n",
       "           [-0.3578, -0.2106, -0.9309, -0.1387, -1.6150],\n",
       "           [-0.7614, -0.5479, -1.7569,  0.3171,  0.2533],\n",
       "           [ 0.5491,  1.9437,  1.2589,  1.3172,  0.7987]]]]],\n",
       "       grad_fn=<StackBackward0>)"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import torch.nn as nn\n",
    "# Stateless ANN layers; conv and bn are reused by later cells, so this cell\n",
    "# must run first. C and x_seq come from the first code cell.\n",
    "conv = nn.Conv2d(C, 8, kernel_size=3, padding=1, bias=False)\n",
    "bn = nn.BatchNorm2d(8)\n",
    "\n",
    "# Functional style: apply (conv, bn) timestep by timestep in a Python loop.\n",
    "# NOTE: bn is in training mode here, so each timestep's [N, ...] slice is\n",
    "# normalized with its own batch statistics.\n",
    "y_seq = functional.multi_step_forward(x_seq, (conv, bn))\n",
    "# y_seq.shape = [T, N, 8, H, W]\n",
    "\n",
    "# Module style: MultiStepContainer performs the same per-timestep loop.\n",
    "net = layer.MultiStepContainer(conv, bn)\n",
    "z_seq = net(x_seq)\n",
    "# z_seq.shape = [T, N, 8, H, W]\n",
    "\n",
    "# z_seq is identical to y_seq\n",
    "print((z_seq == y_seq).all())\n",
    "z_seq"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "68d6241d-97ce-4e1f-9814-64a7ade66397",
   "metadata": {},
   "source": [
    "但是ANN的网络层本身是无状态的，不存在前序依赖，没有必要在时间上串行的计算，可以使用函数风格的 seq_to_ann_forward 或模块风格的 SeqToANNContainer 进行包装。seq_to_ann_forward 将 shape = [T, N, *] 的数据首先变换为 shape = [TN, *]，再送入无状态的网络层进行计算，输出的结果会被重新变换为 shape = [T, N, *]。**不同时刻的数据是并行计算的，因而速度更快**："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "50ec83e1-7916-4a2e-ac23-83f9bf3081f9",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "q_seq.shape: torch.Size([2, 1, 8, 5, 5])\n",
      "tensor(True)\n",
      "tensor(False)\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "tensor([[[[[ 0.8384,  0.3403,  0.7003, -0.0757,  0.6498],\n",
       "           [ 0.0571, -1.6314, -0.7127,  0.8714, -1.1612],\n",
       "           [-0.9564,  2.7821,  0.6563, -0.0226,  1.0085],\n",
       "           [ 1.8021, -0.0369, -0.2180,  0.0980, -0.3639],\n",
       "           [-1.2929, -1.0958, -0.4286, -0.7360, -0.7374]],\n",
       "\n",
       "          [[ 0.5255,  1.7038,  0.0046,  0.3423,  1.6121],\n",
       "           [ 0.3715, -1.1866, -2.6707,  0.5999, -0.5586],\n",
       "           [-0.8218,  1.5582,  1.7003, -0.9368,  0.6881],\n",
       "           [ 1.9117, -0.0432, -0.6558, -0.4231, -0.4878],\n",
       "           [ 0.0235, -0.6785, -1.3914, -0.7743, -1.3005]],\n",
       "\n",
       "          [[ 0.0224,  0.8164,  0.8353,  1.3732,  0.6454],\n",
       "           [-0.6836, -0.4698, -0.3225, -0.3571,  0.7282],\n",
       "           [-1.0753,  0.5694,  0.4347,  0.2496, -0.2277],\n",
       "           [ 0.3884,  1.9055,  0.9075,  0.3451,  0.3896],\n",
       "           [-0.7789, -0.5745, -0.9328, -0.3368, -0.6177]],\n",
       "\n",
       "          [[-0.9974, -1.1585,  0.9410, -0.6311, -0.0979],\n",
       "           [-0.2252, -0.0386, -1.1030,  1.0821,  1.3457],\n",
       "           [-1.2222, -0.8735, -1.5301,  1.1851,  2.5578],\n",
       "           [-0.5149, -0.0863, -0.8533,  0.1108,  1.4218],\n",
       "           [-0.5932, -1.2470, -0.3297,  0.0074,  0.6132]],\n",
       "\n",
       "          [[-0.3414,  1.8131,  0.6880,  0.0736,  0.8614],\n",
       "           [ 0.5349,  0.3531, -0.2055,  1.0502, -0.3128],\n",
       "           [-1.0460, -0.9007,  0.0374,  1.4025,  1.0189],\n",
       "           [-0.0238,  0.2584, -0.0195, -0.9262,  1.1277],\n",
       "           [ 0.4996, -1.1898, -0.7727, -1.2903, -2.1452]],\n",
       "\n",
       "          [[ 1.2062, -0.4702, -0.4757,  2.0953,  1.1094],\n",
       "           [ 0.7827, -2.3795,  0.3587,  0.3758, -0.0474],\n",
       "           [ 1.0053,  0.2667, -0.1686, -0.1519, -0.5080],\n",
       "           [ 0.9510, -2.7590, -0.4537,  0.8403,  0.0387],\n",
       "           [-1.0582, -0.6413, -0.4458, -1.4833,  0.1451]],\n",
       "\n",
       "          [[ 1.6881,  0.2237,  2.4399,  0.7117,  0.9416],\n",
       "           [-1.3427, -1.0915, -1.2258, -0.1667,  0.9907],\n",
       "           [-0.0878, -0.2107, -1.0852, -0.7104, -0.8721],\n",
       "           [ 0.0142,  2.0398,  0.7441, -1.2178,  0.2015],\n",
       "           [ 0.0102, -0.3848, -0.8304,  0.3201,  0.2602]],\n",
       "\n",
       "          [[ 1.6078,  1.5047, -0.6097, -0.2170, -1.0775],\n",
       "           [-1.1248,  0.0542,  0.2961,  0.5422,  0.1456],\n",
       "           [-0.3391,  0.3604, -0.6727, -1.0070, -1.0224],\n",
       "           [-0.3414, -0.4591, -0.7532,  0.2750, -1.5352],\n",
       "           [-0.3593,  0.2811, -0.5409,  1.3375,  0.4575]]]],\n",
       "\n",
       "\n",
       "\n",
       "        [[[[ 0.5193, -0.4414,  2.0778,  0.5980,  0.0044],\n",
       "           [ 1.6286,  1.3638, -0.6738,  1.4438,  0.8170],\n",
       "           [ 0.1603, -0.4040, -1.3988,  0.7891, -0.6366],\n",
       "           [-1.5960, -0.5272, -0.2145, -0.8818, -0.1481],\n",
       "           [ 0.3913, -0.8909, -2.1380,  0.0684, -0.2456]],\n",
       "\n",
       "          [[ 0.2092, -0.7005,  1.1889,  0.2613,  0.7010],\n",
       "           [ 0.1623,  1.5868, -0.1010, -0.0337,  1.1119],\n",
       "           [ 1.8882, -0.6697, -1.5372, -0.5868, -0.0546],\n",
       "           [ 0.5344,  0.1874, -0.3976, -0.4106, -0.4669],\n",
       "           [ 0.6763,  0.5561, -1.0502, -0.8678, -1.2994]],\n",
       "\n",
       "          [[-0.4495,  0.4354, -0.6062, -0.3847, -0.3597],\n",
       "           [ 0.0342,  1.9343,  1.2683,  0.5173,  0.2945],\n",
       "           [-0.1845,  1.4590,  1.4733,  2.1234,  0.8476],\n",
       "           [-0.2052, -0.4150, -0.4193, -0.4420, -1.2353],\n",
       "           [-2.3570, -1.0171, -2.3915, -1.8990, -1.2553]],\n",
       "\n",
       "          [[-0.5308, -1.6730,  0.1353,  0.1982,  0.3110],\n",
       "           [-1.1285, -0.6968,  0.5311,  1.0757,  1.8315],\n",
       "           [-0.6755,  1.3188,  0.2724, -0.5143,  1.8068],\n",
       "           [-0.7455, -0.9365,  0.3956, -0.2374,  1.8015],\n",
       "           [-0.1992, -0.7523, -0.7345, -0.1911,  1.5743]],\n",
       "\n",
       "          [[-0.2853, -0.3956,  1.2519,  1.2313, -0.1689],\n",
       "           [-0.7047, -1.4385,  0.3827,  0.9555,  0.5564],\n",
       "           [ 0.8757,  1.1027, -1.3458,  1.3043, -0.9369],\n",
       "           [ 0.3703,  0.6378,  0.5279,  2.0741, -0.1828],\n",
       "           [-1.1265, -1.2150, -2.0650, -0.5680, -1.3829]],\n",
       "\n",
       "          [[ 0.4457,  0.8012,  0.3064, -0.1300,  0.6664],\n",
       "           [ 0.7911,  0.1326,  0.2426, -0.3345,  1.0331],\n",
       "           [ 0.4221, -2.2696, -0.0586,  1.3141,  0.3543],\n",
       "           [ 0.0965,  0.2858, -0.2555, -1.9718,  1.4923],\n",
       "           [ 0.1138, -1.6338,  0.2425, -1.0154,  0.7963]],\n",
       "\n",
       "          [[ 0.0471,  0.3046,  1.0132,  0.2624,  1.0013],\n",
       "           [ 0.9816,  1.5386, -0.1125,  0.4491, -0.2267],\n",
       "           [-0.7167,  0.9303,  1.6095,  0.5006,  1.0837],\n",
       "           [-0.5032, -2.1481, -0.8136, -1.5138, -1.3492],\n",
       "           [-0.5518, -0.0761, -0.8371, -1.4025, -0.8305]],\n",
       "\n",
       "          [[ 0.1650,  0.3883,  1.1479,  0.4439,  0.7331],\n",
       "           [ 0.4844,  1.8817, -1.9604, -0.9234, -1.2177],\n",
       "           [-0.2825, -0.1137, -0.9400, -0.0313, -1.7249],\n",
       "           [-0.7456, -0.5007, -1.8876,  0.4917,  0.4185],\n",
       "           [ 0.7578,  2.3577,  1.5721,  1.6389,  1.0441]]]]],\n",
       "       grad_fn=<ViewBackward0>)"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Flatten [T, N, ...] -> [T*N, ...], run the stateless layers once, then\n",
    "# reshape back: all timesteps are processed in parallel.\n",
    "p_seq = functional.seq_to_ann_forward(x_seq, (conv, bn))\n",
    "# p_seq.shape = [T, N, 8, H, W]\n",
    "\n",
    "net = layer.SeqToANNContainer(conv, bn)\n",
    "q_seq = net(x_seq)\n",
    "print(f\"q_seq.shape: {q_seq.shape}\")\n",
    "# q_seq.shape = [T, N, 8, H, W]\n",
    "\n",
    "# q_seq is identical to p_seq (same flattened computation), but NOT to\n",
    "# z_seq from the earlier cell -- the second print below shows tensor(False).\n",
    "# bn runs in training mode, so it normalizes with batch statistics, and the\n",
    "# flattened [T*N, ...] batch has different statistics than each per-timestep\n",
    "# [N, ...] batch used by multi_step_forward / MultiStepContainer.\n",
    "print((p_seq == q_seq).all())\n",
    "print((p_seq == z_seq).all())\n",
    "p_seq"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "d23012e2-e029-4629-9d82-84dc1de352f8",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "ann.state_dict.keys()=odict_keys(['0.weight', '1.weight', '1.bias', '1.running_mean', '1.running_var', '1.num_batches_tracked'])\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "from spikingjelly.activation_based import functional, layer, neuron\n",
    "\n",
    "\n",
    "# A plain ANN. Its state-dict keys are flat ('0.weight', '1.weight', ...):\n",
    "# each key prefix is simply the module's index inside the Sequential.\n",
    "ann = nn.Sequential(\n",
    "    nn.Conv2d(3, 8, kernel_size=3, padding=1, bias=False),\n",
    "    nn.BatchNorm2d(8),\n",
    "    nn.ReLU()\n",
    ")\n",
    "print(f'ann.state_dict.keys()={ann.state_dict().keys()}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "57b890ee-1e00-427d-8d14-f168c0c14c84",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "net_container.state_dict.keys()=odict_keys(['0.0.weight', '0.1.weight', '0.1.bias', '0.1.running_mean', '0.1.running_var', '0.1.num_batches_tracked'])\n"
     ]
    }
   ],
   "source": [
    "# Wrapping the layers in SeqToANNContainer adds one level to the module\n",
    "# hierarchy, so the keys become '0.0.weight', '0.1.weight', ... -- they no\n",
    "# longer match the plain ann's keys (see the output of the next cells).\n",
    "net_container = nn.Sequential(\n",
    "    layer.SeqToANNContainer(\n",
    "        nn.Conv2d(3, 8, kernel_size=3, padding=1, bias=False),\n",
    "        nn.BatchNorm2d(8),\n",
    "    ),\n",
    "    neuron.IFNode(step_mode='m')\n",
    ")\n",
    "print(f'net_container.state_dict.keys()={net_container.state_dict().keys()}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "5d1f479e-e0ef-4be0-946e-bf9a491c5281",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "net_origin.state_dict.keys()=odict_keys(['0.weight', '1.weight', '1.bias', '1.running_mean', '1.running_var', '1.num_batches_tracked'])\n"
     ]
    }
   ],
   "source": [
    "# spikingjelly's layer.Conv2d is used directly in place of nn.Conv2d, with\n",
    "# no wrapper module, so the hierarchy -- and therefore the state-dict keys\n",
    "# ('0.weight', '1.weight', ...) -- stays identical to the plain ann's.\n",
    "net_origin = nn.Sequential(\n",
    "    layer.Conv2d(3, 8, kernel_size=3, padding=1, bias=False),\n",
    "    nn.BatchNorm2d(8),\n",
    "    neuron.IFNode(step_mode='m')\n",
    ")\n",
    "print(f'net_origin.state_dict.keys()={net_origin.state_dict().keys()}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "id": "bf702bff-6f60-4d90-850d-520581a95345",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "net_container is trying to load state dict from ann...\n",
      "net_container can not load! The error message is\n",
      " Error(s) in loading state_dict for Sequential:\n",
      "\tMissing key(s) in state_dict: \"0.0.weight\", \"0.1.weight\", \"0.1.bias\", \"0.1.running_mean\", \"0.1.running_var\". \n",
      "\tUnexpected key(s) in state_dict: \"0.weight\", \"1.weight\", \"1.bias\", \"1.running_mean\", \"1.running_var\", \"1.num_batches_tracked\". \n",
      "net_origin is trying to load state dict from ann...\n",
      "Load success!\n"
     ]
    }
   ],
   "source": [
    "# Demonstrate which network can load the plain ANN's weights.\n",
    "# load_state_dict raises RuntimeError on a key mismatch; catch exactly that\n",
    "# instead of BaseException, which would also swallow KeyboardInterrupt and\n",
    "# SystemExit and hide unrelated programming errors.\n",
    "try:\n",
    "    print('net_container is trying to load state dict from ann...')\n",
    "    net_container.load_state_dict(ann.state_dict())\n",
    "    print('Load success!')\n",
    "except RuntimeError as e:\n",
    "    print('net_container can not load! The error message is\\n', e)\n",
    "\n",
    "try:\n",
    "    print('net_origin is trying to load state dict from ann...')\n",
    "    net_origin.load_state_dict(ann.state_dict())\n",
    "    print('Load success!')\n",
    "except RuntimeError as e:\n",
    "    print('net_origin can not load! The error message is', e)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.14"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
