{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "0155f6ab-c25b-41f4-8ee8-577a2e58930b",
   "metadata": {},
   "source": [
    "# 4 梯度替代"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "fd182062-23d1-4045-a120-c9ff3d5fb103",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "x=tensor([ 0.3423, -0.3934,  0.0680, -0.4670,  0.4730, -0.3250, -0.2425, -0.4453],\n",
      "       requires_grad=True)\n",
      "y=tensor([1., 0., 1., 0., 1., 0., 0., 0.], grad_fn=<sigmoidBackward>)\n",
      "x.grad=tensor([0.6466, 0.5689, 0.9818, 0.4635, 0.4554, 0.6732, 0.7973, 0.4935])\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "from spikingjelly.activation_based import surrogate\n",
    "\n",
    "sg = surrogate.Sigmoid(alpha=4.)\n",
    "\n",
    "x = torch.rand([8]) - 0.5\n",
    "x.requires_grad = True\n",
    "y = sg(x)\n",
    "y.sum().backward()\n",
    "print(f'x={x}')\n",
    "print(f'y={y}')\n",
    "print(f'x.grad={x.grad}')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ebe4b268-56e0-491e-87da-c5ac1c71bea3",
   "metadata": {},
   "source": [
    "## 函数风格API的用法示例"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "8f86d897-be71-4346-9377-5bc0e54b3ad8",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "x=tensor([-0.2255, -0.3481, -0.3292,  0.2155, -0.3839, -0.1287, -0.0280, -0.1504],\n",
      "       requires_grad=True)\n",
      "y=tensor([0., 0., 0., 1., 0., 0., 0., 0.], grad_fn=<sigmoidBackward>)\n",
      "x.grad=tensor([0.8213, 0.6377, 0.6667, 0.8351, 0.5832, 0.9366, 0.9969, 0.9147])\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "from spikingjelly.activation_based import surrogate\n",
    "\n",
    "alpha = 4.\n",
    "x = torch.rand([8]) - 0.5\n",
    "x.requires_grad = True\n",
    "y = surrogate.sigmoid.apply(x, alpha)\n",
    "y.sum().backward()\n",
    "print(f'x={x}')\n",
    "print(f'y={y}')\n",
    "print(f'x.grad={x.grad}')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8a8bb82e-63fe-4d3a-886b-9ff9e09818da",
   "metadata": {},
   "source": [
    "# 5 监视器"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "103dba92-f771-4961-904d-3a4894224fb8",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "from spikingjelly.activation_based import monitor, neuron, functional, layer\n",
    "\n",
    "net = nn.Sequential(\n",
    "    layer.Linear(8, 4),\n",
    "    neuron.IFNode(),\n",
    "    layer.Linear(4, 2),\n",
    "    neuron.IFNode()\n",
    ")\n",
    "\n",
    "for param in net.parameters():\n",
    "    param.data.abs_()\n",
    "\n",
    "functional.set_step_mode(net, 'm')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "3fff30ac-d958-4e03-a970-777ef54634fc",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[[3.2044e-01, 7.4399e-01, 6.7130e-01, 1.7881e-01, 9.2793e-01,\n",
       "          6.6905e-01, 9.8539e-01, 9.2431e-01]],\n",
       "\n",
       "        [[8.0416e-01, 3.1813e-01, 9.2965e-04, 2.1816e-02, 7.1104e-01,\n",
       "          9.5833e-01, 2.1858e-01, 2.3780e-01]],\n",
       "\n",
       "        [[1.7810e-01, 3.3067e-01, 7.9970e-01, 1.9733e-01, 1.6314e-01,\n",
       "          1.1327e-01, 2.6158e-01, 4.8034e-01]],\n",
       "\n",
       "        [[5.4038e-01, 6.3446e-01, 3.4203e-01, 3.2293e-01, 7.4836e-01,\n",
       "          5.1645e-01, 1.5953e-03, 2.9002e-01]]])"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "spike_seq_monitor = monitor.OutputMonitor(net, neuron.IFNode)\n",
    "T = 4\n",
    "N = 1\n",
    "x_seq = torch.rand([T, N, 8])\n",
    "\n",
    "with torch.no_grad():\n",
    "    net(x_seq)\n",
    "\n",
    "x_seq"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "9d000cd3-0bf6-46c5-9a38-5193c5e4b631",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "spike_seq_monitor.records=\n",
      "[tensor([[[1., 1., 0., 0.]],\n",
      "\n",
      "        [[0., 0., 1., 1.]],\n",
      "\n",
      "        [[1., 1., 0., 0.]],\n",
      "\n",
      "        [[0., 0., 1., 1.]]]), tensor([[[0., 0.]],\n",
      "\n",
      "        [[1., 1.]],\n",
      "\n",
      "        [[0., 0.]],\n",
      "\n",
      "        [[1., 1.]]])]\n"
     ]
    }
   ],
   "source": [
    "print(f'spike_seq_monitor.records=\\n{spike_seq_monitor.records}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "53906bea-75b6-4b92-be1e-8bacc0f4a2fe",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "spike_seq_monitor[1]=tensor([[[0., 0.]],\n",
      "\n",
      "        [[1., 1.]],\n",
      "\n",
      "        [[0., 0.]],\n",
      "\n",
      "        [[1., 1.]]])\n"
     ]
    }
   ],
   "source": [
    "print(f'spike_seq_monitor[1]={spike_seq_monitor[1]}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "0e53faa5-4fbb-42c7-941d-4f976642ab1d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "net=Sequential(\n",
      "  (0): Linear(in_features=8, out_features=4, bias=True)\n",
      "  (1): IFNode(\n",
      "    v_threshold=1.0, v_reset=0.0, detach_reset=False, step_mode=m, backend=torch\n",
      "    (surrogate_function): Sigmoid(alpha=4.0, spiking=True)\n",
      "  )\n",
      "  (2): Linear(in_features=4, out_features=2, bias=True)\n",
      "  (3): IFNode(\n",
      "    v_threshold=1.0, v_reset=0.0, detach_reset=False, step_mode=m, backend=torch\n",
      "    (surrogate_function): Sigmoid(alpha=4.0, spiking=True)\n",
      "  )\n",
      ")\n",
      "spike_seq_monitor.monitored_layers=['1', '3']\n"
     ]
    }
   ],
   "source": [
    "print(f'net={net}')\n",
    "print(f'spike_seq_monitor.monitored_layers={spike_seq_monitor.monitored_layers}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "e8e21ab1-e91b-4497-9ca2-d9218b6a0948",
   "metadata": {},
   "outputs": [],
   "source": [
    "def cal_firing_rate(s_seq: torch.Tensor):\n",
    "    # s_seq.shape = [T, N, *]\n",
    "    return s_seq.flatten(1).mean(1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "a585abec-d3f0-4701-8e94-97d82f21e75a",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([4, 8])"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "x_seq.flatten(1).shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "f10124d8-064c-4f21-9916-1e2c329b2890",
   "metadata": {},
   "outputs": [],
   "source": [
    "fr_monitor = monitor.OutputMonitor(net, neuron.IFNode, cal_firing_rate)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "8b76c85d-4ebc-435a-b3d9-bbe2dd91d1cf",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "after call fr_monitor.disable(), fr_monitor.records=\n",
      "[]\n",
      "after call fr_monitor.enable(), fr_monitor.records=\n",
      "[tensor([0.5000, 0.5000, 0.5000, 0.5000]), tensor([0., 1., 0., 1.])]\n"
     ]
    }
   ],
   "source": [
    "with torch.no_grad():\n",
    "    functional.reset_net(net)\n",
    "    fr_monitor.disable()\n",
    "    net(x_seq)\n",
    "    functional.reset_net(net)\n",
    "    print(f'after call fr_monitor.disable(), fr_monitor.records=\\n{fr_monitor.records}')\n",
    "\n",
    "    fr_monitor.enable()\n",
    "    net(x_seq)\n",
    "    print(f'after call fr_monitor.enable(), fr_monitor.records=\\n{fr_monitor.records}')\n",
    "    functional.reset_net(net)\n",
    "    del fr_monitor"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "10bdedf6-5d96-4a56-abea-3289a3215fab",
   "metadata": {},
   "source": [
    "## 记录模块成员变量"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "58d04051-bba7-4d15-8c51-f0e7a8c26a9e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "v_seq_monitor.records=\n",
      "[tensor([[[0.0000, 0.0000, 0.9599, 0.7849]],\n",
      "\n",
      "        [[0.9703, 0.8989, 0.0000, 0.0000]],\n",
      "\n",
      "        [[0.0000, 0.0000, 0.5277, 0.5013]],\n",
      "\n",
      "        [[0.8303, 0.9024, 0.0000, 0.0000]]]), tensor([[[0.5559, 0.5877]],\n",
      "\n",
      "        [[0.0000, 0.0000]],\n",
      "\n",
      "        [[0.5559, 0.5877]],\n",
      "\n",
      "        [[0.0000, 0.0000]]])]\n"
     ]
    }
   ],
   "source": [
    "for m in net.modules():\n",
    "    if isinstance(m, neuron.IFNode):\n",
    "        m.store_v_seq = True\n",
    "\n",
    "v_seq_monitor = monitor.AttributeMonitor('v_seq', pre_forward=False, net=net, instance=neuron.IFNode)\n",
    "with torch.no_grad():\n",
    "    net(x_seq)\n",
    "    print(f'v_seq_monitor.records=\\n{v_seq_monitor.records}')\n",
    "    functional.reset_net(net)\n",
    "    del v_seq_monitor"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4a79707b-5858-4a0d-a58c-5427be4a1d3b",
   "metadata": {},
   "source": [
    "## 记录模块输入"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "b29043da-4fac-4d87-bed7-41d91e2639f4",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "input_monitor.records=\n",
      "[tensor([[[1.3886, 1.3785, 0.9599, 0.7849]],\n",
      "\n",
      "        [[0.9703, 0.8989, 0.6064, 0.6169]],\n",
      "\n",
      "        [[0.7474, 0.6899, 0.5277, 0.5013]],\n",
      "\n",
      "        [[0.8303, 0.9024, 0.6127, 0.6137]]]), tensor([[[0.5559, 0.5877]],\n",
      "\n",
      "        [[0.5299, 1.0261]],\n",
      "\n",
      "        [[0.5559, 0.5877]],\n",
      "\n",
      "        [[0.5299, 1.0261]]])]\n"
     ]
    }
   ],
   "source": [
    "input_monitor = monitor.InputMonitor(net, neuron.IFNode)\n",
    "with torch.no_grad():\n",
    "    net(x_seq)\n",
    "    print(f'input_monitor.records=\\n{input_monitor.records}')\n",
    "    functional.reset_net(net)\n",
    "    del input_monitor"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "29aa1a4d-a42d-4e9e-81ac-3f942d269ede",
   "metadata": {},
   "source": [
    "## 记录模块的输出梯度$\\frac{\\partial L}{\\partial Y}$"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "e50c8f87-080e-4a03-a752-faa45894f602",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "spike_seq_grad_monitor.records=\n",
      "[tensor([[[1., 1.]],\n",
      "\n",
      "        [[1., 1.]],\n",
      "\n",
      "        [[1., 1.]],\n",
      "\n",
      "        [[1., 1.]]]), tensor([[[ 0.1457,  0.0423,  0.1593,  0.2416]],\n",
      "\n",
      "        [[-0.0862, -0.0438, -0.1009, -0.0463]],\n",
      "\n",
      "        [[ 0.4238,  0.1792,  0.4831,  0.4130]],\n",
      "\n",
      "        [[ 0.3003,  0.1451,  0.3487,  0.1992]]])]\n"
     ]
    }
   ],
   "source": [
    "spike_seq_grad_monitor = monitor.GradOutputMonitor(net, neuron.IFNode)\n",
    "net(x_seq).sum().backward()\n",
    "print(f'spike_seq_grad_monitor.records=\\n{spike_seq_grad_monitor.records}')\n",
    "functional.reset_net(net)\n",
    "del spike_seq_grad_monitor"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "af08bd3a-1f78-4c5e-bccb-1543608618b3",
   "metadata": {},
   "source": [
    "## 记录模块的输入梯度$\\frac{\\partial L}{\\partial X}$"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "281b899e-42ad-4f67-a6d3-da6e8c5f12a9",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "x_seq: tensor([[[0.1316, 0.2103, 0.6254, 0.2426, 0.2512, 0.5407, 0.7536, 0.8359]],\n",
      "\n",
      "        [[0.8314, 0.4819, 0.8313, 0.5193, 0.7946, 0.4317, 0.7771, 0.2008]],\n",
      "\n",
      "        [[0.7576, 0.3544, 0.1206, 0.0089, 0.7968, 0.5960, 0.1490, 0.4545]],\n",
      "\n",
      "        [[0.6637, 0.5811, 0.9735, 0.5649, 0.8999, 0.0139, 0.0996, 0.7273]]])\n",
      "alpha=0.1, input_grad_monitor.records=\n",
      "[tensor(0.3866), tensor(0.0151), tensor(0.0004), tensor(8.1184e-06), tensor(1.3254e-07), tensor(2.7747e-09), tensor(6.1243e-11), tensor(1.1518e-12), tensor(2.3432e-14), tensor(3.3038e-16)]\n",
      "\n",
      "alpha=0.5, input_grad_monitor.records=\n",
      "[tensor(1.7588), tensor(0.3149), tensor(0.0387), tensor(0.0035), tensor(0.0002), tensor(2.2940e-05), tensor(2.3701e-06), tensor(2.1575e-07), tensor(2.0149e-08), tensor(1.3258e-09)]\n",
      "\n",
      "alpha=2, input_grad_monitor.records=\n",
      "[tensor(3.4888), tensor(1.0388), tensor(0.2216), tensor(0.0546), tensor(0.0097), tensor(0.0022), tensor(0.0005), tensor(0.0001), tensor(3.4679e-05), tensor(6.5298e-06)]\n",
      "\n",
      "alpha=4, input_grad_monitor.records=\n",
      "[tensor(4.4175), tensor(1.3824), tensor(0.3056), tensor(0.1107), tensor(0.0243), tensor(0.0029), tensor(0.0006), tensor(0.0002), tensor(6.1846e-05), tensor(2.8485e-05)]\n",
      "\n",
      "alpha=8, input_grad_monitor.records=\n",
      "[tensor(6.3680), tensor(2.1610), tensor(0.5254), tensor(0.2090), tensor(0.0632), tensor(0.0088), tensor(0.0016), tensor(0.0008), tensor(0.0004), tensor(0.0002)]\n",
      "\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "from spikingjelly.activation_based import monitor, neuron, functional, layer, surrogate\n",
    "\n",
    "net = []\n",
    "for i in range(10):\n",
    "    net.append(layer.Linear(8, 8))\n",
    "    net.append(neuron.IFNode())\n",
    "\n",
    "net = nn.Sequential(*net)\n",
    "\n",
    "functional.set_step_mode(net, 'm')\n",
    "\n",
    "T = 4\n",
    "N = 1\n",
    "x_seq = torch.rand([T, N, 8])\n",
    "print(f\"x_seq: {x_seq}\")\n",
    "\n",
    "input_grad_monitor = monitor.GradInputMonitor(net, neuron.IFNode, function_on_grad_input=torch.norm)\n",
    "\n",
    "for alpha in [0.1, 0.5, 2, 4, 8]:\n",
    "    for m in net.modules():\n",
    "        if isinstance(m, surrogate.Sigmoid):\n",
    "            m.alpha = alpha\n",
    "    net(x_seq).sum().backward()\n",
    "    print(f'alpha={alpha}, input_grad_monitor.records=\\n{input_grad_monitor.records}\\n')\n",
    "    functional.reset_net(net)\n",
    "    # zero grad\n",
    "    for param in net.parameters():\n",
    "        param.grad.zero_()\n",
    "\n",
    "    input_grad_monitor.records.clear()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "39bbbbd9-a397-4efb-a5ed-c571fff56b31",
   "metadata": {},
   "source": [
    "## 降低内存占用"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "3c22293e-a561-4f12-a8cd-3f5425ac3b22",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "float32 size = 4096\n",
      "torch.bool size = 1024\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "\n",
    "def tensor_memory(x: torch.Tensor):\n",
    "    return x.element_size() * x.numel()\n",
    "\n",
    "N = 1 << 10\n",
    "spike = torch.randint(0, 2, [N]).float()\n",
    "\n",
    "print('float32 size =', tensor_memory(spike))\n",
    "print('torch.bool size =', tensor_memory(spike.to(torch.bool)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "724bd1a9-ce9f-448d-b421-cbd1f7d3bdf9",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "float32 size = 4096\n",
      "torch.bool size = 1024\n"
     ]
    },
    {
     "ename": "AssertionError",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mAssertionError\u001b[0m                            Traceback (most recent call last)",
      "Cell \u001b[0;32mIn[19], line 14\u001b[0m\n\u001b[1;32m     10\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mtorch.bool size =\u001b[39m\u001b[38;5;124m'\u001b[39m, tensor_memory(spike\u001b[38;5;241m.\u001b[39mto(torch\u001b[38;5;241m.\u001b[39mbool)))\n\u001b[1;32m     12\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mspikingjelly\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mactivation_based\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m tensor_cache\n\u001b[0;32m---> 14\u001b[0m spike_b, s_dtype, s_shape, s_padding \u001b[38;5;241m=\u001b[39m \u001b[43mtensor_cache\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfloat_spike_to_bool\u001b[49m\u001b[43m(\u001b[49m\u001b[43mspike\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m     15\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mbool size =\u001b[39m\u001b[38;5;124m'\u001b[39m, tensor_memory(spike_b))\n\u001b[1;32m     17\u001b[0m spike_recover \u001b[38;5;241m=\u001b[39m tensor_cache\u001b[38;5;241m.\u001b[39mbool_spike_to_float(spike_b, s_dtype, s_shape, s_padding)\n",
      "File \u001b[0;32m~/anaconda3/envs/spikingjelly/lib/python3.10/site-packages/spikingjelly/activation_based/tensor_cache.py:123\u001b[0m, in \u001b[0;36mfloat_spike_to_bool\u001b[0;34m(spike)\u001b[0m\n\u001b[1;32m    115\u001b[0m     kernel_args \u001b[38;5;241m=\u001b[39m [spike, spike_b, numel]\n\u001b[1;32m    116\u001b[0m     kernel \u001b[38;5;241m=\u001b[39m cupy\u001b[38;5;241m.\u001b[39mRawKernel(\n\u001b[1;32m    117\u001b[0m         kernel_codes,\n\u001b[1;32m    118\u001b[0m         kernel_name,\n\u001b[1;32m    119\u001b[0m         options\u001b[38;5;241m=\u001b[39mconfigure\u001b[38;5;241m.\u001b[39mcuda_compiler_options, backend\u001b[38;5;241m=\u001b[39mconfigure\u001b[38;5;241m.\u001b[39mcuda_compiler_backend\n\u001b[1;32m    120\u001b[0m     )\n\u001b[1;32m    121\u001b[0m     kernel(\n\u001b[1;32m    122\u001b[0m         (blocks,), (configure\u001b[38;5;241m.\u001b[39mcuda_threads,),\n\u001b[0;32m--> 123\u001b[0m         \u001b[43mcuda_utils\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mwrap_args_to_raw_kernel\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m    124\u001b[0m \u001b[43m            \u001b[49m\u001b[43mdevice_id\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    125\u001b[0m \u001b[43m            \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkernel_args\u001b[49m\n\u001b[1;32m    126\u001b[0m \u001b[43m        \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    127\u001b[0m     )\n\u001b[1;32m    128\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m spike_b, s_dtype, s_shape, s_padding\n",
      "File \u001b[0;32m~/anaconda3/envs/spikingjelly/lib/python3.10/site-packages/spikingjelly/activation_based/cuda_utils.py:249\u001b[0m, in \u001b[0;36mwrap_args_to_raw_kernel\u001b[0;34m(device, *args)\u001b[0m\n\u001b[1;32m    246\u001b[0m     ret_list\u001b[38;5;241m.\u001b[39mappend(item\u001b[38;5;241m.\u001b[39mdata_ptr())\n\u001b[1;32m    248\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(item, cupy\u001b[38;5;241m.\u001b[39mndarray):\n\u001b[0;32m--> 249\u001b[0m     \u001b[38;5;28;01massert\u001b[39;00m item\u001b[38;5;241m.\u001b[39mdevice\u001b[38;5;241m.\u001b[39mid \u001b[38;5;241m==\u001b[39m device\n\u001b[1;32m    250\u001b[0m     \u001b[38;5;28;01massert\u001b[39;00m item\u001b[38;5;241m.\u001b[39mflags[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mC_CONTIGUOUS\u001b[39m\u001b[38;5;124m'\u001b[39m]\n\u001b[1;32m    251\u001b[0m     ret_list\u001b[38;5;241m.\u001b[39mappend(item)\n",
      "\u001b[0;31mAssertionError\u001b[0m: "
     ]
    }
   ],
   "source": [
    "import torch\n",
    "\n",
    "def tensor_memory(x: torch.Tensor):\n",
    "    return x.element_size() * x.numel()\n",
    "\n",
    "N = 1 << 10\n",
    "spike = torch.randint(0, 2, [N]).float()\n",
    "\n",
    "print('float32 size =', tensor_memory(spike))\n",
    "print('torch.bool size =', tensor_memory(spike.to(torch.bool)))\n",
    "\n",
    "from spikingjelly.activation_based import tensor_cache\n",
    "\n",
    "spike_b, s_dtype, s_shape, s_padding = tensor_cache.float_spike_to_bool(spike)\n",
    "print('bool size =', tensor_memory(spike_b))\n",
    "\n",
    "spike_recover = tensor_cache.bool_spike_to_float(spike_b, s_dtype, s_shape, s_padding)\n",
    "print('spike == spike_recover?', torch.equal(spike, spike_recover))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "c7c8714e-9939-4995-9d8f-a8ebfe003fcc",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "-1"
      ]
     },
     "execution_count": 20,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "spike.get_device()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "0945075c-2531-49dc-af17-dc2b31d094cb",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<spikingjelly.activation_based.cuda_utils.DeviceEnvironment at 0x7f149de3d3f0>"
      ]
     },
     "execution_count": 23,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from spikingjelly.activation_based import cuda_utils\n",
    "\n",
    "cuda_utils.DeviceEnvironment(-1)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "aab7d51d-1392-4782-9f3b-032e0d219b7d",
   "metadata": {},
   "source": [
    "对于稀疏的脉冲，还可以考虑使用 zlib 等库进行进一步的压缩。下面是对发放率为0.2的脉冲进行进一步压缩的例子："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "e06ee1a8-2c30-45a6-98ac-063ae730f918",
   "metadata": {},
   "outputs": [
    {
     "ename": "AssertionError",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mAssertionError\u001b[0m                            Traceback (most recent call last)",
      "Cell \u001b[0;32mIn[24], line 11\u001b[0m\n\u001b[1;32m      8\u001b[0m N \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m1\u001b[39m \u001b[38;5;241m<<\u001b[39m \u001b[38;5;241m20\u001b[39m\n\u001b[1;32m      9\u001b[0m spike \u001b[38;5;241m=\u001b[39m (torch\u001b[38;5;241m.\u001b[39mrand([N]) \u001b[38;5;241m>\u001b[39m \u001b[38;5;241m0.8\u001b[39m)\u001b[38;5;241m.\u001b[39mfloat()\n\u001b[0;32m---> 11\u001b[0m spike_b, s_dtype, s_shape, s_padding \u001b[38;5;241m=\u001b[39m \u001b[43mtensor_cache\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfloat_spike_to_bool\u001b[49m\u001b[43m(\u001b[49m\u001b[43mspike\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m     13\u001b[0m arr \u001b[38;5;241m=\u001b[39m spike_b\u001b[38;5;241m.\u001b[39mnumpy()\n\u001b[1;32m     15\u001b[0m compressed_arr \u001b[38;5;241m=\u001b[39m zlib\u001b[38;5;241m.\u001b[39mcompress(arr\u001b[38;5;241m.\u001b[39mtobytes())\n",
      "File \u001b[0;32m~/anaconda3/envs/spikingjelly/lib/python3.10/site-packages/spikingjelly/activation_based/tensor_cache.py:123\u001b[0m, in \u001b[0;36mfloat_spike_to_bool\u001b[0;34m(spike)\u001b[0m\n\u001b[1;32m    115\u001b[0m     kernel_args \u001b[38;5;241m=\u001b[39m [spike, spike_b, numel]\n\u001b[1;32m    116\u001b[0m     kernel \u001b[38;5;241m=\u001b[39m cupy\u001b[38;5;241m.\u001b[39mRawKernel(\n\u001b[1;32m    117\u001b[0m         kernel_codes,\n\u001b[1;32m    118\u001b[0m         kernel_name,\n\u001b[1;32m    119\u001b[0m         options\u001b[38;5;241m=\u001b[39mconfigure\u001b[38;5;241m.\u001b[39mcuda_compiler_options, backend\u001b[38;5;241m=\u001b[39mconfigure\u001b[38;5;241m.\u001b[39mcuda_compiler_backend\n\u001b[1;32m    120\u001b[0m     )\n\u001b[1;32m    121\u001b[0m     kernel(\n\u001b[1;32m    122\u001b[0m         (blocks,), (configure\u001b[38;5;241m.\u001b[39mcuda_threads,),\n\u001b[0;32m--> 123\u001b[0m         \u001b[43mcuda_utils\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mwrap_args_to_raw_kernel\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m    124\u001b[0m \u001b[43m            \u001b[49m\u001b[43mdevice_id\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    125\u001b[0m \u001b[43m            \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkernel_args\u001b[49m\n\u001b[1;32m    126\u001b[0m \u001b[43m        \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    127\u001b[0m     )\n\u001b[1;32m    128\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m spike_b, s_dtype, s_shape, s_padding\n",
      "File \u001b[0;32m~/anaconda3/envs/spikingjelly/lib/python3.10/site-packages/spikingjelly/activation_based/cuda_utils.py:249\u001b[0m, in \u001b[0;36mwrap_args_to_raw_kernel\u001b[0;34m(device, *args)\u001b[0m\n\u001b[1;32m    246\u001b[0m     ret_list\u001b[38;5;241m.\u001b[39mappend(item\u001b[38;5;241m.\u001b[39mdata_ptr())\n\u001b[1;32m    248\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(item, cupy\u001b[38;5;241m.\u001b[39mndarray):\n\u001b[0;32m--> 249\u001b[0m     \u001b[38;5;28;01massert\u001b[39;00m item\u001b[38;5;241m.\u001b[39mdevice\u001b[38;5;241m.\u001b[39mid \u001b[38;5;241m==\u001b[39m device\n\u001b[1;32m    250\u001b[0m     \u001b[38;5;28;01massert\u001b[39;00m item\u001b[38;5;241m.\u001b[39mflags[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mC_CONTIGUOUS\u001b[39m\u001b[38;5;124m'\u001b[39m]\n\u001b[1;32m    251\u001b[0m     ret_list\u001b[38;5;241m.\u001b[39mappend(item)\n",
      "\u001b[0;31mAssertionError\u001b[0m: "
     ]
    }
   ],
   "source": [
    "import torch\n",
    "import zlib\n",
    "from spikingjelly.activation_based import tensor_cache\n",
    "\n",
    "def tensor_memory(x: torch.Tensor):\n",
    "    return x.element_size() * x.numel()\n",
    "\n",
    "N = 1 << 20\n",
    "spike = (torch.rand([N]) > 0.8).float()\n",
    "\n",
    "spike_b, s_dtype, s_shape, s_padding = tensor_cache.float_spike_to_bool(spike)\n",
    "\n",
    "arr = spike_b.numpy()\n",
    "\n",
    "compressed_arr = zlib.compress(arr.tobytes())\n",
    "\n",
    "print(\"compressed ratio:\", len(compressed_arr) / arr.nbytes * tensor_memory(spike_b) / tensor_memory(spike))"
   ]
   }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.14"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
