{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "d382369d",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.profiler\n",
    "import time\n",
    "import os\n",
    "\n",
    "# Pick the CUDA device when available, otherwise fall back to CPU.\n",
    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "d56ed7ee",
   "metadata": {},
   "outputs": [],
   "source": [
    "from deepsignal3.mtms.mtm import MTM"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "371ef7f1",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "MTM(\n",
       "  (embedding): Embedding(16, 4)\n",
       "  (chn_emb): Embedding(5, 128)\n",
       "  (inp_layer): TokenMixingLayer(\n",
       "    (temporal): TemporalAttn(\n",
       "      (wq): Linear(in_features=128, out_features=128, bias=True)\n",
       "      (wk): Linear(in_features=128, out_features=128, bias=True)\n",
       "      (wv): Linear(in_features=128, out_features=128, bias=True)\n",
       "      (drop): Dropout(p=0.2, inplace=False)\n",
       "      (layer_norm): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "      (layer_scale): LayerScale()\n",
       "    )\n",
       "    (mixer): TokenMixingAttn(\n",
       "      (wq): Linear(in_features=128, out_features=128, bias=True)\n",
       "      (wk): Linear(in_features=128, out_features=128, bias=True)\n",
       "      (wv): Linear(in_features=128, out_features=128, bias=True)\n",
       "      (drop): Dropout(p=0.2, inplace=False)\n",
       "      (layer_norm): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "      (layer_scale): LayerScale()\n",
       "    )\n",
       "    (channel): ChannelAttn(\n",
       "      (wq): Linear(in_features=128, out_features=128, bias=True)\n",
       "      (wk): Linear(in_features=128, out_features=128, bias=True)\n",
       "      (wv): Linear(in_features=128, out_features=128, bias=True)\n",
       "      (drop): Dropout(p=0.2, inplace=False)\n",
       "      (layer_norm): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "      (layer_scale): LayerScale()\n",
       "    )\n",
       "    (mlp3): MLP(\n",
       "      (net): Sequential(\n",
       "        (0): Linear(in_features=128, out_features=512, bias=True)\n",
       "        (1): GELU(approximate='none')\n",
       "        (2): Linear(in_features=512, out_features=128, bias=True)\n",
       "        (3): LayerScale()\n",
       "        (4): DropPath(drop_prob=0.200)\n",
       "      )\n",
       "      (norm): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "    )\n",
       "  )\n",
       "  (mixers): ModuleList(\n",
       "    (0-3): 4 x TokenMixingLayer(\n",
       "      (temporal): TemporalAttn(\n",
       "        (wq): Linear(in_features=128, out_features=128, bias=True)\n",
       "        (wk): Linear(in_features=128, out_features=128, bias=True)\n",
       "        (wv): Linear(in_features=128, out_features=128, bias=True)\n",
       "        (drop): Dropout(p=0.2, inplace=False)\n",
       "        (layer_norm): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "        (layer_scale): LayerScale()\n",
       "      )\n",
       "      (mixer): TokenMixingAttn(\n",
       "        (wq): Linear(in_features=128, out_features=128, bias=True)\n",
       "        (wk): Linear(in_features=128, out_features=128, bias=True)\n",
       "        (wv): Linear(in_features=128, out_features=128, bias=True)\n",
       "        (drop): Dropout(p=0.2, inplace=False)\n",
       "        (layer_norm): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "        (layer_scale): LayerScale()\n",
       "      )\n",
       "      (channel): ChannelAttn(\n",
       "        (wq): Linear(in_features=128, out_features=128, bias=True)\n",
       "        (wk): Linear(in_features=128, out_features=128, bias=True)\n",
       "        (wv): Linear(in_features=128, out_features=128, bias=True)\n",
       "        (drop): Dropout(p=0.2, inplace=False)\n",
       "        (layer_norm): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "        (layer_scale): LayerScale()\n",
       "      )\n",
       "      (mlp3): MLP(\n",
       "        (net): Sequential(\n",
       "          (0): Linear(in_features=128, out_features=512, bias=True)\n",
       "          (1): GELU(approximate='none')\n",
       "          (2): Linear(in_features=512, out_features=128, bias=True)\n",
       "          (3): LayerScale()\n",
       "          (4): DropPath(drop_prob=0.200)\n",
       "        )\n",
       "        (norm): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "      )\n",
       "    )\n",
       "  )\n",
       "  (samplers): ModuleList(\n",
       "    (0-3): 4 x DownsampleLayer(\n",
       "      (down): Downsample(\n",
       "        (lin): Linear(in_features=256, out_features=128, bias=True)\n",
       "      )\n",
       "    )\n",
       "  )\n",
       "  (cls_head): CLSHead(\n",
       "    (net): Sequential(\n",
       "      (0): Linear(in_features=129, out_features=512, bias=True)\n",
       "      (1): GELU(approximate='none')\n",
       "      (2): Dropout(p=0.2, inplace=False)\n",
       "      (3): Linear(in_features=512, out_features=2, bias=True)\n",
       "    )\n",
       "  )\n",
       ")"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "D_MODEL = 128          # args.hid_rnn\n",
    "N_EMBED = 4          # args.n_embed\n",
    "SEQ_LEN = 21           # args.seq_len (length of the clipped time sequence)\n",
    "SIGNAL_LEN_S = 15      # args.signal_len (signal points per position; S=15 here)\n",
    "NUM_LAYERS = 4         # assume the model has 4 layers (mixers)\n",
    "BATCH_SIZE = 16        # assumed inference batch size\n",
    "\n",
    "# Total sequence length L_S = SEQ_LEN * SIGNAL_LEN_S\n",
    "L_S_ORIG = SEQ_LEN * SIGNAL_LEN_S # original L_S: 21 * 15 = 315\n",
    "\n",
    "# Reduced total sequence length (the L_new = 5/21 * L_orig case from the question)\n",
    "# Assume SEQ_LEN itself is reduced, i.e. 5 * S\n",
    "SEQ_LEN_NEW = 5 \n",
    "L_S_NEW = SEQ_LEN_NEW * SIGNAL_LEN_S # reduced L_S: 5 * 15 = 75\n",
    "\n",
    "# Simulated MTM model hyperparameters\n",
    "model_params = {\n",
    "    'num_chn': 1 + N_EMBED, # simplified channel count\n",
    "    'd_static': 1,\n",
    "    'num_cls': 2,\n",
    "    'ratios': [2, 2, 2, 2],\n",
    "    'd_model': D_MODEL,\n",
    "    'r_hid': NUM_LAYERS,\n",
    "    'drop': 0.2,\n",
    "    'norm_first': True,\n",
    "    'down_mode': 'concat',\n",
    "    'vocab_size': 16, # assumed k-mer vocabulary size\n",
    "    'embedding_size': N_EMBED,\n",
    "    'use_channel_attn': True, # assume the simplified variant\n",
    "    'use_mixer': True\n",
    "\n",
    "}\n",
    "\n",
    "model = MTM(**model_params).to(device)\n",
    "model.eval()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "201b468c",
   "metadata": {},
   "outputs": [],
   "source": [
    "def create_input(L_S, B):\n",
    "    signals = torch.randn(B, L_S, 1).to(device).float()\n",
    "    kmer = torch.randint(0, 15, (B, L_S)).to(device).long()\n",
    "    \n",
    "    # 构造 x_mask: (B, L_S, C_mtm)\n",
    "    # C_mtm 简化为 1 + N_EMBED\n",
    "    C_MTM = 1 + N_EMBED \n",
    "    x_mask = torch.zeros(B, L_S, C_MTM).to(device).bool() \n",
    "    \n",
    "    # 构造 t: (B, L_S)\n",
    "    t = torch.arange(L_S, device=device).repeat(B, 1).long()\n",
    "    \n",
    "    # 构造 x_static: (B, D_static)\n",
    "    x_static = torch.randint(0, 5, (B, 1)).to(device).long()\n",
    "    \n",
    "    return signals, kmer, x_mask, t, x_static"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "d8ce0df1",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "--- MTM 模型性能分析 (设备: cpu) ---\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/xiaoyf/anaconda3/envs/deepsignal3/lib/python3.12/site-packages/torch/autograd/profiler.py:271: UserWarning: CUDA is not available, disabling CUDA profiling\n",
      "  warn(\"CUDA is not available, disabling CUDA profiling\")\n",
      "ERROR:2025-11-16 19:49:08 2396328:2396328 DeviceProperties.cpp:47] gpuGetDeviceCount failed with code 35\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "======== 序列长度: L_ORIG (210) (L_S=315) ========\n",
      "--------------------------------------------------  ------------  ------------  ------------  ------------  ------------  ------------  ------------  ------------  \n",
      "                                              Name    Self CPU %      Self CPU   CPU total %     CPU total  CPU time avg       CPU Mem  Self CPU Mem    # of Calls  \n",
      "--------------------------------------------------  ------------  ------------  ------------  ------------  ------------  ------------  ------------  ------------  \n",
      "                                     ProfilerStep*        15.32%     100.934ms       100.00%     658.688ms     131.738ms           0 B      -9.68 GB             5  \n",
      "                                   aten::embedding         0.01%      84.889us         0.08%     516.034us      51.603us     406.25 KB           0 B            10  \n",
      "                                     aten::reshape         0.71%       4.678ms         4.04%      26.594ms      15.240us     723.05 MB           0 B          1745  \n",
      "                                        aten::view         0.78%       5.124ms         0.78%       5.124ms       2.194us           0 B           0 B          2335  \n",
      "                                aten::index_select         0.04%     279.289us         0.06%     401.538us      40.154us     406.25 KB     406.25 KB            10  \n",
      "                                       aten::empty         0.76%       5.003ms         0.76%       5.003ms       1.913us       2.01 GB       2.01 GB          2615  \n",
      "                                      aten::select         0.69%       4.557ms         0.93%       6.138ms       4.263us           0 B           0 B          1440  \n",
      "                                  aten::as_strided         1.03%       6.777ms         1.03%       6.777ms       1.272us           0 B           0 B          5330  \n",
      "                                         aten::cat         0.84%       5.559ms         0.95%       6.283ms      83.767us     242.46 MB     242.46 MB            75  \n",
      "                                   aten::unsqueeze         0.45%       2.992ms         0.61%       4.048ms       5.327us           0 B           0 B           760  \n",
      "--------------------------------------------------  ------------  ------------  ------------  ------------  ------------  ------------  ------------  ------------  \n",
      "Self CPU time total: 658.688ms\n",
      "\n"
     ]
    }
   ],
   "source": [
    "print(f\"--- MTM 模型性能分析 (设备: {device}) ---\")\n",
    "\n",
    "# One profiling run per configured sequence length; the label is derived\n",
    "# from L_S_ORIG so it always matches the actual input length (315, not 210).\n",
    "PROFILES = {\n",
    "    f\"L_ORIG ({L_S_ORIG})\": create_input(L_S_ORIG, BATCH_SIZE),\n",
    "}\n",
    "\n",
    "# Sort the report by GPU time only when a GPU actually ran the model;\n",
    "# on CPU-only runs the cuda_time_total column is all zeros.\n",
    "sort_key = \"cuda_time_total\" if device.type == \"cuda\" else \"cpu_time_total\"\n",
    "\n",
    "for name, input_data in PROFILES.items():\n",
    "    signals, kmer, x_mask, t, x_static = input_data\n",
    "\n",
    "    # Detailed profiling with torch.profiler.\n",
    "    # 8 loop iterations cover the full schedule: 1 wait + 2 warmup + 5 active.\n",
    "    with torch.profiler.profile(\n",
    "        activities=[\n",
    "            torch.profiler.ProfilerActivity.CPU,\n",
    "            torch.profiler.ProfilerActivity.CUDA,\n",
    "        ],\n",
    "        schedule=torch.profiler.schedule(wait=1, warmup=2, active=5),\n",
    "        on_trace_ready=torch.profiler.tensorboard_trace_handler(f'./log/mtm_profile_{name.split(\" \")[0]}'),\n",
    "        record_shapes=True,\n",
    "        profile_memory=True,\n",
    "    ) as prof:\n",
    "        for _ in range(8):\n",
    "            with torch.no_grad():\n",
    "                model(signals, kmer, x_mask, t, x_static, True, True)\n",
    "            prof.step()\n",
    "\n",
    "    # Brief summary table for this configuration.\n",
    "    print(f\"\\n======== 序列长度: {name} (L_S={signals.shape[1]}) ========\")\n",
    "    print(prof.key_averages().table(sort_by=sort_key, row_limit=10))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "deepsignal3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
