{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "3068c6e9",
   "metadata": {},
   "source": [
    "### RNA Inverse Folding Baseline (Based on RDesign)\n",
    "This code establishes a baseline for RNA inverse folding (sequence design from structure), adapted from [A4Bio/RDesign](https://github.com/A4Bio/RDesign). It processes RNA structural coordinates (.npy files) paired with sequences (FASTA), splits data into train/valid/test sets, and creates PyTorch datasets where inputs are 3D backbone coordinates (P, C4', O3' atoms) and targets are corresponding RNA sequences. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "a1a25400",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-05-16T08:20:18.487704Z",
     "iopub.status.busy": "2025-05-16T08:20:18.487493Z",
     "iopub.status.idle": "2025-05-16T08:20:18.490116Z",
     "shell.execute_reply": "2025-05-16T08:20:18.489728Z",
     "shell.execute_reply.started": "2025-05-16T08:20:18.487690Z"
    }
   },
   "outputs": [],
   "source": [
    "# %pip install torch pandas biopython"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "159f7029-9f98-48b1-aa8f-d3ab43894deb",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-05-16T08:20:18.490819Z",
     "iopub.status.busy": "2025-05-16T08:20:18.490571Z",
     "iopub.status.idle": "2025-05-16T08:20:19.882162Z",
     "shell.execute_reply": "2025-05-16T08:20:19.881733Z",
     "shell.execute_reply.started": "2025-05-16T08:20:18.490808Z"
    }
   },
   "outputs": [],
   "source": [
    "from dataclasses import dataclass, field\n",
    "from torch.utils.data import Dataset\n",
    "from torch.utils.data import DataLoader\n",
    "import pandas as pd\n",
    "import random\n",
    "import os\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import numpy as np\n",
    "from torch_scatter import scatter_sum, scatter_softmax\n",
    "import torch.nn.functional as F\n",
    "from typing import List\n",
    "from torch.optim import Adam\n",
    "from tqdm import tqdm\n",
    "from Bio import SeqIO"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f6c9a137",
   "metadata": {},
   "source": [
    "### Data processing\n",
    "This code implements an RNA data processing pipeline for machine learning tasks using PyTorch and Biopython."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "a6c2f4ec-4be4-4b8f-a0c6-d213ca157a86",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-05-16T08:20:19.883379Z",
     "iopub.status.busy": "2025-05-16T08:20:19.883104Z",
     "iopub.status.idle": "2025-05-16T08:20:32.480473Z",
     "shell.execute_reply": "2025-05-16T08:20:32.479967Z",
     "shell.execute_reply.started": "2025-05-16T08:20:19.883360Z"
    }
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 2317/2317 [00:12<00:00, 186.87it/s]\n"
     ]
    }
   ],
   "source": [
    "# Define function to read FASTA files using Biopython\n",
    "def read_fasta_biopython(file_path):\n",
    "    \"\"\"Parse a FASTA file and return a dict mapping record id -> sequence string.\"\"\"\n",
    "    sequences = {}\n",
    "    for record in SeqIO.parse(file_path, \"fasta\"):\n",
    "        sequences[record.id] = str(record.seq)\n",
    "    return sequences\n",
    "\n",
    "SEQS_DIR = \"./RNAdesignv1/train/seqs\"\n",
    "train_file_list = os.listdir(SEQS_DIR)\n",
    "records = []\n",
    "for file in tqdm(train_file_list):\n",
    "    sequences = read_fasta_biopython(os.path.join(SEQS_DIR, file))\n",
    "    # Each FASTA file is expected to hold exactly one record; take the first.\n",
    "    pdb_id, seq = next(iter(sequences.items()))\n",
    "    records.append({\"pdb_id\": pdb_id, \"seq\": seq})\n",
    "\n",
    "data = pd.DataFrame(records)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "3a924975-46ec-49ab-a115-8487f96aa142",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-05-16T08:20:32.481270Z",
     "iopub.status.busy": "2025-05-16T08:20:32.481050Z",
     "iopub.status.idle": "2025-05-16T08:20:32.858866Z",
     "shell.execute_reply": "2025-05-16T08:20:32.858343Z",
     "shell.execute_reply.started": "2025-05-16T08:20:32.481252Z"
    }
   },
   "outputs": [],
   "source": [
    "# Split data into train, validation, and test sets (70/20/10).\n",
    "# Use an explicitly seeded generator so the split is reproducible: the\n",
    "# global seeding() cell runs only later in the notebook, so the original\n",
    "# unseeded np.random.choice produced a different split on every run.\n",
    "rng = np.random.default_rng(2025)\n",
    "split = rng.choice(['train', 'valid', 'test'], size=len(data), p=[0.7, 0.2, 0.1])\n",
    "data['split'] = split\n",
    "train_data = data[data['split']=='train']\n",
    "valid_data = data[data['split']=='valid']\n",
    "test_data = data[data['split']=='test']\n",
    "train_data.to_csv(\"public_train_data.csv\", index=False)\n",
    "valid_data.to_csv(\"public_valid_data.csv\", index=False)\n",
    "test_data.to_csv(\"public_test_data.csv\", index=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "59c76ad2-fd6e-4564-85d9-c3a4617bfb7c",
   "metadata": {
    "ExecutionIndicator": {
     "show": false
    },
    "execution": {
     "iopub.execute_input": "2025-05-16T08:20:32.859695Z",
     "iopub.status.busy": "2025-05-16T08:20:32.859450Z",
     "iopub.status.idle": "2025-05-16T08:20:32.867411Z",
     "shell.execute_reply": "2025-05-16T08:20:32.866960Z",
     "shell.execute_reply.started": "2025-05-16T08:20:32.859676Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "# Define Configuration Classes\n",
    "@dataclass\n",
    "class DataConfig:\n",
    "    # All three splits read coordinates from the same directory; the CSVs\n",
    "    # decide which pdb_ids belong to each split.\n",
    "    train_npy_data_dir: str = './RNAdesignv1/train/coords'\n",
    "    train_data_path: str = 'public_train_data.csv'\n",
    "    valid_npy_data_dir: str = './RNAdesignv1/train/coords'\n",
    "    valid_data_path: str = 'public_valid_data.csv'\n",
    "    test_npy_data_dir: str = './RNAdesignv1/train/coords'\n",
    "    test_data_path: str = 'public_test_data.csv'\n",
    "\n",
    "@dataclass\n",
    "class ModelConfig:\n",
    "    smoothing: float = 0.1\n",
    "    hidden: int = 128\n",
    "    vocab_size: int = 4  # explicitly an int: one class per RNA base (A, U, C, G)\n",
    "    k_neighbors: int = 30  # explicitly an int: k for the k-NN graph\n",
    "    dropout: float = 0.1\n",
    "    node_feat_types: List[str] = field(default_factory=lambda: ['angle', 'distance', 'direction'])  # default_factory avoids a shared mutable default\n",
    "    edge_feat_types: List[str] = field(default_factory=lambda: ['orientation', 'distance', 'direction'])  # same as above\n",
    "    num_encoder_layers: int = 3\n",
    "    num_decoder_layers: int = 3  # corrected to an integer (stray decimal point removed)\n",
    "    \n",
    "# Training configuration (epoch/lr are commented out: swept via grid search elsewhere)\n",
    "@dataclass\n",
    "class TrainConfig:\n",
    "    batch_size: int = 16\n",
    "    # epoch: int = 60       #epoch: int = 30\n",
    "    # lr: float = 0.001\n",
    "    output_dir: str = 'ckpts/public_v2'\n",
    "    # ckpt_path: str = 'ckpts/public_v2/best.pt'\n",
    "\n",
    "@dataclass\n",
    "class Config:\n",
    "    pipeline: str = 'train'\n",
    "    seed: int = 2025\n",
    "    device: str = 'cuda:0'\n",
    "    data_config: DataConfig = field(default_factory=DataConfig)  # default_factory: nested dataclass defaults must not be shared instances\n",
    "    model_config: ModelConfig = field(default_factory=ModelConfig)  # same as above\n",
    "    train_config: TrainConfig = field(default_factory=TrainConfig)  # same as above\n",
    "    # data_config: DataConfig = DataConfig()\n",
    "    # model_config: ModelConfig = ModelConfig()\n",
    "    # train_config: TrainConfig = TrainConfig()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "75647323-a3f8-400f-9d64-701aa850ae81",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-05-16T08:20:32.868276Z",
     "iopub.status.busy": "2025-05-16T08:20:32.867893Z",
     "iopub.status.idle": "2025-05-16T08:20:32.871691Z",
     "shell.execute_reply": "2025-05-16T08:20:32.871143Z",
     "shell.execute_reply.started": "2025-05-16T08:20:32.868256Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "def plot_heatmap(results_matrix, epochs, lrs, out_path=\"grid_search_heatmap.png\"):\n",
    "    \"\"\"Save a heatmap of grid-search recovery rates to disk.\n",
    "\n",
    "    Args:\n",
    "        results_matrix: 2D array of recovery rates, rows indexed by epochs,\n",
    "            columns by learning rates.\n",
    "        epochs: y-axis tick labels (epoch values searched).\n",
    "        lrs: x-axis tick labels (learning rates searched).\n",
    "        out_path: output image file (default matches the original behavior).\n",
    "    \"\"\"\n",
    "    # Imported locally so the notebook runs without seaborn unless plotting is used.\n",
    "    import seaborn as sns\n",
    "    import matplotlib.pyplot as plt\n",
    "\n",
    "    plt.figure(figsize=(10, 6))\n",
    "    sns.heatmap(results_matrix, annot=True, fmt=\".2%\",\n",
    "                xticklabels=lrs, yticklabels=epochs,\n",
    "                cmap=\"YlGnBu\", cbar_kws={'label': 'Recovery Rate'})\n",
    "    plt.title(\"Hyperparameter Grid Search Results\")\n",
    "    plt.xlabel(\"Learning Rate\")\n",
    "    plt.ylabel(\"Epochs\")\n",
    "    plt.tight_layout()\n",
    "    plt.savefig(out_path)\n",
    "    plt.close()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "ef567b16-a1f2-4a00-bc6a-a7d6daceb6cc",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-05-16T08:20:32.872476Z",
     "iopub.status.busy": "2025-05-16T08:20:32.872260Z",
     "iopub.status.idle": "2025-05-16T08:20:32.878485Z",
     "shell.execute_reply": "2025-05-16T08:20:32.877981Z",
     "shell.execute_reply.started": "2025-05-16T08:20:32.872462Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "# Define RNADataset Class and Seeding Function\n",
    "class RNADataset(Dataset):\n",
    "    def __init__(self, data_path, npy_dir):\n",
    "        super(RNADataset, self).__init__()\n",
    "        self.data = pd.read_csv(data_path)\n",
    "        self.npy_dir = npy_dir\n",
    "        self.seq_list = self.data['seq'].to_list()\n",
    "        self.name_list = self.data['pdb_id'].to_list()\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.name_list)\n",
    "    \n",
    "    def plot_heatmap(results_matrix, epochs, lrs):\n",
    "        import seaborn as sns\n",
    "        import matplotlib.pyplot as plt\n",
    "\n",
    "        plt.figure(figsize=(10, 6))\n",
    "        sns.heatmap(results_matrix, annot=True, fmt=\".2%\", \n",
    "                    xticklabels=lrs, yticklabels=epochs,\n",
    "                    cmap=\"YlGnBu\", cbar_kws={'label': 'Recovery Rate'})\n",
    "        plt.title(\"Hyperparameter Grid Search Results\")\n",
    "        plt.xlabel(\"Learning Rate\")\n",
    "        plt.ylabel(\"Epochs\")\n",
    "        plt.tight_layout()\n",
    "        plt.savefig(\"grid_search_heatmap.png\")\n",
    "        plt.close()\n",
    "    \n",
    "    def __getitem__(self, idx):\n",
    "        seq = self.seq_list[idx]\n",
    "        pdb_id = self.name_list[idx]\n",
    "        coords = np.load(os.path.join(self.npy_dir, pdb_id + '.npy'))\n",
    "\n",
    "        feature = {\n",
    "            \"name\": pdb_id,\n",
    "            \"seq\": seq,\n",
    "            \"coords\": {\n",
    "                \"P\": coords[:, 0, :],\n",
    "                \"O5'\": coords[:, 1, :],\n",
    "                \"C5'\": coords[:, 2, :],\n",
    "                \"C4'\": coords[:, 3, :],\n",
    "                \"C3'\": coords[:, 4, :],\n",
    "                \"O3'\": coords[:, 5, :],\n",
    "            }\n",
    "        }\n",
    "\n",
    "        return feature"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "bffd6013-4f5b-42bf-9b05-42bd6041daa9",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-05-16T08:20:32.880246Z",
     "iopub.status.busy": "2025-05-16T08:20:32.880017Z",
     "iopub.status.idle": "2025-05-16T08:20:32.887914Z",
     "shell.execute_reply": "2025-05-16T08:20:32.887494Z",
     "shell.execute_reply.started": "2025-05-16T08:20:32.880229Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "seeding done!!!\n"
     ]
    }
   ],
   "source": [
    "def seeding(seed):\n",
    "    \"\"\"Seed numpy, random, PYTHONHASHSEED, and torch (CPU and all CUDA devices).\"\"\"\n",
    "    np.random.seed(seed)\n",
    "    random.seed(seed)\n",
    "    os.environ['PYTHONHASHSEED'] = str(seed)\n",
    "    torch.manual_seed(seed)\n",
    "    torch.cuda.manual_seed(seed)\n",
    "    torch.cuda.manual_seed_all(seed)\n",
    "    # Force deterministic cuDNN kernel selection (disables autotuning).\n",
    "    torch.backends.cudnn.deterministic = True\n",
    "    torch.backends.cudnn.benchmark = False\n",
    "    print('seeding done!!!')\n",
    "# NOTE(review): the train/valid/test split cell above ran before this\n",
    "# seeding, so that split is not governed by config.seed.\n",
    "config = Config()\n",
    "data_config = config.data_config\n",
    "train_config = config.train_config\n",
    "seeding(config.seed)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "85a61f93-e518-4d50-aa87-9e4bc1f74516",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-05-16T08:20:32.888678Z",
     "iopub.status.busy": "2025-05-16T08:20:32.888441Z",
     "iopub.status.idle": "2025-05-16T08:20:32.915211Z",
     "shell.execute_reply": "2025-05-16T08:20:32.914760Z",
     "shell.execute_reply.started": "2025-05-16T08:20:32.888664Z"
    }
   },
   "outputs": [],
   "source": [
    "\n",
    "# Instantiate the three dataset splits. All three point at the same coords\n",
    "# directory; the split CSVs select which pdb_ids belong to each set.\n",
    "train_dataset = RNADataset(\n",
    "    data_path=data_config.train_data_path,\n",
    "    npy_dir=data_config.train_npy_data_dir,\n",
    ")\n",
    "valid_dataset = RNADataset(\n",
    "    data_path=data_config.valid_data_path,\n",
    "    npy_dir=data_config.valid_npy_data_dir,\n",
    ")\n",
    "test_dataset = RNADataset(\n",
    "    data_path=data_config.test_data_path,\n",
    "    npy_dir=data_config.test_npy_data_dir,\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5dbc500a",
   "metadata": {},
   "source": [
    "### RNA Inverse Folding Framework \n",
    "This framework integrates geometric featurization with GNNs to predict RNA sequences compatible with input 3D conformations."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "f7d5fa3e-c392-49ee-bde9-ab96c8650dbf",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-05-16T08:20:32.915949Z",
     "iopub.status.busy": "2025-05-16T08:20:32.915697Z",
     "iopub.status.idle": "2025-05-16T08:20:32.924245Z",
     "shell.execute_reply": "2025-05-16T08:20:32.923774Z",
     "shell.execute_reply.started": "2025-05-16T08:20:32.915925Z"
    }
   },
   "outputs": [],
   "source": [
    "def featurize(batch):\n",
    "    \"\"\"Collate a list of dataset items into padded batch tensors.\n",
    "\n",
    "    Returns (X, S, mask, lengths, names): X is (B, L_max, 6, 3) float32\n",
    "    coordinates, S is (B, L_max) long indices into 'AUCG', mask is\n",
    "    (B, L_max) float32 with 1 for valid residues, lengths are the original\n",
    "    per-item sequence lengths, and names are the pdb ids.\n",
    "    \"\"\"\n",
    "    alphabet = 'AUCG'\n",
    "    B = len(batch)\n",
    "    lengths = np.array([len(b['seq']) for b in batch], dtype=np.int32)\n",
    "    L_max = max([len(b['seq']) for b in batch])\n",
    "    X = np.zeros([B, L_max, 6, 3])\n",
    "    S = np.zeros([B, L_max], dtype=np.int32)\n",
    "    names = []\n",
    "\n",
    "    # Build the batch\n",
    "    for i, b in enumerate(batch):\n",
    "        #x = np.stack([b['coords'][c] for c in [\"P\", \"O5'\", \"C5'\", \"C4'\", \"C3'\", \"O3'\"]], 1)\n",
    "        # NaNs inside real residues are zeroed here; NaN padding added below\n",
    "        # marks positions beyond the sequence end.\n",
    "        x = np.stack([np.nan_to_num(b['coords'][c], nan=0.0) for c in [\"P\", \"O5'\", \"C5'\", \"C4'\", \"C3'\", \"O3'\"]], 1)\n",
    "        l = len(b['seq'])\n",
    "        # Pad to L_max with NaN so the padded positions fail the finite test.\n",
    "        x_pad = np.pad(x, [[0, L_max-l], [0,0], [0,0]], 'constant', constant_values=(np.nan, ))\n",
    "        X[i,:,:,:] = x_pad\n",
    "        indices = np.asarray([alphabet.index(a) for a in b['seq']], dtype=np.int32)\n",
    "        S[i, :l] = indices\n",
    "        names.append(b['name'])\n",
    "    \n",
    "    # A residue is valid iff all of its 6x3 coordinates are finite.\n",
    "    mask = np.isfinite(np.sum(X,(2,3))).astype(np.float32)\n",
    "    numbers = np.sum(mask, axis=1).astype(np.int32)\n",
    "    S_new = np.zeros_like(S)\n",
    "    X_new = np.zeros_like(X)+np.nan\n",
    "    # Compact valid residues to the front of each row.\n",
    "    for i, n in enumerate(numbers):\n",
    "        X_new[i,:n,::] = X[i][mask[i]==1]\n",
    "        S_new[i,:n] = S[i][mask[i]==1]\n",
    "\n",
    "    X = X_new\n",
    "    S = S_new\n",
    "    isnan = np.isnan(X)\n",
    "    # Recompute the mask after compaction, then zero the remaining NaN padding.\n",
    "    mask = np.isfinite(np.sum(X,(2,3))).astype(np.float32)\n",
    "    X[isnan] = 0.\n",
    "    #mask = np.isfinite(np.sum(X,(2,3))).astype(np.float32)\n",
    "    # NOTE(review): `lengths` keeps the pre-compaction sequence lengths, which\n",
    "    # can exceed the number of valid residues in a row — confirm downstream use.\n",
    "    # Conversion\n",
    "    S = torch.from_numpy(S).to(dtype=torch.long)\n",
    "    X = torch.from_numpy(X).to(dtype=torch.float32)\n",
    "    mask = torch.from_numpy(mask).to(dtype=torch.float32)\n",
    "    return X, S, mask, lengths, names"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "0d8d9a12-eb92-404f-8cd4-0998d310c1f5",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-05-16T08:20:32.924966Z",
     "iopub.status.busy": "2025-05-16T08:20:32.924780Z",
     "iopub.status.idle": "2025-05-16T08:20:32.928473Z",
     "shell.execute_reply": "2025-05-16T08:20:32.927966Z",
     "shell.execute_reply.started": "2025-05-16T08:20:32.924951Z"
    }
   },
   "outputs": [],
   "source": [
    "# Wrap the datasets in DataLoaders; featurize() collates variable-length\n",
    "# items into padded tensors. Only the training loader shuffles.\n",
    "train_loader = DataLoader(train_dataset,\n",
    "        batch_size=train_config.batch_size,\n",
    "        shuffle=True,\n",
    "        num_workers=0,\n",
    "        collate_fn=featurize)\n",
    "\n",
    "valid_loader = DataLoader(valid_dataset,\n",
    "        batch_size=train_config.batch_size,\n",
    "        shuffle=False,\n",
    "        num_workers=0,\n",
    "        collate_fn=featurize)\n",
    "\n",
    "test_loader = DataLoader(test_dataset,\n",
    "        batch_size=train_config.batch_size,\n",
    "        shuffle=False,\n",
    "        num_workers=0,\n",
    "        collate_fn=featurize)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "1ed12564-029e-496c-8826-29f98c6b580c",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-05-16T08:20:32.929265Z",
     "iopub.status.busy": "2025-05-16T08:20:32.928978Z",
     "iopub.status.idle": "2025-05-16T08:20:32.938880Z",
     "shell.execute_reply": "2025-05-16T08:20:32.938454Z",
     "shell.execute_reply.started": "2025-05-16T08:20:32.929248Z"
    }
   },
   "outputs": [],
   "source": [
    "def gather_edges(edges, neighbor_idx):\n",
    "    neighbors = neighbor_idx.unsqueeze(-1).expand(-1, -1, -1, edges.size(-1))\n",
    "    return torch.gather(edges, 2, neighbors)\n",
    "\n",
    "def gather_nodes(nodes, neighbor_idx):\n",
    "    neighbors_flat = neighbor_idx.view((neighbor_idx.shape[0], -1))\n",
    "    neighbors_flat = neighbors_flat.unsqueeze(-1).expand(-1, -1, nodes.size(2))\n",
    "    neighbor_features = torch.gather(nodes, 1, neighbors_flat)\n",
    "    neighbor_features = neighbor_features.view(list(neighbor_idx.shape)[:3] + [-1])\n",
    "    return neighbor_features\n",
    "\n",
    "def gather_nodes_t(nodes, neighbor_idx):\n",
    "    idx_flat = neighbor_idx.unsqueeze(-1).expand(-1, -1, nodes.size(2))\n",
    "    return torch.gather(nodes, 1, idx_flat)\n",
    "\n",
    "def cat_neighbors_nodes(h_nodes, h_neighbors, E_idx):\n",
    "    h_nodes = gather_nodes(h_nodes, E_idx)\n",
    "    return torch.cat([h_neighbors, h_nodes], -1)\n",
    "\n",
    "\n",
    "class MPNNLayer(nn.Module):\n",
    "    \"\"\"Message-passing layer: an edge-feature MLP produces messages that are\n",
    "    scatter-summed onto nodes, followed by residual + LayerNorm and a\n",
    "    position-wise feed-forward block.\n",
    "    \"\"\"\n",
    "    def __init__(self, num_hidden, num_in, dropout=0.1, num_heads=None, scale=30):\n",
    "        super(MPNNLayer, self).__init__()\n",
    "        self.num_hidden = num_hidden\n",
    "        self.num_in = num_in\n",
    "        self.scale = scale  # divisor applied to the summed messages\n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "        self.norm1 = nn.LayerNorm(num_hidden)\n",
    "        self.norm2 = nn.LayerNorm(num_hidden)\n",
    "\n",
    "        self.W1 = nn.Linear(num_hidden + num_in, num_hidden, bias=True)\n",
    "        self.W2 = nn.Linear(num_hidden, num_hidden, bias=True)\n",
    "        self.W3 = nn.Linear(num_hidden, num_hidden, bias=True)\n",
    "        self.act = nn.ReLU()\n",
    "\n",
    "        # Position-wise feed-forward (4x expansion), applied after aggregation.\n",
    "        self.dense = nn.Sequential(\n",
    "            nn.Linear(num_hidden, num_hidden*4),\n",
    "            nn.ReLU(),\n",
    "            nn.Linear(num_hidden*4, num_hidden)\n",
    "        )\n",
    "\n",
    "    def forward(self, h_V, h_E, edge_idx, batch_id=None):\n",
    "        \"\"\"h_V: (num_nodes, H) node features; h_E: (num_edges, H + num_in)\n",
    "        edge features; edge_idx: (2, num_edges) flat node indices.\n",
    "        Returns updated node features, same shape as h_V.\n",
    "        \"\"\"\n",
    "        src_idx, dst_idx = edge_idx[0], edge_idx[1]\n",
    "        # Three-layer MLP on edge features yields one message per edge.\n",
    "        h_message = self.W3(self.act(self.W2(self.act(self.W1(h_E)))))\n",
    "        # Sum messages per node (indexed by edge_idx[0]) and rescale.\n",
    "        dh = scatter_sum(h_message, src_idx, dim=0) / self.scale\n",
    "        h_V = self.norm1(h_V + self.dropout(dh))\n",
    "        # Feed-forward with residual connection.\n",
    "        dh = self.dense(h_V)\n",
    "        h_V = self.norm2(h_V + self.dropout(dh))\n",
    "        return h_V\n",
    "\n",
    "class Normalize(nn.Module):\n",
    "    def __init__(self, features, epsilon=1e-6):\n",
    "        super(Normalize, self).__init__()\n",
    "        self.gain = nn.Parameter(torch.ones(features))\n",
    "        self.bias = nn.Parameter(torch.zeros(features))\n",
    "        self.epsilon = epsilon\n",
    "\n",
    "    def forward(self, x, dim=-1):\n",
    "        mu = x.mean(dim, keepdim=True)\n",
    "        sigma = torch.sqrt(x.var(dim, keepdim=True) + self.epsilon)\n",
    "        gain = self.gain\n",
    "        bias = self.bias\n",
    "        if dim != -1:\n",
    "            shape = [1] * len(mu.size())\n",
    "            shape[dim] = self.gain.size()[0]\n",
    "            gain = gain.view(shape)\n",
    "            bias = bias.view(shape)\n",
    "        return gain * (x - mu) / (sigma + self.epsilon) + bias\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "102ead4b-e635-464b-a3c2-68bda9f2e9fc",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-05-16T08:20:32.939680Z",
     "iopub.status.busy": "2025-05-16T08:20:32.939391Z",
     "iopub.status.idle": "2025-05-16T08:20:32.943122Z",
     "shell.execute_reply": "2025-05-16T08:20:32.942663Z",
     "shell.execute_reply.started": "2025-05-16T08:20:32.939664Z"
    }
   },
   "outputs": [],
   "source": [
    "# Input dimensionality contributed by each geometric feature type;\n",
    "# consumed by RNAFeatures to size its node/edge embedding layers.\n",
    "feat_dims = {\n",
    "    'node': {\n",
    "        'angle': 12,\n",
    "        'distance': 80,\n",
    "        'direction': 9,\n",
    "    },\n",
    "    'edge': {\n",
    "        'orientation': 4,\n",
    "        'distance': 96,\n",
    "        'direction': 15,\n",
    "    }\n",
    "}\n",
    "\n",
    "\n",
    "def nan_to_num(tensor, nan=0.0):\n",
    "    \"\"\"Replace NaN entries with `nan`; mutates and returns the input tensor.\"\"\"\n",
    "    idx = torch.isnan(tensor)\n",
    "    tensor[idx] = nan\n",
    "    return tensor\n",
    "\n",
    "def _normalize(tensor, dim=-1):\n",
    "    \"\"\"L2-normalize along `dim`; all-zero slices (0/0 -> NaN) become 0.\"\"\"\n",
    "    return nan_to_num(\n",
    "        torch.div(tensor, torch.norm(tensor, dim=dim, keepdim=True)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "fddb2bad-8121-404e-bd34-5f1be813f0de",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-05-16T08:20:32.943801Z",
     "iopub.status.busy": "2025-05-16T08:20:32.943627Z",
     "iopub.status.idle": "2025-05-16T08:20:32.973154Z",
     "shell.execute_reply": "2025-05-16T08:20:32.972661Z",
     "shell.execute_reply.started": "2025-05-16T08:20:32.943787Z"
    }
   },
   "outputs": [],
   "source": [
    "class RNAFeatures(nn.Module):\n",
    "    def __init__(self, edge_features, node_features, node_feat_types=[], edge_feat_types=[], num_rbf=16, top_k=30, augment_eps=0., dropout=0.1, args=None):\n",
    "        super(RNAFeatures, self).__init__()\n",
    "        \"\"\"Extract RNA Features\"\"\"\n",
    "        self.edge_features = edge_features\n",
    "        self.node_features = node_features\n",
    "        self.top_k = top_k\n",
    "        self.augment_eps = augment_eps \n",
    "        self.num_rbf = num_rbf\n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "        self.node_feat_types = node_feat_types\n",
    "        self.edge_feat_types = edge_feat_types\n",
    "\n",
    "        node_in = sum([feat_dims['node'][feat] for feat in node_feat_types])\n",
    "        edge_in = sum([feat_dims['edge'][feat] for feat in edge_feat_types])\n",
    "        self.node_embedding = nn.Linear(node_in,  node_features, bias=True)\n",
    "        self.edge_embedding = nn.Linear(edge_in, edge_features, bias=True)\n",
    "        self.norm_nodes = Normalize(node_features)\n",
    "        self.norm_edges = Normalize(edge_features)\n",
    "        \n",
    "    def _dist(self, X, mask, eps=1E-6):\n",
    "        mask_2D = torch.unsqueeze(mask,1) * torch.unsqueeze(mask,2)\n",
    "        dX = torch.unsqueeze(X,1) - torch.unsqueeze(X,2)\n",
    "        D = (1. - mask_2D)*10000 + mask_2D* torch.sqrt(torch.sum(dX**2, 3) + eps)\n",
    "\n",
    "        D_max, _ = torch.max(D, -1, keepdim=True)\n",
    "        D_adjust = D + (1. - mask_2D) * (D_max+1)\n",
    "        D_neighbors, E_idx = torch.topk(D_adjust, min(self.top_k, D_adjust.shape[-1]), dim=-1, largest=False)\n",
    "        return D_neighbors, E_idx\n",
    "    \n",
    "    def _rbf(self, D):\n",
    "        D_min, D_max, D_count = 0., 20., self.num_rbf\n",
    "        D_mu = torch.linspace(D_min, D_max, D_count, device=D.device)\n",
    "        D_mu = D_mu.view([1,1,1,-1])\n",
    "        D_sigma = (D_max - D_min) / D_count\n",
    "        D_expand = torch.unsqueeze(D, -1)\n",
    "        return torch.exp(-((D_expand - D_mu) / D_sigma)**2)\n",
    "    \n",
    "    def _get_rbf(self, A, B, E_idx=None, num_rbf=16):\n",
    "        if E_idx is not None:\n",
    "            D_A_B = torch.sqrt(torch.sum((A[:,:,None,:] - B[:,None,:,:])**2,-1) + 1e-6)\n",
    "            D_A_B_neighbors = gather_edges(D_A_B[:,:,:,None], E_idx)[:,:,:,0]\n",
    "            RBF_A_B = self._rbf(D_A_B_neighbors)\n",
    "        else:\n",
    "            D_A_B = torch.sqrt(torch.sum((A[:,:,None,:] - B[:,:,None,:])**2,-1) + 1e-6)\n",
    "            RBF_A_B = self._rbf(D_A_B)\n",
    "        return RBF_A_B\n",
    "    \n",
    "    def _quaternions(self, R):\n",
    "        diag = torch.diagonal(R, dim1=-2, dim2=-1)\n",
    "        Rxx, Ryy, Rzz = diag.unbind(-1)\n",
    "        magnitudes = 0.5 * torch.sqrt(torch.abs(1 + torch.stack([\n",
    "              Rxx - Ryy - Rzz, \n",
    "            - Rxx + Ryy - Rzz, \n",
    "            - Rxx - Ryy + Rzz\n",
    "        ], -1)))\n",
    "        _R = lambda i,j: R[:,:,:,i,j]\n",
    "        signs = torch.sign(torch.stack([\n",
    "            _R(2,1) - _R(1,2),\n",
    "            _R(0,2) - _R(2,0),\n",
    "            _R(1,0) - _R(0,1)\n",
    "        ], -1))\n",
    "        xyz = signs * magnitudes\n",
    "        w = torch.sqrt(F.relu(1 + diag.sum(-1, keepdim=True))) / 2.\n",
    "        Q = torch.cat((xyz, w), -1)\n",
    "        Q = F.normalize(Q, dim=-1)\n",
    "        return Q\n",
    "    \n",
    "    def _orientations_coarse(self, X, E_idx, eps=1e-6):\n",
    "        V = X.clone()\n",
    "        X = X[:,:,:6,:].reshape(X.shape[0], 6*X.shape[1], 3) \n",
    "        dX = X[:,1:,:] - X[:,:-1,:]\n",
    "        U = _normalize(dX, dim=-1)\n",
    "        u_0, u_1 = U[:,:-2,:], U[:,1:-1,:]\n",
    "        n_0 = _normalize(torch.cross(u_0, u_1), dim=-1)\n",
    "        b_1 = _normalize(u_0 - u_1, dim=-1)\n",
    "        \n",
    "        # select C3'\n",
    "        n_0 = n_0[:,4::6,:] \n",
    "        b_1 = b_1[:,4::6,:]\n",
    "        X = X[:,4::6,:]\n",
    "\n",
    "        Q = torch.stack((b_1, n_0, torch.cross(b_1, n_0)), 2)\n",
    "        Q = Q.view(list(Q.shape[:2]) + [9])\n",
    "        Q = F.pad(Q, (0,0,0,1), 'constant', 0) # [16, 464, 9]\n",
    "\n",
    "        Q_neighbors = gather_nodes(Q, E_idx) # [16, 464, 30, 9]\n",
    "        P_neighbors = gather_nodes(V[:,:,0,:], E_idx) # [16, 464, 30, 3]\n",
    "        O5_neighbors = gather_nodes(V[:,:,1,:], E_idx)\n",
    "        C5_neighbors = gather_nodes(V[:,:,2,:], E_idx)\n",
    "        C4_neighbors = gather_nodes(V[:,:,3,:], E_idx)\n",
    "        O3_neighbors = gather_nodes(V[:,:,5,:], E_idx)\n",
    "        \n",
    "        Q = Q.view(list(Q.shape[:2]) + [3,3]).unsqueeze(2) # [16, 464, 1, 3, 3]\n",
    "        Q_neighbors = Q_neighbors.view(list(Q_neighbors.shape[:3]) + [3,3]) # [16, 464, 30, 3, 3]\n",
    "\n",
    "        dX = torch.stack([P_neighbors,O5_neighbors,C5_neighbors,C4_neighbors,O3_neighbors], dim=3) - X[:,:,None,None,:] # [16, 464, 30, 3]\n",
    "        dU = torch.matmul(Q[:,:,:,None,:,:], dX[...,None]).squeeze(-1) # [16, 464, 30, 3] 邻居的相对坐标\n",
    "        B, N, K = dU.shape[:3]\n",
    "        E_direct = _normalize(dU, dim=-1)\n",
    "        E_direct = E_direct.reshape(B, N, K,-1)\n",
    "        R = torch.matmul(Q.transpose(-1,-2), Q_neighbors)\n",
    "        E_orient = self._quaternions(R)\n",
    "        \n",
    "        dX_inner = V[:,:,[0,2,3],:] - X.unsqueeze(-2)\n",
    "        dU_inner = torch.matmul(Q, dX_inner.unsqueeze(-1)).squeeze(-1)\n",
    "        dU_inner = _normalize(dU_inner, dim=-1)\n",
    "        V_direct = dU_inner.reshape(B,N,-1)\n",
    "        return V_direct, E_direct, E_orient\n",
    "    \n",
    "    def _dihedrals(self, X, eps=1e-7):\n",
    "        # P, O5', C5', C4', C3', O3'\n",
    "        X = X[:,:,:6,:].reshape(X.shape[0], 6*X.shape[1], 3)\n",
    "\n",
    "        # Shifted slices of unit vectors\n",
    "        # https://iupac.qmul.ac.uk/misc/pnuc2.html#220\n",
    "        # https://x3dna.org/highlights/torsion-angles-of-nucleic-acid-structures\n",
    "        # alpha:   O3'_{i-1} P_i O5'_i C5'_i\n",
    "        # beta:    P_i O5'_i C5'_i C4'_i\n",
    "        # gamma:   O5'_i C5'_i C4'_i C3'_i\n",
    "        # delta:   C5'_i C4'_i C3'_i O3'_i\n",
    "        # epsilon: C4'_i C3'_i O3'_i P_{i+1}\n",
    "        # zeta:    C3'_i O3'_i P_{i+1} O5'_{i+1} \n",
    "        # What's more:\n",
    "        #   chi: C1' - N9 \n",
    "        #   chi is different for (C, T, U) and (A, G) https://x3dna.org/highlights/the-chi-x-torsion-angle-characterizes-base-sugar-relative-orientation\n",
    "\n",
    "        dX = X[:, 5:, :] - X[:, :-5, :] # O3'-P, P-O5', O5'-C5', C5'-C4', ...\n",
    "        U = F.normalize(dX, dim=-1)\n",
    "        u_2 = U[:,:-2,:]  # O3'-P, P-O5', ...\n",
    "        u_1 = U[:,1:-1,:] # P-O5', O5'-C5', ...\n",
    "        u_0 = U[:,2:,:]   # O5'-C5', C5'-C4', ...\n",
    "        # Backbone normals\n",
    "        n_2 = F.normalize(torch.cross(u_2, u_1), dim=-1)\n",
    "        n_1 = F.normalize(torch.cross(u_1, u_0), dim=-1)\n",
    "\n",
    "        # Angle between normals\n",
    "        cosD = (n_2 * n_1).sum(-1)\n",
    "        cosD = torch.clamp(cosD, -1+eps, 1-eps)\n",
    "        D = torch.sign((u_2 * n_1).sum(-1)) * torch.acos(cosD)\n",
    "        \n",
    "        D = F.pad(D, (3,4), 'constant', 0)\n",
    "        D = D.view((D.size(0), D.size(1) //6, 6))\n",
    "        return torch.cat((torch.cos(D), torch.sin(D)), 2) # return D_features\n",
    "    \n",
    "    def forward(self, X, S, mask):\n",
    "        \"\"\"Featurize padded RNA backbones into a flat sparse graph.\n",
    "\n",
    "        Args:\n",
    "            X: [B, N, 6, 3] per-residue backbone atom coordinates; the 3rd\n",
    "               dim is indexed 0..5 below as P, O5', C5', C4', C3', O3'.\n",
    "            S: [B, N] integer sequence labels.\n",
    "            mask: [B, N] mask, 1 for valid residues, 0 for padding.\n",
    "\n",
    "        Returns:\n",
    "            X: coordinates of the valid residues only.\n",
    "            S: labels of the valid residues only (flattened).\n",
    "            h_V: embedded node features, one row per valid residue.\n",
    "            h_E: embedded edge features, one row per valid edge.\n",
    "            E_idx: [2, E] (dst, src) indices into the flattened node list.\n",
    "            batch_id: batch/graph id of each valid residue.\n",
    "        \"\"\"\n",
    "        # Data augmentation: jitter coordinates with Gaussian noise (train only)\n",
    "        if self.training and self.augment_eps > 0:\n",
    "            X = X + self.augment_eps * torch.randn_like(X)\n",
    "\n",
    "        # Build k-Nearest Neighbors graph\n",
    "        B, N, _,_ = X.shape\n",
    "        # P, O5', C5', C4', C3', O3'\n",
    "        atom_P = X[:, :, 0, :]\n",
    "        atom_O5_ = X[:, :, 1, :]\n",
    "        atom_C5_ = X[:, :, 2, :]\n",
    "        atom_C4_ = X[:, :, 3, :]\n",
    "        atom_C3_ = X[:, :, 4, :] \n",
    "        atom_O3_ = X[:, :, 5, :]\n",
    "\n",
    "        # kNN neighborhoods are computed on the phosphate (P) positions only\n",
    "        X_backbone = atom_P\n",
    "        D_neighbors, E_idx = self._dist(X_backbone, mask)        \n",
    "\n",
    "        mask_bool = (mask==1)\n",
    "        # An edge is kept only when BOTH of its endpoints are valid residues\n",
    "        mask_attend = gather_nodes(mask.unsqueeze(-1), E_idx).squeeze(-1)\n",
    "        mask_attend = (mask.unsqueeze(-1) * mask_attend) == 1\n",
    "        # Helpers that drop padded rows, flattening [B, N, ...] -> [M, ...]\n",
    "        edge_mask_select = lambda x: torch.masked_select(x, mask_attend.unsqueeze(-1)).reshape(-1,x.shape[-1])\n",
    "        node_mask_select = lambda x: torch.masked_select(x, mask_bool.unsqueeze(-1)).reshape(-1, x.shape[-1])\n",
    "\n",
    "        # node features\n",
    "        h_V = []\n",
    "        # angle\n",
    "        V_angle = node_mask_select(self._dihedrals(X))\n",
    "        # distance: RBF-encoded intra-residue distances of each atom to P\n",
    "        node_list = ['O5_-P', 'C5_-P', 'C4_-P', 'C3_-P', 'O3_-P']\n",
    "        V_dist = []\n",
    "        \n",
    "        for pair in node_list:\n",
    "            atom1, atom2 = pair.split('-')\n",
    "            # vars() resolves e.g. 'atom_O5_' to the local tensor defined above\n",
    "            V_dist.append(node_mask_select(self._get_rbf(vars()['atom_' + atom1], vars()['atom_' + atom2], None, self.num_rbf).squeeze()))\n",
    "        V_dist = torch.cat(tuple(V_dist), dim=-1).squeeze()\n",
    "        # direction\n",
    "        V_direct, E_direct, E_orient = self._orientations_coarse(X, E_idx)\n",
    "        V_direct = node_mask_select(V_direct)\n",
    "        E_direct, E_orient = list(map(lambda x: edge_mask_select(x), [E_direct, E_orient]))\n",
    "\n",
    "        # edge features\n",
    "        h_E = []\n",
    "        # dist: RBF-encoded inter-residue distances along kNN edges\n",
    "        edge_list = ['P-P', 'O5_-P', 'C5_-P', 'C4_-P', 'C3_-P', 'O3_-P']\n",
    "        E_dist = [] \n",
    "        for pair in edge_list:\n",
    "            atom1, atom2 = pair.split('-')\n",
    "            E_dist.append(edge_mask_select(self._get_rbf(vars()['atom_' + atom1], vars()['atom_' + atom2], E_idx, self.num_rbf)))\n",
    "        E_dist = torch.cat(tuple(E_dist), dim=-1)\n",
    "\n",
    "        # Assemble only the feature groups enabled in the configuration\n",
    "        if 'angle' in self.node_feat_types:\n",
    "            h_V.append(V_angle)\n",
    "        if 'distance' in self.node_feat_types:\n",
    "            h_V.append(V_dist)\n",
    "        if 'direction' in self.node_feat_types:\n",
    "            h_V.append(V_direct)\n",
    "\n",
    "        if 'orientation' in self.edge_feat_types:\n",
    "            h_E.append(E_orient)\n",
    "        if 'distance' in self.edge_feat_types:\n",
    "            h_E.append(E_dist)\n",
    "        if 'direction' in self.edge_feat_types:\n",
    "            h_E.append(E_direct)\n",
    "            \n",
    "        # Embed the nodes\n",
    "        h_V = self.norm_nodes(self.node_embedding(torch.cat(h_V, dim=-1)))\n",
    "        h_E = self.norm_edges(self.edge_embedding(torch.cat(h_E, dim=-1)))\n",
    "\n",
    "        # prepare the variables to return\n",
    "        S = torch.masked_select(S, mask_bool)\n",
    "        # Per-sample offsets that turn per-batch [B, N] indices into flat ones\n",
    "        shift = mask.sum(dim=1).cumsum(dim=0) - mask.sum(dim=1)\n",
    "        src = shift.view(B,1,1) + E_idx\n",
    "        src = torch.masked_select(src, mask_attend).view(1,-1)\n",
    "        dst = shift.view(B,1,1) + torch.arange(0, N, device=src.device).view(1,-1,1).expand_as(mask_attend)\n",
    "        dst = torch.masked_select(dst, mask_attend).view(1,-1)\n",
    "        # Row 0 = destination node, row 1 = source node of each edge\n",
    "        E_idx = torch.cat((dst, src), dim=0).long()\n",
    "\n",
    "        # Flatten the batch: keep coordinates of valid residues only\n",
    "        sparse_idx = mask.nonzero()\n",
    "        X = X[sparse_idx[:,0], sparse_idx[:,1], :, :]\n",
    "        batch_id = sparse_idx[:,0]\n",
    "        return X, S, h_V, h_E, E_idx, batch_id"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "e12b215f-66ff-46cb-8645-733655cbf66f",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-05-16T08:20:32.974021Z",
     "iopub.status.busy": "2025-05-16T08:20:32.973775Z",
     "iopub.status.idle": "2025-05-16T08:20:32.982893Z",
     "shell.execute_reply": "2025-05-16T08:20:32.982463Z",
     "shell.execute_reply.started": "2025-05-16T08:20:32.974005Z"
    }
   },
   "outputs": [],
   "source": [
    "class RNAModel(nn.Module):\n",
    "    \"\"\"Graph message-passing network for RNA inverse folding.\n",
    "\n",
    "    Encodes a featurized backbone graph with stacked MPNN layers and reads\n",
    "    out per-residue nucleotide logits plus per-graph projected embeddings\n",
    "    (the latter consumed by the training objective elsewhere).\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, model_config):\n",
    "        super(RNAModel, self).__init__()\n",
    "\n",
    "        # Label-smoothing factor; not used inside this module itself\n",
    "        self.smoothing = model_config.smoothing\n",
    "        self.node_features = self.edge_features = model_config.hidden\n",
    "        self.hidden_dim = model_config.hidden\n",
    "        self.vocab = model_config.vocab_size\n",
    "\n",
    "        # Structure featurizer: kNN graph + node/edge feature embeddings\n",
    "        self.features = RNAFeatures(\n",
    "            model_config.hidden, model_config.hidden, \n",
    "            top_k=model_config.k_neighbors, \n",
    "            dropout=model_config.dropout,\n",
    "            node_feat_types=model_config.node_feat_types, \n",
    "            edge_feat_types=model_config.edge_feat_types,\n",
    "            args=model_config\n",
    "        )\n",
    "\n",
    "        layer = MPNNLayer\n",
    "        self.W_s = nn.Embedding(model_config.vocab_size, self.hidden_dim)\n",
    "        self.encoder_layers = nn.ModuleList([\n",
    "            layer(self.hidden_dim, self.hidden_dim*2, dropout=model_config.dropout)\n",
    "            for _ in range(model_config.num_encoder_layers)])\n",
    "        self.decoder_layers = nn.ModuleList([\n",
    "            layer(self.hidden_dim, self.hidden_dim*2, dropout=model_config.dropout)\n",
    "            for _ in range(model_config.num_decoder_layers)])\n",
    "\n",
    "        # Projects mean-pooled per-graph embeddings\n",
    "        self.projection_head = nn.Sequential(\n",
    "            nn.Linear(self.hidden_dim, self.hidden_dim, bias=False), \n",
    "            nn.ReLU(inplace=True), \n",
    "            nn.Linear(self.hidden_dim, self.hidden_dim, bias=True)\n",
    "        )\n",
    "\n",
    "        self.readout = nn.Linear(self.hidden_dim, model_config.vocab_size, bias=True)\n",
    "\n",
    "        # Xavier init for every parameter with more than one dimension\n",
    "        for p in self.parameters():\n",
    "            if p.dim() > 1:\n",
    "                nn.init.xavier_uniform_(p)\n",
    "\n",
    "    def _propagate(self, layers, h_V, h_E, E_idx, batch_id):\n",
    "        \"\"\"Run one MPNN stack; edge-node inputs are rebuilt at every layer.\n",
    "\n",
    "        Shared by forward() and sample(), which previously duplicated these\n",
    "        loops verbatim for both the encoder and the decoder stacks.\n",
    "        \"\"\"\n",
    "        for layer in layers:\n",
    "            h_EV = torch.cat([h_E, h_V[E_idx[0]], h_V[E_idx[1]]], dim=-1)\n",
    "            h_V = layer(h_V, h_EV, E_idx, batch_id)\n",
    "        return h_V\n",
    "\n",
    "    def forward(self, X, S, mask):\n",
    "        \"\"\"Return (per-residue logits, flattened labels, graph projections).\"\"\"\n",
    "        X, S, h_V, h_E, E_idx, batch_id = self.features(X, S, mask)\n",
    "        h_V = self._propagate(self.encoder_layers, h_V, h_E, E_idx, batch_id)\n",
    "        h_V = self._propagate(self.decoder_layers, h_V, h_E, E_idx, batch_id)\n",
    "\n",
    "        # Mean-pool node embeddings per graph, then project\n",
    "        graph_embs = []\n",
    "        for b_id in range(batch_id[-1].item()+1):\n",
    "            graph_embs.append(h_V[batch_id == b_id].mean(0))\n",
    "        graph_embs = torch.stack(graph_embs, dim=0)\n",
    "        graph_prjs = self.projection_head(graph_embs)\n",
    "\n",
    "        logits = self.readout(h_V)\n",
    "        return logits, S, graph_prjs\n",
    "\n",
    "    def sample(self, X, S, mask=None):\n",
    "        \"\"\"Inference pass: return (per-residue logits, ground-truth labels).\"\"\"\n",
    "        X, gt_S, h_V, h_E, E_idx, batch_id = self.features(X, S, mask) \n",
    "        h_V = self._propagate(self.encoder_layers, h_V, h_E, E_idx, batch_id)\n",
    "        h_V = self._propagate(self.decoder_layers, h_V, h_E, E_idx, batch_id)\n",
    "        logits = self.readout(h_V)\n",
    "        return logits, gt_S"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "6fce9f8d-d53a-48a1-a082-1047dca20c5c",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-05-16T08:20:32.983694Z",
     "iopub.status.busy": "2025-05-16T08:20:32.983488Z",
     "iopub.status.idle": "2025-05-16T08:20:33.404844Z",
     "shell.execute_reply": "2025-05-16T08:20:33.404340Z",
     "shell.execute_reply.started": "2025-05-16T08:20:32.983679Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "RNAModel(\n",
      "  (features): RNAFeatures(\n",
      "    (dropout): Dropout(p=0.1, inplace=False)\n",
      "    (node_embedding): Linear(in_features=101, out_features=128, bias=True)\n",
      "    (edge_embedding): Linear(in_features=115, out_features=128, bias=True)\n",
      "    (norm_nodes): Normalize()\n",
      "    (norm_edges): Normalize()\n",
      "  )\n",
      "  (W_s): Embedding(4, 128)\n",
      "  (encoder_layers): ModuleList(\n",
      "    (0-2): 3 x MPNNLayer(\n",
      "      (dropout): Dropout(p=0.1, inplace=False)\n",
      "      (norm1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
      "      (norm2): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
      "      (W1): Linear(in_features=384, out_features=128, bias=True)\n",
      "      (W2): Linear(in_features=128, out_features=128, bias=True)\n",
      "      (W3): Linear(in_features=128, out_features=128, bias=True)\n",
      "      (act): ReLU()\n",
      "      (dense): Sequential(\n",
      "        (0): Linear(in_features=128, out_features=512, bias=True)\n",
      "        (1): ReLU()\n",
      "        (2): Linear(in_features=512, out_features=128, bias=True)\n",
      "      )\n",
      "    )\n",
      "  )\n",
      "  (decoder_layers): ModuleList(\n",
      "    (0-2): 3 x MPNNLayer(\n",
      "      (dropout): Dropout(p=0.1, inplace=False)\n",
      "      (norm1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
      "      (norm2): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
      "      (W1): Linear(in_features=384, out_features=128, bias=True)\n",
      "      (W2): Linear(in_features=128, out_features=128, bias=True)\n",
      "      (W3): Linear(in_features=128, out_features=128, bias=True)\n",
      "      (act): ReLU()\n",
      "      (dense): Sequential(\n",
      "        (0): Linear(in_features=128, out_features=512, bias=True)\n",
      "        (1): ReLU()\n",
      "        (2): Linear(in_features=512, out_features=128, bias=True)\n",
      "      )\n",
      "    )\n",
      "  )\n",
      "  (projection_head): Sequential(\n",
      "    (0): Linear(in_features=128, out_features=128, bias=False)\n",
      "    (1): ReLU(inplace=True)\n",
      "    (2): Linear(in_features=128, out_features=128, bias=True)\n",
      "  )\n",
      "  (readout): Linear(in_features=128, out_features=4, bias=True)\n",
      ")\n"
     ]
    }
   ],
   "source": [
    "# Build the model from the shared config and move it to the target device,\n",
    "# then print the module tree to verify layer shapes.\n",
    "model = RNAModel(config.model_config).to(config.device)\n",
    "print(model)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "b5fa53a6-9b1b-4292-b0b4-eebb39810e15",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-05-16T08:20:33.405721Z",
     "iopub.status.busy": "2025-05-16T08:20:33.405489Z",
     "iopub.status.idle": "2025-05-16T08:20:33.408068Z",
     "shell.execute_reply": "2025-05-16T08:20:33.407674Z",
     "shell.execute_reply.started": "2025-05-16T08:20:33.405708Z"
    }
   },
   "outputs": [],
   "source": [
    "# Per-residue classification loss over the nucleotide vocabulary\n",
    "criterion = nn.CrossEntropyLoss()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "4598b384-aacd-4e76-b547-743602e483e2",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2025-05-16T08:20:33.408788Z",
     "iopub.status.busy": "2025-05-16T08:20:33.408603Z",
     "iopub.status.idle": "2025-05-16T08:20:33.414984Z",
     "shell.execute_reply": "2025-05-16T08:20:33.414567Z",
     "shell.execute_reply.started": "2025-05-16T08:20:33.408777Z"
    }
   },
   "outputs": [],
   "source": [
    "# 定义网格搜索参数\n",
    "epochs = [50, 60, 70]\n",
    "lrs = [1e-3, 1e-4, 1e-5]\n",
    "results = np.zeros((len(epochs), len(lrs)))  # 存储结果矩阵"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "f894be4e-6b63-4da7-bcfb-31010157f657",
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "execution": {
     "iopub.execute_input": "2025-05-16T08:32:57.272237Z",
     "iopub.status.busy": "2025-05-16T08:32:57.271964Z",
     "iopub.status.idle": "2025-05-16T15:38:19.680652Z",
     "shell.execute_reply": "2025-05-16T15:38:19.679954Z",
     "shell.execute_reply.started": "2025-05-16T08:32:57.272222Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "=== Training with Epochs: 50, LR: 0.001 ===\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train loss: 1.3782: 100%|██████████| 100/100 [00:37<00:00,  2.65it/s]\n",
      "train loss: 1.3943: 100%|██████████| 100/100 [00:38<00:00,  2.62it/s]\n",
      "train loss: 1.3632: 100%|██████████| 100/100 [00:37<00:00,  2.64it/s]\n",
      "train loss: 1.2839: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 1.2068: 100%|██████████| 100/100 [00:38<00:00,  2.63it/s]\n",
      "train loss: 1.1706: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 1.1507: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 1.1893: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 1.1137: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 0.9700: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 0.8406: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 0.8332: 100%|██████████| 100/100 [00:38<00:00,  2.62it/s]\n",
      "train loss: 0.6815: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 0.8608: 100%|██████████| 100/100 [00:39<00:00,  2.55it/s]\n",
      "train loss: 0.6317: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 0.6522: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 0.6561: 100%|██████████| 100/100 [00:38<00:00,  2.63it/s]\n",
      "train loss: 0.7845: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 0.5340: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 0.7423: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 0.6399: 100%|██████████| 100/100 [00:38<00:00,  2.62it/s]\n",
      "train loss: 0.4084: 100%|██████████| 100/100 [00:38<00:00,  2.63it/s]\n",
      "train loss: 0.4232: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 0.5224: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 0.3432: 100%|██████████| 100/100 [00:38<00:00,  2.63it/s]\n",
      "train loss: 0.4073: 100%|██████████| 100/100 [00:38<00:00,  2.63it/s]\n",
      "train loss: 0.8898: 100%|██████████| 100/100 [00:38<00:00,  2.62it/s]\n",
      "train loss: 0.4320: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 0.2568: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 0.4215: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 0.6400: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 0.2976: 100%|██████████| 100/100 [00:38<00:00,  2.62it/s]\n",
      "train loss: 0.2470: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 0.5485: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 0.3262: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 0.6312: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 0.4746: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 0.2460: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 0.3791: 100%|██████████| 100/100 [00:38<00:00,  2.62it/s]\n",
      "train loss: 0.4120: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 0.4076: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 0.5423: 100%|██████████| 100/100 [00:38<00:00,  2.62it/s]\n",
      "train loss: 0.3123: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 0.8366: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 0.3833: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 0.2691: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 0.3424: 100%|██████████| 100/100 [00:38<00:00,  2.62it/s]\n",
      "train loss: 0.4542: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 0.3977: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 0.3402: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "=== Training with Epochs: 50, LR: 0.0001 ===\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train loss: 1.3607: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 1.3568: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 1.3058: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 1.3536: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 1.2152: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 1.2894: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 1.2779: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 1.2684: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 1.1936: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 1.1704: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 1.2537: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 1.0694: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 1.0223: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 1.0115: 100%|██████████| 100/100 [00:37<00:00,  2.64it/s]\n",
      "train loss: 1.1975: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 1.0835: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 0.9640: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 1.2278: 100%|██████████| 100/100 [00:39<00:00,  2.52it/s]\n",
      "train loss: 0.9494: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 0.9871: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 0.8979: 100%|██████████| 100/100 [00:39<00:00,  2.54it/s]\n",
      "train loss: 1.2480: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 0.9312: 100%|██████████| 100/100 [00:39<00:00,  2.52it/s]\n",
      "train loss: 0.9933: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 1.0032: 100%|██████████| 100/100 [00:39<00:00,  2.53it/s]\n",
      "train loss: 0.8811: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 1.0493: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 0.7844: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 0.9836: 100%|██████████| 100/100 [00:39<00:00,  2.53it/s]\n",
      "train loss: 0.7977: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 0.8219: 100%|██████████| 100/100 [00:39<00:00,  2.54it/s]\n",
      "train loss: 0.9541: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 0.7780: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 0.8675: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 0.8834: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 0.9954: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 0.7679: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 0.7203: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 0.8676: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 0.7635: 100%|██████████| 100/100 [00:39<00:00,  2.55it/s]\n",
      "train loss: 0.7653: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 0.5514: 100%|██████████| 100/100 [00:39<00:00,  2.54it/s]\n",
      "train loss: 0.5533: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 0.7384: 100%|██████████| 100/100 [00:38<00:00,  2.56it/s]\n",
      "train loss: 0.8680: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 0.8326: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 0.7042: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 0.8092: 100%|██████████| 100/100 [00:39<00:00,  2.55it/s]\n",
      "train loss: 0.4552: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 0.6277: 100%|██████████| 100/100 [00:39<00:00,  2.55it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "=== Training with Epochs: 50, LR: 1e-05 ===\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train loss: 1.4165: 100%|██████████| 100/100 [00:38<00:00,  2.62it/s]\n",
      "train loss: 1.4265: 100%|██████████| 100/100 [00:39<00:00,  2.54it/s]\n",
      "train loss: 1.3569: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 1.3780: 100%|██████████| 100/100 [00:39<00:00,  2.55it/s]\n",
      "train loss: 1.3725: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 1.3689: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 1.3307: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 1.3446: 100%|██████████| 100/100 [00:38<00:00,  2.56it/s]\n",
      "train loss: 1.3901: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 1.3352: 100%|██████████| 100/100 [00:38<00:00,  2.63it/s]\n",
      "train loss: 1.3208: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 1.3463: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 1.3449: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 1.3209: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 1.3045: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 1.3664: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 1.3143: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 1.3089: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 1.2926: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 1.3184: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 1.3346: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 1.2878: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 1.3215: 100%|██████████| 100/100 [00:38<00:00,  2.56it/s]\n",
      "train loss: 1.2780: 100%|██████████| 100/100 [00:37<00:00,  2.65it/s]\n",
      "train loss: 1.2663: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 1.3565: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 1.2812: 100%|██████████| 100/100 [00:39<00:00,  2.54it/s]\n",
      "train loss: 1.3263: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 1.2604: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 1.2500: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 1.2992: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 1.3137: 100%|██████████| 100/100 [00:38<00:00,  2.62it/s]\n",
      "train loss: 1.3398: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 1.2815: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 1.2153: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 1.2100: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 1.2755: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 1.2827: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 1.2942: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 1.2700: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 1.2755: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 1.2207: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 1.2612: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 1.2576: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 1.3200: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 1.2023: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 1.2478: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 1.2527: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 1.2459: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 1.1504: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "=== Training with Epochs: 60, LR: 0.001 ===\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train loss: 1.4090: 100%|██████████| 100/100 [00:38<00:00,  2.63it/s]\n",
      "train loss: 1.3805: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 1.3168: 100%|██████████| 100/100 [00:38<00:00,  2.62it/s]\n",
      "train loss: 1.3337: 100%|██████████| 100/100 [00:38<00:00,  2.62it/s]\n",
      "train loss: 1.2177: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 1.1085: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 0.9817: 100%|██████████| 100/100 [00:37<00:00,  2.64it/s]\n",
      "train loss: 0.8200: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 0.7355: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 0.9991: 100%|██████████| 100/100 [00:38<00:00,  2.63it/s]\n",
      "train loss: 0.7186: 100%|██████████| 100/100 [00:38<00:00,  2.63it/s]\n",
      "train loss: 0.7284: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 0.7063: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 0.5584: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 0.4293: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 0.5117: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 0.8428: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 0.4876: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 0.5939: 100%|██████████| 100/100 [00:38<00:00,  2.62it/s]\n",
      "train loss: 0.5416: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 0.7410: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 0.8258: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 0.4811: 100%|██████████| 100/100 [00:38<00:00,  2.62it/s]\n",
      "train loss: 0.4195: 100%|██████████| 100/100 [00:38<00:00,  2.62it/s]\n",
      "train loss: 0.8950: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 1.0129: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 0.4349: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 0.5406: 100%|██████████| 100/100 [00:38<00:00,  2.62it/s]\n",
      "train loss: 0.5887: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 0.4617: 100%|██████████| 100/100 [00:38<00:00,  2.62it/s]\n",
      "train loss: 0.7427: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 0.5704: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 0.2414: 100%|██████████| 100/100 [00:37<00:00,  2.64it/s]\n",
      "train loss: 0.3694: 100%|██████████| 100/100 [00:38<00:00,  2.62it/s]\n",
      "train loss: 0.3257: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 0.2438: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 0.5429: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 0.6161: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 0.2002: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 0.3792: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 0.2402: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 0.1919: 100%|██████████| 100/100 [00:40<00:00,  2.47it/s]\n",
      "train loss: 0.3206: 100%|██████████| 100/100 [00:40<00:00,  2.49it/s]\n",
      "train loss: 0.3960: 100%|██████████| 100/100 [00:39<00:00,  2.51it/s]\n",
      "train loss: 0.3525: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 0.3301: 100%|██████████| 100/100 [00:39<00:00,  2.51it/s]\n",
      "train loss: 0.3163: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 0.2133: 100%|██████████| 100/100 [00:39<00:00,  2.55it/s]\n",
      "train loss: 0.1873: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 0.3792: 100%|██████████| 100/100 [00:39<00:00,  2.53it/s]\n",
      "train loss: 0.3442: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 0.3151: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 0.1196: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 0.2587: 100%|██████████| 100/100 [00:39<00:00,  2.53it/s]\n",
      "train loss: 0.2196: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 0.3430: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 0.1507: 100%|██████████| 100/100 [00:39<00:00,  2.54it/s]\n",
      "train loss: 0.2052: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 0.1551: 100%|██████████| 100/100 [00:39<00:00,  2.55it/s]\n",
      "train loss: 0.1563: 100%|██████████| 100/100 [00:39<00:00,  2.51it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "=== Training with Epochs: 60, LR: 0.0001 ===\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train loss: 1.3392: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 1.3459: 100%|██████████| 100/100 [00:39<00:00,  2.53it/s]\n",
      "train loss: 1.3065: 100%|██████████| 100/100 [00:40<00:00,  2.48it/s]\n",
      "train loss: 1.2910: 100%|██████████| 100/100 [00:39<00:00,  2.53it/s]\n",
      "train loss: 1.2977: 100%|██████████| 100/100 [00:40<00:00,  2.50it/s]\n",
      "train loss: 1.2512: 100%|██████████| 100/100 [00:40<00:00,  2.49it/s]\n",
      "train loss: 1.2798: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 1.2778: 100%|██████████| 100/100 [00:39<00:00,  2.53it/s]\n",
      "train loss: 1.1817: 100%|██████████| 100/100 [00:40<00:00,  2.48it/s]\n",
      "train loss: 1.2319: 100%|██████████| 100/100 [00:40<00:00,  2.46it/s]\n",
      "train loss: 1.2035: 100%|██████████| 100/100 [00:40<00:00,  2.46it/s]\n",
      "train loss: 1.2091: 100%|██████████| 100/100 [00:39<00:00,  2.55it/s]\n",
      "train loss: 1.0764: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 0.9893: 100%|██████████| 100/100 [00:39<00:00,  2.54it/s]\n",
      "train loss: 1.1778: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 1.1685: 100%|██████████| 100/100 [00:40<00:00,  2.50it/s]\n",
      "train loss: 1.0688: 100%|██████████| 100/100 [00:39<00:00,  2.54it/s]\n",
      "train loss: 0.9670: 100%|██████████| 100/100 [00:39<00:00,  2.52it/s]\n",
      "train loss: 1.0863: 100%|██████████| 100/100 [00:39<00:00,  2.53it/s]\n",
      "train loss: 1.0367: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 1.1111: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 0.9144: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 1.0164: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 0.9132: 100%|██████████| 100/100 [00:39<00:00,  2.55it/s]\n",
      "train loss: 0.8908: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 0.9263: 100%|██████████| 100/100 [00:39<00:00,  2.55it/s]\n",
      "train loss: 1.3055: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 0.8394: 100%|██████████| 100/100 [00:39<00:00,  2.53it/s]\n",
      "train loss: 0.9039: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 0.8931: 100%|██████████| 100/100 [00:39<00:00,  2.54it/s]\n",
      "train loss: 0.6335: 100%|██████████| 100/100 [00:39<00:00,  2.54it/s]\n",
      "train loss: 0.9006: 100%|██████████| 100/100 [00:39<00:00,  2.54it/s]\n",
      "train loss: 1.2228: 100%|██████████| 100/100 [00:39<00:00,  2.53it/s]\n",
      "train loss: 0.8315: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 0.7276: 100%|██████████| 100/100 [00:39<00:00,  2.55it/s]\n",
      "train loss: 0.9526: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 0.8509: 100%|██████████| 100/100 [00:39<00:00,  2.53it/s]\n",
      "train loss: 0.9472: 100%|██████████| 100/100 [00:39<00:00,  2.54it/s]\n",
      "train loss: 0.8960: 100%|██████████| 100/100 [00:39<00:00,  2.54it/s]\n",
      "train loss: 0.8837: 100%|██████████| 100/100 [00:39<00:00,  2.54it/s]\n",
      "train loss: 0.9195: 100%|██████████| 100/100 [00:39<00:00,  2.54it/s]\n",
      "train loss: 0.7329: 100%|██████████| 100/100 [00:39<00:00,  2.51it/s]\n",
      "train loss: 0.8691: 100%|██████████| 100/100 [00:39<00:00,  2.53it/s]\n",
      "train loss: 0.9710: 100%|██████████| 100/100 [00:40<00:00,  2.48it/s]\n",
      "train loss: 0.9670: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 0.9772: 100%|██████████| 100/100 [00:39<00:00,  2.52it/s]\n",
      "train loss: 0.5638: 100%|██████████| 100/100 [00:39<00:00,  2.54it/s]\n",
      "train loss: 0.4246: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 1.1972: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 0.7684: 100%|██████████| 100/100 [00:39<00:00,  2.53it/s]\n",
      "train loss: 0.8482: 100%|██████████| 100/100 [00:40<00:00,  2.48it/s]\n",
      "train loss: 0.7383: 100%|██████████| 100/100 [00:40<00:00,  2.47it/s]\n",
      "train loss: 0.5597: 100%|██████████| 100/100 [00:40<00:00,  2.49it/s]\n",
      "train loss: 0.6638: 100%|██████████| 100/100 [00:39<00:00,  2.51it/s]\n",
      "train loss: 0.6619: 100%|██████████| 100/100 [00:40<00:00,  2.44it/s]\n",
      "train loss: 0.7009: 100%|██████████| 100/100 [00:39<00:00,  2.50it/s]\n",
      "train loss: 0.6033: 100%|██████████| 100/100 [00:39<00:00,  2.53it/s]\n",
      "train loss: 0.7541: 100%|██████████| 100/100 [00:40<00:00,  2.50it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "=== Training with Epochs: 60, LR: 1e-05 ===\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train loss: 1.4578: 100%|██████████| 100/100 [00:39<00:00,  2.55it/s]\n",
      "train loss: 1.4023: 100%|██████████| 100/100 [00:39<00:00,  2.51it/s]\n",
      "train loss: 1.3734: 100%|██████████| 100/100 [00:39<00:00,  2.54it/s]\n",
      "train loss: 1.3918: 100%|██████████| 100/100 [00:40<00:00,  2.49it/s]\n",
      "train loss: 1.3258: 100%|██████████| 100/100 [00:39<00:00,  2.53it/s]\n",
      "train loss: 1.3883: 100%|██████████| 100/100 [00:39<00:00,  2.51it/s]\n",
      "train loss: 1.3604: 100%|██████████| 100/100 [00:40<00:00,  2.47it/s]\n",
      "train loss: 1.3199: 100%|██████████| 100/100 [00:40<00:00,  2.46it/s]\n",
      "train loss: 1.3264: 100%|██████████| 100/100 [00:40<00:00,  2.44it/s]\n",
      "train loss: 1.3411: 100%|██████████| 100/100 [00:40<00:00,  2.46it/s]\n",
      "train loss: 1.3424: 100%|██████████| 100/100 [00:39<00:00,  2.54it/s]\n",
      "train loss: 1.3324: 100%|██████████| 100/100 [00:39<00:00,  2.51it/s]\n",
      "train loss: 1.3299: 100%|██████████| 100/100 [00:40<00:00,  2.47it/s]\n",
      "train loss: 1.3429: 100%|██████████| 100/100 [00:41<00:00,  2.42it/s]\n",
      "train loss: 1.3326: 100%|██████████| 100/100 [00:39<00:00,  2.53it/s]\n",
      "train loss: 1.3180: 100%|██████████| 100/100 [00:39<00:00,  2.50it/s]\n",
      "train loss: 1.2805: 100%|██████████| 100/100 [00:39<00:00,  2.53it/s]\n",
      "train loss: 1.3449: 100%|██████████| 100/100 [00:40<00:00,  2.50it/s]\n",
      "train loss: 1.3195: 100%|██████████| 100/100 [00:40<00:00,  2.47it/s]\n",
      "train loss: 1.2874: 100%|██████████| 100/100 [00:39<00:00,  2.51it/s]\n",
      "train loss: 1.3085: 100%|██████████| 100/100 [00:39<00:00,  2.54it/s]\n",
      "train loss: 1.3229: 100%|██████████| 100/100 [00:39<00:00,  2.50it/s]\n",
      "train loss: 1.2805: 100%|██████████| 100/100 [00:39<00:00,  2.51it/s]\n",
      "train loss: 1.2735: 100%|██████████| 100/100 [00:39<00:00,  2.52it/s]\n",
      "train loss: 1.2924: 100%|██████████| 100/100 [00:40<00:00,  2.48it/s]\n",
      "train loss: 1.3177: 100%|██████████| 100/100 [00:40<00:00,  2.48it/s]\n",
      "train loss: 1.2744: 100%|██████████| 100/100 [00:40<00:00,  2.50it/s]\n",
      "train loss: 1.2988: 100%|██████████| 100/100 [00:39<00:00,  2.52it/s]\n",
      "train loss: 1.3296: 100%|██████████| 100/100 [00:39<00:00,  2.51it/s]\n",
      "train loss: 1.2979: 100%|██████████| 100/100 [00:39<00:00,  2.51it/s]\n",
      "train loss: 1.2416: 100%|██████████| 100/100 [00:39<00:00,  2.51it/s]\n",
      "train loss: 1.2709: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 1.2343: 100%|██████████| 100/100 [00:39<00:00,  2.51it/s]\n",
      "train loss: 1.2591: 100%|██████████| 100/100 [00:39<00:00,  2.51it/s]\n",
      "train loss: 1.3022: 100%|██████████| 100/100 [00:39<00:00,  2.55it/s]\n",
      "train loss: 1.2516: 100%|██████████| 100/100 [00:40<00:00,  2.46it/s]\n",
      "train loss: 1.3639: 100%|██████████| 100/100 [00:39<00:00,  2.55it/s]\n",
      "train loss: 1.1941: 100%|██████████| 100/100 [00:39<00:00,  2.52it/s]\n",
      "train loss: 1.2371: 100%|██████████| 100/100 [00:39<00:00,  2.52it/s]\n",
      "train loss: 1.2992: 100%|██████████| 100/100 [00:40<00:00,  2.49it/s]\n",
      "train loss: 1.2875: 100%|██████████| 100/100 [00:40<00:00,  2.49it/s]\n",
      "train loss: 1.2625: 100%|██████████| 100/100 [00:40<00:00,  2.44it/s]\n",
      "train loss: 1.2971: 100%|██████████| 100/100 [00:39<00:00,  2.52it/s]\n",
      "train loss: 1.2997: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 1.2740: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 1.2711: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 1.2962: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 1.2528: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 1.2529: 100%|██████████| 100/100 [00:39<00:00,  2.54it/s]\n",
      "train loss: 1.1792: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 1.2822: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 1.1856: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 1.2090: 100%|██████████| 100/100 [00:38<00:00,  2.63it/s]\n",
      "train loss: 1.2646: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 1.2659: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 1.2003: 100%|██████████| 100/100 [00:39<00:00,  2.55it/s]\n",
      "train loss: 1.2044: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 1.2530: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 1.2323: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 1.2114: 100%|██████████| 100/100 [00:38<00:00,  2.63it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "=== Training with Epochs: 70, LR: 0.001 ===\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train loss: 1.3932: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 1.3724: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 1.3116: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 1.2958: 100%|██████████| 100/100 [00:39<00:00,  2.55it/s]\n",
      "train loss: 1.1939: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 1.0834: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 0.9485: 100%|██████████| 100/100 [00:38<00:00,  2.62it/s]\n",
      "train loss: 1.2320: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 1.0286: 100%|██████████| 100/100 [00:37<00:00,  2.67it/s]\n",
      "train loss: 0.7395: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 0.8754: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 0.9183: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 0.8171: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 0.9869: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 0.7800: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 0.6764: 100%|██████████| 100/100 [00:38<00:00,  2.63it/s]\n",
      "train loss: 0.6062: 100%|██████████| 100/100 [00:38<00:00,  2.62it/s]\n",
      "train loss: 0.4112: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 0.8011: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 0.5919: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 0.7923: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 0.9639: 100%|██████████| 100/100 [00:38<00:00,  2.62it/s]\n",
      "train loss: 0.5319: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 0.3998: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 0.6484: 100%|██████████| 100/100 [00:38<00:00,  2.63it/s]\n",
      "train loss: 0.6509: 100%|██████████| 100/100 [00:38<00:00,  2.63it/s]\n",
      "train loss: 0.2817: 100%|██████████| 100/100 [00:37<00:00,  2.63it/s]\n",
      "train loss: 0.5672: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 0.3405: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 0.5719: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 0.7015: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 0.3477: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 0.4986: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 0.3892: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 0.4609: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 0.7520: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 0.3000: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 0.3449: 100%|██████████| 100/100 [00:37<00:00,  2.65it/s]\n",
      "train loss: 0.2854: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 0.4601: 100%|██████████| 100/100 [00:38<00:00,  2.62it/s]\n",
      "train loss: 0.5341: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 0.2112: 100%|██████████| 100/100 [00:38<00:00,  2.62it/s]\n",
      "train loss: 0.4607: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 0.6566: 100%|██████████| 100/100 [00:38<00:00,  2.62it/s]\n",
      "train loss: 0.1123: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 0.4032: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 0.3789: 100%|██████████| 100/100 [00:37<00:00,  2.64it/s]\n",
      "train loss: 0.2962: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 0.3030: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 0.3746: 100%|██████████| 100/100 [00:37<00:00,  2.65it/s]\n",
      "train loss: 0.3035: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 0.2944: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 0.4277: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 0.3145: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 0.6204: 100%|██████████| 100/100 [00:38<00:00,  2.62it/s]\n",
      "train loss: 0.1336: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 0.3485: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 0.4035: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 0.2680: 100%|██████████| 100/100 [00:37<00:00,  2.65it/s]\n",
      "train loss: 0.5435: 100%|██████████| 100/100 [00:37<00:00,  2.65it/s]\n",
      "train loss: 0.3599: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 0.5113: 100%|██████████| 100/100 [00:37<00:00,  2.63it/s]\n",
      "train loss: 0.1346: 100%|██████████| 100/100 [00:38<00:00,  2.63it/s]\n",
      "train loss: 0.1548: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 0.3047: 100%|██████████| 100/100 [00:38<00:00,  2.63it/s]\n",
      "train loss: 0.1359: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 0.1841: 100%|██████████| 100/100 [00:38<00:00,  2.62it/s]\n",
      "train loss: 0.2029: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 0.1768: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 0.2179: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "=== Training with Epochs: 70, LR: 0.0001 ===\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train loss: 1.3866: 100%|██████████| 100/100 [00:38<00:00,  2.63it/s]\n",
      "train loss: 1.3689: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 1.3062: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 1.3257: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 1.2877: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 1.2698: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 1.2746: 100%|██████████| 100/100 [00:39<00:00,  2.55it/s]\n",
      "train loss: 1.2483: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 1.2053: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 1.1965: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 1.1250: 100%|██████████| 100/100 [00:39<00:00,  2.54it/s]\n",
      "train loss: 1.1363: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 1.0112: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 1.0958: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 1.1018: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 1.2985: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 1.0546: 100%|██████████| 100/100 [00:38<00:00,  2.62it/s]\n",
      "train loss: 0.9984: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 1.0739: 100%|██████████| 100/100 [00:38<00:00,  2.62it/s]\n",
      "train loss: 1.0505: 100%|██████████| 100/100 [00:37<00:00,  2.63it/s]\n",
      "train loss: 1.1456: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 1.0359: 100%|██████████| 100/100 [00:38<00:00,  2.56it/s]\n",
      "train loss: 1.0657: 100%|██████████| 100/100 [00:39<00:00,  2.54it/s]\n",
      "train loss: 0.9570: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 0.9667: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 0.9251: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 0.8415: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 0.8180: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 0.8643: 100%|██████████| 100/100 [00:39<00:00,  2.55it/s]\n",
      "train loss: 0.7611: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 0.9166: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 0.7625: 100%|██████████| 100/100 [00:39<00:00,  2.55it/s]\n",
      "train loss: 0.9823: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 0.8842: 100%|██████████| 100/100 [00:39<00:00,  2.55it/s]\n",
      "train loss: 0.9789: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 1.2768: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 0.8067: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 0.8780: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 0.9089: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 0.7271: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 0.8287: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 0.8169: 100%|██████████| 100/100 [00:37<00:00,  2.64it/s]\n",
      "train loss: 0.8249: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 0.7009: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 0.9370: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 0.9313: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 1.1196: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 0.8056: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 0.8763: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 0.5731: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 0.7401: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 0.9602: 100%|██████████| 100/100 [00:37<00:00,  2.65it/s]\n",
      "train loss: 0.9637: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 0.6682: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 0.6132: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 0.7587: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 0.6956: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 0.5224: 100%|██████████| 100/100 [00:39<00:00,  2.55it/s]\n",
      "train loss: 0.6019: 100%|██████████| 100/100 [00:38<00:00,  2.62it/s]\n",
      "train loss: 0.7608: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 0.8181: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 0.7423: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 0.5804: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 0.7191: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 0.5928: 100%|██████████| 100/100 [00:38<00:00,  2.62it/s]\n",
      "train loss: 0.6709: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 0.7530: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 1.0396: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 0.8146: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 0.5684: 100%|██████████| 100/100 [00:39<00:00,  2.55it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "=== Training with Epochs: 70, LR: 1e-05 ===\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train loss: 1.3836: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 1.4063: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 1.4091: 100%|██████████| 100/100 [00:38<00:00,  2.62it/s]\n",
      "train loss: 1.3529: 100%|██████████| 100/100 [00:39<00:00,  2.55it/s]\n",
      "train loss: 1.3629: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 1.3211: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 1.3345: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 1.3143: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 1.3623: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 1.3584: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 1.3403: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 1.3674: 100%|██████████| 100/100 [00:38<00:00,  2.62it/s]\n",
      "train loss: 1.3574: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 1.3544: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 1.3332: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 1.3174: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 1.3396: 100%|██████████| 100/100 [00:39<00:00,  2.55it/s]\n",
      "train loss: 1.3108: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 1.3097: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 1.2695: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 1.2665: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 1.2872: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 1.2951: 100%|██████████| 100/100 [00:39<00:00,  2.54it/s]\n",
      "train loss: 1.3270: 100%|██████████| 100/100 [00:38<00:00,  2.62it/s]\n",
      "train loss: 1.2384: 100%|██████████| 100/100 [00:38<00:00,  2.62it/s]\n",
      "train loss: 1.2959: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 1.2975: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 1.2753: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 1.2444: 100%|██████████| 100/100 [00:39<00:00,  2.52it/s]\n",
      "train loss: 1.2958: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 1.2609: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 1.2734: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 1.2711: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 1.1879: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 1.3161: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 1.3113: 100%|██████████| 100/100 [00:39<00:00,  2.55it/s]\n",
      "train loss: 1.2111: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 1.2753: 100%|██████████| 100/100 [00:37<00:00,  2.64it/s]\n",
      "train loss: 1.2615: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 1.2449: 100%|██████████| 100/100 [00:37<00:00,  2.64it/s]\n",
      "train loss: 1.2608: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 1.2514: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 1.2433: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 1.1887: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 1.2580: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 1.2272: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 1.2777: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 1.2788: 100%|██████████| 100/100 [00:39<00:00,  2.54it/s]\n",
      "train loss: 1.2124: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 1.1949: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 1.1878: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 1.2390: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 1.2844: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 1.1827: 100%|██████████| 100/100 [00:39<00:00,  2.55it/s]\n",
      "train loss: 1.3006: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 1.1691: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 1.1883: 100%|██████████| 100/100 [00:39<00:00,  2.55it/s]\n",
      "train loss: 1.2781: 100%|██████████| 100/100 [00:38<00:00,  2.58it/s]\n",
      "train loss: 1.1298: 100%|██████████| 100/100 [00:38<00:00,  2.61it/s]\n",
      "train loss: 1.1803: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 1.1172: 100%|██████████| 100/100 [00:38<00:00,  2.62it/s]\n",
      "train loss: 1.2260: 100%|██████████| 100/100 [00:38<00:00,  2.59it/s]\n",
      "train loss: 1.2246: 100%|██████████| 100/100 [00:38<00:00,  2.57it/s]\n",
      "train loss: 1.0789: 100%|██████████| 100/100 [00:39<00:00,  2.56it/s]\n",
      "train loss: 1.2132: 100%|██████████| 100/100 [00:38<00:00,  2.62it/s]\n",
      "train loss: 1.1365: 100%|██████████| 100/100 [00:38<00:00,  2.60it/s]\n",
      "train loss: 1.1081: 100%|██████████| 100/100 [00:38<00:00,  2.62it/s]\n"
     ]
    }
   ],
   "source": [
     "# Grid search main loop over (epochs, learning-rate) combinations\n",
     "\n",
    "for epoch_idx, num_epochs in enumerate(epochs):\n",
    "    for lr_idx, lr in enumerate(lrs):\n",
    "        print(f\"\\n=== Training with Epochs: {num_epochs}, LR: {lr} ===\")\n",
    "        \n",
     "        # Initialize a fresh model and optimizer for this configuration\n",
    "        model = RNAModel(config.model_config).to(config.device)\n",
    "        optimizer = Adam(model.parameters(), lr=lr)\n",
    "        \n",
    "        best_valid_recovery = 0\n",
    "        best_ckpt_path = f\"{train_config.output_dir}/best_epoch{num_epochs}_lr{lr}.pt\"\n",
    "        \n",
     "        os.makedirs(os.path.dirname(best_ckpt_path), exist_ok=True)  # ensure checkpoint directory exists\n",
    "        for epoch in range(num_epochs):\n",
    "            model.train()\n",
    "            epoch_loss = 0\n",
    "            train_pbar = tqdm(train_loader)\n",
    "            for batch in train_pbar:\n",
    "                X, S, mask, lengths, names = batch\n",
    "                X = X.to(config.device)\n",
    "                S = S.to(config.device)\n",
    "                mask = mask.to(config.device)\n",
    "                logits, S, _ = model(X, S, mask)\n",
    "                loss = criterion(logits, S)\n",
    "                loss.backward()\n",
    "                train_pbar.set_description(f'train loss: {loss.item():.4f}')\n",
    "                optimizer.step()\n",
    "                optimizer.zero_grad()\n",
    "                epoch_loss += loss.item()\n",
    "            \n",
     "            # Validation phase\n",
    "            model.eval()\n",
    "            with torch.no_grad():\n",
    "                recovery_list = []\n",
    "                for batch in valid_loader:\n",
    "                    X, S, mask, lengths, names = batch\n",
    "                    X = X.to(config.device)\n",
    "                    S = S.to(config.device)\n",
    "                    mask = mask.to(config.device)\n",
    "                    logits, S, _ = model(X, S, mask)\n",
    "                    probs = F.softmax(logits, dim=-1)\n",
    "                    samples = probs.argmax(dim=-1)\n",
    "                    \n",
    "                    start_idx = 0\n",
    "                    for length in lengths:\n",
    "                        end_idx = start_idx + length.item()\n",
    "                        sample = samples[start_idx: end_idx]\n",
    "                        gt_S = S[start_idx: end_idx]\n",
    "                        recovery = (sample == gt_S).sum() / len(sample)\n",
    "                        recovery_list.append(recovery.cpu().numpy())\n",
    "                        start_idx = end_idx\n",
    "                \n",
    "                valid_recovery = np.mean(recovery_list)\n",
    "                if valid_recovery > best_valid_recovery:\n",
    "                    best_valid_recovery = valid_recovery\n",
    "                    torch.save(model.state_dict(), best_ckpt_path)\n",
    "        \n",
     "        # Record the best validation recovery for this configuration\n",
    "        results[epoch_idx, lr_idx] = best_valid_recovery\n",
    "\n",
     "# Plot heatmap of grid-search results\n",
    "plot_heatmap(results, epochs, lrs)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "528c5443",
   "metadata": {},
   "source": [
    "### training"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fa10ad13-dc08-4515-b2e0-3fe96bee7d0d",
   "metadata": {
    "execution": {
     "iopub.status.busy": "2025-05-16T08:20:33.428388Z",
     "iopub.status.idle": "2025-05-16T08:20:33.428551Z",
     "shell.execute_reply": "2025-05-16T08:20:33.428477Z",
     "shell.execute_reply.started": "2025-05-16T08:20:33.428470Z"
    }
   },
   "outputs": [],
   "source": [
     "optimizer = Adam(model.parameters(), lr=train_config.lr)\n",
    "criterion = nn.CrossEntropyLoss()\n",
    "if not os.path.exists(train_config.output_dir):\n",
    "    os.makedirs(train_config.output_dir)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e18a8175-3562-4f27-a273-1b7725959ae4",
   "metadata": {
    "execution": {
     "iopub.status.busy": "2025-05-16T08:20:33.429212Z",
     "iopub.status.idle": "2025-05-16T08:20:33.429426Z",
     "shell.execute_reply": "2025-05-16T08:20:33.429336Z",
     "shell.execute_reply.started": "2025-05-16T08:20:33.429327Z"
    }
   },
   "outputs": [],
   "source": [
    "best_valid_recovery = 0\n",
    "for epoch in range(train_config.epoch):\n",
    "    model.train()\n",
    "    epoch_loss = 0\n",
    "    train_pbar = tqdm(train_loader)\n",
    "    for batch in train_pbar:\n",
    "        X, S, mask, lengths, names = batch\n",
    "        X = X.to(config.device)\n",
    "        S = S.to(config.device)\n",
    "        mask = mask.to(config.device)\n",
    "        logits, S, _ = model(X, S, mask)\n",
    "        loss = criterion(logits, S)\n",
    "        loss.backward()\n",
    "        train_pbar.set_description('train loss: {:.4f}'.format(loss.item()))\n",
    "        optimizer.step()\n",
    "        optimizer.zero_grad()\n",
    "        epoch_loss += loss.item()\n",
    "    \n",
    "    epoch_loss /= len(train_loader)\n",
    "    print('Epoch {}/{}, Loss: {:.4f}'.format(epoch + 1, train_config.epoch, epoch_loss))\n",
    "    \n",
    "    model.eval()\n",
    "    with torch.no_grad():\n",
    "        recovery_list = []\n",
    "        for batch in tqdm(valid_loader):\n",
    "            X, S, mask, lengths, names = batch\n",
    "            X = X.to(config.device)\n",
    "            S = S.to(config.device)\n",
    "            mask = mask.to(config.device)\n",
    "            logits, S, _ = model(X, S, mask)\n",
    "            probs = F.softmax(logits, dim=-1)\n",
    "            samples = probs.argmax(dim=-1)\n",
    "            start_idx = 0\n",
    "            for length in lengths:\n",
    "                end_idx = start_idx + length.item()\n",
    "                sample = samples[start_idx: end_idx]\n",
    "                gt_S = S[start_idx: end_idx]\n",
     "                recovery = (sample==gt_S).sum() / len(sample)\n",
    "                recovery_list.append(recovery.cpu().numpy())\n",
    "                start_idx = end_idx\n",
    "        valid_recovery = np.mean(recovery_list)\n",
    "        print('Epoch {}/{}, recovery: {:.4f}'.format(epoch + 1, train_config.epoch, valid_recovery))\n",
    "        if valid_recovery > best_valid_recovery:\n",
    "            best_valid_recovery = valid_recovery\n",
    "            torch.save(model.state_dict(), os.path.join(train_config.output_dir, 'best.pt'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0f86d866-5712-4987-98a7-64606adb72f8",
   "metadata": {
    "execution": {
     "iopub.status.busy": "2025-05-16T08:20:33.429977Z",
     "iopub.status.idle": "2025-05-16T08:20:33.430329Z",
     "shell.execute_reply": "2025-05-16T08:20:33.430230Z",
     "shell.execute_reply.started": "2025-05-16T08:20:33.430220Z"
    }
   },
   "outputs": [],
   "source": [
    "eval_model = RNAModel(config.model_config).to(config.device)\n",
    "checkpoint_path = train_config.ckpt_path\n",
    "print(\"loading checkpoint from path:\", checkpoint_path)\n",
    "eval_model.load_state_dict(torch.load(checkpoint_path, map_location='cpu'), strict=True)\n",
    "eval_model.to(config.device)\n",
    "eval_model.eval()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "beac3fe1",
   "metadata": {},
   "source": [
    "### test"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "10158a05-4192-4f93-b173-a7ba4dc25a96",
   "metadata": {
    "execution": {
     "iopub.status.busy": "2025-05-16T08:20:33.430900Z",
     "iopub.status.idle": "2025-05-16T08:20:33.431123Z",
     "shell.execute_reply": "2025-05-16T08:20:33.431027Z",
     "shell.execute_reply.started": "2025-05-16T08:20:33.431021Z"
    }
   },
   "outputs": [],
   "source": [
    "with torch.no_grad():\n",
     "    recovery_list = []\n",
    "    for batch in tqdm(test_loader):\n",
    "        X, S, mask, lengths, names = batch\n",
    "        X = X.to(config.device)\n",
    "        S = S.to(config.device)\n",
    "        mask = mask.to(config.device)\n",
    "        logits, S, _ = eval_model(X, S, mask)\n",
    "        probs = F.softmax(logits, dim=-1)\n",
    "        samples = probs.argmax(dim=-1)\n",
    "        start_idx = 0\n",
    "        for length in lengths:\n",
    "            end_idx = start_idx + length.item()\n",
     "            sample = samples[start_idx: end_idx]\n",
     "            gt_S = S[start_idx: end_idx]\n",
    "            recovery = (sample==gt_S).sum() / len(sample)\n",
    "            recovery_list.append(recovery.cpu().numpy())\n",
    "            start_idx = end_idx\n",
    "    test_recovery = np.mean(recovery_list)\n",
    "    print('test recovery: {:.4f}'.format(test_recovery))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "638f8159-a08f-456f-9b16-aaa18b37cda0",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
