{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "3068c6e9",
   "metadata": {},
   "source": [
    "### RNA 逆转录 Baseline (Based on RDesign)\n",
    "This code establishes a baseline for ​RNA inverse folding​ (sequence design from structure), adapted from [A4Bio/RDesign](https://github.com/A4Bio/RDesign). It processes RNA structural coordinates (.npy files) paired with sequences (FASTA), splits data into train/valid/test sets, and creates PyTorch datasets where inputs are 3D backbone coordinates (P, C4', O3' atoms) and targets are corresponding RNA sequences. \n",
    "\n",
    "RNA有四种碱基：AGCU, adenine（腺嘌呤）、 Uracil（尿嘧啶）, Cytosine（胞嘧啶）和 Guanine（鸟嘌呤）\n",
    "\n",
    "RNA的序列结构：\n",
    "\n",
    "            碱基                碱基                 碱基\n",
    "磷酸基团-五碳糖(核糖)-磷酸基团-五碳糖(核糖)-磷酸基团-五碳糖(核糖)\n",
    "\n",
    "5'-羟基和3'-羟基中间是P, 碳2和碳4都连着羟基 2'-OH, 4'-OH, 碳1和含氮碱基相连，碳2的羟基比较活跃易水解\n",
    "```text\n",
    "主链方向：C5' → C3'\n",
    "  ...-P-O5'–C5'–C4'–C3'–O3'-P-...\n",
    "            |     \n",
    "            C1'  \n",
    "            / \\ \n",
    "            碱基\n",
    "核糖(5碳糖)主链原子（O5'-C5'-C4'-C3'-O3'），磷酸基团P，碱基N1/N9\n",
    "```"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "a1a25400",
   "metadata": {},
   "outputs": [],
   "source": [
    "# !pip install torch, pandas, biopython"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "159f7029-9f98-48b1-aa8f-d3ab43894deb",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import torch\n",
    "import random\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import torch.nn as nn\n",
    "from tqdm import tqdm\n",
    "from typing import List, Dict\n",
    "from torch.optim import Adam\n",
    "import torch.nn.functional as F\n",
    "from torch.utils.data import Dataset\n",
    "from torch.utils.data import DataLoader\n",
    "from dataclasses import dataclass, field\n",
    "from torch_scatter import scatter_sum, scatter_softmax\n",
    "from Bio import SeqIO  # pip install biopython\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "1faeb703",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/workspace/sais_medicine\n"
     ]
    }
   ],
   "source": [
    "# Define Configuration Classes\n",
    "\n",
    "\n",
    "root_dir = os.path.dirname(os.getcwd())\n",
    "print(root_dir)\n",
    "datapath = 'saistraindata'\n",
    "datapath = 'saisdata'\n",
    "@dataclass\n",
    "class DataConfig:\n",
    "    datapath: str = f'{root_dir}/{datapath}'  # 数据根目录\n",
    "    fasta_data_path: str = f'{datapath}/seqs'  # 标签数据\n",
    "    coords_data_path: str = f'{datapath}/coords'  # 特征数据\n",
    "    outputs_path: str = f'{root_dir}/outputs'\n",
    "    \n",
    "    train_npy_data_dir: str = f'{datapath}/coords'\n",
    "    valid_npy_data_dir: str = f'{datapath}/coords'\n",
    "    test_npy_data_dir: str = f'{datapath}/coords'\n",
    "    \n",
    "    train_data_path: str = f'{root_dir}/outputs/public_train_data.csv'\n",
    "    valid_data_path: str = f'{root_dir}/outputs/public_valid_data.csv'\n",
    "    test_data_path: str = f'{root_dir}/outputs/public_test_data.csv'\n",
    "\n",
    "@dataclass\n",
    "class ModelConfig:\n",
    "    smoothing: float = 0.1\n",
    "    hidden: int = 128\n",
    "    vocab_size: int = 4  # 明确指定为 int 类型\n",
    "    k_neighbors: int = 30  # 明确指定为 int 类型\n",
    "    dropout: float = 0.1\n",
    "    node_feat_types: List[str] = field(default_factory=lambda: ['angle', 'distance', 'direction'])  # 使用 field 避免可变对象问题\n",
    "    edge_feat_types: List[str] = field(default_factory=lambda: ['orientation', 'distance', 'direction'])  # 同上\n",
    "    num_encoder_layers: int = 3\n",
    "    num_decoder_layers: int = 3  # 修正为整数，去掉多余的小数点\n",
    "\n",
    "@dataclass\n",
    "class TrainConfig:\n",
    "    batch_size: int = 8\n",
    "    epoch: int = 10\n",
    "    lr: float = 0.001\n",
    "    output_dir: str = f'{root_dir}/code/weights'\n",
    "    ckpt_path: str = f'{root_dir}/code/weights/best.pt'\n",
    "\n",
    "@dataclass\n",
    "class Config:\n",
    "    pipeline: str = 'train'\n",
    "    seed: int = 2025\n",
    "    device: str = 'cuda' if torch.cuda.is_available() else 'cpu'\n",
    "    data_config: DataConfig = DataConfig()\n",
    "    model_config: ModelConfig = ModelConfig()\n",
    "    train_config: TrainConfig = TrainConfig()\n",
    "    seq_vocab: str = \"AUCG\""
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f6c9a137",
   "metadata": {},
   "source": [
    "### Data processing\n",
    "This code implements an RNA data processing pipeline for machine learning tasks using PyTorch and Biopython."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "a6c2f4ec-4be4-4b8f-a0c6-d213ca157a86",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 2317/2317 [00:00<00:00, 26190.59it/s]\n"
     ]
    }
   ],
   "source": [
    "# Define function to read FASTA files using Biopython\n",
    "def read_fasta_biopython(file_path):\n",
    "    sequences = {}\n",
    "    for record in SeqIO.parse(file_path, \"fasta\"):\n",
    "        sequences[record.id] = str(record.seq)\n",
    "    return sequences\n",
    "\n",
    "# 读取FASTA文件，即标签\n",
    "train_file_list = os.listdir(DataConfig.fasta_data_path)\n",
    "content_dict = {\n",
    "    \"pdb_id\": [],\n",
    "    \"seq\": []\n",
    "}\n",
    "for file in tqdm(train_file_list):\n",
    "    data_path = os.path.join(DataConfig.fasta_data_path, file)\n",
    "    sequences = read_fasta_biopython(data_path)\n",
    "    content_dict[\"pdb_id\"].append(list(sequences.keys())[0])\n",
    "    content_dict[\"seq\"].append(list(sequences.values())[0])\n",
    "\n",
    "data = pd.DataFrame(content_dict)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "fa48eb54",
   "metadata": {},
   "source": [
    "## 读一下数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "4678b144",
   "metadata": {},
   "outputs": [],
   "source": [
    "data_path = f\"{DataConfig.coords_data_path}/1A9N_1_Q.npy\"\n",
    "data_path = f\"{DataConfig.coords_data_path}/3JBU_1_v.npy\"\n",
    "\n",
    "coords = np.load(data_path)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9760eda1",
   "metadata": {},
   "source": [
    "## 可视化\n",
    "\n",
    "+ 缺失值最好用相同碱基的均值填充\n",
    "+ 临近碱基通过均值计算邻域"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "b2036844",
   "metadata": {},
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'coords' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mNameError\u001b[0m                                 Traceback (most recent call last)",
      "Cell \u001b[0;32mIn[1], line 6\u001b[0m\n\u001b[1;32m      2\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21;01mmatplotlib\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mpyplot\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mas\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21;01mplt\u001b[39;00m\n\u001b[1;32m      3\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21;01mmpl_toolkits\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mmplot3d\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mimport\u001b[39;00m Axes3D\n\u001b[0;32m----> 6\u001b[0m points \u001b[38;5;241m=\u001b[39m np\u001b[38;5;241m.\u001b[39mnan_to_num(\u001b[43mcoords\u001b[49m, nan\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m0.0\u001b[39m)\n\u001b[1;32m      7\u001b[0m points \u001b[38;5;241m=\u001b[39m points[:, :, :]\n\u001b[1;32m      8\u001b[0m cls_id \u001b[38;5;241m=\u001b[39m np\u001b[38;5;241m.\u001b[39marray(\u001b[38;5;28mrange\u001b[39m(points\u001b[38;5;241m.\u001b[39mshape[\u001b[38;5;241m0\u001b[39m]))\u001b[38;5;241m.\u001b[39mreshape(\u001b[38;5;241m-\u001b[39m\u001b[38;5;241m1\u001b[39m, \u001b[38;5;241m1\u001b[39m)  \u001b[38;5;66;03m# 根据基团上色\u001b[39;00m\n",
      "\u001b[0;31mNameError\u001b[0m: name 'coords' is not defined"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "from mpl_toolkits.mplot3d import Axes3D\n",
    "\n",
    "\n",
    "points = np.nan_to_num(coords, nan=0.0)\n",
    "points = points[:20, :, :]\n",
    "cls_id = np.array(range(points.shape[0])).reshape(-1, 1)  # 根据基团上色\n",
    "# np.random.shuffle(cls_id)\n",
    "\n",
    "categories = np.zeros((points.shape[0], points.shape[1])) + cls_id\n",
    "categories = categories.reshape(-1)\n",
    "points = points.reshape(-1, 3)\n",
    "print(points.shape, categories.shape)\n",
    "\n",
    "\n",
    "# 创建一个新的图形\n",
    "fig = plt.figure(figsize=(7, 7))\n",
    "ax = fig.add_subplot(111, projection='3d')\n",
    "\n",
    "# 获取所有不同的类别\n",
    "unique_categories = np.unique(categories)\n",
    "\n",
    "# 为每个类别分配一种颜色\n",
    "colors = plt.colormaps.get_cmap('tab20').resampled(len(unique_categories))  # tab10', tab20', 'gist_rainbow'\n",
    "# colors = plt.colormaps['tab20c']\n",
    "print(colors)\n",
    "\n",
    "\n",
    "# 绘制点云\n",
    "for i, category in enumerate(unique_categories):\n",
    "    # 找到属于当前类别的点\n",
    "    points_in_category = points[categories == category]\n",
    "    ax.scatter(points_in_category[:, 0], points_in_category[:, 1], points_in_category[:, 2],\n",
    "               c=np.array([colors(i)]), label=f'C {category}')\n",
    "\n",
    "# 添加图例\n",
    "plt.legend()\n",
    "\n",
    "# 设置轴标签\n",
    "ax.set_xlabel('X Label')\n",
    "ax.set_ylabel('Y Label')\n",
    "ax.set_zlabel('Z Label')\n",
    "\n",
    "# 显示图形\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "0a38d3a4",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'name': '7Y7R_1_F', 'coords': {'P': array([[-69.91799927,   1.93599999, -48.22100067],\n",
      "       [-66.27200317,  -1.19500005, -49.98099899],\n",
      "       [-63.77299881,  -5.46999979, -52.2120018 ],\n",
      "       [-62.18999863, -11.99199963, -52.14400101],\n",
      "       [-62.84999847, -17.57600021, -49.36600113]]), \"O5'\": array([[-68.59600067,   1.08399999, -47.99100113],\n",
      "       [-64.91200256,  -2.01900005, -50.04999924],\n",
      "       [-62.77000046,  -6.65700006, -52.55899811],\n",
      "       [-62.17200089, -12.90200043, -50.83399963],\n",
      "       [-63.23500061, -17.98800087, -47.87099838]]), \"C5'\": array([[-67.82099915,   1.26199996, -46.81800079],\n",
      "       [-63.72200012,  -1.421     , -50.53900146],\n",
      "       [-62.90000153,  -7.93100023, -51.9469986 ],\n",
      "       [-61.01300049, -13.64799976, -50.50699997],\n",
      "       [-62.36600113, -18.75799942, -47.04299927]]), \"C4'\": array([[-66.35099792,   1.37800002, -47.13299942],\n",
      "       [-62.52600098,  -2.31100011, -50.31200027],\n",
      "       [-61.61500168,  -8.3739996 , -51.2820015 ],\n",
      "       [-61.00299835, -14.13000011, -49.07799911],\n",
      "       [-63.07400131, -19.18000031, -45.78300095]]), \"C3'\": array([[-65.71099854,   0.154     , -47.77999878],\n",
      "       [-62.76800156,  -3.80599999, -50.4469986 ],\n",
      "       [-61.61000061,  -9.81499958, -50.77999878],\n",
      "       [-62.01499939, -15.19099998, -48.68299866],\n",
      "       [-64.42099762, -19.875     , -45.97600174]]), \"O3'\": array([[-65.86499786,   0.15000001, -49.19400024],\n",
      "       [-62.80099869,  -4.2579999 , -51.79100037],\n",
      "       [-61.23699951, -10.74400043, -51.78499985],\n",
      "       [-61.69300079, -16.48200035, -49.16500092],\n",
      "       [         nan,          nan,          nan]])}, 'seq': 'CGACG'}\n"
     ]
    }
   ],
   "source": [
    "# Define RNADataset Class and Seeding Function\n",
    "class RNADatasetV2(Dataset):\n",
    "    def __init__(self, data_path, is_train=True):\n",
    "        super(RNADatasetV2, self).__init__()\n",
    "        self.npy_dir = data_path + \"/coords\"\n",
    "        self.name_list = [i[:-4] for i in os.listdir(data_path + \"/coords\")]\n",
    "        self.seq_dir =  data_path + \"/seqs/\"\n",
    "        self.cache = {}\n",
    "        self.is_train = is_train\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.name_list)\n",
    "\n",
    "    def get_pdb_id(self, idx):\n",
    "        return self.name_list[idx]\n",
    "\n",
    "    def merge_coords_file_path(self, pdb_id):\n",
    "        return os.path.join(self.npy_dir, pdb_id + '.npy')\n",
    "\n",
    "    def load_feature(self, pdb_id):\n",
    "        coords = np.load(self.merge_coords_file_path(pdb_id))\n",
    "        feature = {\n",
    "            \"name\": pdb_id,\n",
    "            \"coords\": {\n",
    "                \"P\": coords[:, 0, :],\n",
    "                \"O5'\": coords[:, 1, :],\n",
    "                \"C5'\": coords[:, 2, :],\n",
    "                \"C4'\": coords[:, 3, :],\n",
    "                \"C3'\": coords[:, 4, :],\n",
    "                \"O3'\": coords[:, 5, :],\n",
    "            }\n",
    "        }\n",
    "\n",
    "        return feature\n",
    "    \n",
    "    def load_seq(self, pdb_id):\n",
    "        return list(read_fasta_biopython(self.seq_dir + pdb_id + \".fasta\").values())[0]\n",
    "    \n",
    "    def first_load(self, idx):\n",
    "        pdb_id = self.get_pdb_id(idx)\n",
    "        feature = self.load_feature(pdb_id)\n",
    "        if self.is_train:\n",
    "            feature[\"seq\"] = self.load_seq(pdb_id)\n",
    "        else:\n",
    "            feature[\"seq\"] = None\n",
    "        return feature\n",
    "\n",
    "    def __getitem__(self, idx):\n",
    "        if idx in self.cache:\n",
    "            return self.cache[idx]\n",
    "        else:\n",
    "            data = self.first_load(idx)\n",
    "            self.cache[idx] = data\n",
    "            return data\n",
    "    \n",
    "    def __iter__(self):\n",
    "        for idx in range(len(self)):\n",
    "            yield self.__getitem__(idx)\n",
    "\n",
    "    def get_lengths_by_indices(self, indices):\n",
    "        lengths = []\n",
    "        for idx in indices:\n",
    "            pdb_id = self.get_pdb_id(idx)\n",
    "            file_path = self.merge_coords_file_path(pdb_id)\n",
    "            with open(file_path, 'rb') as f:\n",
    "                # 读取文件头的前8字节（魔数和版本号）\n",
    "                version = np.lib.format.read_magic(f)\n",
    "                # 读取文件头信息（包含shape/dtype等）\n",
    "                shape, _, _ = np.lib.format._read_array_header(f, version)\n",
    "            lengths.append(shape[0])\n",
    "        return lengths\n",
    "    \n",
    "dataset = RNADatasetV2(\n",
    "    data_path=DataConfig.datapath,\n",
    "    is_train=True\n",
    ")\n",
    "print(dataset[0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "bffd6013-4f5b-42bf-9b05-42bd6041daa9",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "seeding done!!!\n"
     ]
    }
   ],
   "source": [
    "def seeding(seed):\n",
    "    np.random.seed(seed)\n",
    "    random.seed(seed)\n",
    "    os.environ['PYTHONHASHSEED'] = str(seed)\n",
    "    torch.manual_seed(seed)\n",
    "    torch.cuda.manual_seed(seed)\n",
    "    torch.cuda.manual_seed_all(seed)\n",
    "    torch.backends.cudnn.deterministic = True\n",
    "    torch.backends.cudnn.benchmark = False\n",
    "    print('seeding done!!!')\n",
    "config = Config()\n",
    "data_config = config.data_config\n",
    "train_config = config.train_config\n",
    "seeding(config.seed)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "85a61f93-e518-4d50-aa87-9e4bc1f74516",
   "metadata": {},
   "outputs": [],
   "source": [
    "from torch.utils.data import DataLoader, random_split\n",
    "\n",
    "train_dataset, valid_dataset = random_split(dataset, lengths=[int(0.9 * len(dataset)), len(dataset) -  int(0.9 * len(dataset))])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "44892589",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'5MSG_1_M'"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "train_dataset[0]['name']"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5dbc500a",
   "metadata": {},
   "source": [
    "### RNA Inverse Folding Framework\n",
    "\n",
    "This framework integrates geometric featurization with GNNs to predict RNA sequences compatible with input 3D conformations.\n",
    "该框架将几何特征化与图神经网络（GNNs）相结合，用于预测与输入三维构象兼容的RNA序列"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "e049e40a",
   "metadata": {},
   "outputs": [],
   "source": [
    "def prepare_rna_batch(rna_samples):\n",
    "    \"\"\"将RNA样本批次转换为神经网络可用的张量格式\n",
    "    \n",
    "    参数:\n",
    "        rna_samples: 包含多个RNA样本的列表，每个样本应包含:\n",
    "            - 'seq': 碱基序列字符串(如\"AUCG\")\n",
    "            - 'coords': 原子坐标字典\n",
    "            - 'name': 样本标识名\n",
    "    \n",
    "    返回:\n",
    "        features: 形状[B, L_max, 6, 3]的原子坐标张量\n",
    "        labels: 形状[B, L_max]的碱基类别张量  \n",
    "        valid_mask: 有效位置掩码\n",
    "        seq_lengths: 各样本实际长度\n",
    "        sample_names: 样本名称列表\n",
    "    \"\"\"\n",
    "    tag_list = list(\"AUCG\")\n",
    "    idx2tag = {i: tag for i, tag in enumerate(tag_list)}\n",
    "    tag2idx = {tag: i for i, tag in enumerate(tag_list)}\n",
    "    # KEY_ATOMS = [\"P\", \"O5'\", \"C5'\", \"C4'\", \"C3'\", \"O3'\", \"N\"]\n",
    "    KEY_ATOMS = [\"P\", \"O5'\", \"C5'\", \"C4'\", \"C3'\", \"O3'\"]\n",
    "    \n",
    "    # 初始化维度\n",
    "    batch_size = len(rna_samples)\n",
    "    seq_lengths = [b[\"coords\"][\"P\"].shape[0] for b in rna_samples]  # 获取RNA序列长度\n",
    "    max_len = max(seq_lengths)  # 当前batch最大长度\n",
    "    \n",
    "    # 预分配张量（直接初始化为最终类型）\n",
    "    coords = np.zeros((batch_size, max_len, 6, 3), dtype=np.float32)  # batch, 序列长度，6个原子位置，每个原子坐标\n",
    "    labels = np.full((batch_size, max_len), -1, dtype=np.int64)\n",
    "    mask = np.zeros((batch_size, max_len), dtype=np.float32)\n",
    "    \n",
    "    for i, sample in enumerate(rna_samples):\n",
    "        curr_len = seq_lengths[i]\n",
    "        # 向量化处理原子坐标\n",
    "        atom_stack = np.stack([np.nan_to_num(sample['coords'][atom], nan=0.0) for atom in KEY_ATOMS],\n",
    "                              axis=1)  # N 6 3\n",
    "        # 有效位置标记\n",
    "        valid_pos = np.isfinite(atom_stack.sum((1,2)))\n",
    "        actual_len = min(curr_len, sum(valid_pos))\n",
    "        # 单样本赋值\n",
    "        coords[i,:actual_len] = atom_stack[valid_pos][:actual_len]\n",
    "        mask[i,:actual_len] = 1.0\n",
    "        \n",
    "        # 有标签的话读取标签\n",
    "        seq = np.array([c for c in sample['seq']], dtype=np.str_)  # 字符串转列表再转numpy\n",
    "        labels[i,:actual_len] = [tag2idx[b] for b in seq[valid_pos][:actual_len]]\n",
    "    \n",
    "    features, labels, valid_mask, seq_lengths, sample_names = (torch.from_numpy(coords),\n",
    "        torch.from_numpy(labels),\n",
    "        torch.from_numpy(mask),\n",
    "        np.array(seq_lengths),\n",
    "        [s['name'] for s in rna_samples])\n",
    "    \n",
    "    return features, labels, valid_mask, seq_lengths, sample_names\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "318463dd",
   "metadata": {},
   "outputs": [],
   "source": [
    "# prepare_rna_batch([train_dataset[0]])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "0d8d9a12-eb92-404f-8cd4-0998d310c1f5",
   "metadata": {},
   "outputs": [],
   "source": [
    "train_loader = DataLoader(train_dataset,\n",
    "        batch_size=train_config.batch_size,\n",
    "        shuffle=True,\n",
    "        num_workers=0,\n",
    "        collate_fn=prepare_rna_batch)\n",
    "\n",
    "valid_loader = DataLoader(valid_dataset,\n",
    "        batch_size=train_config.batch_size,\n",
    "        shuffle=False,\n",
    "        num_workers=0,\n",
    "        collate_fn=prepare_rna_batch)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "1ed12564-029e-496c-8826-29f98c6b580c",
   "metadata": {},
   "outputs": [],
   "source": [
    "def gather_edges(edges, neighbor_idx):\n",
    "    neighbors = neighbor_idx.unsqueeze(-1).expand(-1, -1, -1, edges.size(-1))\n",
    "    return torch.gather(edges, 2, neighbors)\n",
    "\n",
    "def gather_nodes(nodes, neighbor_idx):\n",
    "    neighbors_flat = neighbor_idx.view((neighbor_idx.shape[0], -1))\n",
    "    neighbors_flat = neighbors_flat.unsqueeze(-1).expand(-1, -1, nodes.size(2))\n",
    "    neighbor_features = torch.gather(nodes, 1, neighbors_flat)\n",
    "    neighbor_features = neighbor_features.view(list(neighbor_idx.shape)[:3] + [-1])\n",
    "    return neighbor_features\n",
    "\n",
    "def gather_nodes_t(nodes, neighbor_idx):\n",
    "    idx_flat = neighbor_idx.unsqueeze(-1).expand(-1, -1, nodes.size(2))\n",
    "    return torch.gather(nodes, 1, idx_flat)\n",
    "\n",
    "def cat_neighbors_nodes(h_nodes, h_neighbors, E_idx):\n",
    "    h_nodes = gather_nodes(h_nodes, E_idx)\n",
    "    return torch.cat([h_neighbors, h_nodes], -1)\n",
    "\n",
    "\n",
    "class MPNNLayer(nn.Module):\n",
    "    # 简单的聚合边特征到节点特征\n",
    "    def __init__(self, num_hidden, num_in, dropout=0.1, num_heads=None, scale=30):\n",
    "        super(MPNNLayer, self).__init__()\n",
    "        self.num_hidden = num_hidden\n",
    "        self.num_in = num_in\n",
    "        self.scale = scale\n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "        self.norm1 = nn.LayerNorm(num_hidden)\n",
    "        self.norm2 = nn.LayerNorm(num_hidden)\n",
    "\n",
    "        self.W1 = nn.Linear(num_hidden + num_in, num_hidden, bias=True)\n",
    "        self.W2 = nn.Linear(num_hidden, num_hidden, bias=True)\n",
    "        self.W3 = nn.Linear(num_hidden, num_hidden, bias=True)\n",
    "        self.act = nn.ReLU()\n",
    "\n",
    "        self.dense = nn.Sequential(\n",
    "            nn.Linear(num_hidden, num_hidden*4),\n",
    "            nn.ReLU(),\n",
    "            nn.Linear(num_hidden*4, num_hidden)\n",
    "        )\n",
    "\n",
    "    def forward(self, h_V, h_E, edge_idx, batch_id=None):\n",
    "        src_idx, dst_idx = edge_idx[0], edge_idx[1]\n",
    "        h_message = self.W3(self.act(self.W2(self.act(self.W1(h_E)))))\n",
    "        # 特征聚合, 按源节点聚合，即分组相加\n",
    "        dh = scatter_sum(h_message, src_idx, dim=0) / self.scale\n",
    "        # 随机丢一些边的特征\n",
    "        h_V = self.norm1(h_V + self.dropout(dh))\n",
    "        # skip-connection\n",
    "        dh = self.dense(h_V)\n",
    "        h_V = self.norm2(h_V + self.dropout(dh))\n",
    "        return h_V\n",
    "\n",
    "\n",
    "class Normalize(nn.Module):\n",
    "    def __init__(self, size, epsilon=1e-6):\n",
    "        super(Normalize, self).__init__()\n",
    "        self.gain = nn.Parameter(torch.ones(size))  # 注册为模型参数，自动启用梯度，自动进行优化器更新\n",
    "        self.bias = nn.Parameter(torch.zeros(size))\n",
    "        self.epsilon = epsilon\n",
    "\n",
    "    def forward(self, x, dim=-1):\n",
    "        mu = x.mean(dim, keepdim=True)\n",
    "        sigma = torch.sqrt(x.var(dim, keepdim=True) + self.epsilon)\n",
    "        gain = self.gain\n",
    "        bias = self.bias\n",
    "        if dim != -1:\n",
    "            shape = [1] * len(mu.size())\n",
    "            shape[dim] = self.gain.size()[0]\n",
    "            gain = gain.view(shape)\n",
    "            bias = bias.view(shape)\n",
    "        return gain * (x - mu) / (sigma + self.epsilon) + bias\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "102ead4b-e635-464b-a3c2-68bda9f2e9fc",
   "metadata": {},
   "outputs": [],
   "source": [
    "feat_dims = {\n",
    "    'node': {\n",
    "        'angle': 12,\n",
    "        'distance': 80,\n",
    "        'direction': 9,\n",
    "    },\n",
    "    'edge': {\n",
    "        'orientation': 4,\n",
    "        'distance': 96,\n",
    "        'direction': 15,\n",
    "    }\n",
    "}\n",
    "\n",
    "def nan_to_num(tensor, nan=0.0):\n",
    "    idx = torch.isnan(tensor)\n",
    "    tensor[idx] = nan\n",
    "    return tensor\n",
    "\n",
    "def _normalize(tensor, dim=-1):\n",
    "    return nan_to_num(\n",
    "        torch.div(tensor, torch.norm(tensor, dim=dim, keepdim=True)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "394e9ca4",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/tmp/ipykernel_2918145/775164200.py:106: UserWarning: Using torch.cross without specifying the dim arg is deprecated.\n",
      "Please either pass the dim explicitly or simply use torch.linalg.cross.\n",
      "The default value of dim will change to agree with that of linalg.cross in a future release. (Triggered internally at ../aten/src/ATen/native/Cross.cpp:62.)\n",
      "  n_0 = _normalize(torch.cross(u_0, u_1), dim=-1)\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "(tensor([[-0.4144,  0.5260, -0.7009,  ..., -0.3861, -0.4514, -0.5509],\n",
       "         [ 0.0485,  1.6897,  0.7964,  ...,  0.1432, -2.0114, -0.9406],\n",
       "         [-1.8836,  0.4120,  1.1787,  ..., -0.0210, -0.8041,  1.4507],\n",
       "         ...,\n",
       "         [-0.2640, -0.6193,  0.7905,  ...,  0.4720, -0.5782,  0.0439],\n",
       "         [-0.0651, -0.5557, -0.1819,  ..., -0.7416, -1.0573, -0.6298],\n",
       "         [ 0.1787, -0.7834, -0.1288,  ...,  0.0683, -0.5948, -1.7782]],\n",
       "        grad_fn=<AddBackward0>),\n",
       " tensor([[ 0.3127, -0.3550,  0.7528,  ..., -0.8481, -1.3503, -2.6340],\n",
       "         [ 0.5608,  0.1181,  0.0199,  ...,  0.3139, -0.8102, -2.1186],\n",
       "         [-0.2306, -0.7480,  0.0852,  ...,  0.0653, -0.4137, -2.5760],\n",
       "         ...,\n",
       "         [ 0.9498, -0.3751, -0.2886,  ..., -0.1735, -0.2591,  0.0705],\n",
       "         [-0.3577, -1.0201, -0.5517,  ..., -0.0221, -0.6717, -0.6469],\n",
       "         [-0.5075, -0.3688, -0.7479,  ..., -0.5138,  0.6461, -0.9692]],\n",
       "        grad_fn=<AddBackward0>),\n",
       " tensor([[  0,   0,   0,  ..., 127, 127, 127],\n",
       "         [  0,   7,  29,  ...,  51,  46,  47]]),\n",
       " tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
       "         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
       "         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
       "         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
       "         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
       "         0, 0, 0, 0, 0, 0, 0, 0]))"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from torch_geometric.data import Data, Batch\n",
    "\n",
    "\n",
    "# 图结构数据生成器\n",
    "class RNAGraphBuilder(nn.Module):\n",
    "    # 边特征维度，结点特征维度，结点特征类型，边特征类型，RBF核数，K近邻数，数据增强，dropout\n",
    "    # 结点特征：['angle', 'distance', 'direction'], 边特征：['orientation', 'distance', 'direction']\n",
    "\n",
    "    def __init__(self, edge_features, node_features, num_rbf=16, top_k=30, augment_eps=0.):\n",
    "        super(RNAGraphBuilder, self).__init__()\n",
    "        self.num_rbf = num_rbf\n",
    "        self.top_k = top_k\n",
    "        # 随机增强系数\n",
    "        self.augment_eps = augment_eps\n",
    "        \n",
    "        self.node_feat_types = ['angle', 'distance', 'direction']\n",
    "        self.edge_feat_types = ['orientation', 'distance', 'direction']\n",
    "        node_in = sum([feat_dims['node'][feat] for feat in self.node_feat_types])\n",
    "        edge_in = sum([feat_dims['edge'][feat] for feat in self.edge_feat_types])\n",
    "        self.node_embedding = nn.Linear(node_in,  node_features, bias=True)\n",
    "        self.edge_embedding = nn.Linear(edge_in, edge_features, bias=True)\n",
    "        self.norm_nodes = Normalize(node_features)\n",
    "        self.norm_edges = Normalize(edge_features)\n",
    "\n",
    "\n",
    "    @staticmethod\n",
    "    def build_graph(coord, seq, k_neighbors):\n",
    "        \"\"\"将坐标和序列转换为图结构\"\"\"\n",
    "        num_nodes = coord.shape[0]\n",
    "        # 节点特征：展平每个节点的7个骨架点坐标\n",
    "        x = torch.tensor(coord.reshape(num_nodes, -1), dtype=torch.float32)  # [N, 7*3]\n",
    "        \n",
    "        # 边构建：基于序列顺序的k近邻连接\n",
    "        edge_index = []\n",
    "        for i in range(num_nodes):\n",
    "            # 连接前k和后k个节点\n",
    "            neighbors = list(range(max(0, i-k_neighbors), i)) + \\\n",
    "                       list(range(i+1, min(num_nodes, i+1+k_neighbors)))\n",
    "            for j in neighbors:\n",
    "                edge_index.append([i, j])\n",
    "                edge_index.append([j, i])  # 双向连接\n",
    "        \n",
    "        edge_index = torch.tensor(edge_index, dtype=torch.long).t().contiguous()\n",
    "        # 节点标签\n",
    "        y = torch.tensor([Config.seq_vocab.index(c) for c in seq], dtype=torch.long)\n",
    "        return Data(x=x, edge_index=edge_index, y=y, num_nodes=num_nodes)\n",
    "\n",
    "\n",
    "    def _dist(self, X, mask, top_k, eps=1E-6):\n",
    "        mask_2D = torch.unsqueeze(mask,1) * torch.unsqueeze(mask,2)  # 两两之间求掩码 B, N -> B, N, N\n",
    "        dX = torch.unsqueeze(X, 1) - torch.unsqueeze(X, 2)  # 两两之间求距离 B, N, 3 -> B, N, N, 3\n",
    "        D = (1. - mask_2D)*10000 + mask_2D* torch.sqrt(torch.sum(dX**2, 3) + eps)  #  B, N, N 求欧式距离, 无效值距离设置10000的很大值\n",
    "        D_max, _ = torch.max(D, -1, keepdim=True)  # 获取每行的最大值\n",
    "        D_adjust = D + (1. - mask_2D) * (D_max+1)  # 无效值设置为最大值+1, 不会被选中\n",
    "        D_neighbors, E_idx = torch.topk(D_adjust, min(top_k, D_adjust.shape[-1]), dim=-1, largest=False) # 选择topk近邻\n",
    "        return D_neighbors, E_idx  # 返回每个节点的邻居特征和邻居索引\n",
    "\n",
    "\n",
    "    def _rbf(self, D, num_rbf):\n",
    "        D_min, D_max, D_count = 0., 20., num_rbf\n",
    "        D_mu = torch.linspace(D_min, D_max, D_count, device=D.device)\n",
    "        D_mu = D_mu.view([1,1,1,-1])\n",
    "        D_sigma = (D_max - D_min) / D_count\n",
    "        D_expand = torch.unsqueeze(D, -1)\n",
    "        return torch.exp(-((D_expand - D_mu) / D_sigma)**2)\n",
    "\n",
    "\n",
    "    def _get_rbf(self, A, B, E_idx=None, num_rbf=16):\n",
    "        if E_idx is not None:\n",
    "            D_A_B = torch.sqrt(torch.sum((A[:,:,None,:] - B[:,None,:,:])**2,-1) + 1e-6)\n",
    "            D_A_B_neighbors = gather_edges(D_A_B[:,:,:,None], E_idx)[:,:,:,0]\n",
    "            RBF_A_B = self._rbf(D_A_B_neighbors, num_rbf)\n",
    "        else:\n",
    "            D_A_B = torch.sqrt(torch.sum((A[:,:,None,:] - B[:,:,None,:])**2, -1) + 1e-6)\n",
    "            RBF_A_B = self._rbf(D_A_B, num_rbf)\n",
    "        return RBF_A_B\n",
    "\n",
    "\n",
    "    def _quaternions(self, R):\n",
    "        diag = torch.diagonal(R, dim1=-2, dim2=-1)\n",
    "        Rxx, Ryy, Rzz = diag.unbind(-1)\n",
    "        magnitudes = 0.5 * torch.sqrt(torch.abs(1 + torch.stack([\n",
    "              Rxx - Ryy - Rzz, \n",
    "            - Rxx + Ryy - Rzz, \n",
    "            - Rxx - Ryy + Rzz\n",
    "        ], -1)))\n",
    "        _R = lambda i,j: R[:,:,:,i,j]\n",
    "        signs = torch.sign(torch.stack([\n",
    "            _R(2,1) - _R(1,2),\n",
    "            _R(0,2) - _R(2,0),\n",
    "            _R(1,0) - _R(0,1)\n",
    "        ], -1))\n",
    "        xyz = signs * magnitudes\n",
    "        w = torch.sqrt(F.relu(1 + diag.sum(-1, keepdim=True))) / 2.\n",
    "        Q = torch.cat((xyz, w), -1)\n",
    "        Q = F.normalize(Q, dim=-1)\n",
    "        return Q\n",
    "\n",
    "\n",
    "    def _orientations_coarse(self, X, E_idx, eps=1e-6):\n",
    "        \"\"\"Local-frame orientation features.\n",
    "\n",
    "        X: [B, N, 6, 3] backbone atoms (P, O5', C5', C4', C3', O3');\n",
    "        E_idx: [B, N, K] neighbour indices.\n",
    "        Returns (V_direct, E_direct, E_orient): per-node direction features,\n",
    "        per-edge neighbour-direction features, and per-edge relative-orientation\n",
    "        quaternions.\n",
    "        \"\"\"\n",
    "        V = X.clone()\n",
    "        X = X[:,:,:6,:].reshape(X.shape[0], 6*X.shape[1], 3) \n",
    "        dX = X[:,1:,:] - X[:,:-1,:]\n",
    "        U = _normalize(dX, dim=-1)\n",
    "        u_0, u_1 = U[:,:-2,:], U[:,1:-1,:]\n",
    "        # dim=-1 is required: the implicit-dim form of torch.cross is deprecated\n",
    "        # and picks the first size-3 dimension, which silently breaks when the\n",
    "        # batch size is 3. Matches the RNAFeatures implementation.\n",
    "        n_0 = _normalize(torch.cross(u_0, u_1, dim=-1), dim=-1)\n",
    "        b_1 = _normalize(u_0 - u_1, dim=-1)\n",
    "        \n",
    "        # select C3'\n",
    "        n_0 = n_0[:,4::6,:] \n",
    "        b_1 = b_1[:,4::6,:]\n",
    "        X = X[:,4::6,:]\n",
    "\n",
    "        Q = torch.stack((b_1, n_0, torch.cross(b_1, n_0, dim=-1)), 2)\n",
    "        Q = Q.view(list(Q.shape[:2]) + [9])\n",
    "        Q = F.pad(Q, (0,0,0,1), 'constant', 0) # [16, 464, 9]\n",
    "\n",
    "        Q_neighbors = gather_nodes(Q, E_idx) # [16, 464, 30, 9]\n",
    "        P_neighbors = gather_nodes(V[:,:,0,:], E_idx) # [16, 464, 30, 3]\n",
    "        O5_neighbors = gather_nodes(V[:,:,1,:], E_idx)\n",
    "        C5_neighbors = gather_nodes(V[:,:,2,:], E_idx)\n",
    "        C4_neighbors = gather_nodes(V[:,:,3,:], E_idx)\n",
    "        O3_neighbors = gather_nodes(V[:,:,5,:], E_idx)\n",
    "        \n",
    "        Q = Q.view(list(Q.shape[:2]) + [3,3]).unsqueeze(2) # [16, 464, 1, 3, 3]\n",
    "        Q_neighbors = Q_neighbors.view(list(Q_neighbors.shape[:3]) + [3,3]) # [16, 464, 30, 3, 3]\n",
    "\n",
    "        dX = torch.stack([P_neighbors,O5_neighbors,C5_neighbors,C4_neighbors,O3_neighbors], dim=3) - X[:,:,None,None,:] # [16, 464, 30, 3]\n",
    "        dU = torch.matmul(Q[:,:,:,None,:,:], dX[...,None]).squeeze(-1) # [16, 464, 30, 3] neighbour coords in the local frame\n",
    "        B, N, K = dU.shape[:3]\n",
    "        E_direct = _normalize(dU, dim=-1)\n",
    "        E_direct = E_direct.reshape(B, N, K,-1)\n",
    "        R = torch.matmul(Q.transpose(-1,-2), Q_neighbors)\n",
    "        E_orient = self._quaternions(R)\n",
    "        \n",
    "        dX_inner = V[:,:,[0,2,3],:] - X.unsqueeze(-2)\n",
    "        dU_inner = torch.matmul(Q, dX_inner.unsqueeze(-1)).squeeze(-1)\n",
    "        dU_inner = _normalize(dU_inner, dim=-1)\n",
    "        V_direct = dU_inner.reshape(B,N,-1)\n",
    "        return V_direct, E_direct, E_orient\n",
    "\n",
    "\n",
    "    def _dihedrals(self, X, eps=1e-7):\n",
    "        # P, O5', C5', C4', C3', O3'\n",
    "        X = X[:,:,:6,:].reshape(X.shape[0], 6*X.shape[1], 3)\n",
    "\n",
    "        # Shifted slices of unit vectors\n",
    "        # https://iupac.qmul.ac.uk/misc/pnuc2.html#220\n",
    "        # https://x3dna.org/highlights/torsion-angles-of-nucleic-acid-structures\n",
    "        # alpha:   O3'_{i-1} P_i O5'_i C5'_i\n",
    "        # beta:    P_i O5'_i C5'_i C4'_i\n",
    "        # gamma:   O5'_i C5'_i C4'_i C3'_i\n",
    "        # delta:   C5'_i C4'_i C3'_i O3'_i\n",
    "        # epsilon: C4'_i C3'_i O3'_i P_{i+1}\n",
    "        # zeta:    C3'_i O3'_i P_{i+1} O5'_{i+1} \n",
    "        # What's more:\n",
    "        #   chi: C1' - N9 \n",
    "        #   chi is different for (C, T, U) and (A, G) https://x3dna.org/highlights/the-chi-x-torsion-angle-characterizes-base-sugar-relative-orientation\n",
    "\n",
    "        dX = X[:, 5:, :] - X[:, :-5, :] # O3'-P, P-O5', O5'-C5', C5'-C4', ...\n",
    "        U = F.normalize(dX, dim=-1)\n",
    "        u_2 = U[:,:-2,:]  # O3'-P, P-O5', ...\n",
    "        u_1 = U[:,1:-1,:] # P-O5', O5'-C5', ...\n",
    "        u_0 = U[:,2:,:]   # O5'-C5', C5'-C4', ...\n",
    "        # Backbone normals\n",
    "        n_2 = F.normalize(torch.cross(u_2, u_1, dim=-1), dim=-1)\n",
    "        n_1 = F.normalize(torch.cross(u_1, u_0, dim=-1), dim=-1)\n",
    "\n",
    "        # Angle between normals\n",
    "        cosD = (n_2 * n_1).sum(-1)\n",
    "        cosD = torch.clamp(cosD, -1+eps, 1-eps)\n",
    "        D = torch.sign((u_2 * n_1).sum(-1)) * torch.acos(cosD)\n",
    "        \n",
    "        D = F.pad(D, (3,4), 'constant', 0)\n",
    "        D = D.view((D.size(0), D.size(1) //6, 6))\n",
    "        return torch.cat((torch.cos(D), torch.sin(D)), 2) # return D_features\n",
    "\n",
    "\n",
    "    def encode_node_features(self, X, mask_bool, E_idx):\n",
    "        \"\"\"Assemble per-node features (angle / distance / direction) and embed them.\n",
    "\n",
    "        X: [B, N, 6, 3]; mask_bool: [B, N] boolean validity; E_idx: [B, N, K].\n",
    "        Returns embedded node features [num_valid_nodes, node_features].\n",
    "        \"\"\"\n",
    "        h_V = []\n",
    "        # Keep only the rows of valid (unpadded) nodes\n",
    "        node_mask_select = lambda x: torch.masked_select(x, mask_bool.unsqueeze(-1)).reshape(-1, x.shape[-1])\n",
    "        node_feat_types = self.node_feat_types\n",
    "        # angle: torsion-angle features\n",
    "        V_angle = node_mask_select(self._dihedrals(X))\n",
    "        # distance: intra-residue atom-to-P distances, RBF encoded.\n",
    "        # Explicit dict instead of the fragile vars()['atom_' + name] lookup;\n",
    "        # atom order: P, O5', C5', C4', C3', O3'\n",
    "        atoms = dict(zip(['P', 'O5_', 'C5_', 'C4_', 'C3_', 'O3_'], X.unbind(dim=2)))\n",
    "        node_list = ['O5_-P', 'C5_-P', 'C4_-P', 'C3_-P', 'O3_-P']\n",
    "        V_dist = []\n",
    "        for pair in node_list:\n",
    "            atom1, atom2 = pair.split('-')\n",
    "            V_dist.append(node_mask_select(self._get_rbf(atoms[atom1], atoms[atom2], None, self.num_rbf).squeeze()))\n",
    "        V_dist = torch.cat(tuple(V_dist), dim=-1).squeeze()\n",
    "        # direction: local-frame unit vectors toward selected atoms\n",
    "        V_direct, E_direct, E_orient = self._orientations_coarse(X, E_idx)\n",
    "        V_direct = node_mask_select(V_direct)\n",
    "        if 'angle' in node_feat_types:\n",
    "            h_V.append(V_angle)\n",
    "        if 'distance' in node_feat_types:\n",
    "            h_V.append(V_dist)\n",
    "        if 'direction' in node_feat_types:\n",
    "            h_V.append(V_direct)\n",
    "        \n",
    "        return self.node_embedding(torch.cat(h_V, dim=-1))\n",
    "\n",
    "    def encode_edge_features(self, X, mask_attend, E_idx):\n",
    "        \"\"\"Assemble per-edge features (orientation / distance / direction) and embed them.\n",
    "\n",
    "        X: [B, N, 6, 3]; mask_attend: [B, N, K] boolean edge validity; E_idx: [B, N, K].\n",
    "        Returns embedded edge features [num_valid_edges, edge_features].\n",
    "        \"\"\"\n",
    "        h_E = []\n",
    "        # Keep only the rows of valid (unpadded) edges\n",
    "        edge_mask_select = lambda x: torch.masked_select(x, mask_attend.unsqueeze(-1)).reshape(-1,x.shape[-1])\n",
    "        edge_feat_types = self.edge_feat_types\n",
    "        # direction / orientation features from the local residue frames\n",
    "        V_direct, E_direct, E_orient = self._orientations_coarse(X, E_idx)\n",
    "        E_direct, E_orient = list(map(lambda x: edge_mask_select(x), [E_direct, E_orient]))\n",
    "        # distance: neighbour atom-to-P distances along edges, RBF encoded.\n",
    "        # Explicit dict instead of the fragile vars()['atom_' + name] lookup;\n",
    "        # atom order: P, O5', C5', C4', C3', O3'\n",
    "        atoms = dict(zip(['P', 'O5_', 'C5_', 'C4_', 'C3_', 'O3_'], X.unbind(dim=2)))\n",
    "        edge_list = ['P-P', 'O5_-P', 'C5_-P', 'C4_-P', 'C3_-P', 'O3_-P']\n",
    "        E_dist = [] \n",
    "        for pair in edge_list:\n",
    "            atom1, atom2 = pair.split('-')\n",
    "            E_dist.append(edge_mask_select(self._get_rbf(atoms[atom1], atoms[atom2], E_idx, self.num_rbf)))\n",
    "        E_dist = torch.cat(tuple(E_dist), dim=-1)\n",
    "        \n",
    "        if 'orientation' in edge_feat_types:\n",
    "            h_E.append(E_orient)\n",
    "        if 'distance' in edge_feat_types:\n",
    "            h_E.append(E_dist)\n",
    "        if 'direction' in edge_feat_types:\n",
    "            h_E.append(E_direct)\n",
    "        return self.edge_embedding(torch.cat(h_E, dim=-1))\n",
    "\n",
    "\n",
    "    def forward(self, X, mask):\n",
    "        \"\"\"Build the sparse k-NN graph and its embedded features.\n",
    "\n",
    "        X: [B, N, 6, 3] - batch, max sequence length, 6 atom positions, xyz coords\n",
    "        mask: [B, N] - 1 marks valid residues, 0 marks padding\n",
    "        Returns (h_V, h_E, E_idx, batch_id) in flattened (graph-batch) form.\n",
    "        \"\"\"\n",
    "        # Training-time coordinate-noise augmentation\n",
    "        if self.training and self.augment_eps > 0:\n",
    "            X = X + self.augment_eps * torch.randn_like(X)\n",
    "\n",
    "        # Build k-Nearest Neighbors graph\n",
    "        B, N, _,_ = X.shape\n",
    "        # P, O5', C5', C4', C3', O3'\n",
    "        # NOTE(review): only atom_P is used below; the other slices are unused here\n",
    "        atom_P = X[:, :, 0, :]\n",
    "        atom_O5_ = X[:, :, 1, :]\n",
    "        atom_C5_ = X[:, :, 2, :]\n",
    "        atom_C4_ = X[:, :, 3, :]\n",
    "        atom_C3_ = X[:, :, 4, :]\n",
    "        atom_O3_ = X[:, :, 5, :]\n",
    "        # atom_N19_ = X[:, :, 6, :]\n",
    "\n",
    "        X_backbone = atom_P  # neighbourhoods are defined around the P atom\n",
    "        D_neighbors, E_idx = self._dist(X_backbone, mask, self.top_k) # top-k neighbours: distances and indices per node\n",
    "\n",
    "        mask_bool = (mask==1)\n",
    "        # An edge is valid only if both endpoints are valid residues\n",
    "        mask_attend = gather_nodes(mask.unsqueeze(-1), E_idx).squeeze(-1)\n",
    "        mask_attend = (mask.unsqueeze(-1) * mask_attend) == 1\n",
    "        \n",
    "        h_V = self.encode_node_features(X, mask_bool, E_idx)\n",
    "        h_E = self.encode_edge_features(X, mask_attend, E_idx)\n",
    "        \n",
    "        # Embed the nodes\n",
    "        h_V = self.norm_nodes(h_V)\n",
    "        h_E = self.norm_edges(h_E)\n",
    "\n",
    "        # prepare the variables to return:\n",
    "        # per-sample offset into the flattened node list (cumulative valid counts)\n",
    "        shift = mask.sum(dim=1).cumsum(dim=0) - mask.sum(dim=1)  # exclusive cumulative sum\n",
    "        src = shift.view(B,1,1) + E_idx\n",
    "        src = torch.masked_select(src, mask_attend).view(1,-1)\n",
    "        dst = shift.view(B,1,1) + torch.arange(0, N, device=src.device).view(1,-1,1).expand_as(mask_attend)\n",
    "        dst = torch.masked_select(dst, mask_attend).view(1,-1)\n",
    "        E_idx = torch.cat((dst, src), dim=0).long()\n",
    "\n",
    "        # Flatten valid residues across the batch; batch_id maps node -> sample\n",
    "        sparse_idx = mask.nonzero()\n",
    "        X = X[sparse_idx[:,0], sparse_idx[:,1], :, :]\n",
    "        batch_id = sparse_idx[:,0]\n",
    "        return h_V, h_E, E_idx, batch_id\n",
    "    \n",
    "# Smoke test: one batch of 128 residues with random coordinates, all valid\n",
    "X = torch.randn(1, 128, 6, 3)\n",
    "mask = torch.ones(1, 128)\n",
    "rna_graph = RNAGraphBuilder(128, 128)\n",
    "rna_graph(X, mask)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "05186b76",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "class RNAFeatures(nn.Module):\n",
    "    \"\"\"Featurizer: RNA backbone coordinates -> embedded node/edge features on a k-NN graph.\n",
    "\n",
    "    Cleaned-up counterpart of RNAGraphBuilder: same pipeline, with the atoms\n",
    "    kept in a dict and the RBF helpers split into node/edge variants.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, edge_features, node_features, node_feat_types=[], edge_feat_types=[], num_rbf=16, top_k=30,\n",
    "                 augment_eps=0., dropout=0.1):\n",
    "        # edge_features/node_features: embedding dims; top_k: k-NN size;\n",
    "        # augment_eps: train-time coordinate noise scale.\n",
    "        # NOTE(review): mutable default args ([]) - safe here since never mutated.\n",
    "        super(RNAFeatures, self).__init__()\n",
    "        \"\"\"Extract RNA Features\"\"\"\n",
    "        self.edge_features = edge_features\n",
    "        self.node_features = node_features\n",
    "        self.top_k = top_k\n",
    "        self.augment_eps = augment_eps\n",
    "        self.num_rbf = num_rbf\n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "        self.node_feat_types = node_feat_types\n",
    "        self.edge_feat_types = edge_feat_types\n",
    "\n",
    "        # Input widths derived from the module-level feat_dims table\n",
    "        node_in = sum([feat_dims['node'][feat] for feat in node_feat_types])\n",
    "        edge_in = sum([feat_dims['edge'][feat] for feat in edge_feat_types])\n",
    "        self.node_embedding = nn.Linear(node_in, node_features, bias=True)\n",
    "        self.edge_embedding = nn.Linear(edge_in, edge_features, bias=True)\n",
    "        self.norm_nodes = Normalize(node_features)\n",
    "        self.norm_edges = Normalize(edge_features)\n",
    "\n",
    "    def _dist(self, X, mask, eps=1E-6):\n",
    "        \"\"\"Top-self.top_k nearest valid neighbours; returns (distances, indices).\"\"\"\n",
    "        mask_2D = torch.unsqueeze(mask, 1) * torch.unsqueeze(mask, 2)\n",
    "        dX = torch.unsqueeze(X, 1) - torch.unsqueeze(X, 2)\n",
    "        # Invalid pairs pushed to a large sentinel distance\n",
    "        D = (1. - mask_2D) * 10000 + mask_2D * torch.sqrt(torch.sum(dX ** 2, 3) + eps)\n",
    "\n",
    "        D_max, _ = torch.max(D, -1, keepdim=True)\n",
    "        # Shift invalid entries above the row maximum so topk never selects them\n",
    "        D_adjust = D + (1. - mask_2D) * (D_max + 1)\n",
    "        D_neighbors, E_idx = torch.topk(D_adjust, min(self.top_k, D_adjust.shape[-1]), dim=-1, largest=False)\n",
    "        return D_neighbors, E_idx\n",
    "\n",
    "    def _rbf(self, D):\n",
    "        \"\"\"Gaussian radial-basis expansion of distances over [0, 20] with self.num_rbf bins.\"\"\"\n",
    "        D_min, D_max, D_count = 0., 20., self.num_rbf\n",
    "        D_mu = torch.linspace(D_min, D_max, D_count, device=D.device)\n",
    "        D_mu = D_mu.view([1, 1, 1, -1])\n",
    "        D_sigma = (D_max - D_min) / D_count\n",
    "        D_expand = torch.unsqueeze(D, -1)\n",
    "        return torch.exp(-((D_expand - D_mu) / D_sigma) ** 2)\n",
    "\n",
    "    def _get_rbf__node(self, A, B):\n",
    "        \"\"\"RBF-encoded per-node distances A_i -> B_i (node features).\"\"\"\n",
    "        D_A_B = torch.sqrt(torch.sum((A[:, :, None, :] - B[:, :, None, :]) ** 2, -1) + 1e-6)\n",
    "        return self._rbf(D_A_B)\n",
    "\n",
    "    def _get_rbf__edge(self, A, B, E_idx):\n",
    "        \"\"\"RBF-encoded distances gathered along the k-NN edges (edge features).\"\"\"\n",
    "        D_A_B = torch.sqrt(torch.sum((A[:, :, None, :] - B[:, None, :, :]) ** 2, -1) + 1e-6)\n",
    "        D_A_B_neighbors = gather_edges(D_A_B[:, :, :, None], E_idx)[:, :, :, 0]\n",
    "        return self._rbf(D_A_B_neighbors)\n",
    "\n",
    "    def _quaternions(self, R):\n",
    "        \"\"\"Convert rotation matrices R [B, N, K, 3, 3] to unit quaternions (x, y, z, w).\"\"\"\n",
    "        diag = torch.diagonal(R, dim1=-2, dim2=-1)\n",
    "        Rxx, Ryy, Rzz = diag.unbind(-1)\n",
    "        magnitudes = 0.5 * torch.sqrt(torch.abs(1 + torch.stack([\n",
    "            Rxx - Ryy - Rzz,\n",
    "            - Rxx + Ryy - Rzz,\n",
    "            - Rxx - Ryy + Rzz\n",
    "        ], -1)))\n",
    "        _R = lambda i, j: R[:, :, :, i, j]\n",
    "        # Signs recovered from the skew-symmetric part of R\n",
    "        signs = torch.sign(torch.stack([\n",
    "            _R(2, 1) - _R(1, 2),\n",
    "            _R(0, 2) - _R(2, 0),\n",
    "            _R(1, 0) - _R(0, 1)\n",
    "        ], -1))\n",
    "        xyz = signs * magnitudes\n",
    "        w = torch.sqrt(F.relu(1 + diag.sum(-1, keepdim=True))) / 2.\n",
    "        Q = torch.cat((xyz, w), -1)\n",
    "        Q = F.normalize(Q, dim=-1)\n",
    "        return Q\n",
    "\n",
    "    def _orientations_coarse(self, X, E_idx, eps=1e-6):\n",
    "        \"\"\"Local-frame orientation features; returns (V_direct, E_direct, E_orient).\"\"\"\n",
    "        V = X.clone()\n",
    "        # Flatten residues into one atom chain of length 6*N\n",
    "        X = X[:, :, :6, :].reshape(X.shape[0], 6 * X.shape[1], 3)\n",
    "        dX = X[:, 1:, :] - X[:, :-1, :]\n",
    "        U = _normalize(dX, dim=-1)\n",
    "        u_0, u_1 = U[:, :-2, :], U[:, 1:-1, :]\n",
    "        n_0 = _normalize(torch.cross(u_0, u_1, dim=-1), dim=-1)\n",
    "        b_1 = _normalize(u_0 - u_1, dim=-1)\n",
    "\n",
    "        # select C3'\n",
    "        n_0 = n_0[:, 4::6, :]\n",
    "        b_1 = b_1[:, 4::6, :]\n",
    "        X = X[:, 4::6, :]\n",
    "        # Orthonormal local frame per residue, flattened to 9 values\n",
    "        Q = torch.stack((b_1, n_0, torch.cross(b_1, n_0, dim=-1)), 2)\n",
    "        Q = Q.view(list(Q.shape[:2]) + [9])\n",
    "        Q = F.pad(Q, (0, 0, 0, 1), 'constant', 0)  # [16, 464, 9]\n",
    "\n",
    "        Q_neighbors = gather_nodes(Q, E_idx)  # [16, 464, 30, 9]\n",
    "        P_neighbors = gather_nodes(V[:, :, 0, :], E_idx)  # [16, 464, 30, 3]\n",
    "        O5_neighbors = gather_nodes(V[:, :, 1, :], E_idx)\n",
    "        C5_neighbors = gather_nodes(V[:, :, 2, :], E_idx)\n",
    "        C4_neighbors = gather_nodes(V[:, :, 3, :], E_idx)\n",
    "        O3_neighbors = gather_nodes(V[:, :, 5, :], E_idx)\n",
    "\n",
    "        Q = Q.view(list(Q.shape[:2]) + [3, 3]).unsqueeze(2)  # [16, 464, 1, 3, 3]\n",
    "        Q_neighbors = Q_neighbors.view(list(Q_neighbors.shape[:3]) + [3, 3])  # [16, 464, 30, 3, 3]\n",
    "\n",
    "        dX = torch.stack([P_neighbors, O5_neighbors, C5_neighbors, C4_neighbors, O3_neighbors], dim=3) - X[:, :, None,\n",
    "                                                                                                         None,\n",
    "                                                                                                         :]  # [16, 464, 30, 3]\n",
    "        dU = torch.matmul(Q[:, :, :, None, :, :], dX[..., None]).squeeze(-1)  # [16, 464, 30, 3] neighbour coords in the local frame\n",
    "        B, N, K = dU.shape[:3]\n",
    "        E_direct = _normalize(dU, dim=-1)\n",
    "        E_direct = E_direct.reshape(B, N, K, -1)\n",
    "        # Relative rotation between each residue frame and its neighbour frames\n",
    "        R = torch.matmul(Q.transpose(-1, -2), Q_neighbors)\n",
    "        E_orient = self._quaternions(R)\n",
    "\n",
    "        # Directions to the residue's own P, C5', C4' atoms in the local frame\n",
    "        dX_inner = V[:, :, [0, 2, 3], :] - X.unsqueeze(-2)\n",
    "        dU_inner = torch.matmul(Q, dX_inner.unsqueeze(-1)).squeeze(-1)\n",
    "        dU_inner = _normalize(dU_inner, dim=-1)\n",
    "        V_direct = dU_inner.reshape(B, N, -1)\n",
    "        return V_direct, E_direct, E_orient\n",
    "\n",
    "    def _dihedrals(self, X, eps=1e-7):\n",
    "        \"\"\"Backbone torsion-angle features: cos/sin of six dihedrals per residue -> [B, N, 12].\"\"\"\n",
    "        # P, O5', C5', C4', C3', O3'\n",
    "        X = X[:, :, :6, :].reshape(X.shape[0], 6 * X.shape[1], 3)\n",
    "\n",
    "        # Shifted slices of unit vectors\n",
    "        # https://iupac.qmul.ac.uk/misc/pnuc2.html#220\n",
    "        # https://x3dna.org/highlights/torsion-angles-of-nucleic-acid-structures\n",
    "        # alpha:   O3'_{i-1} P_i O5'_i C5'_i\n",
    "        # beta:    P_i O5'_i C5'_i C4'_i\n",
    "        # gamma:   O5'_i C5'_i C4'_i C3'_i\n",
    "        # delta:   C5'_i C4'_i C3'_i O3'_i\n",
    "        # epsilon: C4'_i C3'_i O3'_i P_{i+1}\n",
    "        # zeta:    C3'_i O3'_i P_{i+1} O5'_{i+1}\n",
    "        # What's more:\n",
    "        #   chi: C1' - N9\n",
    "        #   chi is different for (C, T, U) and (A, G) https://x3dna.org/highlights/the-chi-x-torsion-angle-characterizes-base-sugar-relative-orientation\n",
    "\n",
    "        dX = X[:, 5:, :] - X[:, :-5, :]  # O3'-P, P-O5', O5'-C5', C5'-C4', ...\n",
    "        U = F.normalize(dX, dim=-1)\n",
    "        u_2 = U[:, :-2, :]  # O3'-P, P-O5', ...\n",
    "        u_1 = U[:, 1:-1, :]  # P-O5', O5'-C5', ...\n",
    "        u_0 = U[:, 2:, :]  # O5'-C5', C5'-C4', ...\n",
    "        # Backbone normals\n",
    "        n_2 = F.normalize(torch.cross(u_2, u_1, dim=-1), dim=-1)\n",
    "        n_1 = F.normalize(torch.cross(u_1, u_0, dim=-1), dim=-1)\n",
    "\n",
    "        # Angle between normals\n",
    "        cosD = (n_2 * n_1).sum(-1)\n",
    "        cosD = torch.clamp(cosD, -1 + eps, 1 - eps)\n",
    "        D = torch.sign((u_2 * n_1).sum(-1)) * torch.acos(cosD)\n",
    "        # Pad to a multiple of six, regroup to six dihedrals per residue\n",
    "        D = F.pad(D, (3, 4), 'constant', 0)\n",
    "        D = D.view((D.size(0), D.size(1) // 6, 6))\n",
    "        return torch.cat((torch.cos(D), torch.sin(D)), 2)  # return D_features\n",
    "\n",
    "    def forward(self, X, mask):\n",
    "        \"\"\"Build the sparse k-NN graph and its embedded features.\n",
    "\n",
    "        X: [B, N, 6, 3] backbone coordinates; mask: [B, N] (1 valid, 0 padding).\n",
    "        Returns (h_V, h_E, E_idx, batch_id) in flattened (graph-batch) form.\n",
    "        \"\"\"\n",
    "        # Training-time coordinate-noise augmentation\n",
    "        if self.training and self.augment_eps > 0:\n",
    "            X = X + self.augment_eps * torch.randn_like(X)\n",
    "\n",
    "        # Build k-Nearest Neighbors graph\n",
    "        B, N, _, _ = X.shape\n",
    "        # P, O5', C5', C4', C3', O3'\n",
    "        atom_dict = dict(zip(['P', 'O5_', 'C5_', 'C4_', 'C3_', 'O3_'], X.unbind(dim=2)))\n",
    "\n",
    "        # Neighbourhoods are defined around the P atom\n",
    "        X_backbone = atom_dict[\"P\"]\n",
    "        D_neighbors, E_idx = self._dist(X_backbone, mask)\n",
    "\n",
    "        # An edge is valid only if both endpoints are valid residues\n",
    "        mask_attend = gather_nodes(mask.unsqueeze(-1), E_idx).squeeze(-1)\n",
    "        mask_attend = (mask.unsqueeze(-1) * mask_attend) == 1\n",
    "        edge_mask_select = lambda x: torch.masked_select(x, mask_attend.unsqueeze(-1)).reshape(-1, x.shape[-1])\n",
    "        node_mask_select = lambda x: torch.masked_select(x, mask.bool().unsqueeze(-1)).reshape(-1, x.shape[-1])\n",
    "\n",
    "        # node features\n",
    "        h_V = []\n",
    "        # angle\n",
    "        V_angle = self._dihedrals(X)\n",
    "        V_angle = node_mask_select(V_angle)\n",
    "        # distance\n",
    "        node_list = ['O5_-P', 'C5_-P', 'C4_-P', 'C3_-P', 'O3_-P']\n",
    "        V_dist = []\n",
    "\n",
    "        for pair in node_list:\n",
    "            atom1, atom2 = pair.split('-')\n",
    "            V_dist.append(node_mask_select(\n",
    "                self._get_rbf__node(atom_dict[atom1], atom_dict[atom2]).squeeze()))\n",
    "        V_dist = torch.cat(tuple(V_dist), dim=-1).squeeze()\n",
    "        # direction\n",
    "        V_direct, E_direct, E_orient = self._orientations_coarse(X, E_idx)\n",
    "        V_direct = node_mask_select(V_direct)\n",
    "        E_direct, E_orient = list(map(lambda x: edge_mask_select(x), [E_direct, E_orient]))\n",
    "\n",
    "        # edge features\n",
    "        h_E = []\n",
    "        # dist\n",
    "        edge_list = ['P-P', 'O5_-P', 'C5_-P', 'C4_-P', 'C3_-P', 'O3_-P']\n",
    "        E_dist = []\n",
    "        for pair in edge_list:\n",
    "            atom1, atom2 = pair.split('-')\n",
    "            E_dist.append(\n",
    "                edge_mask_select(self._get_rbf__edge(atom_dict[atom1], atom_dict[atom2], E_idx)))\n",
    "        E_dist = torch.cat(tuple(E_dist), dim=-1)\n",
    "\n",
    "        if 'angle' in self.node_feat_types:\n",
    "            h_V.append(V_angle)\n",
    "        if 'distance' in self.node_feat_types:\n",
    "            h_V.append(V_dist)\n",
    "        if 'direction' in self.node_feat_types:\n",
    "            h_V.append(V_direct)\n",
    "\n",
    "        if 'orientation' in self.edge_feat_types:\n",
    "            h_E.append(E_orient)\n",
    "        if 'distance' in self.edge_feat_types:\n",
    "            h_E.append(E_dist)\n",
    "        if 'direction' in self.edge_feat_types:\n",
    "            h_E.append(E_direct)\n",
    "\n",
    "        # Embed the nodes\n",
    "        h_V = self.norm_nodes(self.node_embedding(torch.cat(h_V, dim=-1)))\n",
    "        h_E = self.norm_edges(self.edge_embedding(torch.cat(h_E, dim=-1)))\n",
    "\n",
    "        # prepare the variables to return:\n",
    "        # per-sample offset into the flattened node list (exclusive cumulative sum)\n",
    "        shift = mask.sum(dim=1).cumsum(dim=0) - mask.sum(dim=1)\n",
    "        src = shift.view(B, 1, 1) + E_idx\n",
    "        src = torch.masked_select(src, mask_attend).view(1, -1)\n",
    "        dst = shift.view(B, 1, 1) + torch.arange(0, N, device=src.device).view(1, -1, 1).expand_as(mask_attend)\n",
    "        dst = torch.masked_select(dst, mask_attend).view(1, -1)\n",
    "        E_idx = torch.cat((dst, src), dim=0).long()\n",
    "\n",
    "        # Flatten valid residues across the batch; batch_id maps node -> sample\n",
    "        sparse_idx = mask.nonzero()\n",
    "        X = X[sparse_idx[:, 0], sparse_idx[:, 1], :, :]\n",
    "        batch_id = sparse_idx[:,0]\n",
    "        return h_V, h_E, E_idx, batch_id"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "81d99837",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([1, 1])"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Demo: masked_select flattens the elements where the boolean mask is True\n",
    "a = torch.tensor([[1,2],[1,4]])\n",
    "mask_bool = a == 1\n",
    "S = torch.masked_select(a, mask_bool)\n",
    "S"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "e12b215f-66ff-46cb-8645-733655cbf66f",
   "metadata": {},
   "outputs": [],
   "source": [
    "class RNAModel(nn.Module):\n",
    "    \"\"\"Structure-to-sequence model: graph features -> MPNN encoder/decoder -> per-node base logits.\n",
    "\n",
    "    forward returns (logits, graph_prjs): per-node 4-way base logits and a\n",
    "    per-sample graph embedding passed through the projection head.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, model_config):\n",
    "        super(RNAModel, self).__init__()\n",
    "\n",
    "        self.smoothing = model_config.smoothing\n",
    "        self.node_features = self.edge_features = model_config.hidden\n",
    "        self.hidden_dim = model_config.hidden\n",
    "        self.vocab = model_config.vocab_size\n",
    "\n",
    "        self.features = RNAFeatures(\n",
    "            model_config.hidden, model_config.hidden,\n",
    "            top_k=model_config.k_neighbors,\n",
    "            dropout=model_config.dropout,\n",
    "            node_feat_types=model_config.node_feat_types,\n",
    "            edge_feat_types=model_config.edge_feat_types,\n",
    "            augment_eps=0.1  # coordinate-noise data augmentation during training\n",
    "        )\n",
    "\n",
    "        layer = MPNNLayer\n",
    "        self.W_s = nn.Embedding(model_config.vocab_size, self.hidden_dim)\n",
    "        self.encoder_layers = nn.ModuleList([\n",
    "            layer(self.hidden_dim, self.hidden_dim*2, dropout=model_config.dropout)\n",
    "            for _ in range(model_config.num_encoder_layers)])\n",
    "        self.decoder_layers = nn.ModuleList([\n",
    "            layer(self.hidden_dim, self.hidden_dim*2, dropout=model_config.dropout)\n",
    "            for _ in range(model_config.num_decoder_layers)])\n",
    "\n",
    "        self.projection_head = nn.Sequential(\n",
    "            nn.Linear(self.hidden_dim, self.hidden_dim, bias=False), \n",
    "            nn.ReLU(inplace=True),\n",
    "            nn.Linear(self.hidden_dim, self.hidden_dim, bias=True)\n",
    "        )\n",
    "        # Per-node 4-way base classifier (A/U/C/G)\n",
    "        self.readout = nn.Linear(self.hidden_dim, model_config.vocab_size, bias=True)\n",
    "\n",
    "        self.init_weights()\n",
    "\n",
    "    def init_weights(self):\n",
    "        \"\"\"Xavier-init Linear layers and reset LayerNorms.\n",
    "\n",
    "        Bug fix: the original iterated self.parameters() and tested\n",
    "        isinstance(p, nn.Linear), which is never true for an nn.Parameter,\n",
    "        so no initialization ever ran. Iterate modules instead. The gain is\n",
    "        computed for 'relu' ('gelu' is not a nonlinearity calculate_gain\n",
    "        supports, and the network's activations are ReLU).\n",
    "        \"\"\"\n",
    "        for m in self.modules():\n",
    "            if isinstance(m, nn.Linear):\n",
    "                nn.init.xavier_uniform_(m.weight, gain=nn.init.calculate_gain('relu'))\n",
    "                if m.bias is not None:\n",
    "                    nn.init.constant_(m.bias, 0.0)\n",
    "            elif isinstance(m, nn.LayerNorm):\n",
    "                nn.init.constant_(m.weight, 1.0)\n",
    "                nn.init.constant_(m.bias, 0.0)\n",
    "\n",
    "    def forward(self, X, mask):\n",
    "        \"\"\"X: [B, N, 6, 3] coordinates; mask: [B, N] validity. See class docstring.\"\"\"\n",
    "        # Extract node features, edge features, edge index, batch index\n",
    "        h_V, h_E, E_idx, batch_id = self.features(X, mask)\n",
    "        \n",
    "        # Edge features stay fixed; each layer aggregates them into the nodes\n",
    "        for enc_layer in self.encoder_layers:\n",
    "            h_EV = torch.cat([h_E, h_V[E_idx[0]], h_V[E_idx[1]]], dim=-1)\n",
    "            h_V = enc_layer(h_V, h_EV, E_idx, batch_id)\n",
    "        for dec_layer in self.decoder_layers:\n",
    "            h_EV = torch.cat([h_E, h_V[E_idx[0]], h_V[E_idx[1]]], dim=-1)\n",
    "            h_V = dec_layer(h_V, h_EV, E_idx, batch_id)\n",
    "\n",
    "        # Mean-pool node embeddings per sample to get graph embeddings\n",
    "        graph_embs = []\n",
    "        for b_id in range(batch_id[-1].item()+1):\n",
    "            b_data = h_V[batch_id == b_id].mean(0)\n",
    "            graph_embs.append(b_data)\n",
    "        graph_embs = torch.stack(graph_embs, dim=0)\n",
    "        graph_prjs = self.projection_head(graph_embs)\n",
    "        # Per-node base logits\n",
    "        logits = self.readout(h_V)\n",
    "        return logits, graph_prjs\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "6fce9f8d-d53a-48a1-a082-1047dca20c5c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "RNAModel(\n",
      "  (features): RNAFeatures(\n",
      "    (dropout): Dropout(p=0.1, inplace=False)\n",
      "    (node_embedding): Linear(in_features=101, out_features=128, bias=True)\n",
      "    (edge_embedding): Linear(in_features=115, out_features=128, bias=True)\n",
      "    (norm_nodes): Normalize()\n",
      "    (norm_edges): Normalize()\n",
      "  )\n",
      "  (W_s): Embedding(4, 128)\n",
      "  (encoder_layers): ModuleList(\n",
      "    (0-2): 3 x MPNNLayer(\n",
      "      (dropout): Dropout(p=0.1, inplace=False)\n",
      "      (norm1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
      "      (norm2): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
      "      (W1): Linear(in_features=384, out_features=128, bias=True)\n",
      "      (W2): Linear(in_features=128, out_features=128, bias=True)\n",
      "      (W3): Linear(in_features=128, out_features=128, bias=True)\n",
      "      (act): ReLU()\n",
      "      (dense): Sequential(\n",
      "        (0): Linear(in_features=128, out_features=512, bias=True)\n",
      "        (1): ReLU()\n",
      "        (2): Linear(in_features=512, out_features=128, bias=True)\n",
      "      )\n",
      "    )\n",
      "  )\n",
      "  (decoder_layers): ModuleList(\n",
      "    (0-2): 3 x MPNNLayer(\n",
      "      (dropout): Dropout(p=0.1, inplace=False)\n",
      "      (norm1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
      "      (norm2): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
      "      (W1): Linear(in_features=384, out_features=128, bias=True)\n",
      "      (W2): Linear(in_features=128, out_features=128, bias=True)\n",
      "      (W3): Linear(in_features=128, out_features=128, bias=True)\n",
      "      (act): ReLU()\n",
      "      (dense): Sequential(\n",
      "        (0): Linear(in_features=128, out_features=512, bias=True)\n",
      "        (1): ReLU()\n",
      "        (2): Linear(in_features=512, out_features=128, bias=True)\n",
      "      )\n",
      "    )\n",
      "  )\n",
      "  (projection_head): Sequential(\n",
      "    (0): Linear(in_features=128, out_features=128, bias=False)\n",
      "    (1): ReLU(inplace=True)\n",
      "    (2): Linear(in_features=128, out_features=128, bias=True)\n",
      "  )\n",
      "  (readout): Linear(in_features=128, out_features=4, bias=True)\n",
      ")\n",
      "True\n"
     ]
    }
   ],
   "source": [
    "# Instantiate the model on the configured device; a freshly built nn.Module is in train mode\n",
    "model = RNAModel(config.model_config).to(config.device)\n",
    "print(model)\n",
    "print(model.training)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "528c5443",
   "metadata": {},
   "source": [
    "### training\n",
    "\n",
    "+ 怎么选择优化器和学习率\n",
    "\n",
    "    + 常用范围：0.001（Adam系）或 0.01（SGD）。\n",
    "    + 小数据集调参：先用 0.01 测试，根据训练波动调整（波动大则降，收敛慢则升）\n",
    "\n",
    "```text\n",
    "新手：‌AdamW/Adam + 初始LR=3e-4 + 余弦退火‌\n",
    "老手：‌SGD+动量 + 多阶段LR衰减 + 梯度裁剪‌\n",
    "最终以验证集性能为 gold standard\n",
    "```"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "fa10ad13-dc08-4515-b2e0-3fe96bee7d0d",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/root/miniconda3/envs/pyg/lib/python3.10/site-packages/torch/optim/lr_scheduler.py:62: UserWarning: The verbose parameter is deprecated. Please use get_last_lr() to access the learning rate.\n",
      "  warnings.warn(\n"
     ]
    }
   ],
   "source": [
    "from torch import optim\n",
    "import torch.amp as amp\n",
    "from torch.optim.lr_scheduler import LambdaLR\n",
    "\n",
    "# AdamW optimizer with decoupled weight decay\n",
    "optimizer = optim.AdamW(\n",
    "    model.parameters(),\n",
    "    lr=5e-4,\n",
    "    weight_decay=0.01\n",
    ")\n",
    "\n",
    "# LR scheduler: halve the LR when the validation metric (mode='max') fails to\n",
    "# improve for `patience` consecutive epochs.\n",
    "# `verbose=True` removed: it is deprecated (use scheduler.get_last_lr() instead)\n",
    "# and only triggered the UserWarning previously seen in this cell's output.\n",
    "scheduler = optim.lr_scheduler.ReduceLROnPlateau(\n",
    "    optimizer, mode='max', factor=0.5, patience=3\n",
    ")\n",
    "\n",
    "\n",
    "# 在训练脚本中添加\n",
    "class LabelSmoothingLoss(nn.Module):\n",
    "    def __init__(self, classes=4, smoothing=0.2, ignore_index=-100):  # 增加平滑系数\n",
    "        super(LabelSmoothingLoss, self).__init__()\n",
    "        self.confidence = 1.0 - smoothing\n",
    "        self.smoothing = smoothing\n",
    "        self.classes = classes\n",
    "        self.ignore_index = ignore_index\n",
    "\n",
    "    def forward(self, pred, target):\n",
    "        pred = pred.log_softmax(dim=-1)\n",
    "        with torch.no_grad():\n",
    "            true_dist = torch.zeros_like(pred)\n",
    "            true_dist.fill_(self.smoothing / (self.classes - 1))\n",
    "            true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence)\n",
    "        return torch.mean(torch.sum(-true_dist * pred, dim=-1))\n",
    "\n",
    "device = config.device\n",
    "scaler = amp.GradScaler(device)  # gradient scaler for mixed-precision (AMP) training"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "dac46261",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "2610"
      ]
     },
     "execution_count": 22,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Total number of optimizer steps over the whole run: epochs × batches/epoch\n",
    "train_config.epoch * len(train_loader)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "2c96f610",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1 [Train]:   0%|          | 0/261 [00:00<?, ?it/s]"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.63it/s, loss=1.26]\n",
      "Epoch 1 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.48it/s, recovery=0.539]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1: Train Loss=1.2839, Valid Loss=1.2499, Recovery=0.5093, Recovery_seq=0.4056\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.44it/s, loss=1.13]\n",
      "Epoch 2 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.49it/s, recovery=0.606]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 2: Train Loss=1.2419, Valid Loss=1.2040, Recovery=0.5586, Recovery_seq=0.4346\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.50it/s, loss=1.26]\n",
      "Epoch 3 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.40it/s, recovery=0.635]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 3: Train Loss=1.2038, Valid Loss=1.1776, Recovery=0.5837, Recovery_seq=0.4506\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 4 [Train]: 100%|██████████| 261/261 [00:28<00:00,  9.28it/s, loss=1.07]\n",
      "Epoch 4 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.29it/s, recovery=0.699]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 4: Train Loss=1.1707, Valid Loss=1.1293, Recovery=0.6358, Recovery_seq=0.4752\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 5 [Train]: 100%|██████████| 261/261 [00:28<00:00,  9.22it/s, loss=1.16] \n",
      "Epoch 5 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.20it/s, recovery=0.707]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 5: Train Loss=1.1469, Valid Loss=1.1287, Recovery=0.6380, Recovery_seq=0.4675\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 6 [Train]: 100%|██████████| 261/261 [00:28<00:00,  9.23it/s, loss=1.31] \n",
      "Epoch 6 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.15it/s, recovery=0.722]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 6: Train Loss=1.1272, Valid Loss=1.1054, Recovery=0.6580, Recovery_seq=0.4897\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 7 [Train]: 100%|██████████| 261/261 [00:28<00:00,  9.32it/s, loss=1.29] \n",
      "Epoch 7 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.07it/s, recovery=0.758]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 7: Train Loss=1.1092, Valid Loss=1.0805, Recovery=0.6824, Recovery_seq=0.5019\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 8 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.33it/s, loss=1.02] \n",
      "Epoch 8 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.02it/s, recovery=0.747]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 8: Train Loss=1.0879, Valid Loss=1.0761, Recovery=0.6887, Recovery_seq=0.5036\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 9 [Train]: 100%|██████████| 261/261 [00:28<00:00,  9.32it/s, loss=1.21] \n",
      "Epoch 9 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.08it/s, recovery=0.785]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 9: Train Loss=1.0749, Valid Loss=1.0498, Recovery=0.7092, Recovery_seq=0.5078\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 10 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.43it/s, loss=1.11] \n",
      "Epoch 10 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 10.90it/s, recovery=0.782]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 10: Train Loss=1.0619, Valid Loss=1.0485, Recovery=0.7115, Recovery_seq=0.5106\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 11 [Train]: 100%|██████████| 261/261 [00:28<00:00,  9.25it/s, loss=1.03] \n",
      "Epoch 11 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.10it/s, recovery=0.809]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 11: Train Loss=1.0528, Valid Loss=1.0242, Recovery=0.7334, Recovery_seq=0.5259\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 12 [Train]: 100%|██████████| 261/261 [00:28<00:00,  9.24it/s, loss=0.965]\n",
      "Epoch 12 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.01it/s, recovery=0.796]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 12: Train Loss=1.0303, Valid Loss=1.0414, Recovery=0.7158, Recovery_seq=0.5232\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 13 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.42it/s, loss=1.08] \n",
      "Epoch 13 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.06it/s, recovery=0.815]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 13: Train Loss=1.0211, Valid Loss=1.0253, Recovery=0.7366, Recovery_seq=0.5293\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 14 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.40it/s, loss=1.38] \n",
      "Epoch 14 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 10.68it/s, recovery=0.794]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 14: Train Loss=1.0251, Valid Loss=1.0275, Recovery=0.7310, Recovery_seq=0.5212\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 15 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.42it/s, loss=1.26] \n",
      "Epoch 15 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.07it/s, recovery=0.814]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 15: Train Loss=1.0149, Valid Loss=1.0083, Recovery=0.7493, Recovery_seq=0.5368\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 16 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.56it/s, loss=1.11] \n",
      "Epoch 16 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 10.99it/s, recovery=0.816]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 16: Train Loss=1.0079, Valid Loss=1.0071, Recovery=0.7525, Recovery_seq=0.5449\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 17 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.33it/s, loss=0.935]\n",
      "Epoch 17 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.16it/s, recovery=0.821]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 17: Train Loss=1.0018, Valid Loss=1.0061, Recovery=0.7534, Recovery_seq=0.5375\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 18 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.47it/s, loss=0.889]\n",
      "Epoch 18 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.09it/s, recovery=0.844]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 18: Train Loss=0.9962, Valid Loss=0.9894, Recovery=0.7654, Recovery_seq=0.5583\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 19 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.33it/s, loss=0.976]\n",
      "Epoch 19 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 10.95it/s, recovery=0.834]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 19: Train Loss=0.9857, Valid Loss=0.9950, Recovery=0.7586, Recovery_seq=0.5356\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 20 [Train]: 100%|██████████| 261/261 [00:28<00:00,  9.25it/s, loss=1.15] \n",
      "Epoch 20 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.03it/s, recovery=0.848]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 20: Train Loss=0.9899, Valid Loss=0.9831, Recovery=0.7729, Recovery_seq=0.5617\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 21 [Train]: 100%|██████████| 261/261 [00:28<00:00,  9.19it/s, loss=1.11] \n",
      "Epoch 21 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 10.59it/s, recovery=0.844]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 21: Train Loss=0.9713, Valid Loss=0.9806, Recovery=0.7758, Recovery_seq=0.5590\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 22 [Train]: 100%|██████████| 261/261 [00:28<00:00,  9.31it/s, loss=0.982]\n",
      "Epoch 22 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.05it/s, recovery=0.863]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 22: Train Loss=0.9721, Valid Loss=0.9728, Recovery=0.7868, Recovery_seq=0.5675\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 23 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.48it/s, loss=1.12] \n",
      "Epoch 23 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.07it/s, recovery=0.862]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 23: Train Loss=0.9652, Valid Loss=0.9705, Recovery=0.7846, Recovery_seq=0.5705\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 24 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.44it/s, loss=1.12] \n",
      "Epoch 24 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.05it/s, recovery=0.853]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 24: Train Loss=0.9644, Valid Loss=0.9792, Recovery=0.7792, Recovery_seq=0.5585\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 25 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.44it/s, loss=1.14] \n",
      "Epoch 25 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.04it/s, recovery=0.858]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 25: Train Loss=0.9624, Valid Loss=0.9762, Recovery=0.7772, Recovery_seq=0.5637\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 26 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.47it/s, loss=0.961]\n",
      "Epoch 26 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.06it/s, recovery=0.877]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 26: Train Loss=0.9631, Valid Loss=0.9656, Recovery=0.7868, Recovery_seq=0.5716\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 27 [Train]: 100%|██████████| 261/261 [00:28<00:00,  9.32it/s, loss=0.948]\n",
      "Epoch 27 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.09it/s, recovery=0.886]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 27: Train Loss=0.9328, Valid Loss=0.9475, Recovery=0.8054, Recovery_seq=0.5815\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 28 [Train]: 100%|██████████| 261/261 [00:28<00:00,  9.27it/s, loss=1.18] \n",
      "Epoch 28 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.06it/s, recovery=0.881]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 28: Train Loss=0.9362, Valid Loss=0.9476, Recovery=0.8038, Recovery_seq=0.5851\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 29 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.33it/s, loss=0.798]\n",
      "Epoch 29 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.08it/s, recovery=0.886]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 29: Train Loss=0.9288, Valid Loss=0.9486, Recovery=0.8046, Recovery_seq=0.5885\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 30 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.45it/s, loss=1.14] \n",
      "Epoch 30 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.08it/s, recovery=0.894]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 30: Train Loss=0.9329, Valid Loss=0.9407, Recovery=0.8098, Recovery_seq=0.5936\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 31 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.38it/s, loss=1.14] \n",
      "Epoch 31 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.06it/s, recovery=0.893]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 31: Train Loss=0.9271, Valid Loss=0.9443, Recovery=0.8089, Recovery_seq=0.5883\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 32 [Train]: 100%|██████████| 261/261 [00:28<00:00,  9.27it/s, loss=0.936]\n",
      "Epoch 32 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.01it/s, recovery=0.891]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 32: Train Loss=0.9211, Valid Loss=0.9408, Recovery=0.8134, Recovery_seq=0.5962\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 33 [Train]: 100%|██████████| 261/261 [00:28<00:00,  9.28it/s, loss=0.824]\n",
      "Epoch 33 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.10it/s, recovery=0.888]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 33: Train Loss=0.9273, Valid Loss=0.9426, Recovery=0.8098, Recovery_seq=0.5938\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 34 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.42it/s, loss=0.789]\n",
      "Epoch 34 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.09it/s, recovery=0.888]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 34: Train Loss=0.9297, Valid Loss=0.9410, Recovery=0.8113, Recovery_seq=0.5971\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 35 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.38it/s, loss=0.966]\n",
      "Epoch 35 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.03it/s, recovery=0.892]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 35: Train Loss=0.9210, Valid Loss=0.9370, Recovery=0.8154, Recovery_seq=0.5923\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 36 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.45it/s, loss=1.08] \n",
      "Epoch 36 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.08it/s, recovery=0.894]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 36: Train Loss=0.9179, Valid Loss=0.9377, Recovery=0.8129, Recovery_seq=0.5928\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 37 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.40it/s, loss=1]    \n",
      "Epoch 37 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 10.92it/s, recovery=0.89] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 37: Train Loss=0.9190, Valid Loss=0.9341, Recovery=0.8169, Recovery_seq=0.6027\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 38 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.46it/s, loss=1.27] \n",
      "Epoch 38 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.10it/s, recovery=0.896]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 38: Train Loss=0.9140, Valid Loss=0.9315, Recovery=0.8197, Recovery_seq=0.6034\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 39 [Train]: 100%|██████████| 261/261 [00:28<00:00,  9.21it/s, loss=0.852]\n",
      "Epoch 39 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 10.73it/s, recovery=0.9]  \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 39: Train Loss=0.9158, Valid Loss=0.9354, Recovery=0.8189, Recovery_seq=0.6041\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 40 [Train]: 100%|██████████| 261/261 [00:28<00:00,  9.28it/s, loss=0.815]\n",
      "Epoch 40 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 10.97it/s, recovery=0.899]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 40: Train Loss=0.9099, Valid Loss=0.9342, Recovery=0.8180, Recovery_seq=0.5970\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 41 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.53it/s, loss=0.946]\n",
      "Epoch 41 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.07it/s, recovery=0.899]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 41: Train Loss=0.9130, Valid Loss=0.9338, Recovery=0.8196, Recovery_seq=0.6048\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 42 [Train]: 100%|██████████| 261/261 [00:28<00:00,  9.23it/s, loss=0.913]\n",
      "Epoch 42 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 10.93it/s, recovery=0.888]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 42: Train Loss=0.9092, Valid Loss=0.9319, Recovery=0.8179, Recovery_seq=0.5989\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 43 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.40it/s, loss=0.913]\n",
      "Epoch 43 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 10.71it/s, recovery=0.895]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 43: Train Loss=0.8996, Valid Loss=0.9290, Recovery=0.8245, Recovery_seq=0.6068\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 44 [Train]: 100%|██████████| 261/261 [00:28<00:00,  9.26it/s, loss=0.99] \n",
      "Epoch 44 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 10.71it/s, recovery=0.902]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 44: Train Loss=0.8974, Valid Loss=0.9270, Recovery=0.8273, Recovery_seq=0.6049\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 45 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.33it/s, loss=0.815]\n",
      "Epoch 45 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.07it/s, recovery=0.905]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 45: Train Loss=0.8986, Valid Loss=0.9270, Recovery=0.8281, Recovery_seq=0.6141\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 46 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.44it/s, loss=0.886]\n",
      "Epoch 46 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.08it/s, recovery=0.905]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 46: Train Loss=0.8959, Valid Loss=0.9262, Recovery=0.8275, Recovery_seq=0.6068\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 47 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.45it/s, loss=0.803]\n",
      "Epoch 47 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.02it/s, recovery=0.906]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 47: Train Loss=0.8872, Valid Loss=0.9238, Recovery=0.8287, Recovery_seq=0.6080\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 48 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.49it/s, loss=0.984]\n",
      "Epoch 48 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.06it/s, recovery=0.903]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 48: Train Loss=0.8824, Valid Loss=0.9236, Recovery=0.8298, Recovery_seq=0.6092\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 49 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.50it/s, loss=0.854]\n",
      "Epoch 49 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.08it/s, recovery=0.904]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 49: Train Loss=0.8918, Valid Loss=0.9237, Recovery=0.8301, Recovery_seq=0.6138\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 50 [Train]: 100%|██████████| 261/261 [00:28<00:00,  9.25it/s, loss=0.765]\n",
      "Epoch 50 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.02it/s, recovery=0.902]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 50: Train Loss=0.8891, Valid Loss=0.9201, Recovery=0.8305, Recovery_seq=0.6090\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 51 [Train]: 100%|██████████| 261/261 [00:28<00:00,  9.28it/s, loss=0.796]\n",
      "Epoch 51 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.10it/s, recovery=0.905]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 51: Train Loss=0.8805, Valid Loss=0.9246, Recovery=0.8304, Recovery_seq=0.6104\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 52 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.47it/s, loss=1.16] \n",
      "Epoch 52 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.20it/s, recovery=0.904]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 52: Train Loss=0.8886, Valid Loss=0.9215, Recovery=0.8308, Recovery_seq=0.6130\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 53 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.34it/s, loss=0.814]\n",
      "Epoch 53 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.10it/s, recovery=0.906]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 53: Train Loss=0.8815, Valid Loss=0.9246, Recovery=0.8288, Recovery_seq=0.6121\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 54 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.43it/s, loss=1.15] \n",
      "Epoch 54 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.21it/s, recovery=0.902]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 54: Train Loss=0.8855, Valid Loss=0.9246, Recovery=0.8278, Recovery_seq=0.6025\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 55 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.56it/s, loss=0.765]\n",
      "Epoch 55 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.18it/s, recovery=0.902]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 55: Train Loss=0.8838, Valid Loss=0.9228, Recovery=0.8298, Recovery_seq=0.6107\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 56 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.44it/s, loss=0.82] \n",
      "Epoch 56 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.21it/s, recovery=0.904]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 56: Train Loss=0.8831, Valid Loss=0.9214, Recovery=0.8321, Recovery_seq=0.6138\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 57 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.45it/s, loss=1.09] \n",
      "Epoch 57 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.21it/s, recovery=0.907]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 57: Train Loss=0.8771, Valid Loss=0.9191, Recovery=0.8334, Recovery_seq=0.6162\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 58 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.46it/s, loss=0.957]\n",
      "Epoch 58 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.30it/s, recovery=0.907]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 58: Train Loss=0.8748, Valid Loss=0.9237, Recovery=0.8311, Recovery_seq=0.6096\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 59 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.50it/s, loss=0.824]\n",
      "Epoch 59 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.22it/s, recovery=0.906]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 59: Train Loss=0.8824, Valid Loss=0.9215, Recovery=0.8331, Recovery_seq=0.6153\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 60 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.60it/s, loss=0.935]\n",
      "Epoch 60 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.24it/s, recovery=0.909]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 60: Train Loss=0.8781, Valid Loss=0.9218, Recovery=0.8329, Recovery_seq=0.6122\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 61 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.57it/s, loss=0.823]\n",
      "Epoch 61 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.15it/s, recovery=0.91] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 61: Train Loss=0.8759, Valid Loss=0.9178, Recovery=0.8361, Recovery_seq=0.6164\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 62 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.43it/s, loss=0.76] \n",
      "Epoch 62 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 10.88it/s, recovery=0.902]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 62: Train Loss=0.8736, Valid Loss=0.9182, Recovery=0.8345, Recovery_seq=0.6219\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 63 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.42it/s, loss=0.977]\n",
      "Epoch 63 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.20it/s, recovery=0.903]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 63: Train Loss=0.8757, Valid Loss=0.9180, Recovery=0.8346, Recovery_seq=0.6200\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 64 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.51it/s, loss=0.9]  \n",
      "Epoch 64 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.19it/s, recovery=0.908]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 64: Train Loss=0.8761, Valid Loss=0.9161, Recovery=0.8341, Recovery_seq=0.6162\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 65 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.53it/s, loss=0.81] \n",
      "Epoch 65 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.23it/s, recovery=0.907]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 65: Train Loss=0.8783, Valid Loss=0.9248, Recovery=0.8327, Recovery_seq=0.6124\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 66 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.66it/s, loss=0.837]\n",
      "Epoch 66 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.21it/s, recovery=0.907]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 66: Train Loss=0.8703, Valid Loss=0.9193, Recovery=0.8367, Recovery_seq=0.6174\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 67 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.55it/s, loss=0.784]\n",
      "Epoch 67 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.22it/s, recovery=0.908]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 67: Train Loss=0.8716, Valid Loss=0.9166, Recovery=0.8375, Recovery_seq=0.6183\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 68 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.49it/s, loss=0.87] \n",
      "Epoch 68 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.18it/s, recovery=0.91] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 68: Train Loss=0.8690, Valid Loss=0.9156, Recovery=0.8380, Recovery_seq=0.6209\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 69 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.36it/s, loss=0.76] \n",
      "Epoch 69 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.16it/s, recovery=0.913]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 69: Train Loss=0.8642, Valid Loss=0.9159, Recovery=0.8386, Recovery_seq=0.6243\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 70 [Train]: 100%|██████████| 261/261 [00:26<00:00,  9.73it/s, loss=1.01] \n",
      "Epoch 70 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.20it/s, recovery=0.909]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 70: Train Loss=0.8651, Valid Loss=0.9174, Recovery=0.8372, Recovery_seq=0.6144\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 71 [Train]: 100%|██████████| 261/261 [00:26<00:00,  9.70it/s, loss=0.754]\n",
      "Epoch 71 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.23it/s, recovery=0.912]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 71: Train Loss=0.8645, Valid Loss=0.9165, Recovery=0.8383, Recovery_seq=0.6215\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 72 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.55it/s, loss=1.07] \n",
      "Epoch 72 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.21it/s, recovery=0.911]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 72: Train Loss=0.8694, Valid Loss=0.9155, Recovery=0.8388, Recovery_seq=0.6176\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 73 [Train]: 100%|██████████| 261/261 [00:26<00:00,  9.74it/s, loss=1.05] \n",
      "Epoch 73 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.23it/s, recovery=0.913]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 73: Train Loss=0.8649, Valid Loss=0.9138, Recovery=0.8397, Recovery_seq=0.6234\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 74 [Train]: 100%|██████████| 261/261 [00:26<00:00,  9.85it/s, loss=0.993]\n",
      "Epoch 74 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.23it/s, recovery=0.909]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 74: Train Loss=0.8663, Valid Loss=0.9168, Recovery=0.8381, Recovery_seq=0.6211\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 75 [Train]: 100%|██████████| 261/261 [00:26<00:00,  9.83it/s, loss=0.831]\n",
      "Epoch 75 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.27it/s, recovery=0.907]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 75: Train Loss=0.8657, Valid Loss=0.9158, Recovery=0.8378, Recovery_seq=0.6191\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 76 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.40it/s, loss=0.961]\n",
      "Epoch 76 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.29it/s, recovery=0.907]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 76: Train Loss=0.8650, Valid Loss=0.9150, Recovery=0.8390, Recovery_seq=0.6218\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 77 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.37it/s, loss=0.834]\n",
      "Epoch 77 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.24it/s, recovery=0.911]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 77: Train Loss=0.8588, Valid Loss=0.9140, Recovery=0.8396, Recovery_seq=0.6228\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 78 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.36it/s, loss=0.878]\n",
      "Epoch 78 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.30it/s, recovery=0.913]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 78: Train Loss=0.8634, Valid Loss=0.9145, Recovery=0.8403, Recovery_seq=0.6258\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 79 [Train]: 100%|██████████| 261/261 [00:26<00:00,  9.82it/s, loss=0.849]\n",
      "Epoch 79 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.21it/s, recovery=0.914]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 79: Train Loss=0.8603, Valid Loss=0.9148, Recovery=0.8398, Recovery_seq=0.6253\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 80 [Train]: 100%|██████████| 261/261 [00:26<00:00,  9.86it/s, loss=0.895]\n",
      "Epoch 80 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.28it/s, recovery=0.913]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 80: Train Loss=0.8595, Valid Loss=0.9146, Recovery=0.8406, Recovery_seq=0.6226\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 81 [Train]: 100%|██████████| 261/261 [00:26<00:00,  9.80it/s, loss=0.785]\n",
      "Epoch 81 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.21it/s, recovery=0.912]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 81: Train Loss=0.8555, Valid Loss=0.9117, Recovery=0.8407, Recovery_seq=0.6234\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 82 [Train]: 100%|██████████| 261/261 [00:26<00:00,  9.67it/s, loss=0.889]\n",
      "Epoch 82 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.27it/s, recovery=0.912]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 82: Train Loss=0.8613, Valid Loss=0.9127, Recovery=0.8401, Recovery_seq=0.6211\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 83 [Train]: 100%|██████████| 261/261 [00:26<00:00,  9.78it/s, loss=0.854]\n",
      "Epoch 83 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.26it/s, recovery=0.914]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 83: Train Loss=0.8622, Valid Loss=0.9157, Recovery=0.8408, Recovery_seq=0.6254\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 84 [Train]: 100%|██████████| 261/261 [00:26<00:00,  9.84it/s, loss=1.18] \n",
      "Epoch 84 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.27it/s, recovery=0.914]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 84: Train Loss=0.8622, Valid Loss=0.9136, Recovery=0.8417, Recovery_seq=0.6268\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 85 [Train]: 100%|██████████| 261/261 [00:26<00:00,  9.69it/s, loss=0.991]\n",
      "Epoch 85 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.25it/s, recovery=0.914]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 85: Train Loss=0.8587, Valid Loss=0.9139, Recovery=0.8398, Recovery_seq=0.6228\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 86 [Train]: 100%|██████████| 261/261 [00:26<00:00,  9.67it/s, loss=1.19] \n",
      "Epoch 86 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.27it/s, recovery=0.912]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 86: Train Loss=0.8662, Valid Loss=0.9135, Recovery=0.8415, Recovery_seq=0.6263\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 87 [Train]: 100%|██████████| 261/261 [00:26<00:00,  9.70it/s, loss=0.798]\n",
      "Epoch 87 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.23it/s, recovery=0.913]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 87: Train Loss=0.8581, Valid Loss=0.9123, Recovery=0.8421, Recovery_seq=0.6281\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 88 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.41it/s, loss=0.761]\n",
      "Epoch 88 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.31it/s, recovery=0.915]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 88: Train Loss=0.8540, Valid Loss=0.9141, Recovery=0.8416, Recovery_seq=0.6280\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 89 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.45it/s, loss=0.755]\n",
      "Epoch 89 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.28it/s, recovery=0.911]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 89: Train Loss=0.8609, Valid Loss=0.9135, Recovery=0.8410, Recovery_seq=0.6274\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 90 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.56it/s, loss=0.777]\n",
      "Epoch 90 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.30it/s, recovery=0.915]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 90: Train Loss=0.8631, Valid Loss=0.9134, Recovery=0.8409, Recovery_seq=0.6267\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 91 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.38it/s, loss=0.745]\n",
      "Epoch 91 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.30it/s, recovery=0.914]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 91: Train Loss=0.8549, Valid Loss=0.9133, Recovery=0.8425, Recovery_seq=0.6279\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 92 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.48it/s, loss=0.909]\n",
      "Epoch 92 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.21it/s, recovery=0.914]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 92: Train Loss=0.8591, Valid Loss=0.9153, Recovery=0.8411, Recovery_seq=0.6193\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 93 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.44it/s, loss=0.871]\n",
      "Epoch 93 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.19it/s, recovery=0.914]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 93: Train Loss=0.8568, Valid Loss=0.9140, Recovery=0.8402, Recovery_seq=0.6212\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 94 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.43it/s, loss=0.768]\n",
      "Epoch 94 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.31it/s, recovery=0.913]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 94: Train Loss=0.8547, Valid Loss=0.9121, Recovery=0.8419, Recovery_seq=0.6261\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 95 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.43it/s, loss=0.82] \n",
      "Epoch 95 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.29it/s, recovery=0.912]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 95: Train Loss=0.8534, Valid Loss=0.9138, Recovery=0.8409, Recovery_seq=0.6244\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 96 [Train]: 100%|██████████| 261/261 [00:27<00:00,  9.46it/s, loss=0.921]\n",
      "Epoch 96 [Valid]: 100%|██████████| 29/29 [00:02<00:00, 11.29it/s, recovery=0.912]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 96: Train Loss=0.8591, Valid Loss=0.9126, Recovery=0.8416, Recovery_seq=0.6255\n",
      "Early stopping at epoch 96\n"
     ]
    }
   ],
   "source": [
    "# Training loop: mixed-precision training with a validation pass each epoch,\n",
    "# periodic checkpointing, LR scheduling on the recovery metric, and early stopping.\n",
    "# NOTE(review): model, optimizer, scheduler, scaler, device, train_loader,\n",
    "# valid_loader and LabelSmoothingLoss are defined in earlier cells.\n",
    "best_valid_recovery = 0\n",
    "num_epoch_no_improve = 0\n",
    "max_patience = 5\n",
    "train_logs = []\n",
    "weight_path = './weights4'\n",
    "os.makedirs(weight_path, exist_ok=True)\n",
    "criterion = LabelSmoothingLoss()\n",
    "for epoch in range(1, 200):  # up to 199 epochs; early stopping below usually ends sooner\n",
    "    # ---- Training phase ----\n",
    "    model.train()\n",
    "    epoch_loss = 0\n",
    "    train_pbar = tqdm(train_loader, desc=f'Epoch {epoch} [Train]')\n",
    "    \n",
    "    for batch in train_pbar:\n",
    "        X, S, mask, lengths, names = batch\n",
    "        X = X.to(device, non_blocking=True)\n",
    "        S = S.to(device, non_blocking=True)\n",
    "        mask = mask.to(device, non_blocking=True)\n",
    "        # print(X.shape)\n",
    "        # print(S.shape)\n",
    "        # print(mask.shape)\n",
    "        # Flatten labels at valid (mask == 1) positions; the model's logits are\n",
    "        # expected to be aligned with this flattened layout.\n",
    "        S_masked = torch.masked_select(S, (mask == 1))\n",
    "        # print(S_masked.shape)\n",
    "        optimizer.zero_grad()\n",
    "        \n",
    "        # Mixed-precision forward pass + loss\n",
    "        with amp.autocast(device):\n",
    "            logits, _  = model(X, mask)\n",
    "            # print(logits.shape)\n",
    "            loss = criterion(logits, S_masked)\n",
    "        \n",
    "        # Scaled backward + optimizer step (GradScaler handles loss scaling)\n",
    "        scaler.scale(loss).backward()\n",
    "        scaler.step(optimizer)\n",
    "        scaler.update()\n",
    "        \n",
    "        epoch_loss += loss.item()\n",
    "        train_pbar.set_postfix({'loss': loss.item()})\n",
    "\n",
    "    epoch_loss /= len(train_loader)\n",
    "    \n",
    "    # ---- Validation phase ----\n",
    "    model.eval()\n",
    "    valid_loss = 0\n",
    "    recovery_list = []\n",
    "    recovery_list_seq = []\n",
    "    \n",
    "    with torch.no_grad():\n",
    "        valid_pbar = tqdm(valid_loader, desc=f'Epoch {epoch} [Valid]')\n",
    "        for batch in valid_pbar:\n",
    "            X, S, mask, lengths, names = batch\n",
    "            X = X.to(device, non_blocking=True)\n",
    "            S = S.to(device, non_blocking=True)\n",
    "            mask = mask.to(device, non_blocking=True)\n",
    "            S_masked = torch.masked_select(S, (mask == 1))\n",
    "            logits, _ = model(X, mask)\n",
    "            loss = criterion(logits, S_masked)\n",
    "            valid_loss += loss.item()\n",
    "            ###############################\n",
    "            # preds = F.softmax(logits, dim=-1).argmax(dim=-1)\n",
    "            ###############################\n",
    "            # argmax over raw logits equals argmax over softmax probabilities\n",
    "            preds = logits.argmax(dim=-1)\n",
    "            # Micro recovery: fraction of correct positions over the whole batch\n",
    "            recovery = torch.eq(preds, S_masked).float().mean().item()\n",
    "            recovery_list.append(recovery)\n",
    "            valid_pbar.set_postfix({'recovery': recovery})\n",
    "            \n",
    "            # Per-sequence recovery: split the flat predictions back into\n",
    "            # individual sequences by their lengths, then score each sequence.\n",
    "            preds = preds.cpu().numpy()\n",
    "            S_masked = S_masked.cpu().numpy()\n",
    "            split_indices = np.cumsum(lengths)[:-1]\n",
    "            batch_preds = np.split(preds, split_indices, axis=0)\n",
    "            batch_gt = np.split(S_masked, split_indices, axis=0)\n",
    "            \n",
    "            for batch_seq, gt_seq in zip(batch_preds, batch_gt):\n",
    "                recovery = (batch_seq==gt_seq).sum() / len(gt_seq)\n",
    "                recovery_list_seq.append(recovery.item())\n",
    "\n",
    "    valid_loss /= len(valid_loader)\n",
    "    valid_recovery = np.mean(recovery_list)\n",
    "    valid_recovery_seq = np.mean(recovery_list_seq)\n",
    "    print(f'Epoch {epoch}: Train Loss={epoch_loss:.4f}, Valid Loss={valid_loss:.4f}, Recovery={valid_recovery:.4f}, Recovery_seq={valid_recovery_seq:.4f}')\n",
    "\n",
    "    # Record per-epoch metrics\n",
    "    train_logs.append((epoch_loss, valid_loss, valid_recovery, valid_recovery_seq))\n",
    "    \n",
    "    # Save checkpoints: periodic snapshot every 50 epochs plus a rolling last.pt\n",
    "    if epoch % 50 == 0:\n",
    "        torch.save(model.state_dict(), f'{weight_path}/epoch_{epoch}.pt')\n",
    "    torch.save(model.state_dict(), f'{weight_path}/last.pt')\n",
    "    \n",
    "    \n",
    "    # LR scheduling on the recovery metric — presumably a ReduceLROnPlateau\n",
    "    # maximizing recovery; confirm in the scheduler's configuration cell.\n",
    "    scheduler.step(valid_recovery)\n",
    "    # Early stopping after max_patience epochs with no recovery improvement\n",
    "    if valid_recovery > best_valid_recovery:\n",
    "        best_valid_recovery = valid_recovery\n",
    "        num_epoch_no_improve = 0\n",
    "        torch.save(model.state_dict(), f'{weight_path}/best.pt')\n",
    "    else:\n",
    "        num_epoch_no_improve += 1\n",
    "        if num_epoch_no_improve >= max_patience:\n",
    "            print(f'Early stopping at epoch {epoch}')\n",
    "            break"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "289b78b0",
   "metadata": {},
   "source": [
    "## 序列交叉熵示例"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "c8fff2cf",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "序列交叉熵损失: 1.4258\n"
     ]
    }
   ],
   "source": [
    "# Toy example: masked sequence cross-entropy.\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "\n",
    "# Simulated data: batch of 1, sequence length 3, 4 classes\n",
    "logits = torch.tensor([[[1.2, 3.0, -0.5, 0.1], [0.5, 2.1, -1.0, 0.3], [0.1, -0.5, 1.5, 2.0]]])\n",
    "labels = torch.tensor([[1, 2, 3]])  # target class indices\n",
    "mask = torch.tensor([[1, 1, 1]])    # valid-position mask (no <PAD> here)\n",
    "\n",
    "# Per-position loss; padding is excluded by the mask below.\n",
    "# NOTE: do NOT pass ignore_index=0 here — with 4 RNA bases (indices 0-3),\n",
    "# class 0 is a real label, and ignore_index would silently zero its loss.\n",
    "criterion = nn.CrossEntropyLoss(reduction='none')\n",
    "loss = criterion(logits.view(-1, 4), labels.view(-1)) * mask.view(-1)\n",
    "final_loss = loss.sum() / mask.sum()  # average over valid positions only\n",
    "print(f\"序列交叉熵损失: {final_loss.item():.4f}\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "9af13790",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1.5625e-05\n"
     ]
    }
   ],
   "source": [
    "# Inspect the current learning rate of every parameter group.\n",
    "# (optimizer is created in an earlier cell; the scheduler may have reduced it.)\n",
    "# Removed the unused, mid-notebook `import torch.optim as optim`.\n",
    "for param_group in optimizer.param_groups:\n",
    "    print(param_group['lr'])\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2122bf1d",
   "metadata": {},
   "source": [
    "### CUDA 报错记录（illegal memory access，并非单纯显存不足）\n",
    "\n",
    "RuntimeError: CUDA error: an illegal memory access was encountered\n",
    "CUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.\n",
    "For debugging consider passing CUDA_LAUNCH_BLOCKING=1.\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "beac3fe1",
   "metadata": {},
   "source": [
    "### Test: evaluate the best checkpoint"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "id": "086fc510",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loading checkpoint from path: /workspace/sais_medicine/code/weights/best.pt\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "RNAModel(\n",
       "  (features): RNAFeatures(\n",
       "    (dropout): Dropout(p=0.1, inplace=False)\n",
       "    (node_embedding): Linear(in_features=101, out_features=128, bias=True)\n",
       "    (edge_embedding): Linear(in_features=115, out_features=128, bias=True)\n",
       "    (norm_nodes): Normalize()\n",
       "    (norm_edges): Normalize()\n",
       "  )\n",
       "  (W_s): Embedding(4, 128)\n",
       "  (encoder_layers): ModuleList(\n",
       "    (0-2): 3 x MPNNLayer(\n",
       "      (dropout): Dropout(p=0.1, inplace=False)\n",
       "      (norm1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "      (norm2): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "      (W1): Linear(in_features=384, out_features=128, bias=True)\n",
       "      (W2): Linear(in_features=128, out_features=128, bias=True)\n",
       "      (W3): Linear(in_features=128, out_features=128, bias=True)\n",
       "      (act): ReLU()\n",
       "      (dense): Sequential(\n",
       "        (0): Linear(in_features=128, out_features=512, bias=True)\n",
       "        (1): ReLU()\n",
       "        (2): Linear(in_features=512, out_features=128, bias=True)\n",
       "      )\n",
       "    )\n",
       "  )\n",
       "  (decoder_layers): ModuleList(\n",
       "    (0-2): 3 x MPNNLayer(\n",
       "      (dropout): Dropout(p=0.1, inplace=False)\n",
       "      (norm1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "      (norm2): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "      (W1): Linear(in_features=384, out_features=128, bias=True)\n",
       "      (W2): Linear(in_features=128, out_features=128, bias=True)\n",
       "      (W3): Linear(in_features=128, out_features=128, bias=True)\n",
       "      (act): ReLU()\n",
       "      (dense): Sequential(\n",
       "        (0): Linear(in_features=128, out_features=512, bias=True)\n",
       "        (1): ReLU()\n",
       "        (2): Linear(in_features=512, out_features=128, bias=True)\n",
       "      )\n",
       "    )\n",
       "  )\n",
       "  (projection_head): Sequential(\n",
       "    (0): Linear(in_features=128, out_features=128, bias=False)\n",
       "    (1): ReLU(inplace=True)\n",
       "    (2): Linear(in_features=128, out_features=128, bias=True)\n",
       "  )\n",
       "  (readout): Linear(in_features=128, out_features=4, bias=True)\n",
       ")"
      ]
     },
     "execution_count": 26,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Build a fresh model for evaluation and load the best checkpoint.\n",
    "eval_model = RNAModel(config.model_config).to(config.device)\n",
    "checkpoint_path = train_config.ckpt_path\n",
    "print(\"loading checkpoint from path:\", checkpoint_path)\n",
    "# weights_only=True restricts torch.load to tensors/containers (no arbitrary\n",
    "# unpickling); strict=True requires an exact state_dict key match.\n",
    "eval_model.load_state_dict(torch.load(checkpoint_path, map_location='cpu', weights_only=True), strict=True)\n",
    "# Redundant with the .to() above, but as the cell's last expression it makes\n",
    "# Jupyter display the model architecture as the cell output.\n",
    "eval_model.to(config.device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "id": "10158a05-4192-4f93-b173-a7ba4dc25a96",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 29/29 [00:02<00:00, 11.58it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "test recovery: 0.6729\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "# Sequence-recovery evaluation of eval_model.\n",
    "# NOTE(review): iterates valid_loader but reports \"test recovery\" —\n",
    "# confirm this loader actually holds the intended evaluation split.\n",
    "recovery_list = []\n",
    "eval_model.eval()\n",
    "with torch.no_grad():\n",
    "    result_list = []\n",
    "    for batch in tqdm(valid_loader):\n",
    "        X, S, mask, lengths, names = batch\n",
    "        X, S, mask = X.to(config.device), S.to(config.device), mask.to(config.device)\n",
    "        logits, _ = eval_model(X, mask)\n",
    "        S = torch.masked_select(S, (mask == 1))  # labels at valid positions only\n",
    "        probs = F.softmax(logits, dim=-1)\n",
    "        samples = probs.argmax(dim=-1)  # argmax of probs == argmax of logits\n",
    "        # Walk the flat prediction tensor sequence by sequence\n",
    "        offset = 0\n",
    "        for length in lengths:\n",
    "            n = length.item()\n",
    "            sample = samples[offset: offset + n]\n",
    "            result_list.append(sample)\n",
    "            gt_S = S[offset: offset + n]\n",
    "            recovery = (sample == gt_S).sum() / len(sample)\n",
    "            recovery_list.append(recovery.cpu().numpy())\n",
    "            offset += n\n",
    "    test_recovery = np.mean(recovery_list)\n",
    "    print('test recovery: {:.4f}'.format(test_recovery))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b00ce785",
   "metadata": {},
   "source": [
    "### Predict"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "id": "bfb6d926",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/tmp/ipykernel_2814534/2328507921.py:5: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.\n",
      "  state_dict = torch.load(train_config.ckpt_path, map_location=Config.device)\n",
      "100%|██████████| 29/29 [00:02<00:00, 11.55it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "test recovery: 0.6729\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "# Rebuild the model, load the checkpoint, and evaluate sequence recovery.\n",
    "# NOTE(review): near-duplicate of the previous evaluation cell — consider\n",
    "# keeping only one of the two.\n",
    "model = RNAModel(config.model_config).to(config.device)\n",
    "\n",
    "# Load model weights; weights_only=True avoids the FutureWarning seen in the\n",
    "# previous run and the security risk of unpickling arbitrary objects.\n",
    "state_dict = torch.load(train_config.ckpt_path, map_location=config.device, weights_only=True)\n",
    "model.load_state_dict(state_dict)\n",
    "\n",
    "recovery_list = []\n",
    "model.eval()\n",
    "with torch.no_grad():\n",
    "    result_list = []\n",
    "    for batch in tqdm(valid_loader):\n",
    "        X, S, mask, lengths, names = batch\n",
    "        X = X.to(config.device)\n",
    "        S = S.to(config.device)\n",
    "        mask = mask.to(config.device)\n",
    "        logits, _ = model(X, mask)\n",
    "        S = torch.masked_select(S, (mask == 1))  # labels at valid positions only\n",
    "        probs = F.softmax(logits, dim=-1)\n",
    "        samples = probs.argmax(dim=-1)\n",
    "        start_idx = 0\n",
    "        for length in lengths:\n",
    "            end_idx = start_idx + length.item()\n",
    "            sample = samples[start_idx: end_idx]\n",
    "            gt_S = S[start_idx: end_idx]\n",
    "            result_list.append(sample)\n",
    "            recovery = (sample == gt_S).sum() / len(sample)\n",
    "            recovery_list.append(recovery.cpu().numpy())\n",
    "            start_idx = end_idx\n",
    "    test_recovery = np.mean(recovery_list)\n",
    "    print('test recovery: {:.4f}'.format(test_recovery))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "test",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
