{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "3068c6e9",
   "metadata": {},
   "source": [
    "### RNA 逆折叠 Baseline (Based on RDesign)\n",
    "This code establishes a baseline for RNA inverse folding (sequence design from structure), adapted from [A4Bio/RDesign](https://github.com/A4Bio/RDesign). It processes RNA structural coordinates (.npy files) paired with sequences (FASTA), splits data into train/valid/test sets, and creates PyTorch datasets where inputs are 3D backbone coordinates (P, C4', O3' atoms) and targets are corresponding RNA sequences. \n",
    "\n",
    "RNA有四种碱基：AUCG —— Adenine（腺嘌呤）、Uracil（尿嘧啶）、Cytosine（胞嘧啶）和 Guanine（鸟嘌呤）\n",
    "\n",
    "RNA的序列结构：\n",
    "\n",
    "            碱基                碱基                 碱基\n",
    "磷酸基团-五碳糖(核糖)-磷酸基团-五碳糖(核糖)-磷酸基团-五碳糖(核糖)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "a1a25400",
   "metadata": {},
   "outputs": [],
   "source": [
    "# %pip install torch pandas biopython torch-scatter tqdm  # pip takes space-separated names (no commas)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "159f7029-9f98-48b1-aa8f-d3ab43894deb",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import torch\n",
    "import random\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import torch.nn as nn\n",
    "from tqdm import tqdm\n",
    "from typing import List\n",
    "from torch.optim import Adam\n",
    "import torch.nn.functional as F\n",
    "from torch.utils.data import Dataset\n",
    "from torch.utils.data import DataLoader\n",
    "from dataclasses import dataclass, field\n",
    "from torch_scatter import scatter_sum, scatter_softmax\n",
    "from Bio import SeqIO  # pip install biopython"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "1faeb703",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Configuration classes (dataclasses) for data paths, model hyperparameters and training.\n",
    "root_dir = r'D:/OneDrive - stu.ahu.edu.cn/RNA预测/sais_medicine'\n",
    "\n",
    "@dataclass\n",
    "class DataConfig:\n",
    "    fasta_data_path: str = f'{root_dir}/saistraindata/seqs'  # label data (FASTA sequences)\n",
    "    coords_data_path: str = f'{root_dir}/saistraindata/coords'  # feature data (3D coordinates)\n",
    "    outputs_path: str = f'{root_dir}/outputs'\n",
    "    \n",
    "    train_npy_data_dir: str = f'{root_dir}/saistraindata/coords'\n",
    "    valid_npy_data_dir: str = f'{root_dir}/saistraindata/coords'\n",
    "    test_npy_data_dir: str = f'{root_dir}/saistraindata/coords'\n",
    "    \n",
    "    train_data_path: str = f'{root_dir}/outputs/public_train_data.csv'\n",
    "    valid_data_path: str = f'{root_dir}/outputs/public_valid_data.csv'\n",
    "    test_data_path: str = f'{root_dir}/outputs/public_test_data.csv'\n",
    "\n",
    "@dataclass\n",
    "class ModelConfig:\n",
    "    smoothing: float = 0.1  # label-smoothing factor\n",
    "    hidden: int = 128  # hidden feature width\n",
    "    vocab_size: int = 4  # A, U, C, G\n",
    "    k_neighbors: int = 30  # k for the k-NN graph\n",
    "    dropout: float = 0.1\n",
    "    # mutable defaults must go through default_factory\n",
    "    node_feat_types: List[str] = field(default_factory=lambda: ['angle', 'distance', 'direction'])\n",
    "    edge_feat_types: List[str] = field(default_factory=lambda: ['orientation', 'distance', 'direction'])\n",
    "    num_encoder_layers: int = 3\n",
    "    num_decoder_layers: int = 3\n",
    "\n",
    "@dataclass\n",
    "class TrainConfig:\n",
    "    batch_size: int = 8\n",
    "    epoch: int = 30\n",
    "    lr: float = 0.001\n",
    "    output_dir: str = f'{root_dir}/outputs/ckpts'\n",
    "    ckpt_path: str = f'{root_dir}/outputs/ckpts/best.pt'\n",
    "\n",
    "@dataclass\n",
    "class Config:\n",
    "    pipeline: str = 'train'\n",
    "    seed: int = 2025\n",
    "    device: str = 'cuda' if torch.cuda.is_available() else 'cpu'\n",
    "    # Dataclass instances are mutable defaults: a bare `DataConfig()` default\n",
    "    # raises ValueError on Python >= 3.11 and silently shares one instance\n",
    "    # across all Config objects before that. default_factory fixes both.\n",
    "    data_config: DataConfig = field(default_factory=DataConfig)\n",
    "    model_config: ModelConfig = field(default_factory=ModelConfig)\n",
    "    train_config: TrainConfig = field(default_factory=TrainConfig)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f6c9a137",
   "metadata": {},
   "source": [
    "### Data processing\n",
    "This code implements an RNA data processing pipeline for machine learning tasks using PyTorch and Biopython."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "a6c2f4ec-4be4-4b8f-a0c6-d213ca157a86",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 2317/2317 [00:01<00:00, 1857.44it/s]\n"
     ]
    }
   ],
   "source": [
    "# Read the FASTA files (the labels) with Biopython\n",
    "def read_fasta_biopython(file_path):\n",
    "    \"\"\"Return {record_id: sequence_string} for every record in a FASTA file.\"\"\"\n",
    "    return {rec.id: str(rec.seq) for rec in SeqIO.parse(file_path, \"fasta\")}\n",
    "\n",
    "# Each label file is read and its first record becomes one (pdb_id, seq) row.\n",
    "train_file_list = os.listdir(DataConfig.fasta_data_path)\n",
    "records = []\n",
    "for file in tqdm(train_file_list):\n",
    "    sequences = read_fasta_biopython(os.path.join(DataConfig.fasta_data_path, file))\n",
    "    pdb_id, seq = list(sequences.items())[0]\n",
    "    records.append({\"pdb_id\": pdb_id, \"seq\": seq})\n",
    "\n",
    "data = pd.DataFrame(records)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1ffa5dce",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Only a cell's last expression is rendered; the bare `data` line was dead.\n",
    "display(data)\n",
    "DataConfig.train_data_path"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0b4968da",
   "metadata": {},
   "source": [
    "## 拆分训练集, 测试集和验证集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "3a924975-46ec-49ab-a115-8487f96aa142",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Randomly assign each sample to train/valid/test (70/20/10) and persist the splits.\n",
    "import os\n",
    "\n",
    "\n",
    "os.makedirs(DataConfig.outputs_path, exist_ok=True)\n",
    "\n",
    "\n",
    "np.random.seed(42)  # fixed seed so the split is reproducible\n",
    "data['split'] = np.random.choice(['train', 'valid', 'test'], size=len(data), p=[0.7, 0.2, 0.1])\n",
    "train_data = data[data['split'] == 'train']\n",
    "valid_data = data[data['split'] == 'valid']\n",
    "test_data = data[data['split'] == 'test']\n",
    "for frame, csv_path in [(train_data, DataConfig.train_data_path),\n",
    "                        (valid_data, DataConfig.valid_data_path),\n",
    "                        (test_data, DataConfig.test_data_path)]:\n",
    "    frame.to_csv(csv_path, index=False)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "fa48eb54",
   "metadata": {},
   "source": [
    "## 读一下数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "4678b144",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load one coordinate file to inspect its layout: (n, 7, 3) = n residues x\n",
    "# 7 atoms (P, O5', C5', C4', C3', O3', N — see RNADataset below) x xyz.\n",
    "data_path = f\"{DataConfig.coords_data_path}/1A9N_1_Q.npy\"\n",
    "coords = np.load(data_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f9ee6dd7",
   "metadata": {},
   "outputs": [],
   "source": [
    "coords.shape  # (n, 7, 3): n atom groups (residues), 7 atoms per group, 3D coordinates"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8be4df7f",
   "metadata": {},
   "outputs": [],
   "source": [
    "coords[7]  # peek at one residue group: 7 atoms x (x, y, z)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "ef567b16-a1f2-4a00-bc6a-a7d6daceb6cc",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Dataset yielding one RNA per item: name, base sequence, per-atom backbone coords.\n",
    "class RNADataset(Dataset):\n",
    "    def __init__(self, data_path, npy_dir):\n",
    "        \"\"\"data_path: CSV with 'pdb_id'/'seq' columns; npy_dir: dir of <pdb_id>.npy arrays.\"\"\"\n",
    "        super(RNADataset, self).__init__()\n",
    "        self.data = pd.read_csv(data_path)\n",
    "        self.npy_dir = npy_dir\n",
    "        self.seq_list = self.data['seq'].to_list()\n",
    "        self.name_list = self.data['pdb_id'].to_list()\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.name_list)\n",
    "\n",
    "    def __getitem__(self, idx):\n",
    "        pdb_id = self.name_list[idx]\n",
    "        # array shape (n, 7, 3): n residues x 7 named atoms x xyz\n",
    "        coords = np.load(os.path.join(self.npy_dir, pdb_id + '.npy'))\n",
    "        atom_names = [\"P\", \"O5'\", \"C5'\", \"C4'\", \"C3'\", \"O3'\", \"N\"]\n",
    "        return {\n",
    "            \"name\": pdb_id,\n",
    "            \"seq\": self.seq_list[idx],\n",
    "            \"coords\": {atom: coords[:, slot, :] for slot, atom in enumerate(atom_names)},\n",
    "        }"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "bffd6013-4f5b-42bf-9b05-42bd6041daa9",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "seeding done!!!\n"
     ]
    }
   ],
   "source": [
    "def seeding(seed):\n",
    "    \"\"\"Seed every RNG source (numpy, random, hash seed, torch CPU/GPU) for reproducibility.\"\"\"\n",
    "    np.random.seed(seed)\n",
    "    random.seed(seed)\n",
    "    os.environ['PYTHONHASHSEED'] = str(seed)\n",
    "    torch.manual_seed(seed)\n",
    "    torch.cuda.manual_seed(seed)\n",
    "    torch.cuda.manual_seed_all(seed)\n",
    "    # trade speed for determinism in cuDNN kernels\n",
    "    torch.backends.cudnn.deterministic = True\n",
    "    torch.backends.cudnn.benchmark = False\n",
    "    print('seeding done!!!')\n",
    "config = Config()\n",
    "data_config = config.data_config\n",
    "train_config = config.train_config\n",
    "seeding(config.seed)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "85a61f93-e518-4d50-aa87-9e4bc1f74516",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build one dataset per split; all three share the same coords directory.\n",
    "train_dataset = RNADataset(\n",
    "    data_path=data_config.train_data_path,\n",
    "    npy_dir=data_config.train_npy_data_dir,\n",
    ")\n",
    "valid_dataset = RNADataset(\n",
    "    data_path=data_config.valid_data_path,\n",
    "    npy_dir=data_config.valid_npy_data_dir,\n",
    ")\n",
    "test_dataset = RNADataset(\n",
    "    data_path=data_config.test_data_path,\n",
    "    npy_dir=data_config.test_npy_data_dir,\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5dbc500a",
   "metadata": {},
   "source": [
    "### RNA Inverse Folding Framework\n",
    "\n",
    "This framework integrates geometric featurization with GNNs to predict RNA sequences compatible with input 3D conformations.\n",
    "该框架将几何特征化与图神经网络（GNNs）相结合，用于预测与输入三维构象兼容的RNA序列"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "id": "f7d5fa3e-c392-49ee-bde9-ab96c8650dbf",
   "metadata": {},
   "outputs": [],
   "source": [
    "def featurize(batch):\n",
    "    \"\"\"Collate dataset samples into padded batch tensors.\n",
    "\n",
    "    NOTE(review): superseded — `featurize` is re-defined in the next cell and\n",
    "    the DataLoaders below actually use `prepare_rna_batch` as collate_fn;\n",
    "    this version appears to be dead code kept for reference.\n",
    "\n",
    "    Returns (X, S, mask, lengths, names): X [B, L_max, 6, 3] float32 coords,\n",
    "    S [B, L_max] long base indices into 'AUCG', mask [B, L_max] float32.\n",
    "    \"\"\"\n",
    "    alphabet = 'AUCG'\n",
    "    B = len(batch)\n",
    "    lengths = np.array([len(b['seq']) for b in batch], dtype=np.int32)\n",
    "    L_max = max([len(b['seq']) for b in batch])\n",
    "    X = np.zeros([B, L_max, 6, 3])  # features: padded backbone atom coordinates\n",
    "    S = np.zeros([B, L_max], dtype=np.int32)  # labels: base index per position\n",
    "    names = []\n",
    "\n",
    "    # Build the batch\n",
    "    for i, b in enumerate(batch):\n",
    "        x = np.stack([np.nan_to_num(b['coords'][c], nan=0.0) for c in [\"P\", \"O5'\", \"C5'\", \"C4'\", \"C3'\", \"O3'\"]], 1)\n",
    "        l = len(b['seq'])\n",
    "        # pad the tail with NaN so padded rows are detectable as non-finite\n",
    "        x_pad = np.pad(x, [[0, L_max-l], [0,0], [0,0]], 'constant', constant_values=(np.nan, ))\n",
    "        X[i,:,:,:] = x_pad\n",
    "        indices = np.asarray([alphabet.index(a) for a in b['seq']], dtype=np.int32)\n",
    "        S[i, :l] = indices\n",
    "        names.append(b['name'])\n",
    "    \n",
    "    # compact each sample: move rows with finite coordinates to the front\n",
    "    mask = np.isfinite(np.sum(X,(2,3))).astype(np.float32)\n",
    "    numbers = np.sum(mask, axis=1).astype(np.int32)\n",
    "    S_new = np.zeros_like(S)\n",
    "    X_new = np.zeros_like(X)+np.nan\n",
    "    for i, n in enumerate(numbers):\n",
    "        X_new[i,:n,::] = X[i][mask[i]==1]\n",
    "        S_new[i,:n] = S[i][mask[i]==1]\n",
    "\n",
    "    X = X_new\n",
    "    S = S_new\n",
    "    isnan = np.isnan(X)\n",
    "    # recompute the mask after compaction, then zero out the NaN padding\n",
    "    mask = np.isfinite(np.sum(X,(2,3))).astype(np.float32)\n",
    "    X[isnan] = 0.\n",
    "    # Conversion\n",
    "    S = torch.from_numpy(S).to(dtype=torch.long)\n",
    "    X = torch.from_numpy(X).to(dtype=torch.float32)\n",
    "    mask = torch.from_numpy(mask).to(dtype=torch.float32)\n",
    "    return X, S, mask, lengths, names"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "56906d8f",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "\n",
    "\n",
    "# Global vocabulary: index <-> base maps shared by the featurizers below.\n",
    "tag_list = list(\"AUCG\")\n",
    "idx2tag = {i: tag for i, tag in enumerate(tag_list)}\n",
    "tag2idx = {tag: i for i, tag in enumerate(tag_list)}\n",
    "\n",
    "def featurize(batch, has_label=True):\n",
    "    \"\"\"Collate samples into padded tensors; labels are optional.\n",
    "\n",
    "    NOTE(review): this re-definition silently shadows the `featurize` defined\n",
    "    in the previous cell; the DataLoaders below use `prepare_rna_batch`\n",
    "    instead, so this version also appears unused.\n",
    "\n",
    "    Unlike the previous version, lengths come from the coordinate arrays\n",
    "    (not the sequence strings) and S is a flat 1-D label tensor.\n",
    "    \"\"\"\n",
    "    B = len(batch)\n",
    "    lengths = np.array([b[\"coords\"][\"P\"].shape[0] for b in batch], dtype=np.int32)\n",
    "    L_max = max([b[\"coords\"][\"P\"].shape[0] for b in batch])\n",
    "    X = np.zeros([B, L_max, 6, 3])\n",
    "    S = []\n",
    "    names = []\n",
    "\n",
    "    # Build the batch\n",
    "    for i, b in enumerate(batch):\n",
    "        x = np.stack([np.nan_to_num(b['coords'][c], nan=0.0) for c in [\"P\", \"O5'\", \"C5'\", \"C4'\", \"C3'\", \"O3'\"]], 1)\n",
    "        l = b[\"coords\"][\"P\"].shape[0]\n",
    "        # pad the tail with NaN so padded rows are detectable as non-finite\n",
    "        x_pad = np.pad(x, [[0, L_max - l], [0, 0], [0, 0]], 'constant', constant_values=(np.nan,))\n",
    "        X[i, :, :, :] = x_pad\n",
    "        if has_label:\n",
    "            S.extend([tag2idx[a] for a in b['seq']])\n",
    "        names.append(b['name'])\n",
    "\n",
    "    # compact each sample: move rows with finite coordinates to the front\n",
    "    mask = np.isfinite(np.sum(X, (2, 3))).astype(np.float32)\n",
    "    numbers = np.sum(mask, axis=1).astype(np.int32)\n",
    "    X_new = np.zeros_like(X) + np.nan\n",
    "    for i, n in enumerate(numbers):\n",
    "        X_new[i, :n, ::] = X[i][mask[i] == 1]\n",
    "    X = X_new\n",
    "    isnan = np.isnan(X)\n",
    "    # recompute the mask after compaction, then zero out the NaN padding\n",
    "    mask = np.isfinite(np.sum(X, (2, 3))).astype(np.float32)\n",
    "    X[isnan] = 0.\n",
    "    # Conversion\n",
    "    X = torch.from_numpy(X).to(dtype=torch.float32)\n",
    "    mask = torch.from_numpy(mask).to(dtype=torch.float32)\n",
    "    if has_label:\n",
    "        S = torch.as_tensor(S)\n",
    "        return X, mask, lengths, names, S\n",
    "    else:\n",
    "        return X, mask, lengths, names"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "e049e40a",
   "metadata": {},
   "outputs": [],
   "source": [
    "def prepare_rna_batch(rna_samples):\n",
    "    \"\"\"Collate RNA samples into network-ready tensors (DataLoader collate_fn).\n",
    "\n",
    "    Args:\n",
    "        rna_samples: list of samples, each a dict with\n",
    "            - 'seq': base sequence string (e.g. \"AUCG\")\n",
    "            - 'coords': dict of per-atom coordinate arrays, each (n, 3)\n",
    "            - 'name': sample identifier\n",
    "\n",
    "    Returns:\n",
    "        features: float32 tensor [B, L_max, 6, 3] of backbone atom coordinates\n",
    "        labels: int64 tensor [B, L_max] of base classes (-1 marks padding)\n",
    "        valid_mask: float32 tensor [B, L_max], 1.0 at valid positions\n",
    "        seq_lengths: np.ndarray of per-sample residue counts\n",
    "        sample_names: list of sample names\n",
    "    \"\"\"\n",
    "    tag2idx = {tag: i for i, tag in enumerate(\"AUCG\")}\n",
    "    KEY_ATOMS = [\"P\", \"O5'\", \"C5'\", \"C4'\", \"C3'\", \"O3'\"]\n",
    "\n",
    "    batch_size = len(rna_samples)\n",
    "    seq_lengths = [b[\"coords\"][\"P\"].shape[0] for b in rna_samples]  # residues per sample\n",
    "    max_len = max(seq_lengths)  # longest sample in this batch\n",
    "\n",
    "    # Pre-allocate outputs directly in their final dtypes.\n",
    "    coords = np.zeros((batch_size, max_len, 6, 3), dtype=np.float32)\n",
    "    labels = np.full((batch_size, max_len), -1, dtype=np.int64)  # -1 = padding\n",
    "    mask = np.zeros((batch_size, max_len), dtype=np.float32)\n",
    "\n",
    "    for i, sample in enumerate(rna_samples):\n",
    "        curr_len = seq_lengths[i]\n",
    "        # Stack the six backbone atoms -> (n, 6, 3). NaNs become 0 here;\n",
    "        # infinities (if present) still flag a row as invalid below.\n",
    "        atom_stack = np.stack([np.nan_to_num(sample['coords'][atom], nan=0.0) for atom in KEY_ATOMS],\n",
    "                              axis=1)\n",
    "        valid_pos = np.isfinite(atom_stack.sum((1, 2)))  # rows with all-finite coords\n",
    "        actual_len = min(curr_len, sum(valid_pos))\n",
    "        # Compact valid rows to the front of each sample.\n",
    "        coords[i, :actual_len] = atom_stack[valid_pos][:actual_len]\n",
    "        mask[i, :actual_len] = 1.0\n",
    "\n",
    "        # Encode labels; assumes len(sample['seq']) equals the coordinate row\n",
    "        # count so the boolean index lines up — TODO confirm upstream.\n",
    "        seq = np.array(list(sample['seq']), dtype=np.str_)\n",
    "        labels[i, :actual_len] = [tag2idx[b] for b in seq[valid_pos][:actual_len]]\n",
    "\n",
    "    return (torch.from_numpy(coords),\n",
    "            torch.from_numpy(labels),\n",
    "            torch.from_numpy(mask),\n",
    "            np.array(seq_lengths),\n",
    "            [s['name'] for s in rna_samples])\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 94,
   "id": "318463dd",
   "metadata": {},
   "outputs": [],
   "source": [
    "# prepare_rna_batch([train_dataset[0]])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "0d8d9a12-eb92-404f-8cd4-0998d310c1f5",
   "metadata": {},
   "outputs": [],
   "source": [
    "# All three loaders share prepare_rna_batch as collate_fn; only training shuffles.\n",
    "train_loader = DataLoader(train_dataset,\n",
    "        batch_size=train_config.batch_size,\n",
    "        shuffle=True,\n",
    "        num_workers=0,\n",
    "        collate_fn=prepare_rna_batch)\n",
    "\n",
    "valid_loader = DataLoader(valid_dataset,\n",
    "        batch_size=train_config.batch_size,\n",
    "        shuffle=False,\n",
    "        num_workers=0,\n",
    "        collate_fn=prepare_rna_batch)\n",
    "\n",
    "test_loader = DataLoader(test_dataset,\n",
    "        batch_size=train_config.batch_size,\n",
    "        shuffle=False,\n",
    "        num_workers=0,\n",
    "        collate_fn=prepare_rna_batch)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "1ed12564-029e-496c-8826-29f98c6b580c",
   "metadata": {},
   "outputs": [],
   "source": [
    "def gather_edges(edges, neighbor_idx):\n",
    "    \"\"\"Select edge features of the k neighbors: [B,N,N,C] + [B,N,K] -> [B,N,K,C].\"\"\"\n",
    "    neighbors = neighbor_idx.unsqueeze(-1).expand(-1, -1, -1, edges.size(-1))\n",
    "    return torch.gather(edges, 2, neighbors)\n",
    "\n",
    "def gather_nodes(nodes, neighbor_idx):\n",
    "    \"\"\"Gather node features of the neighbors: [B,N,C] + [B,N,K] -> [B,N,K,C].\"\"\"\n",
    "    # flatten indices to [B, N*K] so a single gather along dim 1 suffices\n",
    "    neighbors_flat = neighbor_idx.view((neighbor_idx.shape[0], -1))\n",
    "    neighbors_flat = neighbors_flat.unsqueeze(-1).expand(-1, -1, nodes.size(2))\n",
    "    neighbor_features = torch.gather(nodes, 1, neighbors_flat)\n",
    "    neighbor_features = neighbor_features.view(list(neighbor_idx.shape)[:3] + [-1])\n",
    "    return neighbor_features\n",
    "\n",
    "def gather_nodes_t(nodes, neighbor_idx):\n",
    "    \"\"\"Single-step variant of gather_nodes: [B,N,C] + [B,K] -> [B,K,C].\"\"\"\n",
    "    idx_flat = neighbor_idx.unsqueeze(-1).expand(-1, -1, nodes.size(2))\n",
    "    return torch.gather(nodes, 1, idx_flat)\n",
    "\n",
    "def cat_neighbors_nodes(h_nodes, h_neighbors, E_idx):\n",
    "    \"\"\"Concatenate each neighbor's node features onto its edge features.\"\"\"\n",
    "    h_nodes = gather_nodes(h_nodes, E_idx)\n",
    "    return torch.cat([h_neighbors, h_nodes], -1)\n",
    "\n",
    "\n",
    "class MPNNLayer(nn.Module):\n",
    "    \"\"\"Message-passing layer: aggregates edge messages into node features.\"\"\"\n",
    "    def __init__(self, num_hidden, num_in, dropout=0.1, num_heads=None, scale=30):\n",
    "        super(MPNNLayer, self).__init__()\n",
    "        self.num_hidden = num_hidden\n",
    "        self.num_in = num_in\n",
    "        self.scale = scale  # divisor for summed messages (presumably ~k_neighbors)\n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "        self.norm1 = nn.LayerNorm(num_hidden)\n",
    "        self.norm2 = nn.LayerNorm(num_hidden)\n",
    "\n",
    "        self.W1 = nn.Linear(num_hidden + num_in, num_hidden, bias=True)\n",
    "        self.W2 = nn.Linear(num_hidden, num_hidden, bias=True)\n",
    "        self.W3 = nn.Linear(num_hidden, num_hidden, bias=True)\n",
    "        self.act = nn.ReLU()\n",
    "\n",
    "        self.dense = nn.Sequential(\n",
    "            nn.Linear(num_hidden, num_hidden*4),\n",
    "            nn.ReLU(),\n",
    "            nn.Linear(num_hidden*4, num_hidden)\n",
    "        )\n",
    "\n",
    "    def forward(self, h_V, h_E, edge_idx, batch_id=None):\n",
    "        # edge_idx: row 0 = source node ids, row 1 = destination node ids\n",
    "        src_idx, dst_idx = edge_idx[0], edge_idx[1]\n",
    "        h_message = self.W3(self.act(self.W2(self.act(self.W1(h_E)))))\n",
    "        # aggregate messages per source node (grouped sum), then rescale\n",
    "        dh = scatter_sum(h_message, src_idx, dim=0) / self.scale\n",
    "        # residual connection with dropout on the aggregated messages\n",
    "        h_V = self.norm1(h_V + self.dropout(dh))\n",
    "        # position-wise feed-forward block with a second residual connection\n",
    "        dh = self.dense(h_V)\n",
    "        h_V = self.norm2(h_V + self.dropout(dh))\n",
    "        return h_V\n",
    "\n",
    "\n",
    "class Normalize(nn.Module):\n",
    "    \"\"\"Layer normalization with learnable gain/bias over an arbitrary dimension.\"\"\"\n",
    "    def __init__(self, size, epsilon=1e-6):\n",
    "        super(Normalize, self).__init__()\n",
    "        self.gain = nn.Parameter(torch.ones(size))\n",
    "        self.bias = nn.Parameter(torch.zeros(size))\n",
    "        self.epsilon = epsilon\n",
    "\n",
    "    def forward(self, x, dim=-1):\n",
    "        # mean/std along the normalized dimension\n",
    "        mu = x.mean(dim, keepdim=True)\n",
    "        sigma = torch.sqrt(x.var(dim, keepdim=True) + self.epsilon)\n",
    "        gain = self.gain\n",
    "        bias = self.bias\n",
    "        if dim != -1:\n",
    "            # reshape gain/bias so they broadcast along `dim`\n",
    "            shape = [1] * len(mu.size())\n",
    "            shape[dim] = self.gain.size()[0]\n",
    "            gain = gain.view(shape)\n",
    "            bias = bias.view(shape)\n",
    "        return gain * (x - mu) / (sigma + self.epsilon) + bias\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "102ead4b-e635-464b-a3c2-68bda9f2e9fc",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Feature dimensionalities produced by RNAFeatures for each feature type.\n",
    "feat_dims = {\n",
    "    'node': {\n",
    "        'angle': 12,\n",
    "        'distance': 80,\n",
    "        'direction': 9,\n",
    "    },\n",
    "    'edge': {\n",
    "        'orientation': 4,\n",
    "        'distance': 96,\n",
    "        'direction': 15,\n",
    "    }\n",
    "}\n",
    "\n",
    "def nan_to_num(tensor, nan=0.0):\n",
    "    \"\"\"Replace NaN entries with `nan` WITHOUT mutating the input tensor.\n",
    "\n",
    "    The previous version assigned through a boolean mask in place, which\n",
    "    mutated the caller's tensor and fails on tensors that require grad.\n",
    "    (Deliberately replaces only NaN, not +/-inf, matching the original.)\n",
    "    \"\"\"\n",
    "    return torch.where(torch.isnan(tensor), torch.full_like(tensor, nan), tensor)\n",
    "\n",
    "def _normalize(tensor, dim=-1):\n",
    "    # zero-norm rows divide to NaN; nan_to_num maps them back to 0\n",
    "    return nan_to_num(\n",
    "        torch.div(tensor, torch.norm(tensor, dim=dim, keepdim=True)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "fddb2bad-8121-404e-bd34-5f1be813f0de",
   "metadata": {},
   "outputs": [],
   "source": [
    "class RNAFeatures(nn.Module):\n",
    "    # 边特征维度，结点特征维度，结点特征类型，边特征类型，RBF核数，K近邻数，数据增强，dropout\n",
    "    # 结点特征：['angle', 'distance', 'direction'], 边特征：['orientation', 'distance', 'direction']\n",
    "    def __init__(self, edge_features, node_features, node_feat_types=[], edge_feat_types=[], num_rbf=16, top_k=30, augment_eps=0., dropout=0.1, args=None):\n",
    "        super(RNAFeatures, self).__init__()\n",
    "        \"\"\"Extract RNA Features\"\"\"\n",
    "        self.edge_features = edge_features\n",
    "        self.node_features = node_features\n",
    "        self.top_k = top_k\n",
    "        self.augment_eps = augment_eps \n",
    "        self.num_rbf = num_rbf\n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "        self.node_feat_types = node_feat_types\n",
    "        self.edge_feat_types = edge_feat_types\n",
    "\n",
    "        node_in = sum([feat_dims['node'][feat] for feat in node_feat_types])\n",
    "        edge_in = sum([feat_dims['edge'][feat] for feat in edge_feat_types])\n",
    "        self.node_embedding = nn.Linear(node_in,  node_features, bias=True)\n",
    "        self.edge_embedding = nn.Linear(edge_in, edge_features, bias=True)\n",
    "        self.norm_nodes = Normalize(node_features)\n",
    "        self.norm_edges = Normalize(edge_features)\n",
    "        \n",
    "    def _dist(self, X, mask, eps=1E-6):\n",
    "        mask_2D = torch.unsqueeze(mask,1) * torch.unsqueeze(mask,2)  # 两两之间求掩码 B, N -> B, N, N\n",
    "        dX = torch.unsqueeze(X,1) - torch.unsqueeze(X,2)  # 两两之间求距离 B, N, 3 -> B, N, N, 3\n",
    "        D = (1. - mask_2D)*10000 + mask_2D* torch.sqrt(torch.sum(dX**2, 3) + eps)  #  B, N, N 求欧式距离, 无效值距离设置10000的很大值\n",
    "\n",
    "        D_max, _ = torch.max(D, -1, keepdim=True)  # 获取每行的最大值\n",
    "        D_adjust = D + (1. - mask_2D) * (D_max+1)  # 无效值设置为最大值+1, 不会被选中\n",
    "        D_neighbors, E_idx = torch.topk(D_adjust, min(self.top_k, D_adjust.shape[-1]), dim=-1, largest=False) # 选择topk近邻\n",
    "        return D_neighbors, E_idx\n",
    "    \n",
    "    def _rbf(self, D):\n",
    "        D_min, D_max, D_count = 0., 20., self.num_rbf\n",
    "        D_mu = torch.linspace(D_min, D_max, D_count, device=D.device)\n",
    "        D_mu = D_mu.view([1,1,1,-1])\n",
    "        D_sigma = (D_max - D_min) / D_count\n",
    "        D_expand = torch.unsqueeze(D, -1)\n",
    "        return torch.exp(-((D_expand - D_mu) / D_sigma)**2)\n",
    "    \n",
    "    def _get_rbf(self, A, B, E_idx=None, num_rbf=16):\n",
    "        if E_idx is not None:\n",
    "            D_A_B = torch.sqrt(torch.sum((A[:,:,None,:] - B[:,None,:,:])**2,-1) + 1e-6)\n",
    "            D_A_B_neighbors = gather_edges(D_A_B[:,:,:,None], E_idx)[:,:,:,0]\n",
    "            RBF_A_B = self._rbf(D_A_B_neighbors)\n",
    "        else:\n",
    "            D_A_B = torch.sqrt(torch.sum((A[:,:,None,:] - B[:,:,None,:])**2,-1) + 1e-6)\n",
    "            RBF_A_B = self._rbf(D_A_B)\n",
    "        return RBF_A_B\n",
    "    \n",
    "    def _quaternions(self, R):\n",
    "        diag = torch.diagonal(R, dim1=-2, dim2=-1)\n",
    "        Rxx, Ryy, Rzz = diag.unbind(-1)\n",
    "        magnitudes = 0.5 * torch.sqrt(torch.abs(1 + torch.stack([\n",
    "              Rxx - Ryy - Rzz, \n",
    "            - Rxx + Ryy - Rzz, \n",
    "            - Rxx - Ryy + Rzz\n",
    "        ], -1)))\n",
    "        _R = lambda i,j: R[:,:,:,i,j]\n",
    "        signs = torch.sign(torch.stack([\n",
    "            _R(2,1) - _R(1,2),\n",
    "            _R(0,2) - _R(2,0),\n",
    "            _R(1,0) - _R(0,1)\n",
    "        ], -1))\n",
    "        xyz = signs * magnitudes\n",
    "        w = torch.sqrt(F.relu(1 + diag.sum(-1, keepdim=True))) / 2.\n",
    "        Q = torch.cat((xyz, w), -1)\n",
    "        Q = F.normalize(Q, dim=-1)\n",
    "        return Q\n",
    "    \n",
    "    def _orientations_coarse(self, X, E_idx, eps=1e-6):\n",
    "        V = X.clone()\n",
    "        X = X[:,:,:6,:].reshape(X.shape[0], 6*X.shape[1], 3) \n",
    "        dX = X[:,1:,:] - X[:,:-1,:]\n",
    "        U = _normalize(dX, dim=-1)\n",
    "        u_0, u_1 = U[:,:-2,:], U[:,1:-1,:]\n",
    "        n_0 = _normalize(torch.cross(u_0, u_1), dim=-1)\n",
    "        b_1 = _normalize(u_0 - u_1, dim=-1)\n",
    "        \n",
    "        # select C3'\n",
    "        n_0 = n_0[:,4::6,:] \n",
    "        b_1 = b_1[:,4::6,:]\n",
    "        X = X[:,4::6,:]\n",
    "\n",
    "        Q = torch.stack((b_1, n_0, torch.cross(b_1, n_0)), 2)\n",
    "        Q = Q.view(list(Q.shape[:2]) + [9])\n",
    "        Q = F.pad(Q, (0,0,0,1), 'constant', 0) # [16, 464, 9]\n",
    "\n",
    "        Q_neighbors = gather_nodes(Q, E_idx) # [16, 464, 30, 9]\n",
    "        P_neighbors = gather_nodes(V[:,:,0,:], E_idx) # [16, 464, 30, 3]\n",
    "        O5_neighbors = gather_nodes(V[:,:,1,:], E_idx)\n",
    "        C5_neighbors = gather_nodes(V[:,:,2,:], E_idx)\n",
    "        C4_neighbors = gather_nodes(V[:,:,3,:], E_idx)\n",
    "        O3_neighbors = gather_nodes(V[:,:,5,:], E_idx)\n",
    "        \n",
    "        Q = Q.view(list(Q.shape[:2]) + [3,3]).unsqueeze(2) # [16, 464, 1, 3, 3]\n",
    "        Q_neighbors = Q_neighbors.view(list(Q_neighbors.shape[:3]) + [3,3]) # [16, 464, 30, 3, 3]\n",
    "\n",
    "        dX = torch.stack([P_neighbors,O5_neighbors,C5_neighbors,C4_neighbors,O3_neighbors], dim=3) - X[:,:,None,None,:] # [16, 464, 30, 3]\n",
    "        dU = torch.matmul(Q[:,:,:,None,:,:], dX[...,None]).squeeze(-1) # [16, 464, 30, 3] 邻居的相对坐标\n",
    "        B, N, K = dU.shape[:3]\n",
    "        E_direct = _normalize(dU, dim=-1)\n",
    "        E_direct = E_direct.reshape(B, N, K,-1)\n",
    "        R = torch.matmul(Q.transpose(-1,-2), Q_neighbors)\n",
    "        E_orient = self._quaternions(R)\n",
    "        \n",
    "        dX_inner = V[:,:,[0,2,3],:] - X.unsqueeze(-2)\n",
    "        dU_inner = torch.matmul(Q, dX_inner.unsqueeze(-1)).squeeze(-1)\n",
    "        dU_inner = _normalize(dU_inner, dim=-1)\n",
    "        V_direct = dU_inner.reshape(B,N,-1)\n",
    "        return V_direct, E_direct, E_orient\n",
    "    \n",
    "    def _dihedrals(self, X, eps=1e-7):\n",
    "        # P, O5', C5', C4', C3', O3'\n",
    "        X = X[:,:,:6,:].reshape(X.shape[0], 6*X.shape[1], 3)\n",
    "\n",
    "        # Shifted slices of unit vectors\n",
    "        # https://iupac.qmul.ac.uk/misc/pnuc2.html#220\n",
    "        # https://x3dna.org/highlights/torsion-angles-of-nucleic-acid-structures\n",
    "        # alpha:   O3'_{i-1} P_i O5'_i C5'_i\n",
    "        # beta:    P_i O5'_i C5'_i C4'_i\n",
    "        # gamma:   O5'_i C5'_i C4'_i C3'_i\n",
    "        # delta:   C5'_i C4'_i C3'_i O3'_i\n",
    "        # epsilon: C4'_i C3'_i O3'_i P_{i+1}\n",
    "        # zeta:    C3'_i O3'_i P_{i+1} O5'_{i+1} \n",
    "        # What's more:\n",
    "        #   chi: C1' - N9 \n",
    "        #   chi is different for (C, T, U) and (A, G) https://x3dna.org/highlights/the-chi-x-torsion-angle-characterizes-base-sugar-relative-orientation\n",
    "\n",
    "        dX = X[:, 5:, :] - X[:, :-5, :] # O3'-P, P-O5', O5'-C5', C5'-C4', ...\n",
    "        U = F.normalize(dX, dim=-1)\n",
    "        u_2 = U[:,:-2,:]  # O3'-P, P-O5', ...\n",
    "        u_1 = U[:,1:-1,:] # P-O5', O5'-C5', ...\n",
    "        u_0 = U[:,2:,:]   # O5'-C5', C5'-C4', ...\n",
    "        # Backbone normals\n",
    "        n_2 = F.normalize(torch.cross(u_2, u_1), dim=-1)\n",
    "        n_1 = F.normalize(torch.cross(u_1, u_0), dim=-1)\n",
    "\n",
    "        # Angle between normals\n",
    "        cosD = (n_2 * n_1).sum(-1)\n",
    "        cosD = torch.clamp(cosD, -1+eps, 1-eps)\n",
    "        D = torch.sign((u_2 * n_1).sum(-1)) * torch.acos(cosD)\n",
    "        \n",
    "        D = F.pad(D, (3,4), 'constant', 0)\n",
    "        D = D.view((D.size(0), D.size(1) //6, 6))\n",
    "        return torch.cat((torch.cos(D), torch.sin(D)), 2) # return D_features\n",
    "    \n",
    "    def forward(self, X, mask):\n",
    "        \"\"\"\n",
    "        Build node/edge features and a kNN graph from RNA backbone coordinates.\n",
    "\n",
    "        X: [B, N, 6, 3] - batch, max sequence length N, 6 backbone atoms\n",
    "           (P, O5', C5', C4', C3', O3'), 3D coordinates per atom.\n",
    "        mask: [B, N] - 1 marks valid residues, 0 marks padding.\n",
    "\n",
    "        Returns:\n",
    "            h_V: node embeddings for all valid residues, flattened over the batch\n",
    "            h_E: edge embeddings for all valid neighbor pairs\n",
    "            E_idx: [2, num_edges] (dst, src) indices into the flat node list\n",
    "            batch_id: sample index of each flat node\n",
    "        \"\"\"\n",
    "        # Gaussian coordinate jitter as data augmentation (training only).\n",
    "        if self.training and self.augment_eps > 0:\n",
    "            X = X + self.augment_eps * torch.randn_like(X)\n",
    "\n",
    "        B, N, _, _ = X.shape\n",
    "        # Explicit atom lookup table instead of the fragile vars()['atom_' + name]\n",
    "        # locals() trick; keys match the pair strings used below.\n",
    "        atoms = {\n",
    "            'P':   X[:, :, 0, :],\n",
    "            'O5_': X[:, :, 1, :],\n",
    "            'C5_': X[:, :, 2, :],\n",
    "            'C4_': X[:, :, 3, :],\n",
    "            'C3_': X[:, :, 4, :],\n",
    "            'O3_': X[:, :, 5, :],\n",
    "        }\n",
    "\n",
    "        # Build the k-nearest-neighbor graph around the phosphate (P) atoms.\n",
    "        X_backbone = atoms['P']\n",
    "        D_neighbors, E_idx = self._dist(X_backbone, mask)\n",
    "\n",
    "        mask_bool = (mask == 1)\n",
    "        mask_attend = gather_nodes(mask.unsqueeze(-1), E_idx).squeeze(-1)\n",
    "        mask_attend = (mask.unsqueeze(-1) * mask_attend) == 1  # both edge endpoints valid\n",
    "        edge_mask_select = lambda x: torch.masked_select(x, mask_attend.unsqueeze(-1)).reshape(-1, x.shape[-1])\n",
    "        node_mask_select = lambda x: torch.masked_select(x, mask_bool.unsqueeze(-1)).reshape(-1, x.shape[-1])\n",
    "\n",
    "        # ---- node features ----\n",
    "        h_V = []\n",
    "        # dihedral angles\n",
    "        V_angle = node_mask_select(self._dihedrals(X))\n",
    "        # RBF-encoded distances from each backbone atom to P, per residue\n",
    "        node_list = ['O5_-P', 'C5_-P', 'C4_-P', 'C3_-P', 'O3_-P']\n",
    "        V_dist = []\n",
    "        for pair in node_list:\n",
    "            atom1, atom2 = pair.split('-')\n",
    "            V_dist.append(node_mask_select(self._get_rbf(atoms[atom1], atoms[atom2], None, self.num_rbf).squeeze()))\n",
    "        V_dist = torch.cat(tuple(V_dist), dim=-1).squeeze()\n",
    "        # direction / orientation features\n",
    "        V_direct, E_direct, E_orient = self._orientations_coarse(X, E_idx)\n",
    "        V_direct = node_mask_select(V_direct)\n",
    "        E_direct, E_orient = list(map(lambda x: edge_mask_select(x), [E_direct, E_orient]))\n",
    "\n",
    "        # ---- edge features ----\n",
    "        h_E = []\n",
    "        # RBF-encoded inter-residue distances over neighbor pairs\n",
    "        edge_list = ['P-P', 'O5_-P', 'C5_-P', 'C4_-P', 'C3_-P', 'O3_-P']\n",
    "        E_dist = []\n",
    "        for pair in edge_list:\n",
    "            atom1, atom2 = pair.split('-')\n",
    "            E_dist.append(edge_mask_select(self._get_rbf(atoms[atom1], atoms[atom2], E_idx, self.num_rbf)))\n",
    "        E_dist = torch.cat(tuple(E_dist), dim=-1)\n",
    "\n",
    "        # Assemble only the configured feature groups.\n",
    "        if 'angle' in self.node_feat_types:\n",
    "            h_V.append(V_angle)\n",
    "        if 'distance' in self.node_feat_types:\n",
    "            h_V.append(V_dist)\n",
    "        if 'direction' in self.node_feat_types:\n",
    "            h_V.append(V_direct)\n",
    "\n",
    "        if 'orientation' in self.edge_feat_types:\n",
    "            h_E.append(E_orient)\n",
    "        if 'distance' in self.edge_feat_types:\n",
    "            h_E.append(E_dist)\n",
    "        if 'direction' in self.edge_feat_types:\n",
    "            h_E.append(E_direct)\n",
    "\n",
    "        # Embed and normalize node / edge features.\n",
    "        h_V = self.norm_nodes(self.node_embedding(torch.cat(h_V, dim=-1)))\n",
    "        h_E = self.norm_edges(self.edge_embedding(torch.cat(h_E, dim=-1)))\n",
    "\n",
    "        # Convert per-batch kNN indices into flat (dst, src) indices over the\n",
    "        # concatenation of all valid residues; shift is each sample's offset.\n",
    "        shift = mask.sum(dim=1).cumsum(dim=0) - mask.sum(dim=1)\n",
    "        src = shift.view(B, 1, 1) + E_idx\n",
    "        src = torch.masked_select(src, mask_attend).view(1, -1)\n",
    "        dst = shift.view(B, 1, 1) + torch.arange(0, N, device=src.device).view(1, -1, 1).expand_as(mask_attend)\n",
    "        dst = torch.masked_select(dst, mask_attend).view(1, -1)\n",
    "        E_idx = torch.cat((dst, src), dim=0).long()\n",
    "\n",
    "        # Keep coordinates of valid residues only and record their sample ids.\n",
    "        sparse_idx = mask.nonzero()\n",
    "        X = X[sparse_idx[:, 0], sparse_idx[:, 1], :, :]\n",
    "        batch_id = sparse_idx[:, 0]\n",
    "        return h_V, h_E, E_idx, batch_id"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "81d99837",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([1, 1])"
      ]
     },
     "execution_count": 19,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Scratch check: torch.masked_select returns a flattened 1-D tensor of the\n",
    "# entries where the boolean mask is True (here the two 1s -> tensor([1, 1])).\n",
    "a = torch.tensor([[1,2],[1,4]])\n",
    "mask_bool = a == 1\n",
    "mask_bool\n",
    "S = torch.masked_select(a, mask_bool)  # same selection pattern used on labels in training\n",
    "S"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "e12b215f-66ff-46cb-8645-733655cbf66f",
   "metadata": {},
   "outputs": [],
   "source": [
    "class RNAModel(nn.Module):\n",
    "    \"\"\"Graph message-passing model for RNA inverse folding.\n",
    "\n",
    "    Extracts node/edge features from backbone coordinates (RNAFeatures),\n",
    "    runs MPNN encoder and decoder layers, and emits per-residue 4-way base\n",
    "    logits plus a graph-level projection for each sample.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, model_config):\n",
    "        super(RNAModel, self).__init__()\n",
    "\n",
    "        self.smoothing = model_config.smoothing\n",
    "        self.node_features = self.edge_features = model_config.hidden\n",
    "        self.hidden_dim = model_config.hidden\n",
    "        self.vocab = model_config.vocab_size\n",
    "\n",
    "        # Structure-to-feature extractor (kNN graph + node/edge embeddings).\n",
    "        self.features = RNAFeatures(\n",
    "            model_config.hidden, model_config.hidden, \n",
    "            top_k=model_config.k_neighbors, \n",
    "            dropout=model_config.dropout,\n",
    "            node_feat_types=model_config.node_feat_types, \n",
    "            edge_feat_types=model_config.edge_feat_types,\n",
    "            args=model_config\n",
    "        )\n",
    "\n",
    "        layer = MPNNLayer\n",
    "        self.W_s = nn.Embedding(model_config.vocab_size, self.hidden_dim)\n",
    "        self.encoder_layers = nn.ModuleList([\n",
    "            layer(self.hidden_dim, self.hidden_dim*2, dropout=model_config.dropout)\n",
    "            for _ in range(model_config.num_encoder_layers)])\n",
    "        self.decoder_layers = nn.ModuleList([\n",
    "            layer(self.hidden_dim, self.hidden_dim*2, dropout=model_config.dropout)\n",
    "            for _ in range(model_config.num_decoder_layers)])\n",
    "\n",
    "        # Projection head for graph-level (per-sample) embeddings.\n",
    "        self.projection_head = nn.Sequential(\n",
    "            nn.Linear(self.hidden_dim, self.hidden_dim, bias=False), \n",
    "            nn.ReLU(inplace=True), \n",
    "            nn.Linear(self.hidden_dim, self.hidden_dim, bias=True)\n",
    "        )\n",
    "        # Per-node 4-way base classifier (A/G/C/U).\n",
    "        self.readout = nn.Linear(self.hidden_dim, model_config.vocab_size, bias=True)\n",
    "\n",
    "        # Xavier initialization for all weight matrices (dim > 1 skips biases).\n",
    "        for p in self.parameters():\n",
    "            if p.dim() > 1:\n",
    "                nn.init.xavier_uniform_(p)\n",
    "\n",
    "\n",
    "    def forward(self, X, mask):\n",
    "        \"\"\"X: [B, N, 6, 3] coordinates; mask: [B, N] validity flags.\n",
    "\n",
    "        Returns (logits, graph_prjs): per-node base logits over the flat\n",
    "        valid-node list, and one projected embedding per sample.\n",
    "        \"\"\"\n",
    "        # Node features, edge features, flat edge index, per-node batch ids.\n",
    "        h_V, h_E, E_idx, batch_id = self.features(X, mask)\n",
    "        \n",
    "        # Message passing: edge features stay fixed, nodes are updated.\n",
    "        for enc_layer in self.encoder_layers:\n",
    "            h_EV = torch.cat([h_E, h_V[E_idx[0]], h_V[E_idx[1]]], dim=-1)\n",
    "            h_V = enc_layer(h_V, h_EV, E_idx, batch_id)\n",
    "        # Second message-passing stack (same scheme).\n",
    "        for dec_layer in self.decoder_layers:\n",
    "            h_EV = torch.cat([h_E, h_V[E_idx[0]], h_V[E_idx[1]]], dim=-1)\n",
    "            h_V = dec_layer(h_V, h_EV, E_idx, batch_id)\n",
    "\n",
    "        # Mean-pool node embeddings per sample to get graph embeddings.\n",
    "        graph_embs = []\n",
    "        for b_id in range(batch_id[-1].item()+1):\n",
    "            b_data = h_V[batch_id == b_id].mean(0)\n",
    "            graph_embs.append(b_data)\n",
    "        graph_embs = torch.stack(graph_embs, dim=0)\n",
    "        graph_prjs = self.projection_head(graph_embs)\n",
    "        # Per-node base classification.\n",
    "        logits = self.readout(h_V)\n",
    "        return logits, graph_prjs\n",
    "\n",
    "\n",
    "    # Deprecated: kept for API compatibility. Mirrors forward() but also\n",
    "    # returns the ground-truth labels restricted to valid positions.\n",
    "    def sample(self, X, S, mask=None):\n",
    "        # Fix: self.features takes (X, mask) and returns 4 values; the old\n",
    "        # call self.features(X, S, mask) unpacking 6 values raised at runtime.\n",
    "        h_V, h_E, E_idx, batch_id = self.features(X, mask)\n",
    "        gt_S = torch.masked_select(S, (mask == 1))  # labels at valid positions\n",
    "\n",
    "        for enc_layer in self.encoder_layers:\n",
    "            h_EV = torch.cat([h_E, h_V[E_idx[0]], h_V[E_idx[1]]], dim=-1)\n",
    "            h_V = enc_layer(h_V, h_EV, E_idx, batch_id)\n",
    "\n",
    "        for dec_layer in self.decoder_layers:\n",
    "            h_EV = torch.cat([h_E, h_V[E_idx[0]], h_V[E_idx[1]]], dim=-1)\n",
    "            h_V = dec_layer(h_V, h_EV, E_idx, batch_id)\n",
    "\n",
    "        logits = self.readout(h_V)\n",
    "        return logits, gt_S"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "6fce9f8d-d53a-48a1-a082-1047dca20c5c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "RNAModel(\n",
      "  (features): RNAFeatures(\n",
      "    (dropout): Dropout(p=0.1, inplace=False)\n",
      "    (node_embedding): Linear(in_features=101, out_features=128, bias=True)\n",
      "    (edge_embedding): Linear(in_features=115, out_features=128, bias=True)\n",
      "    (norm_nodes): Normalize()\n",
      "    (norm_edges): Normalize()\n",
      "  )\n",
      "  (W_s): Embedding(4, 128)\n",
      "  (encoder_layers): ModuleList(\n",
      "    (0): MPNNLayer(\n",
      "      (dropout): Dropout(p=0.1, inplace=False)\n",
      "      (norm1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
      "      (norm2): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
      "      (W1): Linear(in_features=384, out_features=128, bias=True)\n",
      "      (W2): Linear(in_features=128, out_features=128, bias=True)\n",
      "      (W3): Linear(in_features=128, out_features=128, bias=True)\n",
      "      (act): ReLU()\n",
      "      (dense): Sequential(\n",
      "        (0): Linear(in_features=128, out_features=512, bias=True)\n",
      "        (1): ReLU()\n",
      "        (2): Linear(in_features=512, out_features=128, bias=True)\n",
      "      )\n",
      "    )\n",
      "    (1): MPNNLayer(\n",
      "      (dropout): Dropout(p=0.1, inplace=False)\n",
      "      (norm1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
      "      (norm2): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
      "      (W1): Linear(in_features=384, out_features=128, bias=True)\n",
      "      (W2): Linear(in_features=128, out_features=128, bias=True)\n",
      "      (W3): Linear(in_features=128, out_features=128, bias=True)\n",
      "      (act): ReLU()\n",
      "      (dense): Sequential(\n",
      "        (0): Linear(in_features=128, out_features=512, bias=True)\n",
      "        (1): ReLU()\n",
      "        (2): Linear(in_features=512, out_features=128, bias=True)\n",
      "      )\n",
      "    )\n",
      "    (2): MPNNLayer(\n",
      "      (dropout): Dropout(p=0.1, inplace=False)\n",
      "      (norm1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
      "      (norm2): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
      "      (W1): Linear(in_features=384, out_features=128, bias=True)\n",
      "      (W2): Linear(in_features=128, out_features=128, bias=True)\n",
      "      (W3): Linear(in_features=128, out_features=128, bias=True)\n",
      "      (act): ReLU()\n",
      "      (dense): Sequential(\n",
      "        (0): Linear(in_features=128, out_features=512, bias=True)\n",
      "        (1): ReLU()\n",
      "        (2): Linear(in_features=512, out_features=128, bias=True)\n",
      "      )\n",
      "    )\n",
      "  )\n",
      "  (decoder_layers): ModuleList(\n",
      "    (0): MPNNLayer(\n",
      "      (dropout): Dropout(p=0.1, inplace=False)\n",
      "      (norm1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
      "      (norm2): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
      "      (W1): Linear(in_features=384, out_features=128, bias=True)\n",
      "      (W2): Linear(in_features=128, out_features=128, bias=True)\n",
      "      (W3): Linear(in_features=128, out_features=128, bias=True)\n",
      "      (act): ReLU()\n",
      "      (dense): Sequential(\n",
      "        (0): Linear(in_features=128, out_features=512, bias=True)\n",
      "        (1): ReLU()\n",
      "        (2): Linear(in_features=512, out_features=128, bias=True)\n",
      "      )\n",
      "    )\n",
      "    (1): MPNNLayer(\n",
      "      (dropout): Dropout(p=0.1, inplace=False)\n",
      "      (norm1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
      "      (norm2): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
      "      (W1): Linear(in_features=384, out_features=128, bias=True)\n",
      "      (W2): Linear(in_features=128, out_features=128, bias=True)\n",
      "      (W3): Linear(in_features=128, out_features=128, bias=True)\n",
      "      (act): ReLU()\n",
      "      (dense): Sequential(\n",
      "        (0): Linear(in_features=128, out_features=512, bias=True)\n",
      "        (1): ReLU()\n",
      "        (2): Linear(in_features=512, out_features=128, bias=True)\n",
      "      )\n",
      "    )\n",
      "    (2): MPNNLayer(\n",
      "      (dropout): Dropout(p=0.1, inplace=False)\n",
      "      (norm1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
      "      (norm2): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
      "      (W1): Linear(in_features=384, out_features=128, bias=True)\n",
      "      (W2): Linear(in_features=128, out_features=128, bias=True)\n",
      "      (W3): Linear(in_features=128, out_features=128, bias=True)\n",
      "      (act): ReLU()\n",
      "      (dense): Sequential(\n",
      "        (0): Linear(in_features=128, out_features=512, bias=True)\n",
      "        (1): ReLU()\n",
      "        (2): Linear(in_features=512, out_features=128, bias=True)\n",
      "      )\n",
      "    )\n",
      "  )\n",
      "  (projection_head): Sequential(\n",
      "    (0): Linear(in_features=128, out_features=128, bias=False)\n",
      "    (1): ReLU(inplace=True)\n",
      "    (2): Linear(in_features=128, out_features=128, bias=True)\n",
      "  )\n",
      "  (readout): Linear(in_features=128, out_features=4, bias=True)\n",
      ")\n"
     ]
    }
   ],
   "source": [
    "# Instantiate the model on the configured device and print its layer layout.\n",
    "model = RNAModel(config.model_config).to(config.device)\n",
    "print(model)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "528c5443",
   "metadata": {},
   "source": [
    "### training"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "fa10ad13-dc08-4515-b2e0-3fe96bee7d0d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optimizer, loss, and checkpoint directory setup.\n",
    "optimizer = Adam(model.parameters(), train_config.lr)\n",
    "criterion = nn.CrossEntropyLoss()\n",
    "# exist_ok=True avoids the check-then-create race of `if not exists: makedirs`.\n",
    "os.makedirs(train_config.output_dir, exist_ok=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "e18a8175-3562-4f27-a273-1b7725959ae4",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train loss: 1.4456: 100%|██████████| 204/204 [01:34<00:00,  2.16it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/30, Loss: 1.4573\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 59/59 [00:13<00:00,  4.49it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/30, recovery: 0.2342\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train loss: 1.3491: 100%|██████████| 204/204 [01:04<00:00,  3.16it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 2/30, Loss: 1.3664\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 59/59 [00:10<00:00,  5.58it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 2/30, recovery: 0.3287\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train loss: 1.2329: 100%|██████████| 204/204 [01:03<00:00,  3.23it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 3/30, Loss: 1.2914\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 59/59 [00:10<00:00,  5.58it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 3/30, recovery: 0.3683\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train loss: 1.1363: 100%|██████████| 204/204 [01:03<00:00,  3.23it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 4/30, Loss: 1.2046\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 59/59 [00:10<00:00,  5.75it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 4/30, recovery: 0.4033\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train loss: 0.8332: 100%|██████████| 204/204 [01:00<00:00,  3.38it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 5/30, Loss: 1.1030\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 59/59 [00:10<00:00,  5.70it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 5/30, recovery: 0.4275\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train loss: 1.2136: 100%|██████████| 204/204 [01:01<00:00,  3.33it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 6/30, Loss: 1.0136\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 59/59 [00:10<00:00,  5.68it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 6/30, recovery: 0.4532\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train loss: 0.7282: 100%|██████████| 204/204 [01:01<00:00,  3.31it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 7/30, Loss: 0.9211\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 59/59 [00:10<00:00,  5.64it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 7/30, recovery: 0.4768\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train loss: 1.3308: 100%|██████████| 204/204 [01:01<00:00,  3.34it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 8/30, Loss: 0.8581\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 59/59 [00:10<00:00,  5.63it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 8/30, recovery: 0.4788\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train loss: 0.8089: 100%|██████████| 204/204 [01:01<00:00,  3.31it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 9/30, Loss: 0.8296\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 59/59 [00:10<00:00,  5.72it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 9/30, recovery: 0.4987\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train loss: 1.3162: 100%|██████████| 204/204 [01:01<00:00,  3.30it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 10/30, Loss: 0.7678\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 59/59 [00:10<00:00,  5.66it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 10/30, recovery: 0.4779\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train loss: 0.8419: 100%|██████████| 204/204 [01:02<00:00,  3.29it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 11/30, Loss: 0.7453\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 59/59 [00:10<00:00,  5.71it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 11/30, recovery: 0.5090\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train loss: 0.2535: 100%|██████████| 204/204 [01:01<00:00,  3.33it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 12/30, Loss: 0.7069\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 59/59 [00:10<00:00,  5.61it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 12/30, recovery: 0.5241\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train loss: 0.7679: 100%|██████████| 204/204 [01:01<00:00,  3.31it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 13/30, Loss: 0.6614\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 59/59 [00:10<00:00,  5.72it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 13/30, recovery: 0.5214\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train loss: 1.0850: 100%|██████████| 204/204 [01:01<00:00,  3.29it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 14/30, Loss: 0.6368\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 59/59 [00:10<00:00,  5.67it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 14/30, recovery: 0.5286\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train loss: 1.4093: 100%|██████████| 204/204 [01:02<00:00,  3.26it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 15/30, Loss: 0.6205\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 59/59 [00:10<00:00,  5.73it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 15/30, recovery: 0.5240\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train loss: 0.4484: 100%|██████████| 204/204 [01:02<00:00,  3.24it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 16/30, Loss: 0.5849\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 59/59 [00:10<00:00,  5.65it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 16/30, recovery: 0.5410\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train loss: 1.1181: 100%|██████████| 204/204 [01:01<00:00,  3.30it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 17/30, Loss: 0.5763\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 59/59 [00:10<00:00,  5.68it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 17/30, recovery: 0.5513\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train loss: 0.8484: 100%|██████████| 204/204 [01:01<00:00,  3.31it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 18/30, Loss: 0.5608\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 59/59 [00:10<00:00,  5.65it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 18/30, recovery: 0.5510\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train loss: 1.1517: 100%|██████████| 204/204 [01:01<00:00,  3.34it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 19/30, Loss: 0.5638\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 59/59 [00:10<00:00,  5.62it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 19/30, recovery: 0.5476\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train loss: 0.2833: 100%|██████████| 204/204 [01:02<00:00,  3.26it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 20/30, Loss: 0.5233\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 59/59 [00:10<00:00,  5.68it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 20/30, recovery: 0.5576\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train loss: 0.7415: 100%|██████████| 204/204 [01:01<00:00,  3.30it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 21/30, Loss: 0.5157\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 59/59 [00:10<00:00,  5.63it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 21/30, recovery: 0.5508\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train loss: 0.8164: 100%|██████████| 204/204 [01:01<00:00,  3.29it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 22/30, Loss: 0.5071\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 59/59 [00:10<00:00,  5.63it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 22/30, recovery: 0.5575\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train loss: 0.9731: 100%|██████████| 204/204 [01:01<00:00,  3.30it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 23/30, Loss: 0.4933\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 59/59 [00:10<00:00,  5.68it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 23/30, recovery: 0.5557\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train loss: 0.6989: 100%|██████████| 204/204 [01:02<00:00,  3.26it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 24/30, Loss: 0.4882\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 59/59 [00:10<00:00,  5.64it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 24/30, recovery: 0.5487\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train loss: 1.0897: 100%|██████████| 204/204 [01:03<00:00,  3.23it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 25/30, Loss: 0.4754\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 59/59 [00:10<00:00,  5.71it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 25/30, recovery: 0.5711\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train loss: 0.3488: 100%|██████████| 204/204 [01:06<00:00,  3.07it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 26/30, Loss: 0.4354\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 59/59 [00:11<00:00,  5.02it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 26/30, recovery: 0.5624\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train loss: 0.1192: 100%|██████████| 204/204 [01:08<00:00,  2.98it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 27/30, Loss: 0.4532\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 59/59 [00:11<00:00,  5.03it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 27/30, recovery: 0.5696\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train loss: 0.2959: 100%|██████████| 204/204 [01:07<00:00,  3.03it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 28/30, Loss: 0.4342\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 59/59 [00:11<00:00,  5.10it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 28/30, recovery: 0.5701\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train loss: 1.1697: 100%|██████████| 204/204 [01:07<00:00,  3.01it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 29/30, Loss: 0.4446\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 59/59 [00:11<00:00,  5.04it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 29/30, recovery: 0.5679\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "train loss: 0.4947: 100%|██████████| 204/204 [01:04<00:00,  3.16it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 30/30, Loss: 0.4264\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 59/59 [00:11<00:00,  5.34it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 30/30, recovery: 0.5706\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "# Train with cross-entropy over valid positions; evaluate sequence recovery\n",
    "# (fraction of correctly predicted bases) on validation and keep the best model.\n",
    "best_valid_recovery = 0\n",
    "for epoch in range(train_config.epoch):\n",
    "    model.train()\n",
    "    epoch_loss = 0\n",
    "    train_pbar = tqdm(train_loader)\n",
    "    for batch in train_pbar:\n",
    "        X, S, mask, lengths, names = batch\n",
    "        X = X.to(config.device)\n",
    "        S = S.to(config.device)\n",
    "        mask = mask.to(config.device)\n",
    "        logits, _ = model(X, mask)\n",
    "        S = torch.masked_select(S, (mask==1))  # labels at valid positions only\n",
    "        loss = criterion(logits, S)\n",
    "        optimizer.zero_grad()  # clear grads before backward (clearer than after step)\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "        train_pbar.set_description('train loss: {:.4f}'.format(loss.item()))\n",
    "        epoch_loss += loss.item()\n",
    "    \n",
    "    epoch_loss /= len(train_loader)\n",
    "    print('Epoch {}/{}, Loss: {:.4f}'.format(epoch + 1, train_config.epoch, epoch_loss))\n",
    "    \n",
    "    model.eval()\n",
    "    with torch.no_grad():\n",
    "        recovery_list = []\n",
    "        for batch in tqdm(valid_loader):\n",
    "            X, S, mask, lengths, names = batch\n",
    "            X = X.to(config.device)\n",
    "            S = S.to(config.device)\n",
    "            mask = mask.to(config.device)\n",
    "            logits, _ = model(X, mask)\n",
    "            S = torch.masked_select(S, (mask==1))  # labels at valid positions only\n",
    "            # argmax of logits equals argmax of softmax(logits); skip the softmax.\n",
    "            samples = logits.argmax(dim=-1)\n",
    "            start_idx = 0\n",
    "            # Split the flat prediction/label vectors back into per-sequence chunks.\n",
    "            for length in lengths:\n",
    "                end_idx = start_idx + length.item()\n",
    "                sample = samples[start_idx: end_idx]\n",
    "                gt_S = S[start_idx: end_idx]\n",
    "                recovery = (sample==gt_S).sum() / len(sample)\n",
    "                recovery_list.append(recovery.cpu().numpy())\n",
    "                start_idx = end_idx\n",
    "        valid_recovery = np.mean(recovery_list)\n",
    "        print('Epoch {}/{}, recovery: {:.4f}'.format(epoch + 1, train_config.epoch, valid_recovery))\n",
    "        # Checkpoint whenever validation recovery improves.\n",
    "        if valid_recovery > best_valid_recovery:\n",
    "            best_valid_recovery = valid_recovery\n",
    "            torch.save(model.state_dict(), os.path.join(train_config.output_dir, 'best.pt'))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2122bf1d",
   "metadata": {},
   "source": [
    "### 显存不足报错\n",
    "\n",
    "RuntimeError: CUDA error: an illegal memory access was encountered\n",
    "CUDA kernel errors might be asynchronously reported at some other API call,so the stacktrace below might be incorrect.\n",
    "For debugging consider passing CUDA_LAUNCH_BLOCKING=1.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0f86d866-5712-4987-98a7-64606adb72f8",
   "metadata": {},
   "outputs": [],
   "source": [
    "eval_model = RNAModel(config.model_config).to(config.device)\n",
    "checkpoint_path = train_config.ckpt_path\n",
    "print(\"loading checkpoint from path:\", checkpoint_path)\n",
    "eval_model.load_state_dict(torch.load(checkpoint_path, map_location='cpu'), strict=True)\n",
    "eval_model.to(config.device)\n",
    "eval_model.eval()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "beac3fe1",
   "metadata": {},
   "source": [
    "### test"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "10158a05-4192-4f93-b173-a7ba4dc25a96",
   "metadata": {},
   "outputs": [],
   "source": [
    "with torch.no_grad():\n",
    "    result_list = []\n",
    "    for batch in tqdm(test_loader):\n",
    "        X, S, mask, lengths, names = batch\n",
    "        X = X.to(config.device)\n",
    "        S = S.to(config.device)\n",
    "        mask = mask.to(config.device)\n",
    "        logits, _ = eval_model(X, mask)\n",
    "        S = torch.masked_select(S, (mask==1))  # 选择有效的标签\n",
    "        probs = F.softmax(logits, dim=-1)\n",
    "        samples = probs.argmax(dim=-1)\n",
    "        start_idx = 0\n",
    "        for length in lengths:\n",
    "            end_idx = start_idx + length.item()\n",
    "            sample = samples[start_idx: end_idx]\n",
    "            # print()\n",
    "            gt_S = S[start_idx: end_idx]\n",
    "            recovery = (sample==gt_S).sum() / len(sample)\n",
    "            recovery_list.append(recovery.cpu().numpy())\n",
    "            start_idx = end_idx\n",
    "    test_recovery = np.mean(recovery_list)\n",
    "    print('test recovery: {:.4f}'.format(test_recovery))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b00ce785",
   "metadata": {},
   "source": [
    "### Predict"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "bfb6d926",
   "metadata": {},
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'RNAModel' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mNameError\u001b[0m                                 Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[1], line 1\u001b[0m\n\u001b[1;32m----> 1\u001b[0m model \u001b[38;5;241m=\u001b[39m \u001b[43mRNAModel\u001b[49m(config\u001b[38;5;241m.\u001b[39mmodel_config)\u001b[38;5;241m.\u001b[39mto(Config\u001b[38;5;241m.\u001b[39mdevice)\n\u001b[0;32m      2\u001b[0m \u001b[38;5;66;03m# print(model)\u001b[39;00m\n\u001b[0;32m      3\u001b[0m \u001b[38;5;66;03m# 加载模型权重\u001b[39;00m\n\u001b[0;32m      4\u001b[0m state_dict \u001b[38;5;241m=\u001b[39m torch\u001b[38;5;241m.\u001b[39mload(TrainConfig\u001b[38;5;241m.\u001b[39mckpt_path, map_location\u001b[38;5;241m=\u001b[39mConfig\u001b[38;5;241m.\u001b[39mdevice)\n",
      "\u001b[1;31mNameError\u001b[0m: name 'RNAModel' is not defined"
     ]
    }
   ],
   "source": [
    "model = RNAModel(config.model_config).to(Config.device)\n",
    "# print(model)\n",
    "# 加载模型权重\n",
    "state_dict = torch.load(TrainConfig.ckpt_path, map_location=Config.device)\n",
    "model.load_state_dict(state_dict)\n",
    "model.eval()\n",
    "\n",
    "recovery_list = []\n",
    "with torch.no_grad():\n",
    "    result_list = []\n",
    "    for batch in tqdm(test_loader):\n",
    "        X, S, mask, lengths, names = batch\n",
    "        X = X.to(config.device)\n",
    "        S = S.to(config.device)\n",
    "        mask = mask.to(config.device)\n",
    "        S = torch.masked_select(S, (mask==1))  # 选择有效的标签\n",
    "        logits, _ = model(X, mask)\n",
    "        probs = F.softmax(logits, dim=-1)\n",
    "        samples = probs.argmax(dim=-1)\n",
    "        start_idx = 0\n",
    "        for length in lengths:\n",
    "            end_idx = start_idx + length.item()\n",
    "            sample = samples[start_idx: end_idx]\n",
    "            # print()\n",
    "            \n",
    "            gt_S = S[start_idx: end_idx]\n",
    "            recovery = (sample==gt_S).sum() / len(sample)\n",
    "            recovery_list.append(recovery.cpu().numpy())\n",
    "            start_idx = end_idx\n",
    "    test_recovery = np.mean(recovery_list)\n",
    "    print('test recovery: {:.4f}'.format(test_recovery))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "test",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.19"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
