from dataclasses import dataclass, field
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import pandas as pd
import random
import os
import torch
import torch.nn as nn
import numpy as np
from torch_scatter import scatter_sum, scatter_softmax
import torch.nn.functional as F
from typing import List
from torch.optim import Adam
from tqdm import tqdm
from Bio import SeqIO

# Define function to read FASTA files using Biopython
def read_fasta_biopython(file_path):
    """Parse a FASTA file and return a mapping of record id -> sequence string."""
    return {record.id: str(record.seq) for record in SeqIO.parse(file_path, "fasta")}

if __name__ == "__main__":
    # Data loading and preprocessing: collect (pdb_id, sequence) pairs from
    # the per-entry FASTA files.
    seq_dir = "./RNAdesignv1/train/seqs"
    train_file_list = os.listdir(seq_dir)
    content_dict = {
        "pdb_id": [],
        "seq": []
    }
    for file in tqdm(train_file_list):
        sequences = read_fasta_biopython(os.path.join(seq_dir, file))
        # Each file is expected to contain a single record; take the first
        # entry directly instead of materializing keys() and values() lists.
        pdb_id, seq = next(iter(sequences.items()))
        content_dict["pdb_id"].append(pdb_id)
        content_dict["seq"].append(seq)

    data = pd.DataFrame(content_dict)

    # Random train/valid/test split (70% / 20% / 10%).
    split = np.random.choice(['train', 'valid', 'test'], size=len(data), p=[0.7, 0.2, 0.1])
    data['split'] = split
    train_data = data[data['split']=='train']
    valid_data = data[data['split']=='valid']
    test_data = data[data['split']=='test']
    train_data.to_csv("public_train_data.csv", index=False)
    valid_data.to_csv("public_valid_data.csv", index=False)
    test_data.to_csv("public_test_data.csv", index=False)

# Define Configuration Classes
@dataclass
class DataConfig:
    """File-system locations of the CSV splits and the coordinate .npy files."""
    # All three splits read .npy coordinate arrays from the same directory;
    # the CSVs are produced by the preprocessing step under __main__.
    train_npy_data_dir: str = './RNAdesignv1/train/coords'
    train_data_path: str = 'public_train_data.csv'
    valid_npy_data_dir: str = './RNAdesignv1/train/coords'
    valid_data_path: str = 'public_valid_data.csv'
    test_npy_data_dir: str = './RNAdesignv1/train/coords'
    test_data_path: str = 'public_test_data.csv'

@dataclass
class ModelConfig:
    """Hyperparameters of the RNA design model."""
    smoothing: float = 0.1  # label-smoothing factor (stored on RNAModel; not used in the visible code)
    hidden: int = 128  # width of node/edge embeddings and MPNN hidden states
    vocab_size: int = 4  # explicitly int: one class per nucleotide A/U/C/G
    k_neighbors: int = 30  # explicitly int: k of the k-NN graph over P atoms
    dropout: float = 0.1
    node_feat_types: List[str] = field(default_factory=lambda: ['angle', 'distance', 'direction'])  # field() avoids a shared mutable default
    edge_feat_types: List[str] = field(default_factory=lambda: ['orientation', 'distance', 'direction'])  # same as above
    num_encoder_layers: int = 3
    num_decoder_layers: int = 3  # corrected to an integer (stray decimal point removed)
    
# 修改训练配置为网格搜索
# Training configuration (previously adapted for grid search)
@dataclass
class TrainConfig:
    """Optimization settings and checkpoint locations."""
    batch_size: int = 16
    epoch: int = 100       # raised from the earlier value of 30
    lr: float = 0.001
    output_dir: str = 'ckpts/public_v2'
    ckpt_path: str = 'ckpts/public_v2/best.pt'

@dataclass
class Config:
    """Top-level experiment configuration bundling data/model/train settings."""
    pipeline: str = 'train'
    seed: int = 2025
    device: str = 'cuda:0'
    # default_factory is required for mutable (dataclass) defaults.
    data_config: DataConfig = field(default_factory=DataConfig)
    model_config: ModelConfig = field(default_factory=ModelConfig)
    train_config: TrainConfig = field(default_factory=TrainConfig)

def plot_heatmap(results_matrix, epochs, lrs):
    """Save the grid-search recovery matrix as an annotated heatmap PNG.

    Rows are epoch settings, columns are learning rates; the figure is
    written to 'grid_search_heatmap.png'.
    """
    import seaborn as sns
    import matplotlib.pyplot as plt

    plt.figure(figsize=(10, 6))
    ax = sns.heatmap(
        results_matrix,
        annot=True,
        fmt=".2%",
        xticklabels=lrs,
        yticklabels=epochs,
        cmap="YlGnBu",
        cbar_kws={'label': 'Recovery Rate'},
    )
    ax.set_title("Hyperparameter Grid Search Results")
    ax.set_xlabel("Learning Rate")
    ax.set_ylabel("Epochs")
    plt.tight_layout()
    plt.savefig("grid_search_heatmap.png")
    plt.close()

# Define RNADataset Class and Seeding Function
class RNADataset(Dataset):
    """Dataset of RNA sequences paired with backbone atom coordinates.

    Reads (pdb_id, seq) rows from a CSV and, per item, loads the matching
    '<pdb_id>.npy' coordinate array (assumed shape (L, 6, 3), atoms indexed
    0..5 as P, O5', C5', C4', C3', O3' — confirm against the data source).

    Note: a stray copy of the module-level `plot_heatmap` had been pasted in
    here as a method without `self`; it was dead code (and would crash if
    invoked as a method) and has been removed.
    """

    def __init__(self, data_path, npy_dir):
        super(RNADataset, self).__init__()
        self.data = pd.read_csv(data_path)
        self.npy_dir = npy_dir
        self.seq_list = self.data['seq'].to_list()
        self.name_list = self.data['pdb_id'].to_list()

    def __len__(self):
        return len(self.name_list)

    def __getitem__(self, idx):
        """Return a dict with the entry name, sequence, and per-atom coordinates."""
        seq = self.seq_list[idx]
        pdb_id = self.name_list[idx]
        coords = np.load(os.path.join(self.npy_dir, pdb_id + '.npy'))

        feature = {
            "name": pdb_id,
            "seq": seq,
            "coords": {
                "P": coords[:, 0, :],
                "O5'": coords[:, 1, :],
                "C5'": coords[:, 2, :],
                "C4'": coords[:, 3, :],
                "C3'": coords[:, 4, :],
                "O3'": coords[:, 5, :],
            }
        }

        return feature

def seeding(seed):
    """Seed every RNG in use (random, numpy, torch CPU/CUDA) and force
    deterministic cuDNN behavior for reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    print('seeding done!!!')
# Instantiate the experiment configuration and seed all RNGs.
config = Config()
data_config = config.data_config
train_config = config.train_config
seeding(config.seed)


# One dataset per split; all splits read coordinates from the same directory.
train_dataset = RNADataset(
    data_path=data_config.train_data_path,
    npy_dir=data_config.train_npy_data_dir,
)
valid_dataset = RNADataset(
    data_path=data_config.valid_data_path,
    npy_dir=data_config.valid_npy_data_dir,
)
test_dataset = RNADataset(
    data_path=data_config.test_data_path,
    npy_dir=data_config.test_npy_data_dir,
)

def featurize(batch):
    """Collate a list of RNADataset items into padded tensors.

    Returns (X, S, mask, lengths, names):
      X:       float32 [B, L_max, 6, 3] coordinates, zero-padded
      S:       long    [B, L_max] nucleotide indices into 'AUCG'
      mask:    float32 [B, L_max], 1 for valid positions
      lengths: int32 numpy array of original sequence lengths
      names:   list of entry names
    """
    alphabet = 'AUCG'
    batch_size = len(batch)
    atom_order = ["P", "O5'", "C5'", "C4'", "C3'", "O3'"]
    lengths = np.array([len(item['seq']) for item in batch], dtype=np.int32)
    max_len = int(max(len(item['seq']) for item in batch))

    # NaN marks padding; real coordinates have their NaNs zeroed up front.
    coords_batch = np.full([batch_size, max_len, 6, 3], np.nan)
    seq_batch = np.zeros([batch_size, max_len], dtype=np.int32)
    names = []
    for i, item in enumerate(batch):
        n = len(item['seq'])
        coords_batch[i, :n] = np.stack(
            [np.nan_to_num(item['coords'][a], nan=0.0) for a in atom_order], axis=1)
        seq_batch[i, :n] = [alphabet.index(c) for c in item['seq']]
        names.append(item['name'])

    # A position is valid iff all of its 18 coordinate values are finite.
    mask = np.isfinite(coords_batch.sum(axis=(2, 3))).astype(np.float32)
    valid_counts = mask.sum(axis=1).astype(np.int32)

    # Pack valid positions to the front of each row.
    packed_X = np.full_like(coords_batch, np.nan)
    packed_S = np.zeros_like(seq_batch)
    for i, n in enumerate(valid_counts):
        keep = mask[i] == 1
        packed_X[i, :n] = coords_batch[i][keep]
        packed_S[i, :n] = seq_batch[i][keep]

    mask = np.isfinite(packed_X.sum(axis=(2, 3))).astype(np.float32)
    packed_X[np.isnan(packed_X)] = 0.

    # Conversion to torch tensors.
    S = torch.from_numpy(packed_S).to(dtype=torch.long)
    X = torch.from_numpy(packed_X).to(dtype=torch.float32)
    mask = torch.from_numpy(mask).to(dtype=torch.float32)
    return X, S, mask, lengths, names

# DataLoaders share the custom collate function that pads and packs
# variable-length RNAs; only the training split is shuffled.
train_loader = DataLoader(train_dataset,
        batch_size=train_config.batch_size,
        shuffle=True,
        num_workers=0,
        collate_fn=featurize)

valid_loader = DataLoader(valid_dataset,
        batch_size=train_config.batch_size,
        shuffle=False,
        num_workers=0,
        collate_fn=featurize)

test_loader = DataLoader(test_dataset,
        batch_size=train_config.batch_size,
        shuffle=False,
        num_workers=0,
        collate_fn=featurize)

def gather_edges(edges, neighbor_idx):
    """Select the edge features of each node's k neighbors.

    edges: [B, N, N, C], neighbor_idx: [B, N, K] -> [B, N, K, C].
    """
    feat_dim = edges.shape[-1]
    idx = neighbor_idx[..., None].expand(*neighbor_idx.shape, feat_dim)
    return torch.gather(edges, dim=2, index=idx)

def gather_nodes(nodes, neighbor_idx):
    """Gather node features for every neighbor index.

    nodes: [B, N, C], neighbor_idx: [B, N, K] -> [B, N, K, C].
    """
    batch, n_nodes, k = neighbor_idx.shape
    feat_dim = nodes.size(2)
    flat_idx = neighbor_idx.reshape(batch, n_nodes * k, 1).expand(-1, -1, feat_dim)
    flat_feats = torch.gather(nodes, dim=1, index=flat_idx)
    return flat_feats.reshape(batch, n_nodes, k, feat_dim)

def gather_nodes_t(nodes, neighbor_idx):
    """Gather node features with a 2-D index: [B, N, C] x [B, K] -> [B, K, C]."""
    expanded = neighbor_idx.unsqueeze(-1).expand(-1, -1, nodes.size(2))
    return torch.gather(nodes, dim=1, index=expanded)

def cat_neighbors_nodes(h_nodes, h_neighbors, E_idx):
    """Concatenate each edge's gathered neighbor-node features onto its edge features."""
    gathered = gather_nodes(h_nodes, E_idx)
    return torch.cat([h_neighbors, gathered], dim=-1)


class MPNNLayer(nn.Module):
    """One message-passing update over a sparse edge list.

    h_E rows (width num_hidden + num_in) are transformed by a 3-layer MLP into
    per-edge messages, sum-aggregated onto the node given by edge_idx[0], and
    combined with the node state through two residual + LayerNorm steps.
    """

    def __init__(self, num_hidden, num_in, dropout=0.1, num_heads=None, scale=30):
        # num_heads is accepted for API compatibility but unused here.
        super(MPNNLayer, self).__init__()
        self.num_hidden = num_hidden
        self.num_in = num_in
        self.scale = scale  # fixed divisor for the aggregated messages
        self.dropout = nn.Dropout(dropout)
        self.norm1 = nn.LayerNorm(num_hidden)
        self.norm2 = nn.LayerNorm(num_hidden)

        self.W1 = nn.Linear(num_hidden + num_in, num_hidden, bias=True)
        self.W2 = nn.Linear(num_hidden, num_hidden, bias=True)
        self.W3 = nn.Linear(num_hidden, num_hidden, bias=True)
        self.act = nn.ReLU()

        # Position-wise feed-forward applied after message aggregation.
        self.dense = nn.Sequential(
            nn.Linear(num_hidden, num_hidden*4),
            nn.ReLU(),
            nn.Linear(num_hidden*4, num_hidden)
        )

    def forward(self, h_V, h_E, edge_idx, batch_id=None):
        """Update node states h_V [N, H] from edge inputs h_E [E, H + num_in].

        edge_idx is a [2, E] long tensor; messages are summed onto the nodes
        indexed by edge_idx[0]. batch_id is accepted for interface
        compatibility and unused.
        """
        recv_idx = edge_idx[0]
        h_message = self.W3(self.act(self.W2(self.act(self.W1(h_E)))))
        # BUGFIX: zeros_like + index_add_ (instead of torch_scatter.scatter_sum
        # with its default dim_size = max index + 1) guarantees the aggregate
        # has exactly h_V.shape[0] rows even when trailing nodes receive no
        # edges, avoiding a shape mismatch in the residual sum below.
        dh = torch.zeros_like(h_V).index_add_(0, recv_idx, h_message) / self.scale
        h_V = self.norm1(h_V + self.dropout(dh))
        dh = self.dense(h_V)
        h_V = self.norm2(h_V + self.dropout(dh))
        return h_V

class Normalize(nn.Module):
    """LayerNorm-style normalization with learnable affine parameters and a
    configurable normalization axis."""

    def __init__(self, features, epsilon=1e-6):
        super(Normalize, self).__init__()
        self.gain = nn.Parameter(torch.ones(features))
        self.bias = nn.Parameter(torch.zeros(features))
        self.epsilon = epsilon

    def forward(self, x, dim=-1):
        """Standardize x along `dim`, then apply gain and bias."""
        mu = x.mean(dim, keepdim=True)
        sigma = torch.sqrt(x.var(dim, keepdim=True) + self.epsilon)
        standardized = (x - mu) / (sigma + self.epsilon)
        if dim == -1:
            return self.gain * standardized + self.bias
        # Reshape the 1-D affine parameters so they broadcast along `dim`.
        view_shape = [1] * mu.dim()
        view_shape[dim] = self.gain.numel()
        return self.gain.view(view_shape) * standardized + self.bias.view(view_shape)

# Dimensionality of each raw geometric feature; RNAFeatures sums the selected
# entries to size its node/edge embedding layers.
feat_dims = {
    'node': {
        'angle': 12,      # 6 backbone dihedrals -> cos + sin each
        'distance': 80,   # 5 intra-residue atom pairs x 16 RBF bins
        'direction': 9,   # 3 unit vectors in the local residue frame
    },
    'edge': {
        'orientation': 4,  # quaternion of the relative neighbor rotation
        'distance': 96,    # 6 inter-residue atom pairs x 16 RBF bins
        'direction': 15,   # 5 neighbor atom unit vectors x 3 components
    }
}


def nan_to_num(tensor, nan=0.0):
    """Return `tensor` with NaN entries replaced by `nan`.

    BUGFIX: the previous version assigned through a boolean mask, mutating the
    caller's tensor in place (unsafe for grad-tracked or shared tensors).
    This version is out-of-place and leaves the input untouched.
    """
    return torch.where(torch.isnan(tensor), torch.full_like(tensor, nan), tensor)

def _normalize(tensor, dim=-1):
    """L2-normalize `tensor` along `dim`; all-zero slices map to zeros (the
    0/0 NaNs are replaced) instead of propagating NaN."""
    norms = torch.norm(tensor, dim=dim, keepdim=True)
    return nan_to_num(tensor / norms)


class RNAFeatures(nn.Module):
    """Compute geometric node/edge features from RNA backbone coordinates.

    forward() builds a k-NN graph over the P atoms, assembles dihedral-angle,
    RBF-distance and direction features, embeds them, and returns the graph in
    flattened sparse form (only positions with mask == 1 are kept).
    """

    # NOTE(review): the [] defaults are mutable and shared across calls; they
    # are only read here, so this is safe, but a factory would be cleaner.
    def __init__(self, edge_features, node_features, node_feat_types=[], edge_feat_types=[], num_rbf=16, top_k=30, augment_eps=0., dropout=0.1, args=None):
        super(RNAFeatures, self).__init__()
        """Extract RNA Features"""
        self.edge_features = edge_features
        self.node_features = node_features
        self.top_k = top_k
        self.augment_eps = augment_eps 
        self.num_rbf = num_rbf
        self.dropout = nn.Dropout(dropout)
        self.node_feat_types = node_feat_types
        self.edge_feat_types = edge_feat_types

        # Embedding input widths are the sums of the selected raw feature dims.
        node_in = sum([feat_dims['node'][feat] for feat in node_feat_types])
        edge_in = sum([feat_dims['edge'][feat] for feat in edge_feat_types])
        self.node_embedding = nn.Linear(node_in,  node_features, bias=True)
        self.edge_embedding = nn.Linear(edge_in, edge_features, bias=True)
        self.norm_nodes = Normalize(node_features)
        self.norm_edges = Normalize(edge_features)
        
    def _dist(self, X, mask, eps=1E-6):
        """k-NN by Euclidean distance among valid positions.

        X: [B, N, 3]; mask: [B, N] with 1 = valid. Masked pairs get a large
        distance (10000, then pushed past the row max) so topk(largest=False)
        never selects them. Returns neighbor distances and indices.
        """
        mask_2D = torch.unsqueeze(mask,1) * torch.unsqueeze(mask,2)
        dX = torch.unsqueeze(X,1) - torch.unsqueeze(X,2)
        D = (1. - mask_2D)*10000 + mask_2D* torch.sqrt(torch.sum(dX**2, 3) + eps)

        D_max, _ = torch.max(D, -1, keepdim=True)
        D_adjust = D + (1. - mask_2D) * (D_max+1)
        D_neighbors, E_idx = torch.topk(D_adjust, min(self.top_k, D_adjust.shape[-1]), dim=-1, largest=False)
        return D_neighbors, E_idx
    
    def _rbf(self, D):
        """Expand distances into self.num_rbf Gaussian radial basis bins
        with centers evenly spaced on [0, 20]."""
        D_min, D_max, D_count = 0., 20., self.num_rbf
        D_mu = torch.linspace(D_min, D_max, D_count, device=D.device)
        D_mu = D_mu.view([1,1,1,-1])
        D_sigma = (D_max - D_min) / D_count
        D_expand = torch.unsqueeze(D, -1)
        return torch.exp(-((D_expand - D_mu) / D_sigma)**2)
    
    def _get_rbf(self, A, B, E_idx=None, num_rbf=16):
        """RBF features of distances between atom sets A and B [B, N, 3].

        With E_idx: distances from each position to its k neighbors (edge
        feature). Without: per-position distance A_i -> B_i (node feature).
        """
        if E_idx is not None:
            D_A_B = torch.sqrt(torch.sum((A[:,:,None,:] - B[:,None,:,:])**2,-1) + 1e-6)
            D_A_B_neighbors = gather_edges(D_A_B[:,:,:,None], E_idx)[:,:,:,0]
            RBF_A_B = self._rbf(D_A_B_neighbors)
        else:
            D_A_B = torch.sqrt(torch.sum((A[:,:,None,:] - B[:,:,None,:])**2,-1) + 1e-6)
            RBF_A_B = self._rbf(D_A_B)
        return RBF_A_B
    
    def _quaternions(self, R):
        """Convert rotation matrices R [..., 3, 3] to unit quaternions (x, y, z, w)."""
        diag = torch.diagonal(R, dim1=-2, dim2=-1)
        Rxx, Ryy, Rzz = diag.unbind(-1)
        magnitudes = 0.5 * torch.sqrt(torch.abs(1 + torch.stack([
              Rxx - Ryy - Rzz, 
            - Rxx + Ryy - Rzz, 
            - Rxx - Ryy + Rzz
        ], -1)))
        _R = lambda i,j: R[:,:,:,i,j]
        # Sign of each imaginary component from the antisymmetric part of R.
        signs = torch.sign(torch.stack([
            _R(2,1) - _R(1,2),
            _R(0,2) - _R(2,0),
            _R(1,0) - _R(0,1)
        ], -1))
        xyz = signs * magnitudes
        w = torch.sqrt(F.relu(1 + diag.sum(-1, keepdim=True))) / 2.
        Q = torch.cat((xyz, w), -1)
        Q = F.normalize(Q, dim=-1)
        return Q
    
    def _orientations_coarse(self, X, E_idx, eps=1e-6):
        """Direction and orientation features from the backbone.

        Builds a local frame per residue from consecutive backbone unit
        vectors (anchored at C3', atom index 4), then returns:
          V_direct  - intra-residue atom directions in the local frame
          E_direct  - neighbor atom directions in the local frame
          E_orient  - quaternions of the relative neighbor-frame rotations
        """
        V = X.clone()
        # Flatten the 6 backbone atoms per residue into one atom chain.
        X = X[:,:,:6,:].reshape(X.shape[0], 6*X.shape[1], 3) 
        dX = X[:,1:,:] - X[:,:-1,:]
        U = _normalize(dX, dim=-1)
        u_0, u_1 = U[:,:-2,:], U[:,1:-1,:]
        n_0 = _normalize(torch.cross(u_0, u_1), dim=-1)
        b_1 = _normalize(u_0 - u_1, dim=-1)
        
        # select C3'
        n_0 = n_0[:,4::6,:] 
        b_1 = b_1[:,4::6,:]
        X = X[:,4::6,:]

        # Per-residue rotation matrix, flattened and padded for gathering.
        Q = torch.stack((b_1, n_0, torch.cross(b_1, n_0)), 2)
        Q = Q.view(list(Q.shape[:2]) + [9])
        Q = F.pad(Q, (0,0,0,1), 'constant', 0) # [16, 464, 9]

        Q_neighbors = gather_nodes(Q, E_idx) # [16, 464, 30, 9]
        P_neighbors = gather_nodes(V[:,:,0,:], E_idx) # [16, 464, 30, 3]
        O5_neighbors = gather_nodes(V[:,:,1,:], E_idx)
        C5_neighbors = gather_nodes(V[:,:,2,:], E_idx)
        C4_neighbors = gather_nodes(V[:,:,3,:], E_idx)
        O3_neighbors = gather_nodes(V[:,:,5,:], E_idx)
        
        Q = Q.view(list(Q.shape[:2]) + [3,3]).unsqueeze(2) # [16, 464, 1, 3, 3]
        Q_neighbors = Q_neighbors.view(list(Q_neighbors.shape[:3]) + [3,3]) # [16, 464, 30, 3, 3]

        dX = torch.stack([P_neighbors,O5_neighbors,C5_neighbors,C4_neighbors,O3_neighbors], dim=3) - X[:,:,None,None,:] # [16, 464, 30, 3]
        dU = torch.matmul(Q[:,:,:,None,:,:], dX[...,None]).squeeze(-1) # [16, 464, 30, 3] relative coordinates of the neighbors
        B, N, K = dU.shape[:3]
        E_direct = _normalize(dU, dim=-1)
        E_direct = E_direct.reshape(B, N, K,-1)
        # Relative rotation between each residue frame and its neighbor frames.
        R = torch.matmul(Q.transpose(-1,-2), Q_neighbors)
        E_orient = self._quaternions(R)
        
        # Intra-residue directions of P, C5', C4' relative to C3', in-frame.
        dX_inner = V[:,:,[0,2,3],:] - X.unsqueeze(-2)
        dU_inner = torch.matmul(Q, dX_inner.unsqueeze(-1)).squeeze(-1)
        dU_inner = _normalize(dU_inner, dim=-1)
        V_direct = dU_inner.reshape(B,N,-1)
        return V_direct, E_direct, E_orient
    
    def _dihedrals(self, X, eps=1e-7):
        """Backbone dihedral angles per residue, returned as (cos, sin) pairs
        -> [B, N, 12]."""
        # P, O5', C5', C4', C3', O3'
        X = X[:,:,:6,:].reshape(X.shape[0], 6*X.shape[1], 3)

        # Shifted slices of unit vectors
        # https://iupac.qmul.ac.uk/misc/pnuc2.html#220
        # https://x3dna.org/highlights/torsion-angles-of-nucleic-acid-structures
        # alpha:   O3'_{i-1} P_i O5'_i C5'_i
        # beta:    P_i O5'_i C5'_i C4'_i
        # gamma:   O5'_i C5'_i C4'_i C3'_i
        # delta:   C5'_i C4'_i C3'_i O3'_i
        # epsilon: C4'_i C3'_i O3'_i P_{i+1}
        # zeta:    C3'_i O3'_i P_{i+1} O5'_{i+1} 
        # What's more:
        #   chi: C1' - N9 
        #   chi is different for (C, T, U) and (A, G) https://x3dna.org/highlights/the-chi-x-torsion-angle-characterizes-base-sugar-relative-orientation

        dX = X[:, 5:, :] - X[:, :-5, :] # O3'-P, P-O5', O5'-C5', C5'-C4', ...
        U = F.normalize(dX, dim=-1)
        u_2 = U[:,:-2,:]  # O3'-P, P-O5', ...
        u_1 = U[:,1:-1,:] # P-O5', O5'-C5', ...
        u_0 = U[:,2:,:]   # O5'-C5', C5'-C4', ...
        # Backbone normals
        n_2 = F.normalize(torch.cross(u_2, u_1), dim=-1)
        n_1 = F.normalize(torch.cross(u_1, u_0), dim=-1)

        # Angle between normals
        cosD = (n_2 * n_1).sum(-1)
        cosD = torch.clamp(cosD, -1+eps, 1-eps)
        D = torch.sign((u_2 * n_1).sum(-1)) * torch.acos(cosD)
        
        # Pad so each residue keeps exactly 6 angles after the reshape.
        D = F.pad(D, (3,4), 'constant', 0)
        D = D.view((D.size(0), D.size(1) //6, 6))
        return torch.cat((torch.cos(D), torch.sin(D)), 2) # return D_features
    
    def forward(self, X, S, mask):
        """Featurize a padded batch.

        X: [B, N, 6, 3] coordinates, S: [B, N] labels, mask: [B, N] with
        1 = valid. Returns flattened per-node coords X, labels S, embedded
        node features h_V, edge features h_E, a [2, E] edge index in the flat
        node numbering (dst row first), and the per-node batch id.
        """
        # Optional Gaussian coordinate augmentation (training only).
        if self.training and self.augment_eps > 0:
            X = X + self.augment_eps * torch.randn_like(X)

        # Build k-Nearest Neighbors graph
        B, N, _,_ = X.shape
        # P, O5', C5', C4', C3', O3'
        atom_P = X[:, :, 0, :]
        atom_O5_ = X[:, :, 1, :]
        atom_C5_ = X[:, :, 2, :]
        atom_C4_ = X[:, :, 3, :]
        atom_C3_ = X[:, :, 4, :] 
        atom_O3_ = X[:, :, 5, :]

        # k-NN graph is built over the P atoms only.
        X_backbone = atom_P
        D_neighbors, E_idx = self._dist(X_backbone, mask)        

        mask_bool = (mask==1)
        mask_attend = gather_nodes(mask.unsqueeze(-1), E_idx).squeeze(-1)
        mask_attend = (mask.unsqueeze(-1) * mask_attend) == 1
        # Helpers that flatten padded [B, N, ...] tensors down to valid rows.
        edge_mask_select = lambda x: torch.masked_select(x, mask_attend.unsqueeze(-1)).reshape(-1,x.shape[-1])
        node_mask_select = lambda x: torch.masked_select(x, mask_bool.unsqueeze(-1)).reshape(-1, x.shape[-1])

        # node features
        h_V = []
        # angle
        V_angle = node_mask_select(self._dihedrals(X))
        # distance
        node_list = ['O5_-P', 'C5_-P', 'C4_-P', 'C3_-P', 'O3_-P']
        V_dist = []
        
        # NOTE(review): vars() reads the local atom_* tensors by name;
        # CPython-specific behavior, but it is read-only here.
        for pair in node_list:
            atom1, atom2 = pair.split('-')
            V_dist.append(node_mask_select(self._get_rbf(vars()['atom_' + atom1], vars()['atom_' + atom2], None, self.num_rbf).squeeze()))
        V_dist = torch.cat(tuple(V_dist), dim=-1).squeeze()
        # direction
        V_direct, E_direct, E_orient = self._orientations_coarse(X, E_idx)
        V_direct = node_mask_select(V_direct)
        E_direct, E_orient = list(map(lambda x: edge_mask_select(x), [E_direct, E_orient]))

        # edge features
        h_E = []
        # dist
        edge_list = ['P-P', 'O5_-P', 'C5_-P', 'C4_-P', 'C3_-P', 'O3_-P']
        E_dist = [] 
        for pair in edge_list:
            atom1, atom2 = pair.split('-')
            E_dist.append(edge_mask_select(self._get_rbf(vars()['atom_' + atom1], vars()['atom_' + atom2], E_idx, self.num_rbf)))
        E_dist = torch.cat(tuple(E_dist), dim=-1)

        # Assemble only the configured feature types, in a fixed order.
        if 'angle' in self.node_feat_types:
            h_V.append(V_angle)
        if 'distance' in self.node_feat_types:
            h_V.append(V_dist)
        if 'direction' in self.node_feat_types:
            h_V.append(V_direct)

        if 'orientation' in self.edge_feat_types:
            h_E.append(E_orient)
        if 'distance' in self.edge_feat_types:
            h_E.append(E_dist)
        if 'direction' in self.edge_feat_types:
            h_E.append(E_direct)
            
        # Embed the nodes
        h_V = self.norm_nodes(self.node_embedding(torch.cat(h_V, dim=-1)))
        h_E = self.norm_edges(self.edge_embedding(torch.cat(h_E, dim=-1)))

        # prepare the variables to return
        S = torch.masked_select(S, mask_bool)
        # Per-sample offsets convert padded indices to flat valid-node indices.
        shift = mask.sum(dim=1).cumsum(dim=0) - mask.sum(dim=1)
        src = shift.view(B,1,1) + E_idx
        src = torch.masked_select(src, mask_attend).view(1,-1)
        dst = shift.view(B,1,1) + torch.arange(0, N, device=src.device).view(1,-1,1).expand_as(mask_attend)
        dst = torch.masked_select(dst, mask_attend).view(1,-1)
        E_idx = torch.cat((dst, src), dim=0).long()

        sparse_idx = mask.nonzero()
        X = X[sparse_idx[:,0], sparse_idx[:,1], :, :]
        batch_id = sparse_idx[:,0]
        return X, S, h_V, h_E, E_idx, batch_id

class RNAModel(nn.Module):
    """Encoder-decoder message-passing network over the RNA backbone graph.

    forward() returns per-node nucleotide logits, the matching flattened
    labels, and a per-graph projection embedding (mean-pooled node states).
    """

    def __init__(self, model_config):
        super(RNAModel, self).__init__()

        self.smoothing = model_config.smoothing
        self.node_features = self.edge_features = model_config.hidden
        self.hidden_dim = model_config.hidden
        self.vocab = model_config.vocab_size

        # Geometric featurizer that produces the flattened sparse graph.
        self.features = RNAFeatures(
            model_config.hidden, model_config.hidden,
            top_k=model_config.k_neighbors,
            dropout=model_config.dropout,
            node_feat_types=model_config.node_feat_types,
            edge_feat_types=model_config.edge_feat_types,
            args=model_config
        )

        layer = MPNNLayer
        self.W_s = nn.Embedding(model_config.vocab_size, self.hidden_dim)
        self.encoder_layers = nn.ModuleList([
            layer(self.hidden_dim, self.hidden_dim*2, dropout=model_config.dropout)
            for _ in range(model_config.num_encoder_layers)])
        self.decoder_layers = nn.ModuleList([
            layer(self.hidden_dim, self.hidden_dim*2, dropout=model_config.dropout)
            for _ in range(model_config.num_decoder_layers)])

        self.projection_head = nn.Sequential(
            nn.Linear(self.hidden_dim, self.hidden_dim, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(self.hidden_dim, self.hidden_dim, bias=True)
        )

        self.readout = nn.Linear(self.hidden_dim, model_config.vocab_size, bias=True)

        # Xavier-initialize every weight matrix; 1-D params keep their defaults.
        for param in self.parameters():
            if param.dim() > 1:
                nn.init.xavier_uniform_(param)

    def _propagate(self, h_V, h_E, E_idx, batch_id, layers):
        """Run node states through a stack of MPNN layers, rebuilding the
        per-edge inputs (edge features + both endpoint states) each step."""
        for mpnn in layers:
            edge_inputs = torch.cat([h_E, h_V[E_idx[0]], h_V[E_idx[1]]], dim=-1)
            h_V = mpnn(h_V, edge_inputs, E_idx, batch_id)
        return h_V

    def forward(self, X, S, mask):
        X, S, h_V, h_E, E_idx, batch_id = self.features(X, S, mask)
        h_V = self._propagate(h_V, h_E, E_idx, batch_id, self.encoder_layers)
        h_V = self._propagate(h_V, h_E, E_idx, batch_id, self.decoder_layers)

        # Mean-pool the node states of each graph, then project.
        n_graphs = int(batch_id[-1].item()) + 1
        graph_embs = torch.stack(
            [h_V[batch_id == g].mean(0) for g in range(n_graphs)], dim=0)
        graph_prjs = self.projection_head(graph_embs)

        return self.readout(h_V), S, graph_prjs

    def sample(self, X, S, mask=None):
        """Same propagation as forward(), returning logits and the flattened
        ground-truth labels (no graph projection)."""
        X, gt_S, h_V, h_E, E_idx, batch_id = self.features(X, S, mask)
        h_V = self._propagate(h_V, h_E, E_idx, batch_id, self.encoder_layers)
        h_V = self._propagate(h_V, h_E, E_idx, batch_id, self.decoder_layers)
        return self.readout(h_V), gt_S

# Instantiate the model on the configured device and print its architecture.
model = RNAModel(config.model_config).to(config.device)
print(model)

# criterion = nn.CrossEntropyLoss()

# # 定义网格搜索参数
# epochs = [50, 60, 70]
# lrs = [1e-3, 1e-4, 1e-5]
# results = np.zeros((len(epochs), len(lrs)))  # 存储结果矩阵

# # 以下代码需要包裹在 __main__ 保护中
# if __name__ == "__main__":
#     # 网格搜索主循环
#     for epoch_idx, num_epochs in enumerate(epochs):
#         for lr_idx, lr in enumerate(lrs):
#             print(f"\n=== Training with Epochs: {num_epochs}, LR: {lr} ===")
            
#             # 初始化模型和优化器
#             model = RNAModel(config.model_config).to(config.device)
#             optimizer = Adam(model.parameters(), lr=lr)
            
#             best_valid_recovery = 0
#             best_ckpt_path = f"{train_config.output_dir}/best_epoch{num_epochs}_lr{lr}.pt"
            
#             os.makedirs(os.path.dirname(best_ckpt_path), exist_ok=True)  # 新增目录创建
#             for epoch in range(num_epochs):
#                 model.train()
#                 epoch_loss = 0
#                 train_pbar = tqdm(train_loader)
#                 for batch in train_pbar:
#                     X, S, mask, lengths, names = batch
#                     X = X.to(config.device)
#                     S = S.to(config.device)
#                     mask = mask.to(config.device)
#                     logits, S, _ = model(X, S, mask)
#                     loss = criterion(logits, S)
#                     loss.backward()
#                     train_pbar.set_description(f'train loss: {loss.item():.4f}')
#                     optimizer.step()
#                     optimizer.zero_grad()
#                     epoch_loss += loss.item()
                
#                 # 验证阶段
#                 model.eval()
#                 with torch.no_grad():
#                     recovery_list = []
#                     for batch in valid_loader:
#                         X, S, mask, lengths, names = batch
#                         X = X.to(config.device)
#                         S = S.to(config.device)
#                         mask = mask.to(config.device)
#                         logits, S, _ = model(X, S, mask)
#                         probs = F.softmax(logits, dim=-1)
#                         samples = probs.argmax(dim=-1)
                        
#                         start_idx = 0
#                         for length in lengths:
#                             end_idx = start_idx + length.item()
#                             sample = samples[start_idx: end_idx]
#                             gt_S = S[start_idx: end_idx]
#                             recovery = (sample == gt_S).sum() / len(sample)
#                             recovery_list.append(recovery.cpu().numpy())
#                             start_idx = end_idx
                    
#                     valid_recovery = np.mean(recovery_list)
#                     if valid_recovery > best_valid_recovery:
#                         best_valid_recovery = valid_recovery
#                         torch.save(model.state_dict(), best_ckpt_path)
            
#             # 记录最佳验证结果
#             results[epoch_idx, lr_idx] = best_valid_recovery

# # 绘制热力图
# plot_heatmap(results, epochs, lrs)

optimizer = Adam(model.parameters(), train_config.lr)
criterion = nn.CrossEntropyLoss()
# exist_ok avoids the exists()/makedirs() race of the previous version.
os.makedirs(train_config.output_dir, exist_ok=True)

best_valid_recovery = 0
for epoch in range(train_config.epoch):
    # ---- training ----
    model.train()
    epoch_loss = 0
    train_pbar = tqdm(train_loader)
    for batch in train_pbar:
        X, S, mask, lengths, names = batch
        X = X.to(config.device)
        S = S.to(config.device)
        mask = mask.to(config.device)
        # The model returns logits over flattened valid positions plus the
        # matching flattened targets.
        logits, S, _ = model(X, S, mask)
        loss = criterion(logits, S)
        loss.backward()
        train_pbar.set_description('train loss: {:.4f}'.format(loss.item()))
        optimizer.step()
        optimizer.zero_grad()
        epoch_loss += loss.item()

    epoch_loss /= len(train_loader)
    print('Epoch {}/{}, Loss: {:.4f}'.format(epoch + 1, train_config.epoch, epoch_loss))

    # ---- validation: per-sequence recovery rate ----
    model.eval()
    with torch.no_grad():
        recovery_list = []
        for batch in tqdm(valid_loader):
            X, S, mask, lengths, names = batch
            X = X.to(config.device)
            S = S.to(config.device)
            mask = mask.to(config.device)
            logits, S, _ = model(X, S, mask)
            probs = F.softmax(logits, dim=-1)
            samples = probs.argmax(dim=-1)
            # logits are flat across the batch; slice per sequence by length
            # (valid since featurize marks every real position as valid).
            start_idx = 0
            for length in lengths:
                end_idx = start_idx + length.item()
                sample = samples[start_idx: end_idx]
                gt_S = S[start_idx: end_idx]
                recovery = (sample == gt_S).sum() / len(sample)
                recovery_list.append(recovery.cpu().numpy())
                start_idx = end_idx
        valid_recovery = np.mean(recovery_list)
        print('Epoch {}/{}, recovery: {:.4f}'.format(epoch + 1, train_config.epoch, valid_recovery))
        # Keep only the checkpoint with the best validation recovery.
        if valid_recovery > best_valid_recovery:
            best_valid_recovery = valid_recovery
            torch.save(model.state_dict(), os.path.join(train_config.output_dir, 'best.pt'))

# ---- test evaluation with the best checkpoint ----
eval_model = RNAModel(config.model_config).to(config.device)
checkpoint_path = train_config.ckpt_path
print("loading checkpoint from path:", checkpoint_path)
eval_model.load_state_dict(torch.load(checkpoint_path, map_location='cpu'), strict=True)
eval_model.to(config.device)
eval_model.eval()

with torch.no_grad():
    # BUGFIX: the previous code initialized `result_list` but appended to the
    # validation loop's leftover `recovery_list` and averaged that, so the
    # reported test metric was contaminated with validation entries (and the
    # block crashed with NameError when run standalone). Accumulate into a
    # fresh list instead; a stray bare print() is also removed.
    recovery_list = []
    for batch in tqdm(test_loader):
        X, S, mask, lengths, names = batch
        X = X.to(config.device)
        S = S.to(config.device)
        mask = mask.to(config.device)
        logits, S, _ = eval_model(X, S, mask)
        probs = F.softmax(logits, dim=-1)
        samples = probs.argmax(dim=-1)
        # Slice the flat predictions per sequence by length.
        start_idx = 0
        for length in lengths:
            end_idx = start_idx + length.item()
            sample = samples[start_idx: end_idx]
            gt_S = S[start_idx: end_idx]
            recovery = (sample == gt_S).sum() / len(sample)
            recovery_list.append(recovery.cpu().numpy())
            start_idx = end_idx
    test_recovery = np.mean(recovery_list)
    print('test recovery: {:.4f}'.format(test_recovery))