from typing import List, Optional

import torch
import torch.nn.functional as F
from dataclasses import dataclass, field
from torch import nn
import numpy as np

def broadcast(src: torch.Tensor, other: torch.Tensor, dim: int):
    """Expand ``src`` (typically a 1-D index tensor) to ``other``'s shape,
    aligning its original axis with ``dim`` (torch_scatter-style broadcast).

    Args:
        src: tensor to expand; if 1-D, its axis is placed at ``dim``.
        other: tensor whose shape is the expansion target.
        dim: axis of ``other`` that ``src`` indexes along (may be negative).
    """
    if dim < 0:
        dim = other.dim() + dim
    if src.dim() == 1:
        # Prepend singleton axes so the data lands on axis ``dim``.
        for _ in range(0, dim):
            src = src.unsqueeze(0)
    # Append trailing singleton axes, then expand (no copy) to other's shape.
    for _ in range(src.dim(), other.dim()):
        src = src.unsqueeze(-1)
    src = src.expand(other.size())
    return src

def scatter_sum(src: torch.Tensor, index: torch.Tensor, dim: int = -1,
                out: Optional[torch.Tensor] = None,
                dim_size: Optional[int] = None) -> torch.Tensor:
    """Sum-reduce ``src`` into buckets given by ``index`` along ``dim``
    (drop-in replacement for ``torch_scatter.scatter_sum``).

    Args:
        src: values to scatter.
        index: integer bucket ids, broadcastable to ``src`` (see broadcast).
        dim: dimension along which to scatter.
        out: optional destination; accumulated into IN PLACE when given.
        dim_size: output size along ``dim``; inferred as ``index.max() + 1``
            when omitted (0 if ``index`` is empty).

    Returns:
        The destination tensor (``out`` if provided, else a new zero tensor).
    """
    index = broadcast(index, src, dim)
    if out is None:
        size = list(src.size())
        if dim_size is not None:
            size[dim] = dim_size
        elif index.numel() == 0:
            size[dim] = 0
        else:
            # int() keeps a plain Python int in the size list instead of a
            # 0-dim tensor (which only works via __index__ and forces a
            # device sync on CUDA at an unexpected place).
            size[dim] = int(index.max()) + 1
        out = torch.zeros(size, dtype=src.dtype, device=src.device)
    # Single exit: both the fresh and the caller-provided destination are
    # accumulated in place and returned.
    return out.scatter_add_(dim, index, src)

@dataclass
class ModelConfig:
    """Hyper-parameters for RNAModel and its sub-modules."""
    smoothing: float = 0.2  # label-smoothing coefficient (increased)
    hidden: int = 128  # shared width of node/edge embeddings and MPNN layers
    vocab_size: int = 4  # output classes -- presumably the 4 nucleotides, verify with the dataset
    k_neighbors: int = 30  # k for the kNN graph built by RNAFeatures
    dropout: float = 0.3  # dropout rate (increased)
    node_feat_types: List[str] = field(default_factory=lambda: ['angle', 'distance', 'direction'])
    edge_feat_types: List[str] = field(default_factory=lambda: ['orientation', 'distance', 'direction'])
    num_encoder_layers: int = 2  # fewer layers to curb overfitting
    num_decoder_layers: int = 2
    attention_heads: int = 4  # heads for MPNNLayer's MultiheadAttention

def gather_edges(edges, neighbor_idx):
    """Select neighbor entries from a dense pairwise edge tensor.

    edges: (B, N, N, C); neighbor_idx: (B, N, K) -> returns (B, N, K, C).
    """
    idx = neighbor_idx.unsqueeze(-1).expand(-1, -1, -1, edges.size(-1))
    return torch.gather(edges, 2, idx)

def gather_nodes(nodes, neighbor_idx):
    """Gather per-neighbor node features.

    nodes: (B, N, C); neighbor_idx: (B, N, K) -> returns (B, N, K, C),
    where out[b, n, k] == nodes[b, neighbor_idx[b, n, k]].
    """
    batch = neighbor_idx.shape[0]
    # Flatten (N, K) into one gather axis, replicate across channels.
    flat_idx = neighbor_idx.reshape(batch, -1, 1).expand(-1, -1, nodes.size(2))
    gathered = torch.gather(nodes, 1, flat_idx)
    # Restore the (B, N, K, C) layout.
    return gathered.view(list(neighbor_idx.shape)[:3] + [-1])

def gather_nodes_t(nodes, neighbor_idx):
    """Single-timestep gather: nodes (B, N, C), neighbor_idx (B, K) -> (B, K, C)."""
    expanded = neighbor_idx.unsqueeze(-1).expand(-1, -1, nodes.size(2))
    return nodes.gather(1, expanded)

def cat_neighbors_nodes(h_nodes, h_neighbors, E_idx):
    """Concatenate gathered endpoint node features onto neighbor features
    along the channel axis."""
    gathered = gather_nodes(h_nodes, E_idx)
    return torch.cat((h_neighbors, gathered), dim=-1)


class MPNNLayer(nn.Module):
    """Message-passing layer over a sparse edge list.

    One forward pass applies: (1) a multi-head self-attention step with a
    residual connection, (2) an edge-MLP message summed onto nodes with a
    residual + LayerNorm, and (3) a position-wise feed-forward block with a
    residual + LayerNorm.
    """
    def __init__(self, num_hidden, num_in, dropout=0.3, num_heads=4, scale=30):
        # num_hidden: node feature width; num_in: edge-message input width;
        # scale: constant divisor applied to the summed messages.
        super(MPNNLayer, self).__init__()
        self.num_hidden = num_hidden
        self.num_in = num_in
        self.scale = scale
        self.dropout = nn.Dropout(dropout)
        self.norm1 = nn.LayerNorm(num_hidden)
        self.norm2 = nn.LayerNorm(num_hidden)

        # Message MLP: num_in -> num_hidden -> num_hidden -> num_hidden.
        self.W1 = nn.Linear(self.num_in , num_hidden, bias=True)
        self.W2 = nn.Linear(num_hidden, num_hidden, bias=True)
        self.W3 = nn.Linear(num_hidden, num_hidden, bias=True)
        # Multi-head attention over node states (see NOTE in forward).
        self.attention = nn.MultiheadAttention(num_hidden, num_heads, dropout=dropout)
        self.act = nn.GELU()  # GELU activation (instead of ReLU)
        # Position-wise feed-forward block with 4x expansion.
        self.dense = nn.Sequential(
            nn.Linear(num_hidden, num_hidden*4),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(num_hidden*4, num_hidden)
        )

    def forward(self, h_V, h_E, edge_idx):
        # h_V: (N, H) flattened node features; h_E: (E, num_in) edge inputs;
        # edge_idx: (2, E) endpoint indices.  Messages are summed onto
        # edge_idx[0]; NOTE(review): dst_idx is never used below.
        src_idx, dst_idx = edge_idx[0], edge_idx[1]
        # Edge message MLP (two hidden GELU layers).
        h_message = self.W3(self.act(self.W2(self.act(self.W1(h_E)))))
        # Attention step with residual (no LayerNorm of its own).
        # NOTE(review): with the default batch_first=False layout, a (1, N, H)
        # input is (seq_len=1, batch=N), so each node attends only to itself
        # and this degenerates into a per-node linear map.  If attention
        # across nodes was intended, the input needs transposing (plus
        # per-graph masking, since nodes of all samples are flattened here).
        h_V_attn = h_V.unsqueeze(0)  # (1, N, H)
        attn_output, _ = self.attention(h_V_attn, h_V_attn, h_V_attn)
        h_V = h_V + self.dropout(attn_output.squeeze(0))
        # Sum messages per node (bucketed by src_idx) and scale down.
        dh = scatter_sum(h_message, src_idx, dim=0) / self.scale
        h_V = self.norm1(h_V + self.dropout(dh))
        # Feed-forward block.
        dh = self.dense(h_V)
        h_V = self.norm2(h_V + self.dropout(dh))
        return h_V

class Normalize(nn.Module):
    """LayerNorm-style normalization with a learnable gain and bias,
    applied along an arbitrary dimension."""

    def __init__(self, features, epsilon=1e-6):
        super(Normalize, self).__init__()
        self.gain = nn.Parameter(torch.ones(features))
        self.bias = nn.Parameter(torch.zeros(features))
        self.epsilon = epsilon

    def forward(self, x, dim=-1):
        """Normalize ``x`` along ``dim`` and apply the affine transform."""
        centered = x - x.mean(dim, keepdim=True)
        spread = torch.sqrt(x.var(dim, keepdim=True) + self.epsilon)
        return self.gain * centered / (spread + self.epsilon) + self.bias


# Raw dimensionality of each geometric feature type; RNAFeatures sums the
# selected entries to size its node/edge embedding MLPs.
feat_dims = {
    'node': {
        'angle': 12,      # 6 backbone dihedrals -> (cos, sin) pairs (_dihedrals)
        'distance': 80,   # 5 intra-residue atom pairs x 16 RBF bins (num_rbf default)
        'direction': 9,   # 3 atoms' unit directions in the local frame x 3 coords
    },
    'edge': {
        'orientation': 4,   # quaternion of the relative frame rotation (_quaternions)
        'distance': 96,     # 6 inter-residue atom pairs x 16 RBF bins
        'direction': 15,    # 5 neighbor atoms' unit directions x 3 coords
    }
}


def nan_to_num(tensor, nan=0.0):
    """Replace NaN entries of ``tensor`` with ``nan`` IN PLACE and return it."""
    return tensor.masked_fill_(torch.isnan(tensor), nan)

def _normalize(tensor, dim=-1):
    """L2-normalize ``tensor`` along ``dim``; zero vectors map to 0 (the
    NaNs produced by 0/0 are scrubbed) instead of NaN."""
    denom = torch.norm(tensor, dim=dim, keepdim=True)
    return nan_to_num(tensor / denom)

class RNAFeatures(nn.Module):
    """Featurizer for RNA backbones.

    Builds per-residue node features (backbone dihedrals, intra-residue atom
    distances, local-frame directions), per-edge features (relative frame
    quaternions, inter-residue RBF distances, neighbor directions), and a
    sparse kNN graph, then embeds and normalizes them.

    Expected input (see forward): X of shape (B, N, A, 3) where the first six
    atoms per residue are [P, O5', C5', C4', C3', O3'] -- TODO confirm with
    the data pipeline; mask of shape (B, N) with 1 for valid residues.
    """

    # NOTE(review): mutable list defaults are shared across calls; they are
    # only read here, so harmless, but None-defaults would be tidier.
    def __init__(self, edge_features, node_features, node_feat_types=[], edge_feat_types=[], 
                 num_rbf=16, top_k=30, augment_eps=0.1, dropout=0.3):  # augment_eps scales training-time noise
        super(RNAFeatures, self).__init__()
        self.edge_features = edge_features
        self.node_features = node_features
        self.top_k = top_k  # neighbors per residue in the kNN graph
        self.augment_eps = augment_eps
        self.num_rbf = num_rbf  # radial-basis bins per distance feature
        self.dropout = nn.Dropout(dropout)
        self.node_feat_types = node_feat_types
        self.edge_feat_types = edge_feat_types
        # Raw input widths follow from the selected feature types (feat_dims).
        node_in = sum([feat_dims['node'][feat] for feat in node_feat_types])
        edge_in = sum([feat_dims['edge'][feat] for feat in edge_feat_types])
        # Embedding MLPs: expand 2x -> GELU -> dropout -> project.
        self.node_embedding = nn.Sequential(
            nn.Linear(node_in, node_features*2),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(node_features*2, node_features)
        )
        self.edge_embedding = nn.Sequential(
            nn.Linear(edge_in, edge_features*2),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(edge_features*2, edge_features)
        )
        self.norm_nodes = Normalize(node_features)
        self.norm_edges = Normalize(edge_features)

    def _dist(self, X, mask, eps=1E-6):
        """Top-k nearest neighbors from pairwise distances.

        X: (B, N, 3); mask: (B, N).  Returns (D_neighbors, E_idx), each
        (B, N, K).  Masked pairs are pushed to large distances so they sort
        last in the (smallest-first) top-k.
        """
        mask_2D = torch.unsqueeze(mask, 1) * torch.unsqueeze(mask, 2)
        dX = torch.unsqueeze(X, 1) - torch.unsqueeze(X, 2)
        D = (1. - mask_2D) * 10000 + mask_2D * torch.sqrt(torch.sum(dX ** 2, 3) + eps)

        # Shift invalid pairs beyond the current row maximum before top-k.
        D_max, _ = torch.max(D, -1, keepdim=True)
        D_adjust = D + (1. - mask_2D) * (D_max + 1)
        D_neighbors, E_idx = torch.topk(D_adjust, min(self.top_k, D_adjust.shape[-1]), dim=-1, largest=False)
        return D_neighbors, E_idx

    def _rbf(self, D):
        """Expand distances into ``num_rbf`` Gaussian radial-basis features
        with centers evenly spaced on [0, 20] (distance units follow the
        input coordinates -- presumably Angstroms, verify upstream)."""
        D_min, D_max, D_count = 0., 20., self.num_rbf
        D_mu = torch.linspace(D_min, D_max, D_count, device=D.device)
        D_mu = D_mu.view([1, 1, 1, -1])
        D_sigma = (D_max - D_min) / D_count
        D_expand = torch.unsqueeze(D, -1)
        return torch.exp(-((D_expand - D_mu) / D_sigma) ** 2)

    def _get_rbf__node(self, A, B):
        """RBF of per-residue distances between corresponding atoms A and B
        (both unsqueezed on the same axis, so the subtraction is element-wise
        per residue): (B, N, 3) x2 -> (B, N, 1, num_rbf)."""
        D_A_B = torch.sqrt(torch.sum((A[:, :, None, :] - B[:, :, None, :]) ** 2, -1) + 1e-6)
        return self._rbf(D_A_B)

    def _get_rbf__edge(self, A, B, E_idx):
        """RBF of cross-residue atom distances gathered at the kNN edges:
        (B, N, 3) x2 with E_idx (B, N, K) -> (B, N, K, num_rbf)."""
        D_A_B = torch.sqrt(torch.sum((A[:, :, None, :] - B[:, None, :, :]) ** 2, -1) + 1e-6)
        D_A_B_neighbors = gather_edges(D_A_B[:, :, :, None], E_idx)[:, :, :, 0]
        return self._rbf(D_A_B_neighbors)

    def _quaternions(self, R):
        """Convert rotation matrices R (..., 3, 3) to unit quaternions
        (..., 4) ordered (x, y, z, w)."""
        diag = torch.diagonal(R, dim1=-2, dim2=-1)
        Rxx, Ryy, Rzz = diag.unbind(-1)
        magnitudes = 0.5 * torch.sqrt(torch.abs(1 + torch.stack([
            Rxx - Ryy - Rzz,
            - Rxx + Ryy - Rzz,
            - Rxx - Ryy + Rzz
        ], -1)))
        _R = lambda i, j: R[:, :, :, i, j]
        # Component signs recovered from the antisymmetric part of R.
        signs = torch.sign(torch.stack([
            _R(2, 1) - _R(1, 2),
            _R(0, 2) - _R(2, 0),
            _R(1, 0) - _R(0, 1)
        ], -1))
        xyz = signs * magnitudes
        w = torch.sqrt(F.relu(1 + diag.sum(-1, keepdim=True))) / 2.
        Q = torch.cat((xyz, w), -1)
        Q = F.normalize(Q, dim=-1)
        return Q

    def _orientations_coarse(self, X, E_idx, eps=1e-6):
        """Local reference frames and direction/orientation features.

        Returns:
            V_direct -- (B, N, 9): directions to atoms [0, 2, 3] (P, C5', C4')
                        expressed in each residue's local frame.
            E_direct -- (B, N, K, 15): directions to 5 neighbor atoms in the
                        local frame.
            E_orient -- (B, N, K, 4): quaternions of relative frame rotations.
        """
        V = X.clone()
        # Flatten the 6 backbone atoms of every residue into one point chain.
        X = X[:, :, :6, :].reshape(X.shape[0], 6 * X.shape[1], 3)
        dX = X[:, 1:, :] - X[:, :-1, :]
        U = _normalize(dX, dim=-1)
        u_0, u_1 = U[:, :-2, :], U[:, 1:-1, :]
        n_0 = _normalize(torch.cross(u_0, u_1, dim=-1), dim=-1)
        b_1 = _normalize(u_0 - u_1, dim=-1)

        # select C3'
        n_0 = n_0[:, 4::6, :]
        b_1 = b_1[:, 4::6, :]
        X = X[:, 4::6, :]
        # Orthonormal frame per residue, flattened to 9 and padded back to N.
        Q = torch.stack((b_1, n_0, torch.cross(b_1, n_0, dim=-1)), 2)
        Q = Q.view(list(Q.shape[:2]) + [9])
        Q = F.pad(Q, (0, 0, 0, 1), 'constant', 0)  # [16, 464, 9]

        Q_neighbors = gather_nodes(Q, E_idx)  # [16, 464, 30, 9]
        P_neighbors = gather_nodes(V[:, :, 0, :], E_idx)  # [16, 464, 30, 3]
        O5_neighbors = gather_nodes(V[:, :, 1, :], E_idx)
        C5_neighbors = gather_nodes(V[:, :, 2, :], E_idx)
        C4_neighbors = gather_nodes(V[:, :, 3, :], E_idx)
        O3_neighbors = gather_nodes(V[:, :, 5, :], E_idx)

        Q = Q.view(list(Q.shape[:2]) + [3, 3]).unsqueeze(2)  # [16, 464, 1, 3, 3]
        Q_neighbors = Q_neighbors.view(list(Q_neighbors.shape[:3]) + [3, 3])  # [16, 464, 30, 3, 3]

        dX = torch.stack([P_neighbors, O5_neighbors, C5_neighbors, C4_neighbors, O3_neighbors], dim=3) - X[:, :, None,
                                                                                                         None,
                                                                                                         :]  # [16, 464, 30, 3]
        # Rotate neighbor offsets into the local frame (relative coordinates).
        dU = torch.matmul(Q[:, :, :, None, :, :], dX[..., None]).squeeze(-1)  # [16, 464, 30, 3]
        B, N, K = dU.shape[:3]
        E_direct = _normalize(dU, dim=-1)
        E_direct = E_direct.reshape(B, N, K, -1)
        # Relative rotation between own frame and each neighbor's frame.
        R = torch.matmul(Q.transpose(-1, -2), Q_neighbors)
        E_orient = self._quaternions(R)

        # Intra-residue directions (atoms P, C5', C4') in the local frame.
        dX_inner = V[:, :, [0, 2, 3], :] - X.unsqueeze(-2)
        dU_inner = torch.matmul(Q, dX_inner.unsqueeze(-1)).squeeze(-1)
        dU_inner = _normalize(dU_inner, dim=-1)
        V_direct = dU_inner.reshape(B, N, -1)
        return V_direct, E_direct, E_orient

    def _dihedrals(self, X, eps=1e-7):
        """Six backbone torsion angles per residue, returned as their
        (cos, sin) pairs: (B, N, A, 3) -> (B, N, 12)."""
        # P, O5', C5', C4', C3', O3'
        X = X[:, :, :6, :].reshape(X.shape[0], 6 * X.shape[1], 3)

        # Shifted slices of unit vectors
        # https://iupac.qmul.ac.uk/misc/pnuc2.html#220
        # https://x3dna.org/highlights/torsion-angles-of-nucleic-acid-structures
        # alpha:   O3'_{i-1} P_i O5'_i C5'_i
        # beta:    P_i O5'_i C5'_i C4'_i
        # gamma:   O5'_i C5'_i C4'_i C3'_i
        # delta:   C5'_i C4'_i C3'_i O3'_i
        # epsilon: C4'_i C3'_i O3'_i P_{i+1}
        # zeta:    C3'_i O3'_i P_{i+1} O5'_{i+1}
        # What's more:
        #   chi: C1' - N9
        #   chi is different for (C, T, U) and (A, G) https://x3dna.org/highlights/the-chi-x-torsion-angle-characterizes-base-sugar-relative-orientation

        dX = X[:, 5:, :] - X[:, :-5, :]  # O3'-P, P-O5', O5'-C5', C5'-C4', ...
        U = F.normalize(dX, dim=-1)
        u_2 = U[:, :-2, :]  # O3'-P, P-O5', ...
        u_1 = U[:, 1:-1, :]  # P-O5', O5'-C5', ...
        u_0 = U[:, 2:, :]  # O5'-C5', C5'-C4', ...
        # Backbone normals
        n_2 = F.normalize(torch.cross(u_2, u_1, dim=-1), dim=-1)
        n_1 = F.normalize(torch.cross(u_1, u_0, dim=-1), dim=-1)

        # Angle between normals
        cosD = (n_2 * n_1).sum(-1)
        cosD = torch.clamp(cosD, -1 + eps, 1 - eps)
        D = torch.sign((u_2 * n_1).sum(-1)) * torch.acos(cosD)
        # Pad so the angle count is a multiple of 6 per residue.
        D = F.pad(D, (3, 4), 'constant', 0)
        D = D.view((D.size(0), D.size(1) // 6, 6))
        return torch.cat((torch.cos(D), torch.sin(D)), 2)  # return D_features

    def forward(self, X, mask):
        """Compute features and flatten them into a sparse graph.

        Args:
            X: (B, N, A, 3) backbone coordinates.
            mask: (B, N) validity mask (1 = real residue).

        Returns:
            X     -- (M, A, 3) coordinates of the M valid residues.
            h_V   -- (M, node_features) embedded node features.
            h_E   -- (E, edge_features) embedded edge features.
            E_idx -- (2, E) long tensor of [dst, src] flat node indices.
        """
        # Training-time data augmentation.
        if self.training and self.augment_eps > 0:
            # Gaussian coordinate noise.
            noise = torch.randn_like(X) * self.augment_eps
            # Random global rotation, angles uniform in [0, 2*pi).
            # NOTE(review): torch.tensor(...) detaches these entries from
            # autograd -- fine for augmentation, but worth knowing.
            angle = torch.rand(3, device=X.device) * 2 * np.pi
            rot_x = torch.tensor([
                [1, 0, 0],
                [0, torch.cos(angle[0]), -torch.sin(angle[0])],
                [0, torch.sin(angle[0]), torch.cos(angle[0])]
            ], device=X.device)
            rot_y = torch.tensor([
                [torch.cos(angle[1]), 0, torch.sin(angle[1])],
                [0, 1, 0],
                [-torch.sin(angle[1]), 0, torch.cos(angle[1])]
            ], device=X.device)
            rot_z = torch.tensor([
                [torch.cos(angle[2]), -torch.sin(angle[2]), 0],
                [torch.sin(angle[2]), torch.cos(angle[2]), 0],
                [0, 0, 1]
            ], device=X.device)
            rotation = rot_x @ rot_y @ rot_z
            X = torch.einsum('bnij,jk->bnik', X + noise, rotation)
        # NOTE(review): this guard is identical to the one above, so Gaussian
        # noise is applied TWICE during training -- confirm that is intended.
        if self.training and self.augment_eps > 0:
            X = X + self.augment_eps * torch.randn_like(X)

        # Build k-Nearest Neighbors graph
        B, N, _, _ = X.shape
        # P, O5', C5', C4', C3', O3'
        atom_dict = dict(zip(['P', 'O5_', 'C5_', 'C4_', 'C3_', 'O3_'], X.unbind(dim=2)))

        # kNN is computed on the P atoms only.
        X_backbone = atom_dict["P"]
        D_neighbors, E_idx = self._dist(X_backbone, mask)

        # Edge is kept only when both endpoints are valid residues.
        mask_attend = gather_nodes(mask.unsqueeze(-1), E_idx).squeeze(-1)
        mask_attend = (mask.unsqueeze(-1) * mask_attend) == 1
        edge_mask_select = lambda x: torch.masked_select(x, mask_attend.unsqueeze(-1)).reshape(-1, x.shape[-1])
        node_mask_select = lambda x: torch.masked_select(x, mask.bool().unsqueeze(-1)).reshape(-1, x.shape[-1])

        # node features
        h_V = []
        # angle
        V_angle = self._dihedrals(X)
        V_angle = node_mask_select(V_angle)
        # distance
        node_list = ['O5_-P', 'C5_-P', 'C4_-P', 'C3_-P', 'O3_-P']
        V_dist = []

        for pair in node_list:
            atom1, atom2 = pair.split('-')
            V_dist.append(node_mask_select(
                self._get_rbf__node(atom_dict[atom1], atom_dict[atom2]).squeeze()))
        V_dist = torch.cat(tuple(V_dist), dim=-1).squeeze()
        # direction
        V_direct, E_direct, E_orient = self._orientations_coarse(X, E_idx)
        V_direct = node_mask_select(V_direct)
        E_direct, E_orient = list(map(lambda x: edge_mask_select(x), [E_direct, E_orient]))

        # edge features
        h_E = []
        # dist
        edge_list = ['P-P', 'O5_-P', 'C5_-P', 'C4_-P', 'C3_-P', 'O3_-P']
        E_dist = []
        for pair in edge_list:
            atom1, atom2 = pair.split('-')
            E_dist.append(
                edge_mask_select(self._get_rbf__edge(atom_dict[atom1], atom_dict[atom2], E_idx)))
        E_dist = torch.cat(tuple(E_dist), dim=-1)

        # Assemble only the configured feature types, in a fixed order.
        if 'angle' in self.node_feat_types:
            h_V.append(V_angle)
        if 'distance' in self.node_feat_types:
            h_V.append(V_dist)
        if 'direction' in self.node_feat_types:
            h_V.append(V_direct)

        if 'orientation' in self.edge_feat_types:
            h_E.append(E_orient)
        if 'distance' in self.edge_feat_types:
            h_E.append(E_dist)
        if 'direction' in self.edge_feat_types:
            h_E.append(E_direct)

        # Embed the nodes
        h_V = self.norm_nodes(self.node_embedding(torch.cat(h_V, dim=-1)))
        h_E = self.norm_edges(self.edge_embedding(torch.cat(h_E, dim=-1)))

        # prepare the variables to return
        # Flatten per-sample kNN lists into one global edge list; 'shift' is
        # the running offset of each sample's valid-residue count.
        # NOTE(review): these offsets match the raw 0..N-1 neighbor indices
        # only if each sample's valid residues form a prefix of the padded
        # axis (mask = 1s followed by 0s) -- confirm against the collate fn.
        shift = mask.sum(dim=1).cumsum(dim=0) - mask.sum(dim=1)
        src = shift.view(B, 1, 1) + E_idx
        src = torch.masked_select(src, mask_attend).view(1, -1)
        dst = shift.view(B, 1, 1) + torch.arange(0, N, device=src.device).view(1, -1, 1).expand_as(mask_attend)
        dst = torch.masked_select(dst, mask_attend).view(1, -1)
        E_idx = torch.cat((dst, src), dim=0).long()

        # Keep only valid residues' coordinates, matching h_V's row order.
        sparse_idx = mask.nonzero()
        X = X[sparse_idx[:, 0], sparse_idx[:, 1], :, :]
        return X, h_V, h_E, E_idx

class RNAModel(nn.Module):
    """Encoder-decoder message-passing network that predicts per-residue
    nucleotide logits from RNA backbone geometry."""

    def __init__(self):
        super(RNAModel, self).__init__()
        model_config = ModelConfig()

        self.smoothing = model_config.smoothing  # label-smoothing factor (consumed by the training loss, not used here)
        self.node_features = self.edge_features = model_config.hidden
        self.hidden_dim = model_config.hidden
        self.vocab = model_config.vocab_size

        # Geometric featurizer + sparse kNN graph builder.
        self.features = RNAFeatures(
            model_config.hidden, model_config.hidden,
            top_k=model_config.k_neighbors,
            dropout=model_config.dropout,
            node_feat_types=model_config.node_feat_types,
            edge_feat_types=model_config.edge_feat_types,
            augment_eps=0.1  # training-time coordinate augmentation
        )
        # Sequence-token embedding.  NOTE(review): not used by forward below;
        # presumably intended for autoregressive decoding -- verify.
        self.W_s = nn.Embedding(model_config.vocab_size, self.hidden_dim)
        # Global LayerNorms applied after the encoder / decoder stacks.
        self.encoder_norm = nn.LayerNorm(self.hidden_dim)
        self.decoder_norm = nn.LayerNorm(self.hidden_dim)
        # MPNN stacks; each edge message sees [h_E, h_V[dst], h_V[src]],
        # hence the hidden_dim*3 input width.
        self.encoder_layers = nn.ModuleList([
            MPNNLayer(self.hidden_dim, self.hidden_dim*3, dropout=model_config.dropout, num_heads=model_config.attention_heads)
            for _ in range(model_config.num_encoder_layers)])
        self.decoder_layers = nn.ModuleList([
            MPNNLayer(self.hidden_dim, self.hidden_dim*3, dropout=model_config.dropout, num_heads=model_config.attention_heads)
            for _ in range(model_config.num_decoder_layers)])
        # Readout MLP -> per-residue class logits.
        self.readout = nn.Sequential(
            nn.Linear(self.hidden_dim, self.hidden_dim*2),
            nn.GELU(),
            nn.Dropout(model_config.dropout),
            nn.Linear(self.hidden_dim*2, model_config.vocab_size)
        )
        self.init_weights()

    def init_weights(self):
        """Xavier-initialize every Linear layer and reset every LayerNorm.

        Bug fix: the previous version iterated ``self.parameters()`` and
        tested ``isinstance(p, nn.Linear)`` -- parameters are tensors, never
        modules, so the checks always failed and no initialization ever ran.
        (Had it run, ``nn.init.calculate_gain('gelu')`` would have raised
        ValueError, since 'gelu' is not a supported nonlinearity.)  We
        iterate modules instead and use the default Xavier gain.
        """
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0.0)
            elif isinstance(m, nn.LayerNorm):
                nn.init.constant_(m.weight, 1.0)
                nn.init.constant_(m.bias, 0.0)

    def forward(self, X, mask):
        """X: (B, N, A, 3) backbone coordinates; mask: (B, N) validity mask.

        Returns logits of shape (num_valid_residues, vocab_size).
        """
        X, h_V, h_E, E_idx = self.features(X, mask)
        # Encoder: each layer's edge message combines the edge embedding with
        # both endpoints' current node states.
        for enc_layer in self.encoder_layers:
            h_EV = torch.cat([h_E, h_V[E_idx[0]], h_V[E_idx[1]]], dim=-1)
            h_V = enc_layer(h_V, h_EV, E_idx)
        h_V = self.encoder_norm(h_V)
        # Decoder: same structure over the refined node states.
        for dec_layer in self.decoder_layers:
            h_EV = torch.cat([h_E, h_V[E_idx[0]], h_V[E_idx[1]]], dim=-1)
            h_V = dec_layer(h_V, h_EV, E_idx)
        h_V = self.decoder_norm(h_V)
        logits = self.readout(h_V)
        return logits
