import os
import torch
import random
import numpy as np
import pandas as pd
import torch.nn as nn
from tqdm import tqdm
from typing import List, Dict
from torch.optim import Adam
import torch.nn.functional as F
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from dataclasses import dataclass, field
from torch_scatter import scatter_sum, scatter_softmax
from Bio import SeqIO  # pip install biopython


class RNAModel(nn.Module):
    """Graph message-passing network over RNA backbone structure.

    Extracts geometric node/edge features from backbone coordinates, runs
    encoder and decoder MPNN stacks, and returns:
      * per-node logits over the nucleotide vocabulary, and
      * one projected graph-level embedding per sample (mean-pooled nodes).
    """

    def __init__(self, model_config):
        super(RNAModel, self).__init__()

        self.smoothing = model_config.smoothing  # label-smoothing factor (read by callers)
        self.node_features = self.edge_features = model_config.hidden
        self.hidden_dim = model_config.hidden
        self.vocab = model_config.vocab_size

        # Geometric feature extractor: builds a k-NN graph from coordinates.
        # augment_eps adds Gaussian coordinate noise during training (data augmentation).
        self.features = RNAFeatures(
            model_config.hidden, model_config.hidden,
            top_k=model_config.k_neighbors,
            dropout=model_config.dropout,
            node_feat_types=model_config.node_feat_types,
            edge_feat_types=model_config.edge_feat_types,
            augment_eps=0.1,
        )

        layer = MPNNLayer
        self.W_s = nn.Embedding(model_config.vocab_size, self.hidden_dim)
        self.encoder_layers = nn.ModuleList([
            layer(self.hidden_dim, self.hidden_dim*2, dropout=model_config.dropout)
            for _ in range(model_config.num_encoder_layers)])
        self.decoder_layers = nn.ModuleList([
            layer(self.hidden_dim, self.hidden_dim*2, dropout=model_config.dropout)
            for _ in range(model_config.num_decoder_layers)])

        self.projection_head = nn.Sequential(
            nn.Linear(self.hidden_dim, self.hidden_dim, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(self.hidden_dim, self.hidden_dim, bias=True)
        )
        # 4-way per-node classifier (one logit per nucleotide).
        self.readout = nn.Linear(self.hidden_dim, model_config.vocab_size, bias=True)

        # More careful initialization.
        self.init_weights()

    def init_weights(self):
        """Initialize Linear and LayerNorm submodules.

        Bug fix: the previous version iterated ``self.parameters()`` and
        tested ``isinstance(p, nn.Linear)`` — parameters are tensors, never
        Module instances, so no branch ever ran. Iterate modules instead.
        """
        for m in self.modules():
            if isinstance(m, nn.Linear):
                # 'relu' gain matches the ReLU activations used throughout;
                # calculate_gain() has no 'gelu' option and would raise.
                nn.init.xavier_uniform_(m.weight, gain=nn.init.calculate_gain('relu'))
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0.0)
            elif isinstance(m, nn.LayerNorm):
                nn.init.constant_(m.weight, 1.0)
                nn.init.constant_(m.bias, 0.0)

    def forward(self, X, mask):
        """Run the model.

        Args:
            X: [B, N, A, 3] backbone atom coordinates.
            mask: [B, N] validity mask (1 = real residue).

        Returns:
            logits: [M, vocab_size] per-node nucleotide logits (M = mask.sum()).
            graph_prjs: [B, hidden] projected graph embeddings.
        """
        # Node features, edge features, flat edge index, per-node batch ids.
        h_V, h_E, E_idx, batch_id = self.features(X, mask)

        # Encoder: edge features stay fixed; each layer aggregates
        # [edge, endpoint-0, endpoint-1] messages into node states.
        for enc_layer in self.encoder_layers:
            h_EV = torch.cat([h_E, h_V[E_idx[0]], h_V[E_idx[1]]], dim=-1)
            h_V = enc_layer(h_V, h_EV, E_idx, batch_id)
        # Decoder: same message scheme, second stack of layers.
        for dec_layer in self.decoder_layers:
            h_EV = torch.cat([h_E, h_V[E_idx[0]], h_V[E_idx[1]]], dim=-1)
            h_V = dec_layer(h_V, h_EV, E_idx, batch_id)

        # Mean-pool node embeddings per sample into one graph embedding.
        graph_embs = torch.stack(
            [h_V[batch_id == b_id].mean(0) for b_id in range(int(batch_id[-1].item()) + 1)],
            dim=0)
        # Projection head (e.g. for contrastive objectives).
        graph_prjs = self.projection_head(graph_embs)
        # Per-node 4-way classification.
        logits = self.readout(h_V)
        return logits, graph_prjs



class RNAFeatures(nn.Module):
    def __init__(self, edge_features, node_features, node_feat_types=[], edge_feat_types=[], num_rbf=16, top_k=30,
                 augment_eps=0., dropout=0.1):
        super(RNAFeatures, self).__init__()
        """Extract RNA Features"""
        self.edge_features = edge_features
        self.node_features = node_features
        self.top_k = top_k
        self.augment_eps = augment_eps
        self.num_rbf = num_rbf
        self.dropout = nn.Dropout(dropout)
        self.node_feat_types = node_feat_types
        self.edge_feat_types = edge_feat_types

        node_in = sum([feat_dims['node'][feat] for feat in node_feat_types])
        edge_in = sum([feat_dims['edge'][feat] for feat in edge_feat_types])
        self.node_embedding = nn.Linear(node_in, node_features, bias=True)
        self.edge_embedding = nn.Linear(edge_in, edge_features, bias=True)
        self.norm_nodes = Normalize(node_features)
        self.norm_edges = Normalize(edge_features)

    def _dist(self, X, mask, eps=1E-6):
        mask_2D = torch.unsqueeze(mask, 1) * torch.unsqueeze(mask, 2)
        dX = torch.unsqueeze(X, 1) - torch.unsqueeze(X, 2)
        D = (1. - mask_2D) * 10000 + mask_2D * torch.sqrt(torch.sum(dX ** 2, 3) + eps)

        D_max, _ = torch.max(D, -1, keepdim=True)
        D_adjust = D + (1. - mask_2D) * (D_max + 1)
        D_neighbors, E_idx = torch.topk(D_adjust, min(self.top_k, D_adjust.shape[-1]), dim=-1, largest=False)
        return D_neighbors, E_idx

    def _rbf(self, D):
        D_min, D_max, D_count = 0., 20., self.num_rbf
        D_mu = torch.linspace(D_min, D_max, D_count, device=D.device)
        D_mu = D_mu.view([1, 1, 1, -1])
        D_sigma = (D_max - D_min) / D_count
        D_expand = torch.unsqueeze(D, -1)
        return torch.exp(-((D_expand - D_mu) / D_sigma) ** 2)

    def _get_rbf__node(self, A, B):
        D_A_B = torch.sqrt(torch.sum((A[:, :, None, :] - B[:, :, None, :]) ** 2, -1) + 1e-6)
        return self._rbf(D_A_B)

    def _get_rbf__edge(self, A, B, E_idx):
        D_A_B = torch.sqrt(torch.sum((A[:, :, None, :] - B[:, None, :, :]) ** 2, -1) + 1e-6)
        D_A_B_neighbors = gather_edges(D_A_B[:, :, :, None], E_idx)[:, :, :, 0]
        return self._rbf(D_A_B_neighbors)

    def _quaternions(self, R):
        diag = torch.diagonal(R, dim1=-2, dim2=-1)
        Rxx, Ryy, Rzz = diag.unbind(-1)
        magnitudes = 0.5 * torch.sqrt(torch.abs(1 + torch.stack([
            Rxx - Ryy - Rzz,
            - Rxx + Ryy - Rzz,
            - Rxx - Ryy + Rzz
        ], -1)))
        _R = lambda i, j: R[:, :, :, i, j]
        signs = torch.sign(torch.stack([
            _R(2, 1) - _R(1, 2),
            _R(0, 2) - _R(2, 0),
            _R(1, 0) - _R(0, 1)
        ], -1))
        xyz = signs * magnitudes
        w = torch.sqrt(F.relu(1 + diag.sum(-1, keepdim=True))) / 2.
        Q = torch.cat((xyz, w), -1)
        Q = F.normalize(Q, dim=-1)
        return Q

    def _orientations_coarse(self, X, E_idx, eps=1e-6):
        V = X.clone()
        X = X[:, :, :6, :].reshape(X.shape[0], 6 * X.shape[1], 3)
        dX = X[:, 1:, :] - X[:, :-1, :]
        U = _normalize(dX, dim=-1)
        u_0, u_1 = U[:, :-2, :], U[:, 1:-1, :]
        n_0 = _normalize(torch.cross(u_0, u_1, dim=-1), dim=-1)
        b_1 = _normalize(u_0 - u_1, dim=-1)

        # select C3'
        n_0 = n_0[:, 4::6, :]
        b_1 = b_1[:, 4::6, :]
        X = X[:, 4::6, :]
        Q = torch.stack((b_1, n_0, torch.cross(b_1, n_0, dim=-1)), 2)
        Q = Q.view(list(Q.shape[:2]) + [9])
        Q = F.pad(Q, (0, 0, 0, 1), 'constant', 0)  # [16, 464, 9]

        Q_neighbors = gather_nodes(Q, E_idx)  # [16, 464, 30, 9]
        P_neighbors = gather_nodes(V[:, :, 0, :], E_idx)  # [16, 464, 30, 3]
        O5_neighbors = gather_nodes(V[:, :, 1, :], E_idx)
        C5_neighbors = gather_nodes(V[:, :, 2, :], E_idx)
        C4_neighbors = gather_nodes(V[:, :, 3, :], E_idx)
        O3_neighbors = gather_nodes(V[:, :, 5, :], E_idx)

        Q = Q.view(list(Q.shape[:2]) + [3, 3]).unsqueeze(2)  # [16, 464, 1, 3, 3]
        Q_neighbors = Q_neighbors.view(list(Q_neighbors.shape[:3]) + [3, 3])  # [16, 464, 30, 3, 3]

        dX = torch.stack([P_neighbors, O5_neighbors, C5_neighbors, C4_neighbors, O3_neighbors], dim=3) - X[:, :, None,
                                                                                                         None,
                                                                                                         :]  # [16, 464, 30, 3]
        dU = torch.matmul(Q[:, :, :, None, :, :], dX[..., None]).squeeze(-1)  # [16, 464, 30, 3] 邻居的相对坐标
        B, N, K = dU.shape[:3]
        E_direct = _normalize(dU, dim=-1)
        E_direct = E_direct.reshape(B, N, K, -1)
        R = torch.matmul(Q.transpose(-1, -2), Q_neighbors)
        E_orient = self._quaternions(R)

        dX_inner = V[:, :, [0, 2, 3], :] - X.unsqueeze(-2)
        dU_inner = torch.matmul(Q, dX_inner.unsqueeze(-1)).squeeze(-1)
        dU_inner = _normalize(dU_inner, dim=-1)
        V_direct = dU_inner.reshape(B, N, -1)
        return V_direct, E_direct, E_orient

    def _dihedrals(self, X, eps=1e-7):
        # P, O5', C5', C4', C3', O3'
        X = X[:, :, :6, :].reshape(X.shape[0], 6 * X.shape[1], 3)

        # Shifted slices of unit vectors
        # https://iupac.qmul.ac.uk/misc/pnuc2.html#220
        # https://x3dna.org/highlights/torsion-angles-of-nucleic-acid-structures
        # alpha:   O3'_{i-1} P_i O5'_i C5'_i
        # beta:    P_i O5'_i C5'_i C4'_i
        # gamma:   O5'_i C5'_i C4'_i C3'_i
        # delta:   C5'_i C4'_i C3'_i O3'_i
        # epsilon: C4'_i C3'_i O3'_i P_{i+1}
        # zeta:    C3'_i O3'_i P_{i+1} O5'_{i+1}
        # What's more:
        #   chi: C1' - N9
        #   chi is different for (C, T, U) and (A, G) https://x3dna.org/highlights/the-chi-x-torsion-angle-characterizes-base-sugar-relative-orientation

        dX = X[:, 5:, :] - X[:, :-5, :]  # O3'-P, P-O5', O5'-C5', C5'-C4', ...
        U = F.normalize(dX, dim=-1)
        u_2 = U[:, :-2, :]  # O3'-P, P-O5', ...
        u_1 = U[:, 1:-1, :]  # P-O5', O5'-C5', ...
        u_0 = U[:, 2:, :]  # O5'-C5', C5'-C4', ...
        # Backbone normals
        n_2 = F.normalize(torch.cross(u_2, u_1, dim=-1), dim=-1)
        n_1 = F.normalize(torch.cross(u_1, u_0, dim=-1), dim=-1)

        # Angle between normals
        cosD = (n_2 * n_1).sum(-1)
        cosD = torch.clamp(cosD, -1 + eps, 1 - eps)
        D = torch.sign((u_2 * n_1).sum(-1)) * torch.acos(cosD)
        D = F.pad(D, (3, 4), 'constant', 0)
        D = D.view((D.size(0), D.size(1) // 6, 6))
        return torch.cat((torch.cos(D), torch.sin(D)), 2)  # return D_features

    def forward(self, X, mask):
        if self.training and self.augment_eps > 0:
            X = X + self.augment_eps * torch.randn_like(X)

        # Build k-Nearest Neighbors graph
        B, N, _, _ = X.shape
        # P, O5', C5', C4', C3', O3'
        atom_dict = dict(zip(['P', 'O5_', 'C5_', 'C4_', 'C3_', 'O3_'], X.unbind(dim=2)))

        X_backbone = atom_dict["P"]
        D_neighbors, E_idx = self._dist(X_backbone, mask)

        mask_attend = gather_nodes(mask.unsqueeze(-1), E_idx).squeeze(-1)
        mask_attend = (mask.unsqueeze(-1) * mask_attend) == 1
        edge_mask_select = lambda x: torch.masked_select(x, mask_attend.unsqueeze(-1)).reshape(-1, x.shape[-1])
        node_mask_select = lambda x: torch.masked_select(x, mask.bool().unsqueeze(-1)).reshape(-1, x.shape[-1])

        # node features
        h_V = []
        # angle
        V_angle = self._dihedrals(X)
        V_angle = node_mask_select(V_angle)
        # distance
        node_list = ['O5_-P', 'C5_-P', 'C4_-P', 'C3_-P', 'O3_-P']
        V_dist = []

        for pair in node_list:
            atom1, atom2 = pair.split('-')
            V_dist.append(node_mask_select(
                self._get_rbf__node(atom_dict[atom1], atom_dict[atom2]).squeeze()))
        V_dist = torch.cat(tuple(V_dist), dim=-1).squeeze()
        # direction
        V_direct, E_direct, E_orient = self._orientations_coarse(X, E_idx)
        V_direct = node_mask_select(V_direct)
        E_direct, E_orient = list(map(lambda x: edge_mask_select(x), [E_direct, E_orient]))

        # edge features
        h_E = []
        # dist
        edge_list = ['P-P', 'O5_-P', 'C5_-P', 'C4_-P', 'C3_-P', 'O3_-P']
        E_dist = []
        for pair in edge_list:
            atom1, atom2 = pair.split('-')
            E_dist.append(
                edge_mask_select(self._get_rbf__edge(atom_dict[atom1], atom_dict[atom2], E_idx)))
        E_dist = torch.cat(tuple(E_dist), dim=-1)

        if 'angle' in self.node_feat_types:
            h_V.append(V_angle)
        if 'distance' in self.node_feat_types:
            h_V.append(V_dist)
        if 'direction' in self.node_feat_types:
            h_V.append(V_direct)

        if 'orientation' in self.edge_feat_types:
            h_E.append(E_orient)
        if 'distance' in self.edge_feat_types:
            h_E.append(E_dist)
        if 'direction' in self.edge_feat_types:
            h_E.append(E_direct)

        # Embed the nodes
        h_V = self.norm_nodes(self.node_embedding(torch.cat(h_V, dim=-1)))
        h_E = self.norm_edges(self.edge_embedding(torch.cat(h_E, dim=-1)))

        # prepare the variables to return
        shift = mask.sum(dim=1).cumsum(dim=0) - mask.sum(dim=1)
        src = shift.view(B, 1, 1) + E_idx
        src = torch.masked_select(src, mask_attend).view(1, -1)
        dst = shift.view(B, 1, 1) + torch.arange(0, N, device=src.device).view(1, -1, 1).expand_as(mask_attend)
        dst = torch.masked_select(dst, mask_attend).view(1, -1)
        E_idx = torch.cat((dst, src), dim=0).long()

        sparse_idx = mask.nonzero()
        X = X[sparse_idx[:, 0], sparse_idx[:, 1], :, :]
        batch_id = sparse_idx[:,0]
        return h_V, h_E, E_idx, batch_id
    
# Input feature widths consumed by RNAFeatures' embedding layers.
# node: 'angle'     = 6 torsions x (cos, sin)            = 12
#       'distance'  = 5 atom pairs x 16 RBFs             = 80
#       'direction' = 3 intra-residue atoms x 3 coords   = 9
# edge: 'orientation' = 1 quaternion                     = 4
#       'distance'    = 6 atom pairs x 16 RBFs           = 96
#       'direction'   = 5 neighbor atoms x 3 coords      = 15
feat_dims = {
    'node': {
        'angle': 12,
        'distance': 80,
        'direction': 9,
    },
    'edge': {
        'orientation': 4,
        'distance': 96,
        'direction': 15,
    }
}

def nan_to_num(tensor, nan=0.0):
    """Return a tensor equal to ``tensor`` with NaN entries replaced by ``nan``.

    Bug fix: the previous version mutated its argument in place via masked
    assignment, silently modifying the caller's tensor (and interacting badly
    with autograd on tensors that require grad). ``torch.where`` builds a new
    tensor with identical values instead.
    """
    return torch.where(torch.isnan(tensor), torch.full_like(tensor, nan), tensor)

def _normalize(tensor, dim=-1):
    """L2-normalize ``tensor`` along ``dim``; rows with zero norm become zeros
    (the 0/0 NaNs produced by the division are zeroed out)."""
    unit = tensor / torch.norm(tensor, dim=dim, keepdim=True)
    unit[torch.isnan(unit)] = 0.0
    return unit
    
def gather_edges(edges, neighbor_idx):
    """Select neighbor edge features.

    edges: [B, N, N, C] dense pairwise features; neighbor_idx: [B, N, K].
    Returns [B, N, K, C].
    """
    feat_dim = edges.size(-1)
    idx = neighbor_idx[..., None].expand(-1, -1, -1, feat_dim)
    return edges.gather(2, idx)

def gather_nodes(nodes, neighbor_idx):
    """Gather per-neighbor node features.

    nodes: [B, N, C]; neighbor_idx: [B, N, K].
    Returns [B, N, K, C].
    """
    feat_dim = nodes.size(2)
    # Flatten the [N, K] index grid, broadcast over the feature axis, gather,
    # then restore the [B, N, K, C] layout.
    flat_idx = neighbor_idx.reshape(neighbor_idx.size(0), -1, 1).expand(-1, -1, feat_dim)
    gathered = nodes.gather(1, flat_idx)
    return gathered.view(list(neighbor_idx.shape)[:3] + [feat_dim])

def gather_nodes_t(nodes, neighbor_idx):
    """Gather node features for a flat index set (single step).

    nodes: [B, N, C]; neighbor_idx: [B, K]. Returns [B, K, C].
    """
    expanded = neighbor_idx[..., None].expand(-1, -1, nodes.size(2))
    return nodes.gather(1, expanded)

def cat_neighbors_nodes(h_nodes, h_neighbors, E_idx):
    """Concatenate edge features with the gathered neighbor node features
    along the last axis: [..., K, C_e] + [..., K, C_n]."""
    gathered = gather_nodes(h_nodes, E_idx)
    return torch.cat([h_neighbors, gathered], dim=-1)


class MPNNLayer(nn.Module):
    """One message-passing step over a sparse edge list.

    An MLP over edge inputs produces messages that are sum-aggregated onto
    nodes (scaled by 1/scale), followed by a residual position-wise
    feed-forward block; both residual branches are LayerNorm'd and dropped out.
    """

    def __init__(self, num_hidden, num_in, dropout=0.1, num_heads=None, scale=30):
        super(MPNNLayer, self).__init__()
        self.num_hidden = num_hidden
        self.num_in = num_in
        self.scale = scale  # divisor applied to the summed messages
        self.dropout = nn.Dropout(dropout)
        self.norm1 = nn.LayerNorm(num_hidden)
        self.norm2 = nn.LayerNorm(num_hidden)

        # Three-layer message MLP (attribute names kept for checkpoint compatibility).
        self.W1 = nn.Linear(num_hidden + num_in, num_hidden, bias=True)
        self.W2 = nn.Linear(num_hidden, num_hidden, bias=True)
        self.W3 = nn.Linear(num_hidden, num_hidden, bias=True)
        self.act = nn.ReLU()

        # Position-wise feed-forward block for the second residual branch.
        self.dense = nn.Sequential(
            nn.Linear(num_hidden, num_hidden*4),
            nn.ReLU(),
            nn.Linear(num_hidden*4, num_hidden)
        )

    def forward(self, h_V, h_E, edge_idx, batch_id=None):
        """h_V: [M, H] node features; h_E: [E, num_hidden + num_in] edge inputs;
        edge_idx: [2, E] — row 0 indexes the node each message is summed onto.
        batch_id is accepted for interface compatibility but unused here."""
        receiver = edge_idx[0]
        message = self.act(self.W1(h_E))
        message = self.act(self.W2(message))
        message = self.W3(message)
        # Group-sum messages per receiving node, then scale down.
        aggregated = scatter_sum(message, receiver, dim=0) / self.scale
        h_V = self.norm1(h_V + self.dropout(aggregated))
        # Residual feed-forward update.
        return self.norm2(h_V + self.dropout(self.dense(h_V)))


class Normalize(nn.Module):
    """LayerNorm-style normalization with a learnable gain and bias.

    Normalizes ``x`` to zero mean and unit (unbiased) variance along ``dim``,
    then applies the elementwise affine transform ``gain * x_hat + bias``.
    """

    def __init__(self, size, epsilon=1e-6):
        super(Normalize, self).__init__()
        # nn.Parameter registers these with the module so the optimizer updates them.
        self.gain = nn.Parameter(torch.ones(size))
        self.bias = nn.Parameter(torch.zeros(size))
        self.epsilon = epsilon

    def forward(self, x, dim=-1):
        mean = x.mean(dim, keepdim=True)
        std = torch.sqrt(x.var(dim, keepdim=True) + self.epsilon)
        if dim == -1:
            gain, bias = self.gain, self.bias
        else:
            # Reshape the affine parameters so they broadcast along `dim`.
            bshape = [1] * len(mean.size())
            bshape[dim] = self.gain.size()[0]
            gain = self.gain.view(bshape)
            bias = self.bias.view(bshape)
        return gain * (x - mean) / (std + self.epsilon) + bias

# Define Configuration Classes
# Project root: the parent of the current working directory.
root_dir = os.path.dirname(os.getcwd())
print(root_dir)  # NOTE(review): import-time debug print; consider logging instead
# Active dataset directory name. (A dead `datapath = 'saistraindata'`
# assignment that was immediately overwritten has been removed; switch the
# value below to 'saistraindata' for the training bundle.)
datapath = 'saisdata'
@dataclass
class DataConfig:
    # Filesystem layout for inputs and outputs.
    # NOTE(review): all f-string defaults below are evaluated once at class
    # creation using the module-level `root_dir`/`datapath` globals; the
    # relative paths (no root_dir prefix) resolve against the CWD — confirm
    # the intended base directory.
    datapath: str = f'{root_dir}/{datapath}'  # data root directory
    fasta_data_path: str = f'{datapath}/seqs'  # label (sequence) data
    coords_data_path: str = f'{datapath}/coords'  # feature (coordinate) data
    outputs_path: str = f'{root_dir}/outputs'
    
    train_npy_data_dir: str = f'{datapath}/coords'
    valid_npy_data_dir: str = f'{datapath}/coords'
    test_npy_data_dir: str = f'{datapath}/coords'
    
    train_data_path: str = f'{root_dir}/outputs/public_train_data.csv'
    valid_data_path: str = f'{root_dir}/outputs/public_valid_data.csv'
    test_data_path: str = f'{root_dir}/outputs/public_test_data.csv'

@dataclass
class ModelConfig:
    # Hyperparameters for RNAModel.
    smoothing: float = 0.1  # label-smoothing factor
    hidden: int = 128  # hidden/embedding width
    vocab_size: int = 4  # four nucleotides (A/U/C/G)
    k_neighbors: int = 30  # k for the k-NN graph
    dropout: float = 0.1
    node_feat_types: List[str] = field(default_factory=lambda: ['angle', 'distance', 'direction'])  # default_factory avoids a shared mutable default
    edge_feat_types: List[str] = field(default_factory=lambda: ['orientation', 'distance', 'direction'])  # same as above
    num_encoder_layers: int = 3
    num_decoder_layers: int = 3

@dataclass
class TrainConfig:
    # Optimization and checkpointing settings.
    batch_size: int = 8
    epoch: int = 10  # number of training epochs
    lr: float = 0.001  # learning rate
    output_dir: str = f'{root_dir}/code/weights'  # where checkpoints are written
    ckpt_path: str = f'{root_dir}/code/weights/best.pt'  # best-model checkpoint

@dataclass
class Config:
    """Top-level run configuration bundling data/model/train sub-configs."""
    pipeline: str = 'train'  # pipeline selector
    seed: int = 2025
    device: str = 'cuda' if torch.cuda.is_available() else 'cpu'
    # Bug fix: dataclass instances define __eq__ and are therefore unhashable,
    # so plain `= DataConfig()` defaults raise ValueError on Python 3.11+ and
    # would be shared across every Config instance on older versions.
    # default_factory builds a fresh sub-config per instance.
    data_config: DataConfig = field(default_factory=DataConfig)
    model_config: ModelConfig = field(default_factory=ModelConfig)
    train_config: TrainConfig = field(default_factory=TrainConfig)
    seq_vocab: str = "AUCG"