import os
import math
import torch
import numpy as np
import torch.nn as nn
from Bio import SeqIO
import torch_geometric
import torch.optim as optim
import torch.nn.functional as F
from torch_geometric.data import Data, Batch
from torch_geometric.nn import GCNConv, global_mean_pool
from typing import List, Tuple, Optional


# Model: RiboDiffusion
# Paper: https://arxiv.org/abs/2404.11199
# GitHub: https://github.com/ml4bio/ribodiffusion
# Write-up (Chinese): https://blog.csdn.net/Pythonliu7/article/details/147362312
 
class Config:
    """Global hyper-parameters and constants for the RiboDiffusion pipeline."""
    seed: int = 42                 # RNG seed for reproducibility
    device: str = "cuda" if torch.cuda.is_available() else "cpu"
    batch_size: int = 16
    lr: float = 1e-4               # AdamW learning rate
    epochs: int = 50
    seq_vocab: str = "AUCG"        # nucleotide alphabet; position = class index
    coord_dims: int = 7            # 7 backbone points per residue
    hidden_dim: int = 256
    k_neighbors: int = 8           # larger neighbor window to capture more structural context
    n_heads: int = 8               # Transformer attention heads
    n_layers: int = 6              # depth of both the GCN stack and the Transformer encoder
    dropout: float = 0.1
    timesteps: int = 1000          # number of diffusion steps
    sampling_timesteps: int = 100  # sampling steps (NOTE(review): unused by RiboDiffusion.sample — confirm)
    num_samples: int = 5           # sequences generated per structure
 
def calc_seq_recovery(gt_seq: str, pred_seq: str) -> float:
    """Return the fraction of positions where prediction matches the reference.

    Positions are compared up to ``len(gt_seq)``; a prediction shorter than
    the reference simply cannot match the missing tail (equivalent to the
    original right-padding with 'X', which is outside the vocabulary), and
    extra predicted characters are ignored.

    Args:
        gt_seq: ground-truth sequence.
        pred_seq: predicted sequence.

    Returns:
        Match fraction in [0, 1]. Returns 0.0 for an empty reference
        (the original raised ZeroDivisionError).
    """
    if not gt_seq:
        return 0.0
    # zip truncates at the shorter sequence, so unmatched tail positions
    # contribute zero matches — identical to padding with a non-vocab char.
    matches = sum(1 for a, b in zip(gt_seq, pred_seq) if a == b)
    return matches / len(gt_seq)
 
class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding for batch-first inputs.

    Adds the standard Transformer sin/cos position signal to inputs of shape
    (batch, seq_len, d_model). The encoding buffer is stored as
    (1, max_len, d_model) so it broadcasts over the batch dimension.

    Fix: the original stored pe as (max_len, 1, d_model) and sliced by
    ``x.size(0)`` — the sequence-first convention — while the downstream
    TransformerEncoder in this file uses ``batch_first=True``. With
    batch-first inputs that indexed the encoding by *batch element*, so every
    token in a sequence received the same position signal.

    Args:
        d_model: feature dimension of the model.
        max_len: maximum supported sequence length.
    """
    def __init__(self, d_model: int, max_len: int = 5000):
        super().__init__()
        position = torch.arange(max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
        pe = torch.zeros(1, max_len, d_model)
        pe[0, :, 0::2] = torch.sin(position * div_term)
        pe[0, :, 1::2] = torch.cos(position * div_term)
        # Buffer (not a parameter): moves with .to()/device changes, no grads.
        self.register_buffer('pe', pe)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Add positional encodings to x of shape (batch, seq_len, d_model)."""
        return x + self.pe[:, :x.size(1)]
 
class StructureModule(nn.Module):
    """Structure module: encodes 3D backbone geometry into per-node features.

    Flattened backbone coordinates are projected into the hidden space, then
    refined by a stack of graph convolutions with residual connections and
    layer normalization.
    """
    def __init__(self):
        super().__init__()
        # Input projection: (coord_dims * 3) flattened coordinates -> hidden.
        self.encoder = nn.Sequential(
            nn.Linear(Config.coord_dims * 3, Config.hidden_dim),
            nn.ReLU(),
            nn.Dropout(Config.dropout),
        )

        # One GCN layer and one LayerNorm per depth level.
        self.conv_layers = nn.ModuleList()
        self.layer_norms = nn.ModuleList()
        for _ in range(Config.n_layers):
            self.conv_layers.append(GCNConv(Config.hidden_dim, Config.hidden_dim))
            self.layer_norms.append(nn.LayerNorm(Config.hidden_dim))

    def forward(self, data: Data) -> torch.Tensor:
        """Return per-node structure features of shape (num_nodes, hidden_dim)."""
        h = self.encoder(data.x)

        for conv, norm in zip(self.conv_layers, self.layer_norms):
            residual = h
            h = conv(h, data.edge_index)
            h = F.dropout(F.relu(h), p=Config.dropout, training=self.training)
            # Post-norm residual block.
            h = norm(h + residual)

        return h
 
class SequenceModule(nn.Module):
    """Sequence module: Transformer encoder over node features plus an output
    projection onto nucleotide logits (one logit per vocabulary symbol)."""
    def __init__(self):
        super().__init__()
        self.pos_encoder = PositionalEncoding(Config.hidden_dim)

        self.transformer = nn.TransformerEncoder(
            nn.TransformerEncoderLayer(
                d_model=Config.hidden_dim,
                nhead=Config.n_heads,
                dim_feedforward=4 * Config.hidden_dim,
                dropout=Config.dropout,
                batch_first=True,
            ),
            Config.n_layers,
        )

        self.proj_out = nn.Linear(Config.hidden_dim, len(Config.seq_vocab))

    def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        """Encode x (with positional signal) and project to vocabulary logits.

        `mask` is forwarded as the encoder's key-padding mask when given.
        """
        h = self.pos_encoder(x)
        h = self.transformer(h, src_key_padding_mask=mask)
        return self.proj_out(h)

class RiboDiffusion(nn.Module):
    """RiboDiffusion model: DDPM-style diffusion for inverse RNA folding.

    The structure module encodes the backbone graph into per-node features,
    which condition a Transformer "sequence module" that predicts the noise
    added to (one-hot encoded) sequence tensors.
    """
    def __init__(self):
        super().__init__()
        self.structure_module = StructureModule()
        self.sequence_module = SequenceModule()
        
        # Diffusion schedule: linear betas; alpha_bar is the cumulative product.
        # NOTE(review): these are plain tensor attributes, not registered
        # buffers — they are pinned to Config.device at construction and will
        # not follow a later `model.to(...)` call nor appear in state_dict.
        self.beta = torch.linspace(0.0001, 0.02, Config.timesteps).to(Config.device)
        self.alpha = 1. - self.beta
        self.alpha_bar = torch.cumprod(self.alpha, dim=0)
 
    def get_noise_schedule(self, t: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Return (sqrt(alpha_bar_t), sqrt(1 - alpha_bar_t)) for timesteps t."""
        alpha_bar = self.alpha_bar[t]
        sqrt_one_minus_alpha_bar = torch.sqrt(1 - alpha_bar)
        return torch.sqrt(alpha_bar), sqrt_one_minus_alpha_bar
 
    def q_sample(self, x: torch.Tensor, t: torch.Tensor, noise: torch.Tensor) -> torch.Tensor:
        """Forward diffusion: mix clean x with Gaussian noise at timestep t.

        NOTE(review): the .view(-1, 1, 1) reshape assumes a 3D x
        (batch, seq, vocab); `forward` passes a 2D one-hot tensor of shape
        (num_nodes, vocab), for which this broadcast yields an (N, N, vocab)
        result — verify the intended input rank against the reference code.
        """
        sqrt_alpha_bar, sqrt_one_minus_alpha_bar = self.get_noise_schedule(t)
        return sqrt_alpha_bar.view(-1, 1, 1) * x + sqrt_one_minus_alpha_bar.view(-1, 1, 1) * noise
 
    def p_sample(self, x: torch.Tensor, t: torch.Tensor, structure_feat: torch.Tensor) -> torch.Tensor:
        """Single reverse-diffusion (denoising) step.

        NOTE(review): this returns clamped pred_x_start + sqrt(beta_t) * noise,
        which is not the standard DDPM posterior mean, and it injects noise
        even at t == 0 — confirm against the reference implementation.
        """
        bsz = x.shape[0]  # NOTE(review): unused
        sqrt_alpha_bar, sqrt_one_minus_alpha_bar = self.get_noise_schedule(t)
        
        # Predict the noise from the structure-conditioned input.
        # NOTE(review): x has len(Config.seq_vocab) channels while
        # structure_feat has hidden_dim channels; this addition requires the
        # two widths to match (4 vs 256 as configured) — verify.
        pred_noise = self.sequence_module(x + structure_feat)
        
        # Recover the estimated clean signal x_0 from the noise prediction.
        pred_x_start = (x - sqrt_one_minus_alpha_bar.view(-1, 1, 1) * pred_noise) / sqrt_alpha_bar.view(-1, 1, 1)
        pred_x_start = torch.clamp(pred_x_start, -1, 1)
        
        posterior_variance = self.beta[t].view(-1, 1, 1)
        noise = torch.randn_like(x)
        
        return pred_x_start + torch.sqrt(posterior_variance) * noise
 
    def forward(self, data: Data) -> torch.Tensor:
        """Training forward pass: return the MSE noise-prediction loss."""
        # Per-node structure conditioning features.
        structure_feat = self.structure_module(data)
        
        # One-hot encode the ground-truth sequence labels.
        x = F.one_hot(data.y, num_classes=len(Config.seq_vocab)).float()
        
        # Sample one timestep per node and apply forward diffusion.
        t = torch.randint(0, Config.timesteps, (x.shape[0],), device=x.device)
        noise = torch.randn_like(x)
        noisy_x = self.q_sample(x, t, noise)
        
        # Predict the injected noise from the structure-conditioned input.
        # NOTE(review): same channel-width concern as in p_sample.
        pred_noise = self.sequence_module(noisy_x + structure_feat)
        
        return F.mse_loss(pred_noise, noise)
 
    @torch.no_grad()
    def sample(self, data: Data, num_samples: int = 1) -> List[str]:
        """Generate `num_samples` candidate sequences for one structure."""
        self.eval()
        structure_feat = self.structure_module(data)
        
        sequences = []
        for _ in range(num_samples):
            # Start from pure Gaussian noise over the vocabulary channels.
            x = torch.randn(data.num_nodes, len(Config.seq_vocab)).to(Config.device)
            
            # Iteratively denoise from t = T-1 down to 0.
            # NOTE(review): Config.sampling_timesteps (100) is never used;
            # all Config.timesteps (1000) steps are executed — confirm intent.
            for t in range(Config.timesteps - 1, -1, -1):
                t_batch = torch.full((x.shape[0],), t, device=x.device, dtype=torch.long)
                x = self.p_sample(x, t_batch, structure_feat)
            
            # Argmax over vocabulary channels -> nucleotide string.
            pred_seq = ''.join([Config.seq_vocab[i] for i in x.argmax(dim=-1).cpu().numpy()])
            sequences.append(pred_seq)
        
        return sequences
 
class RNADataset(torch.utils.data.Dataset):
    """RNA dataset: pairs each backbone-coordinate .npy file with its FASTA sequence.

    Each sample is a torch_geometric ``Data`` graph whose nodes are residues,
    whose node features are flattened backbone coordinates, and whose edges
    connect residues within ``Config.k_neighbors`` positions along the chain.
    """
    def __init__(self, coords_dir: str, seqs_dir: str):
        self.samples = []

        for fname in os.listdir(coords_dir):
            if not fname.endswith('.npy'):
                continue

            # Load backbone coordinates; replace NaNs (missing atoms) with 0.
            coord = np.load(os.path.join(coords_dir, fname))
            coord = np.nan_to_num(coord, nan=0.0)

            num_nodes = coord.shape[0]
            x = torch.tensor(coord.reshape(num_nodes, -1), dtype=torch.float32)

            edge_index = self._build_edge_index(num_nodes, Config.k_neighbors)

            # Load the matching sequence (same basename, .fasta extension).
            seq_id = os.path.splitext(fname)[0]
            record = next(SeqIO.parse(os.path.join(seqs_dir, f"{seq_id}.fasta"), "fasta"))
            # Normalize case so lowercase FASTA entries do not raise ValueError.
            seq = str(record.seq).upper()
            y = torch.tensor([Config.seq_vocab.index(c) for c in seq], dtype=torch.long)

            self.samples.append(Data(x=x, edge_index=edge_index, y=y, num_nodes=num_nodes))

    @staticmethod
    def _build_edge_index(num_nodes: int, k: int) -> torch.Tensor:
        """Build a symmetric chain-neighborhood edge index.

        Connects each node to its next ``k`` sequential neighbors, in both
        directions. The original implementation iterated a symmetric window
        and appended both directions for every neighbor, so each undirected
        pair appeared four times — duplicate edges make GCN aggregation
        over-count neighbors. Here every directed edge is emitted exactly once.

        Returns:
            A (2, E) long tensor; (2, 0) when there are no edges
            (the original produced a malformed tensor for that case).
        """
        edges = []
        for i in range(num_nodes):
            for j in range(i + 1, min(num_nodes, i + 1 + k)):
                edges.append([i, j])
                edges.append([j, i])
        if not edges:
            return torch.empty((2, 0), dtype=torch.long)
        return torch.tensor(edges, dtype=torch.long).t().contiguous()

    def __len__(self) -> int:
        return len(self.samples)

    def __getitem__(self, idx: int) -> "Data":
        return self.samples[idx]
 
def train_epoch(model: RiboDiffusion,
                loader: torch_geometric.loader.DataLoader,
                optimizer: torch.optim.Optimizer) -> float:
    """Run one optimization epoch and return the mean per-batch loss."""
    model.train()
    running_loss = 0.0

    for batch in loader:
        optimizer.zero_grad()
        # The model returns the diffusion (MSE) loss directly.
        loss = model(batch.to(Config.device))
        loss.backward()
        optimizer.step()
        running_loss += loss.item()

    return running_loss / len(loader)
 
def evaluate(model: RiboDiffusion, 
            loader: torch_geometric.loader.DataLoader) -> float:
    """Return the mean sequence-recovery rate over all batches in `loader`."""
    model.eval()
    recovery_sum = 0.0
    n_batches = 0

    with torch.no_grad():
        for batch in loader:
            batch = batch.to(Config.device)
            reference = ''.join(Config.seq_vocab[i] for i in batch.y.cpu().numpy())

            # Sample several candidate sequences and average their recovery.
            candidates = model.sample(batch, num_samples=Config.num_samples)
            rates = [calc_seq_recovery(reference, cand) for cand in candidates]
            recovery_sum += sum(rates) / len(rates)
            n_batches += 1

    return recovery_sum / n_batches
 
def main():
    """End-to-end pipeline: load data, train, validate, checkpoint, and test."""
    # Reproducibility.
    torch.manual_seed(Config.seed)
    np.random.seed(Config.seed)

    dataset = RNADataset(
        "saisdata/coords",
        "saisdata/seqs"
    )

    # 80 / 10 / 10 split; the remainder after integer division goes to test.
    n_train = int(0.8 * len(dataset))
    n_val = (len(dataset) - n_train) // 2
    n_test = len(dataset) - n_train - n_val
    train_set, val_set, test_set = torch.utils.data.random_split(
        dataset, [n_train, n_val, n_test]
    )

    make_loader = torch_geometric.loader.DataLoader
    train_loader = make_loader(train_set, batch_size=Config.batch_size, shuffle=True)
    val_loader = make_loader(val_set, batch_size=Config.batch_size)
    test_loader = make_loader(test_set, batch_size=Config.batch_size)

    # Model, optimizer, and cosine LR schedule over the full run.
    model = RiboDiffusion().to(Config.device)
    optimizer = optim.AdamW(model.parameters(), lr=Config.lr)
    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=Config.epochs)

    best_recovery = 0
    for epoch in range(Config.epochs):
        train_loss = train_epoch(model, train_loader, optimizer)
        val_recovery = evaluate(model, val_loader)
        scheduler.step()

        print(f"Epoch {epoch+1}/{Config.epochs}")
        print(f"Train Loss: {train_loss:.4f} | Val Recovery: {val_recovery:.4f}")

        # Keep only the checkpoint with the best validation recovery.
        if val_recovery > best_recovery:
            best_recovery = val_recovery
            torch.save(model.state_dict(), "best_ribodiffusion_model.pth")

    # Final evaluation with the best checkpoint restored.
    model.load_state_dict(torch.load("best_ribodiffusion_model.pth"))
    test_recovery = evaluate(model, test_loader)
    print(f"\nTest Recovery Rate: {test_recovery:.4f}")


if __name__ == "__main__":
    main()
