| """ |
| SBModel — Anisotropic Schrödinger Bridge model. |
| |
| Shared backbone with scDFM, dual output heads (velocity + score), |
| plus AnisotropicSigmaNet for per-gene diffusion coefficients. |
| """ |
|
|
| import torch |
| import torch.nn as nn |
| from torch import Tensor |
| from typing import Optional, Tuple |
|
|
| from .layers import AnisotropicSigmaNet, ScoreDecoder |
| from .._scdfm_imports import ( |
| GeneadaLN, |
| ContinuousValueEncoder, |
| GeneEncoder, |
| BatchLabelEncoder, |
| TimestepEmbedder, |
| ExprDecoder, |
| DifferentialTransformerBlock, |
| PerceiverBlock, |
| DiffPerceiverBlock, |
| ) |
|
|
|
|
class SBModel(nn.Module):
    """
    Anisotropic Schrödinger Bridge model.

    Shares the scDFM backbone (gene/value encoders + fusion blocks) and adds
    three output heads:

    - ``final_layer`` (velocity head): predicts the PF-ODE velocity,
      shape (B, G); training target is x_T - x₀.
    - ``score_decoder`` (optional score head, enabled by ``use_score``):
      predicts the conditional score, shape (B, G).
    - ``sigma_net`` (:class:`AnisotropicSigmaNet`): per-gene diffusion
      coefficient sigma_g in [sigma_min, sigma_max], shape (B, G).

    forward(gene_id, cell_1, x_t, t, perturbation_id)
        → (pred_velocity, pred_score, sigma_g)
    """

    def __init__(
        self,
        ntoken: int = 6000,
        d_model: int = 128,
        nhead: int = 8,
        d_hid: int = 512,
        nlayers: int = 4,
        dropout: float = 0.1,
        fusion_method: str = "differential_perceiver",
        perturbation_function: str = "crisper",
        use_perturbation_interaction: bool = True,
        mask_path: Optional[str] = None,
        # --- sigma head ---
        sigma_min: float = 0.01,
        sigma_max: float = 2.0,
        sigma_init: float = 0.5,
        sigma_hidden_dim: int = 256,
        sigma_num_layers: int = 2,
        # --- score head ---
        score_head_depth: int = 2,
        use_score: bool = True,
    ):
        """
        Args:
            ntoken: vocabulary size for gene / perturbation IDs.
            d_model: backbone embedding width.
            nhead: attention heads per fusion block.
            d_hid: NOTE(review) — accepted but never used in this
                constructor; kept for signature compatibility.
            nlayers: number of fusion blocks (and paired adaLN/adapter layers).
            dropout: dropout rate for the value encoders, adapter layers,
                and (for ``fusion_method="perceiver"``) the fusion blocks.
            fusion_method: one of ``"differential_transformer"``,
                ``"differential_perceiver"``, ``"perceiver"``.
            perturbation_function: ``"crisper"`` routes perturbation IDs
                through the gene encoder; anything else uses
                ``perturbation_embedder``.
            use_perturbation_interaction: forwarded to ``GeneEncoder``.
            mask_path: optional mask file path, forwarded to ``GeneEncoder``.
            sigma_min / sigma_max: clamp range for the per-gene sigma head.
            sigma_init: forwarded to ``AnisotropicSigmaNet``; see the
                NOTE(review) in ``initialize_weights``.
            sigma_hidden_dim / sigma_num_layers: sigma-head MLP size.
            score_head_depth: depth of the score decoder MLP.
            use_score: if False, no score head is built and ``forward``
                returns ``None`` for ``pred_score``.
        """
        super().__init__()
        self.d_model = d_model
        self.fusion_method = fusion_method
        self.perturbation_function = perturbation_function
        self.use_score = use_score

        # Conditioning embedders: diffusion timestep and perturbation label.
        self.t_embedder = TimestepEmbedder(d_model)
        self.perturbation_embedder = BatchLabelEncoder(ntoken, d_model)

        # One continuous-value encoder per input stream (x_t and cell_1),
        # plus the shared gene-ID encoder.
        self.value_encoder_1 = ContinuousValueEncoder(d_model, dropout)
        self.value_encoder_2 = ContinuousValueEncoder(d_model, dropout)
        self.encoder = GeneEncoder(
            ntoken, d_model,
            use_perturbation_interaction=use_perturbation_interaction,
            mask_path=mask_path,
        )

        # Fuses the two per-gene value embeddings into a single token.
        self.fusion_layer = nn.Sequential(
            nn.Linear(2 * d_model, d_model),
            nn.GELU(),
            nn.Linear(d_model, d_model),
            nn.LayerNorm(d_model),
        )

        # Backbone fusion blocks.
        if fusion_method == "differential_transformer":
            self.blocks = nn.ModuleList([
                DifferentialTransformerBlock(d_model, nhead, i, mlp_ratio=4.0)
                for i in range(nlayers)
            ])
        elif fusion_method == "differential_perceiver":
            self.blocks = nn.ModuleList([
                DiffPerceiverBlock(d_model, nhead, i, mlp_ratio=4.0)
                for i in range(nlayers)
            ])
        elif fusion_method == "perceiver":
            # BUG FIX: previously hard-coded dropout=0.1 here, silently
            # ignoring the constructor's `dropout` argument.
            self.blocks = nn.ModuleList([
                PerceiverBlock(d_model, d_model, heads=nhead, mlp_ratio=4.0,
                               dropout=dropout)
                for _ in range(nlayers)
            ])
        else:
            raise ValueError(f"Invalid fusion method: {fusion_method}")

        # Per-block gene-conditioned adaLN and perturbation adapter.
        self.gene_adaLN = nn.ModuleList([
            GeneadaLN(d_model, dropout) for _ in range(nlayers)
        ])
        self.adapter_layer = nn.ModuleList([
            nn.Sequential(
                nn.Linear(2 * d_model, d_model),
                nn.LeakyReLU(),
                nn.Dropout(dropout),
                nn.Linear(d_model, d_model),
                nn.LeakyReLU(),
            )
            for _ in range(nlayers)
        ])

        # Velocity head (use_batch_labels=True: expects [x ; pert] input).
        self.final_layer = ExprDecoder(d_model, explicit_zero_prob=False, use_batch_labels=True)

        # Optional score head.
        if use_score:
            self.score_decoder = ScoreDecoder(d_model, depth=score_head_depth)

        # Per-gene diffusion-coefficient head.
        self.sigma_net = AnisotropicSigmaNet(
            d_model=d_model,
            hidden_dim=sigma_hidden_dim,
            num_layers=sigma_num_layers,
            sigma_min=sigma_min,
            sigma_max=sigma_max,
            sigma_init=sigma_init,
        )

        self.initialize_weights()

    def initialize_weights(self):
        """Xavier-init all linear layers, then re-seat the sigma-head bias."""
        def _basic_init(module):
            if isinstance(module, nn.Linear):
                torch.nn.init.xavier_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
        self.apply(_basic_init)
        # Re-initialize the sigma bias to the midpoint of [sigma_min, sigma_max]
        # after the blanket xavier pass zeroed it.
        # NOTE(review): this ignores the `sigma_init` constructor argument —
        # confirm whether `_init_bias` should receive sigma_init instead.
        self.sigma_net._init_bias(self.sigma_net.sigma_min +
                                  (self.sigma_net.sigma_max - self.sigma_net.sigma_min) * 0.5)

    def get_perturbation_emb(
        self,
        perturbation_id: Optional[Tensor] = None,
        perturbation_emb: Optional[Tensor] = None,
        cell_1: Optional[Tensor] = None,
    ) -> Tensor:
        """Get perturbation embedding, replicating scDFM logic.

        Exactly one of ``perturbation_id`` / ``perturbation_emb`` may be
        given. With an ID, the embedding is looked up (gene encoder for
        "crisper", otherwise ``perturbation_embedder``) and mean-pooled
        over dim 1 — assumes ID input of shape (B, num_perts). With a raw
        embedding, it is moved to ``cell_1``'s device/dtype, broadcast to
        the batch, and layer-normed. If both are ``None`` this returns
        ``None``, which will fail later in ``forward``.
        """
        assert perturbation_emb is None or perturbation_id is None
        if perturbation_id is not None:
            if self.perturbation_function == "crisper":
                perturbation_emb = self.encoder(perturbation_id)
            else:
                perturbation_emb = self.perturbation_embedder(perturbation_id)
            # Pool over the perturbation-token axis → (B, d_model).
            perturbation_emb = perturbation_emb.mean(1)
        elif perturbation_emb is not None:
            # Requires cell_1 for device/dtype and batch size.
            perturbation_emb = perturbation_emb.to(cell_1.device, dtype=cell_1.dtype)
            if perturbation_emb.dim() == 1:
                perturbation_emb = perturbation_emb.unsqueeze(0)
            if perturbation_emb.size(0) == 1:
                perturbation_emb = perturbation_emb.expand(cell_1.shape[0], -1).contiguous()
            perturbation_emb = self.perturbation_embedder.enc_norm(perturbation_emb)
        return perturbation_emb

    def forward(
        self,
        gene_id: Tensor,
        cell_1: Tensor,
        x_t: Tensor,
        t: Tensor,
        perturbation_id: Optional[Tensor] = None,
    ) -> Tuple[Tensor, Optional[Tensor], Tensor]:
        """
        Args:
            gene_id: (B, G) gene-ID tokens.
            cell_1: (B, G) source-cell expression values.
            x_t: (B, G) bridge state at time ``t``.
            t: scalar or (B,) diffusion time.
            perturbation_id: optional perturbation IDs — assumed
                (B, num_perts); TODO confirm against callers.

        Returns:
            (pred_velocity, pred_score, sigma_g); ``pred_score`` is ``None``
            when ``use_score`` is False.
        """
        # Broadcast a scalar time to the batch.
        if t.dim() == 0:
            t = t.repeat(cell_1.size(0))

        # Encode inputs and fuse the two value streams per gene token.
        gene_emb = self.encoder(gene_id)
        val_emb_1 = self.value_encoder_1(x_t)
        val_emb_2 = self.value_encoder_2(cell_1) + gene_emb
        x = self.fusion_layer(torch.cat([val_emb_1, val_emb_2], dim=-1)) + gene_emb

        # Conditioning vector: timestep + perturbation.
        t_emb = self.t_embedder(t)
        pert_emb = self.get_perturbation_emb(perturbation_id, cell_1=cell_1)
        c = t_emb + pert_emb

        # Backbone: adaLN → perturbation adapter → fusion block, per layer.
        for i, block in enumerate(self.blocks):
            x = self.gene_adaLN[i](gene_emb, x)
            pert_exp = pert_emb[:, None, :].expand(-1, x.size(1), -1)
            x = torch.cat([x, pert_exp], dim=-1)
            x = self.adapter_layer[i](x)
            x = block(x, val_emb_2, c)

        # Velocity head consumes [x ; perturbation] per token.
        x_with_pert = torch.cat([x, pert_emb[:, None, :].expand(-1, x.size(1), -1)], dim=-1)
        pred_velocity = self.final_layer(x_with_pert)["pred"]

        # Optional score head.
        pred_score = None
        if self.use_score:
            pred_score = self.score_decoder(x, pert_emb)

        # Per-gene diffusion coefficient in [sigma_min, sigma_max].
        sigma_g = self.sigma_net(pert_emb, t, gene_emb)

        return pred_velocity, pred_score, sigma_g
|
|