| """ |
| SACFMDenoiser — Source-Anchored Conditional Flow Matching. |
| |
| Training: x_0 = source + sigma_aug * eps (noisy source, not pure noise). |
| Standard affine path: x_t = (1-t)*x_0 + t*target. |
| Velocity target: dx_t = target - x_0. |
| Gene-weighted MSE loss. |
| |
| Inference: ODE from clean source (no noise, no SDE). |
| """ |
|
|
| import torch |
| import torch.nn as nn |
| import torchdiffeq |
|
|
| from ._scdfm_imports import AffineProbPath, CondOTScheduler |
|
|
|
|
def pairwise_sq_dists(X, Y):
    """Return the matrix of squared Euclidean distances between rows of X and rows of Y."""
    dists = torch.cdist(X, Y, p=2)
    return dists.pow(2)
|
|
|
|
@torch.no_grad()
def median_sigmas(X, scales=(0.5, 1.0, 2.0, 4.0)):
    """Median-heuristic RBF bandwidths for X at several multiplicative scales.

    Computes the median of the off-diagonal squared pairwise distances of X
    (clamped away from zero), scales it by each entry of ``scales``, and
    returns the square roots as plain Python floats.
    """
    sq_dists = torch.cdist(X, X, p=2) ** 2
    n = sq_dists.size(0)
    off_diag = ~torch.eye(n, dtype=bool, device=sq_dists.device)
    med = sq_dists[off_diag].median().clamp_min(1e-12)
    scaled = torch.tensor(scales, device=X.device) * med
    return scaled.sqrt().tolist()
|
|
|
|
def mmd2_unbiased_multi_sigma(X, Y, sigmas):
    """Unbiased squared MMD between samples X and Y, averaged over Gaussian kernel widths.

    For each sigma, uses the Gaussian kernel k(a, b) = exp(-||a-b||^2 / (2 sigma^2))
    with diagonal terms excluded from the within-set averages (unbiased estimator).
    Returns the mean of the per-sigma estimates as a scalar tensor.
    """
    m = X.size(0)
    n = Y.size(0)
    d_xx = torch.cdist(X, X, p=2) ** 2
    d_yy = torch.cdist(Y, Y, p=2) ** 2
    d_xy = torch.cdist(X, Y, p=2) ** 2
    estimates = []
    for s in sigmas:
        beta = 1.0 / (2.0 * (s ** 2) + 1e-12)
        k_xx = torch.exp(-beta * d_xx)
        k_yy = torch.exp(-beta * d_yy)
        k_xy = torch.exp(-beta * d_xy)
        # Off-diagonal means within each sample set (the 1e-12 guards the m==1 / n==1 case).
        within_x = (k_xx.sum() - k_xx.diagonal().sum()) / (m * (m - 1) + 1e-12)
        within_y = (k_yy.sum() - k_yy.diagonal().sum()) / (n * (n - 1) + 1e-12)
        cross = k_xy.mean()
        estimates.append(within_x + within_y - 2.0 * cross)
    return torch.stack(estimates).mean()
|
|
|
|
class SACFMDenoiser(nn.Module):
    """
    Source-Anchored Conditional Flow Matching Denoiser.

    Key differences from scDFM baseline:
    - x_0 = source + sigma_aug * eps (not pure noise)
    - Gene-weighted velocity loss
    - Inference starts from clean source

    Key differences from SB:
    - No SigmaNet (sigma_aug is fixed, data-driven)
    - No ScoreDecoder
    - No SDE inference
    - No bridge formulation (standard affine path)
    """

    def __init__(
        self,
        model: nn.Module,
        sigma_aug: torch.Tensor,
        gene_weight: torch.Tensor,
        noise_type: str = "Gaussian",
        use_mmd_loss: bool = True,
        gamma: float = 0.5,
    ):
        """
        Args:
            model: backbone velocity predictor; called with
                ``mode="predict_y"`` in both training and generation.
            sigma_aug: per-gene noise scale applied to the source during
                training (indexed by gene id in ``train_step``).
            gene_weight: per-gene weight for the velocity MSE loss
                (indexed by gene id in ``train_step``).
            noise_type: stored but not otherwise read in this class.
            use_mmd_loss: if True, add an MMD term between the one-step
                x_1 extrapolation and the target batch.
            gamma: weight of the MMD term in the total loss.
        """
        super().__init__()
        self.model = model
        self.noise_type = noise_type
        self.use_mmd_loss = use_mmd_loss
        self.gamma = gamma

        # Buffers (not Parameters): move with .to(device) and are saved in
        # state_dict, but are never updated by the optimizer.
        self.register_buffer("sigma_aug", sigma_aug)
        self.register_buffer("gene_weight", gene_weight)

        # Standard affine (conditional-OT) path: x_t = (1-t)*x_0 + t*x_1,
        # so the velocity target is dx_t = x_1 - x_0.
        self.flow_path = AffineProbPath(scheduler=CondOTScheduler())

    def train_step(
        self,
        source: torch.Tensor,
        target: torch.Tensor,
        perturbation_id: torch.Tensor,
        gene_input: torch.Tensor,
        input_gene_ids: torch.Tensor,
    ) -> dict:
        """One training step; returns a dict with 'loss', 'loss_v', 'loss_mmd'.

        The detached 'loss_v' / 'loss_mmd' entries are for logging only;
        backprop through 'loss'.
        """
        B = source.shape[0]
        device = source.device

        # Uniform time per sample, clamped away from the endpoints 0 and 1.
        t = torch.rand(B, device=device).clamp(1e-5, 1 - 1e-5)

        # Restrict the per-gene noise scale / loss weight to the genes present
        # in this batch. Assumes input_gene_ids is 1-D over the batch's gene
        # axis (the unsqueeze(0) below broadcasts over the batch) — TODO confirm.
        sigma_sub = self.sigma_aug[input_gene_ids]
        weight_sub = self.gene_weight[input_gene_ids]

        # Source-anchored start point: x_0 = source + sigma_aug * eps
        # (noisy source, not pure noise — see module docstring).
        eps = torch.randn_like(source)
        x_0 = source + sigma_sub.unsqueeze(0) * eps

        # path_sample exposes x_t, t and the velocity target dx_t.
        path_sample = self.flow_path.sample(t=t, x_0=x_0, x_1=target)

        # NOTE(review): gene_input is passed in both the 1st and 6th positions;
        # presumably two distinct slots of the backbone signature — confirm
        # against the model's forward definition.
        pred_v = self.model(
            gene_input, path_sample.x_t, path_sample.t, source,
            perturbation_id, gene_input, mode="predict_y",
        )

        # Gene-weighted MSE between predicted and target velocity.
        loss_v = (weight_sub.unsqueeze(0) * (pred_v - path_sample.dx_t) ** 2).mean()

        loss_mmd = torch.tensor(0.0, device=device)
        if self.use_mmd_loss:
            t_col = t.unsqueeze(-1)
            # One-step extrapolation to t=1 along the affine path:
            # x_1_hat = x_t + (1 - t) * v_hat.
            x1_hat = path_sample.x_t + pred_v * (1 - t_col)
            # Bandwidths from the target batch (median heuristic, no_grad).
            sigmas_mmd = median_sigmas(target, scales=(0.5, 1.0, 2.0, 4.0))
            loss_mmd = mmd2_unbiased_multi_sigma(x1_hat, target, sigmas_mmd)

        loss = loss_v + self.gamma * loss_mmd

        return {
            "loss": loss,
            "loss_v": loss_v.detach(),
            "loss_mmd": loss_mmd.detach(),
        }

    @torch.no_grad()
    def generate(
        self,
        source: torch.Tensor,
        perturbation_id: torch.Tensor,
        gene_ids: torch.Tensor,
        steps: int = 20,
        method: str = "rk4",
    ) -> torch.Tensor:
        """Generate perturbed expression via PF-ODE starting from clean source.

        Integrates dx/dt = v(x, t) from t=0 to t=1 with torchdiffeq and
        returns the final state clamped to be non-negative.
        """
        B, G = source.shape
        device = source.device

        # Broadcast a shared gene-id vector across the batch if needed.
        if gene_ids.dim() == 1:
            gene_ids = gene_ids.unsqueeze(0).expand(B, -1)

        # Inference starts from the clean source — no sigma_aug noise here.
        x_0 = source.clone()

        def ode_func(t_scalar, x):
            # odeint passes a scalar time; the backbone expects one t per sample.
            t_batch = torch.full((B,), t_scalar.item(), device=device)
            pred_v = self.model(
                gene_ids, x, t_batch, source,
                perturbation_id, gene_ids, mode="predict_y",
            )
            return pred_v

        t_span = torch.linspace(0, 1, steps, device=device)
        # NOTE(review): atol/rtol only matter for adaptive solvers; with the
        # default fixed-step "rk4" they should be ignored — confirm if an
        # adaptive method is ever passed.
        trajectory = torchdiffeq.odeint(
            ode_func, x_0, t_span,
            method=method, atol=1e-4, rtol=1e-4,
        )
        # Take the final time point; clamp to keep expression non-negative.
        return torch.clamp(trajectory[-1], min=0)
|
|