# lfj-code / train / SB / SA7 / src / denoiser.py
# Uploaded by ethan1115 via huggingface_hub (commit 9651517, verified)
"""
SACFMDenoiser — Source-Anchored Conditional Flow Matching.
Training: x_0 = source + sigma_aug * eps (noisy source, not pure noise).
Standard affine path: x_t = (1-t)*x_0 + t*target.
Velocity target: dx_t = target - x_0.
Gene-weighted MSE loss.
Inference: ODE from clean source (no noise, no SDE).
"""
import torch
import torch.nn as nn
import torchdiffeq
from ._scdfm_imports import AffineProbPath, CondOTScheduler
def pairwise_sq_dists(X, Y):
    """All-pairs squared Euclidean distances between rows of X and rows of Y."""
    dist = torch.cdist(X, Y, p=2)
    return dist * dist


@torch.no_grad()
def median_sigmas(X, scales=(0.5, 1.0, 2.0, 4.0)):
    """RBF bandwidths from the median heuristic.

    Takes the median off-diagonal squared distance within X, scales it by
    each entry of ``scales``, and returns the square roots as Python floats.
    """
    sq_d = pairwise_sq_dists(X, X)
    n = sq_d.size(0)
    # Symmetric matrix: masking out the diagonal keeps each pair twice,
    # which leaves the median unchanged.
    off_diag = sq_d[~torch.eye(n, dtype=bool, device=sq_d.device)]
    med = torch.median(off_diag).clamp_min(1e-12)
    bandwidths = torch.sqrt(torch.tensor(scales, device=X.device) * med)
    return bandwidths.tolist()


def mmd2_unbiased_multi_sigma(X, Y, sigmas):
    """Unbiased squared MMD with Gaussian kernels, averaged over ``sigmas``."""
    m = X.size(0)
    n = Y.size(0)
    d_xx = pairwise_sq_dists(X, X)
    d_yy = pairwise_sq_dists(Y, Y)
    d_xy = pairwise_sq_dists(X, Y)

    def one_sigma(sigma):
        # Gaussian kernel k(a, b) = exp(-||a - b||^2 / (2 * sigma^2)).
        beta = 1.0 / (2.0 * (sigma ** 2) + 1e-12)
        k_xx = torch.exp(-beta * d_xx)
        k_yy = torch.exp(-beta * d_yy)
        k_xy = torch.exp(-beta * d_xy)
        # Drop the diagonals for the unbiased within-set estimates.
        e_xx = (k_xx.sum() - k_xx.diag().sum()) / (m * (m - 1) + 1e-12)
        e_yy = (k_yy.sum() - k_yy.diag().sum()) / (n * (n - 1) + 1e-12)
        return e_xx + e_yy - 2.0 * k_xy.mean()

    return torch.stack([one_sigma(s) for s in sigmas]).mean()
class SACFMDenoiser(nn.Module):
    """
    Source-Anchored Conditional Flow Matching Denoiser.

    Training path: x_0 = source + sigma_aug * eps (noisy source rather than
    pure noise), then the standard affine interpolant
    x_t = (1 - t) * x_0 + t * target with velocity target dx_t = target - x_0.
    The velocity regression is gene-weighted; inference integrates the
    probability-flow ODE from the clean source.

    Key differences from scDFM baseline:
    - x_0 = source + sigma_aug * eps (not pure noise)
    - Gene-weighted velocity loss
    - Inference starts from clean source
    Key differences from SB:
    - No SigmaNet (sigma_aug is fixed, data-driven)
    - No ScoreDecoder
    - No SDE inference
    - No bridge formulation (standard affine path)
    """

    def __init__(
        self,
        model: nn.Module,
        sigma_aug: torch.Tensor,  # (G,) per-gene augmentation noise
        gene_weight: torch.Tensor,  # (G,) per-gene loss weight
        noise_type: str = "Gaussian",
        use_mmd_loss: bool = True,
        gamma: float = 0.5,
    ):
        """
        Args:
            model: backbone network. Called below as
                model(gene_ids, x_t, t, source, perturbation_id, gene_ids,
                mode="predict_y") and assumed to return a velocity shaped
                like x_t — scDFM convention; confirm against the model class.
            sigma_aug: (G,) fixed per-gene std of the augmentation noise
                added to the source at train time (buffer, not learned).
            gene_weight: (G,) fixed per-gene weight for the velocity MSE
                (buffer, not learned).
            noise_type: stored but never read in this class as visible here.
            use_mmd_loss: if True, adds an MMD term between the one-step
                extrapolated sample and the target batch.
            gamma: weight of the MMD term in the total loss.
        """
        super().__init__()
        self.model = model
        # NOTE(review): noise_type is kept on the instance but unused below —
        # presumably consumed by external code or retained for interface parity.
        self.noise_type = noise_type
        self.use_mmd_loss = use_mmd_loss
        self.gamma = gamma
        # Fixed per-gene tensors (not learned): buffers move with .to()/.cuda()
        # and persist in the state_dict, but receive no gradients.
        self.register_buffer("sigma_aug", sigma_aug)
        self.register_buffer("gene_weight", gene_weight)
        # Standard affine flow matching path (same as scDFM)
        self.flow_path = AffineProbPath(scheduler=CondOTScheduler())

    def train_step(
        self,
        source: torch.Tensor,  # (B, G_sub) control expression
        target: torch.Tensor,  # (B, G_sub) perturbed expression
        perturbation_id: torch.Tensor,  # (B, n_pert)
        gene_input: torch.Tensor,  # (B, G_sub) vocab-encoded gene IDs
        input_gene_ids: torch.Tensor,  # (G_sub,) indices into full gene set
    ) -> dict:
        """
        One training step: source-anchored flow-matching loss (+ optional MMD).

        Returns:
            dict with "loss" (differentiable total), "loss_v" and "loss_mmd"
            (both detached, for logging only).
        """
        B = source.shape[0]
        device = source.device
        # 1. Sample time (uniform, clamped away from boundaries so the
        #    interpolant never degenerates to exactly x_0 or target)
        t = torch.rand(B, device=device).clamp(1e-5, 1 - 1e-5)
        # 2. Look up per-gene sigma and weight for current gene subset
        sigma_sub = self.sigma_aug[input_gene_ids]  # (G_sub,)
        weight_sub = self.gene_weight[input_gene_ids]  # (G_sub,)
        # 3. Source-anchored x_0 with data-driven augmentation noise
        eps = torch.randn_like(source)
        x_0 = source + sigma_sub.unsqueeze(0) * eps  # (B, G_sub)
        # 4. Standard affine flow matching (reuses scDFM AffineProbPath)
        #    x_t = (1-t)*x_0 + t*target
        #    dx_t = target - x_0 = (target - source) - sigma * eps
        path_sample = self.flow_path.sample(t=t, x_0=x_0, x_1=target)
        # 5. Model forward (scDFM convention: cell_1=x_t, cell_2=source)
        pred_v = self.model(
            gene_input, path_sample.x_t, path_sample.t, source,
            perturbation_id, gene_input, mode="predict_y",
        )
        # 6. Gene-weighted velocity loss (weighted MSE, mean over batch+genes)
        loss_v = (weight_sub.unsqueeze(0) * (pred_v - path_sample.dx_t) ** 2).mean()
        # 7. MMD loss (optional, same as scDFM baseline)
        loss_mmd = torch.tensor(0.0, device=device)
        if self.use_mmd_loss:
            t_col = t.unsqueeze(-1)
            # One-step Euler extrapolation to t=1: x1_hat = x_t + v * (1 - t)
            x1_hat = path_sample.x_t + pred_v * (1 - t_col)
            # Kernel bandwidths via median heuristic on the target batch;
            # median_sigmas is @no_grad, so the sigmas act as constants.
            sigmas_mmd = median_sigmas(target, scales=(0.5, 1.0, 2.0, 4.0))
            loss_mmd = mmd2_unbiased_multi_sigma(x1_hat, target, sigmas_mmd)
        loss = loss_v + self.gamma * loss_mmd
        return {
            "loss": loss,
            "loss_v": loss_v.detach(),
            "loss_mmd": loss_mmd.detach(),
        }

    @torch.no_grad()
    def generate(
        self,
        source: torch.Tensor,  # (B, G)
        perturbation_id: torch.Tensor,  # (B, n_pert)
        gene_ids: torch.Tensor,  # (B, G) or (G,)
        steps: int = 20,
        method: str = "rk4",
    ) -> torch.Tensor:
        """Generate perturbed expression via PF-ODE starting from clean source.

        Args:
            source: (B, G) control expression; used both as the ODE initial
                state and as conditioning in every model call.
            perturbation_id: (B, n_pert) perturbation encoding.
            gene_ids: gene ID tensor; a 1-D (G,) tensor is broadcast to (B, G).
            steps: number of evenly spaced time points in [0, 1] handed to
                the solver.
            method: torchdiffeq solver name. NOTE(review): "rk4" is a
                fixed-step method, so atol/rtol below presumably have no
                effect unless an adaptive method is passed — confirm against
                the torchdiffeq version in use.

        Returns:
            (B, G) predicted perturbed expression at t=1, clamped to >= 0.
        """
        B, G = source.shape
        device = source.device
        if gene_ids.dim() == 1:
            gene_ids = gene_ids.unsqueeze(0).expand(B, -1)
        # Start from clean source (no augmentation noise at inference)
        x_0 = source.clone()

        def ode_func(t_scalar, x):
            # torchdiffeq supplies a 0-dim time tensor; the model call below
            # receives a per-sample time vector of length B instead.
            t_batch = torch.full((B,), t_scalar.item(), device=device)
            pred_v = self.model(
                gene_ids, x, t_batch, source,
                perturbation_id, gene_ids, mode="predict_y",
            )
            return pred_v

        t_span = torch.linspace(0, 1, steps, device=device)
        trajectory = torchdiffeq.odeint(
            ode_func, x_0, t_span,
            method=method, atol=1e-4, rtol=1e-4,
        )
        # Keep only the final state; clamp because expression values are
        # non-negative by construction.
        return torch.clamp(trajectory[-1], min=0)