| """ |
| SparseDeltaCache β Returns sparse delta triplets (indices, values) for SVD projection. |
| |
| Each gene row's delta attention is computed across ALL G_full=5035 columns, |
| then per-row top-K sparsification selects the K most important interactions. |
| The SVD projection (delta @ W) happens on GPU, not here. |
| |
| Multi-process safe: each DataLoader worker lazily opens its own HDF5 handle. |
| |
| HDF5 layout (from precompute_sparse_attn.py): |
| /attn_values (N, G_full, K) float16 β top-K attention values per row |
| /attn_indices (N, G_full, K) int16 β column indices in G_full space |
| /cell_names (N,) string |
| /valid_gene_mask (G_full,) bool |
| """ |
|
|
| import os |
| import h5py |
| import numpy as np |
| import torch |
|
|
|
|
| def _read_sparse_batch(h5_values, h5_indices, name_to_idx, |
| src_cell_names, tgt_cell_names, gene_idx_np=None): |
| """ |
| Shared HDF5 reading logic for sparse caches. |
| |
| Returns: |
| src_vals, src_idxs, tgt_vals, tgt_idxs: numpy arrays (B, G_sub, K) |
| """ |
| seen = {} |
| unique_names = [] |
| for n in src_cell_names + tgt_cell_names: |
| if n not in seen: |
| seen[n] = len(unique_names) |
| unique_names.append(n) |
|
|
| unique_h5_idx = [name_to_idx[n] for n in unique_names] |
| sorted_order = np.argsort(unique_h5_idx) |
| sorted_h5_idx = [unique_h5_idx[i] for i in sorted_order] |
|
|
| raw_vals = h5_values[sorted_h5_idx] |
| raw_idxs = h5_indices[sorted_h5_idx] |
|
|
| unsort = np.argsort(sorted_order) |
| raw_vals = raw_vals[unsort] |
| raw_idxs = raw_idxs[unsort] |
|
|
| if gene_idx_np is not None: |
| raw_vals = raw_vals[:, gene_idx_np, :] |
| raw_idxs = raw_idxs[:, gene_idx_np, :] |
|
|
| src_map = [seen[n] for n in src_cell_names] |
| tgt_map = [seen[n] for n in tgt_cell_names] |
| return raw_vals[src_map], raw_idxs[src_map], raw_vals[tgt_map], raw_idxs[tgt_map] |
|
|
|
|
class SparseDeltaCache:
    """
    Returns sparse delta triplets for GPU-side SVD projection.

    Lookup flow:
        1. Read src/tgt sparse attention: (G_full, K_sparse) values + indices
        2. Select gene subset rows
        3. Scatter to dense: (B, G_sub, G_full) — chunked to avoid OOM
        4. Delta = tgt_dense - src_dense (full G_full columns, NOT G_sub)
        5. Per-row top-K on G_full columns
        6. Return (delta_values, delta_indices) sparse triplets
    """

    def __init__(self, h5_path, delta_top_k=30):
        """
        Args:
            h5_path: HDF5 file produced by precompute_sparse_attn.py
            delta_top_k: number of delta entries kept per gene row
        """
        self.h5_path = h5_path
        self.delta_top_k = delta_top_k

        # Read metadata with a short-lived handle; the handle used for bulk
        # reads is opened lazily per process in _ensure_h5_open().
        with h5py.File(h5_path, "r") as h5:
            self.G_full = h5["attn_values"].shape[1]
            self.K_sparse = h5["attn_values"].shape[2]
            cell_names = h5["cell_names"].asstr()[:]
            self.name_to_idx = {name: i for i, name in enumerate(cell_names)}
            if "valid_gene_mask" in h5:
                self.valid_gene_mask = h5["valid_gene_mask"][:].astype(bool)
            else:
                self.valid_gene_mask = np.ones(self.G_full, dtype=bool)

        # delta_indices are returned as int16 (see lookup_delta); beyond
        # 32767 columns the cast would wrap silently, so fail loudly here.
        if self.G_full > np.iinfo(np.int16).max:
            raise ValueError(
                f"G_full={self.G_full} does not fit in int16 delta_indices")

        # Per-process lazy HDF5 state (see _ensure_h5_open).
        self._h5 = None
        self._attn_values = None
        self._attn_indices = None
        self._pid = None

        print(f" SparseDeltaCache: {len(self.name_to_idx)} cells, "
              f"G_full={self.G_full}, K_sparse={self.K_sparse}, delta_topk={self.delta_top_k}")
        print(f" valid genes: {self.valid_gene_mask.sum()}/{self.G_full}")

    def _ensure_h5_open(self):
        """Ensure the *current* process has its own HDF5 file handle.

        HDF5 handles are not fork-safe: a handle inherited by a forked
        DataLoader worker must not be used — and must not be close()d
        either, since closing it in the child can corrupt the parent's
        open-file state. A stale (foreign-pid) handle is therefore just
        dropped, and a fresh handle is opened in this process.
        """
        pid = os.getpid()
        if self._h5 is None or self._pid != pid:
            if self._h5 is not None and self._pid == pid:
                # Re-opening within the owning process: closing is safe.
                try:
                    self._h5.close()
                except Exception:
                    pass
            self._h5 = h5py.File(self.h5_path, "r")
            self._attn_values = self._h5["attn_values"]
            self._attn_indices = self._h5["attn_indices"]
            self._pid = pid

    def get_missing_gene_mask(self, gene_indices=None):
        """
        Return missing gene mask (True = missing/invalid).
        Pure numpy operation — no HDF5 I/O needed.

        Args:
            gene_indices: optional (G_sub,) tensor selecting a gene subset.

        Returns:
            bool tensor over G_full (or the selected subset).
        """
        mask = torch.from_numpy(~self.valid_gene_mask)
        if gene_indices is not None:
            return mask[gene_indices.cpu()]
        return mask

    def lookup_delta(self, src_cell_names, tgt_cell_names, gene_indices, device=None):
        """
        Compute sparse delta attention triplets for SVD projection.

        Args:
            src_cell_names: list of str, control cell identifiers
            tgt_cell_names: list of str, perturbation cell identifiers
            gene_indices: (G_sub,) tensor, gene subset row indices
            device: target torch device (usually CPU for DataLoader workers)

        Returns:
            delta_values: (B, G_sub, delta_topk) float32 — top-K delta values per row
            delta_indices: (B, G_sub, delta_topk) int16 — column indices in G_full space
        """
        self._ensure_h5_open()

        if device is None:
            device = torch.device("cpu")

        B = len(src_cell_names)
        gene_idx_np = gene_indices.cpu().numpy()
        G_sub = len(gene_idx_np)
        K = self.delta_top_k

        # One deduplicated HDF5 read covering both src and tgt cells.
        sv_np, si_np, tv_np, ti_np = _read_sparse_batch(
            self._attn_values, self._attn_indices, self.name_to_idx,
            src_cell_names, tgt_cell_names, gene_idx_np)

        # float16 -> float32 for arithmetic; int16 -> int64 for scatter_/gather.
        src_vals = torch.from_numpy(sv_np.astype(np.float32)).to(device)
        src_idxs = torch.from_numpy(si_np.astype(np.int64)).to(device)
        tgt_vals = torch.from_numpy(tv_np.astype(np.float32)).to(device)
        tgt_idxs = torch.from_numpy(ti_np.astype(np.int64)).to(device)

        out_values = torch.zeros(B, G_sub, K, device=device)
        out_indices = torch.zeros(B, G_sub, K, dtype=torch.int16, device=device)

        # Chunk over gene rows so the dense (B, chunk, G_full) scratch
        # buffers stay small enough to avoid OOM.
        chunk_size = 100
        for c_start in range(0, G_sub, chunk_size):
            c_end = min(c_start + chunk_size, G_sub)

            sv = src_vals[:, c_start:c_end, :]
            si = src_idxs[:, c_start:c_end, :]
            tv = tgt_vals[:, c_start:c_end, :]
            ti = tgt_idxs[:, c_start:c_end, :]
            c_len = c_end - c_start

            # Scatter sparse (index, value) pairs back to dense G_full columns.
            # NOTE(review): assumes stored indices are non-negative (padding
            # index 0 with value 0 is harmless) — confirm against precompute.
            src_dense = torch.zeros(B, c_len, self.G_full, device=device)
            tgt_dense = torch.zeros(B, c_len, self.G_full, device=device)
            src_dense.scatter_(-1, si, sv)
            tgt_dense.scatter_(-1, ti, tv)

            # Delta over the FULL column space, then re-sparsify by magnitude.
            delta = tgt_dense - src_dense
            _, topk_idx = delta.abs().topk(K, dim=-1)
            topk_vals = delta.gather(-1, topk_idx)

            out_values[:, c_start:c_end, :] = topk_vals
            # Safe cast: __init__ guarantees G_full <= int16 max.
            out_indices[:, c_start:c_end, :] = topk_idx.short()

        return out_values, out_indices

    def close(self):
        """Release this process's HDF5 handle and dataset references."""
        h5 = getattr(self, "_h5", None)
        if h5 is not None:
            # Only close a handle this process opened itself; an inherited
            # (forked) handle is just dropped — closing it is not fork-safe.
            if getattr(self, "_pid", None) == os.getpid():
                try:
                    h5.close()
                except Exception:
                    pass
        self._h5 = None
        self._attn_values = None
        self._attn_indices = None

    def __del__(self):
        # Best-effort cleanup; tolerate partially-constructed instances
        # (e.g. __init__ raised before the lazy-handle attrs were set).
        try:
            self.close()
        except Exception:
            pass
|
|