ethan1115 committed on
Commit
dcce49b
·
verified ·
1 Parent(s): a1e9dc3

Upload folder using huggingface_hub

Browse files
train/CCFM/pca_emb/_bootstrap_scdfm.py ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""
Bootstrap scDFM imports by creating missing __init__.py files and loading
its modules under a 'scdfm_src' prefix in sys.modules.

This module MUST be imported before any CCFM src imports.
"""

import sys
import os
import types  # NOTE(review): appears unused in this module — confirm before removing

# Absolute path to the sibling scDFM checkout, resolved relative to this file:
# <this file's dir>/../../transfer/code/scDFM
_SCDFM_ROOT = os.path.normpath(
    os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..", "transfer", "code", "scDFM")
)

# Directories in scDFM that need __init__.py to be proper packages
_DIRS_NEEDING_INIT = [
    "src",
    "src/models",
    "src/models/origin",
    "src/data_process",
    "src/tokenizer",
    "src/script",
    "src/models/perturbation",
]
26
+
27
+
28
def _ensure_init_files():
    """Create an ``__init__.py`` in every scDFM directory that lacks one.

    Iterates over ``_DIRS_NEEDING_INIT`` (relative to ``_SCDFM_ROOT``) and
    writes a marker file wherever none exists, so the directories can be
    imported as regular packages.

    Returns:
        list[str]: paths of the ``__init__.py`` files that were newly created.
    """
    new_paths = []
    for rel_dir in _DIRS_NEEDING_INIT:
        candidate = os.path.join(_SCDFM_ROOT, rel_dir, "__init__.py")
        if os.path.exists(candidate):
            continue
        with open(candidate, "w") as handle:
            handle.write("# Auto-created by CCFM bootstrap\n")
        new_paths.append(candidate)
    return new_paths
38
+
39
+
40
def bootstrap():
    """Load scDFM's src package as 'scdfm_src' in sys.modules.

    scDFM imports itself as a top-level package literally named ``src``,
    which collides with CCFM's own ``src`` package.  This function
    temporarily removes CCFM's ``src*`` entries from ``sys.modules``,
    imports the needed scDFM modules with scDFM's root on ``sys.path``,
    re-registers those modules under a ``scdfm_`` prefix (``src.models`` ->
    ``scdfm_src.models``), and finally restores CCFM's modules.

    Idempotent: returns immediately if 'scdfm_src' is already registered.
    """
    if "scdfm_src" in sys.modules:
        return  # Already bootstrapped

    # Create missing __init__.py files
    _ensure_init_files()

    # Save CCFM's src modules so they can be restored after the swap
    saved = {}
    for key in list(sys.modules.keys()):
        if key == "src" or key.startswith("src."):
            saved[key] = sys.modules.pop(key)

    # Add scDFM root to path (front) so 'import src' resolves to scDFM's package
    sys.path.insert(0, _SCDFM_ROOT)

    try:
        # Import scDFM modules (their relative imports work now)
        import src as scdfm_src_pkg
        import src.models
        import src.models.origin
        import src.models.origin.blocks
        import src.models.origin.layers
        import src.models.origin.model
        import src.flow_matching
        import src.flow_matching.path
        import src.flow_matching.path.path
        import src.flow_matching.path.path_sample
        import src.flow_matching.path.affine
        import src.flow_matching.path.scheduler
        import src.flow_matching.path.scheduler.scheduler
        # Skip src.flow_matching.ot (requires 'ot' package, not needed for CCFM)
        import src.utils
        import src.utils.utils
        import src.tokenizer
        import src.tokenizer.gene_tokenizer
        # Skip src.data_process (has heavy deps like bs4, rdkit)
        # We handle data loading separately in CCFM

        # Re-register all under scdfm_src.* prefix
        # ("src" -> "scdfm_src", "src.models" -> "scdfm_src.models", ...)
        for key in list(sys.modules.keys()):
            if key == "src" or key.startswith("src."):
                new_key = "scdfm_" + key
                sys.modules[new_key] = sys.modules[key]

    finally:
        # Remove scDFM's src.* entries — runs even if an import above failed,
        # so CCFM's 'src' package remains importable afterwards.
        for key in list(sys.modules.keys()):
            if (key == "src" or key.startswith("src.")) and not key.startswith("scdfm_"):
                del sys.modules[key]

        # Restore CCFM's src modules
        for key, mod in saved.items():
            sys.modules[key] = mod

        # Remove scDFM from front of path (remove() drops the first occurrence)
        if _SCDFM_ROOT in sys.path:
            sys.path.remove(_SCDFM_ROOT)


# Run at import time: merely importing this module performs the aliasing.
bootstrap()
train/CCFM/pca_emb/config/config_cascaded.py ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dataclasses import dataclass
2
+ import os
3
+
4
+
5
@dataclass
class CascadedFlowConfig:
    """Configuration for the cascaded flow-matching run (SVD/PCA latent).

    Field groups mirror the scDFM FlowConfig baseline, extended with the
    SVD-latent, sparse-attention-cache, EMA, time-step-sampling, warmup,
    and inference options used here.

    Fix: ``exp_name`` was previously declared *after* ``__post_init__``,
    which read as dead code.  It is now declared with the other fields,
    still as the LAST field, so the generated ``__init__`` parameter order
    is unchanged for existing callers.
    """

    # === Base (same as scDFM FlowConfig) ===
    model_type: str = "cascaded"
    batch_size: int = 96
    ntoken: int = 512
    d_model: int = 128
    nhead: int = 8
    nlayers: int = 4
    d_hid: int = 512
    lr: float = 5e-5
    steps: int = 200000
    eta_min: float = 1e-6
    devices: str = "1"
    test_only: bool = False

    data_name: str = "norman"
    perturbation_function: str = "crisper"
    noise_type: str = "Gaussian"
    poisson_alpha: float = 0.8
    poisson_target_sum: int = -1

    print_every: int = 5000
    mode: str = "predict_y"
    result_path: str = "/home/hp250092/ku50001222/qian/aivc/lfj/GRN/result/att_svd"
    fusion_method: str = "differential_perceiver"
    infer_top_gene: int = 1000
    n_top_genes: int = 5000
    checkpoint_path: str = ""
    gamma: float = 0.5
    split_method: str = "additive"
    use_mmd_loss: bool = True
    fold: int = 1
    use_negative_edge: bool = True
    topk: int = 30

    # === SVD latent (replaces bilinear_head_dim from grn_att_only) ===
    latent_dim: int = 128
    dh_depth: int = 2
    latent_weight: float = 1.0
    choose_latent_p: float = 0.4
    target_std: float = 1.0
    use_variance_weight: bool = False

    # === SVD dictionary ===
    svd_dict_path: str = ""  # path to svd_dict_*.pt from compute_svd_dict.py

    # === Sparse attention cache ===
    sparse_cache_path: str = "/home/hp250092/ku50001222/qian/aivc/lfj/GRN/grn_ccfm/cache/norman_attn_L11_sparse.h5"
    delta_topk: int = 30  # per-row top-K for delta attention sparsification (on full 5035 cols)

    # === Cascaded noise (LatentForcing dino_first_cascaded_noised) ===
    noise_beta: float = 0.25

    # === EMA ===
    ema_decay: float = 0.9999

    # === Logit-normal time-step sampling ===
    t_sample_mode: str = "logit_normal"
    t_expr_mean: float = 0.0
    t_expr_std: float = 1.0
    t_latent_mean: float = 0.0
    t_latent_std: float = 1.0

    # === LR warmup ===
    warmup_steps: int = 2000

    # === Inference ===
    latent_steps: int = 20
    expr_steps: int = 20
    ode_method: str = "rk4"
    eval_batch_size: int = 8

    # Experiment name override (if set, used instead of auto-generated name).
    # Kept as the last declared field so __init__'s signature is unchanged.
    exp_name: str = ""

    def __post_init__(self):
        # Dataset-specific HVG counts override whatever n_top_genes was passed.
        if self.data_name == "norman_umi_go_filtered":
            self.n_top_genes = 5054
        if self.data_name == "norman":
            self.n_top_genes = 5000

    def make_path(self):
        """Return the run's output directory under ``result_path``.

        An explicit ``exp_name`` wins; otherwise a descriptive directory name
        is assembled from the key hyper-parameters.
        """
        if self.exp_name:
            return os.path.join(self.result_path, self.exp_name)
        t_mode = "ln" if self.t_sample_mode == "logit_normal" else "uni"
        exp_name = (
            f"svd-{self.data_name}-f{self.fold}"
            f"-topk{self.topk}-neg{self.use_negative_edge}"
            f"-d{self.d_model}-ld{self.latent_dim}-dh{self.dh_depth}"
            f"-lr{self.lr}"
            f"-lw{self.latent_weight}-lp{self.choose_latent_p}"
            f"-dtk{self.delta_topk}"
            f"-ema{self.ema_decay}-{t_mode}-wu{self.warmup_steps}"
            f"-{self.ode_method}"
        )
        return os.path.join(self.result_path, exp_name)
train/CCFM/pca_emb/run_pca_emb.sh ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
# Batch training job for the 1D PCA-emb cascaded flow model.
# PJM directives (Fujitsu job scheduler): 1 GPU, 48 h wall clock,
# stdout+stderr merged into logs/pca_emb_<jobid>.out.
#PJM -L rscgrp=b-batch
#PJM -L gpu=1
#PJM -L elapse=48:00:00
#PJM -N pca_emb
#PJM -j
#PJM -o logs/pca_emb_%j.out

# Toolchain modules for the CUDA/Python stack.
module load cuda/12.2.2
module load cudnn/8.9.7
module load gcc-toolset/12

source /home/pj24002027/ku50002536/Takoai/lfj/lfj/stack_env/bin/activate

cd /home/pj24002027/ku50002536/Takoai/lfj/lfj/GRN/pca_emb

# Cap CUDA allocator split size to reduce memory fragmentation.
export PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:256

echo "=========================================="
echo "Job ID: $PJM_JOBID"
echo "Job Name: $PJM_JOBNAME"
echo "Start: $(date)"
echo "Node: $(hostname)"
echo "GPU: $(nvidia-smi --query-gpu=name,memory.total --format=csv,noheader 2>/dev/null || echo 'N/A')"
echo "=========================================="

# --- PCA-emb 1D latent: Δ_attn @ gene_emb -> PCA(1) -> flow matching ---
# Key changes vs svd_baseline:
#   --latent-dim 1 (128 -> 1)
#   --svd-dict-path now points to the PCA-emb dict
#   --result-path writes to result/pca_emb

accelerate launch --num_processes=1 scripts/run_cascaded.py \
    --data-name norman \
    --d-model 128 \
    --d-hid 512 \
    --nhead 8 \
    --nlayers 4 \
    --batch-size 96 \
    --lr 5e-5 \
    --steps 200000 \
    --fusion-method differential_perceiver \
    --perturbation-function crisper \
    --noise-type Gaussian \
    --infer-top-gene 1000 \
    --n-top-genes 5000 \
    --use-mmd-loss \
    --gamma 0.5 \
    --split-method additive \
    --fold 1 \
    --latent-dim 1 \
    --dh-depth 2 \
    --latent-weight 1.0 \
    --choose-latent-p 0.4 \
    --print-every 5000 \
    --topk 30 \
    --use-negative-edge \
    --delta-topk 30 \
    --ema-decay 0.9999 \
    --t-sample-mode logit_normal \
    --t-expr-mean 0.0 \
    --t-expr-std 1.0 \
    --t-latent-mean 0.0 \
    --t-latent-std 1.0 \
    --warmup-steps 2000 \
    --ode-method rk4 \
    --eval-batch-size 8 \
    --sparse-cache-path /home/pj24002027/ku50002536/Takoai/lfj/lfj/GRN/grn_ccfm/cache/norman_attn_L11_sparse.h5 \
    --svd-dict-path /home/pj24002027/ku50002536/Takoai/lfj/lfj/GRN/pca_emb/cache/pca_emb_dict_norman_f1.pt \
    --exp-name pca_emb_1d \
    --result-path /home/pj24002027/ku50002536/Takoai/lfj/lfj/GRN/result/pca_emb

echo "=========================================="
echo "Finished: $(date)"
echo "=========================================="
train/CCFM/pca_emb/run_pca_emb_dict.sh ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
# Precompute the PCA-emb projection dictionary consumed by run_pca_emb.sh
# (its --svd-dict-path argument).  Much cheaper than training: 1 GPU, 4 h.
#PJM -L rscgrp=b-batch
#PJM -L gpu=1
#PJM -L elapse=4:00:00
#PJM -N pca_emb_dict
#PJM -j
#PJM -o logs/pca_emb_dict_%j.out

# Toolchain modules for the CUDA/Python stack.
module load cuda/12.2.2
module load cudnn/8.9.7
module load gcc-toolset/12

source /home/pj24002027/ku50002536/Takoai/lfj/lfj/stack_env/bin/activate

cd /home/pj24002027/ku50002536/Takoai/lfj/lfj/GRN/pca_emb

echo "=========================================="
echo "Job ID: $PJM_JOBID"
echo "Job Name: $PJM_JOBNAME"
echo "Start: $(date)"
echo "Node: $(hostname)"
echo "=========================================="

# Data/split parameters here must match the training job in run_pca_emb.sh
# (fold, topk, n-top-genes, delta-topk, sparse cache).
python scripts/compute_pca_emb_dict.py \
    --data-name norman \
    --fold 1 \
    --split-method additive \
    --topk 30 \
    --use-negative-edge \
    --n-top-genes 5000 \
    --sparse-cache-path /home/pj24002027/ku50002536/Takoai/lfj/lfj/GRN/grn_ccfm/cache/norman_attn_L11_sparse.h5 \
    --scgpt-model-dir /home/pj24002027/ku50002536/Takoai/lfj/lfj/transfer/data/scGPT_pretrained \
    --n-pairs-per-condition 50 \
    --delta-topk 30 \
    --rows-per-pair 500 \
    --output-path cache/pca_emb_dict_norman_f1.pt

echo "=========================================="
echo "Finished: $(date)"
echo "=========================================="
train/CCFM/pca_emb/scripts/compute_pca_emb_dict.py ADDED
@@ -0,0 +1,325 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Offline PCA-emb dictionary computation for grn_svd (latent_dim=1).
3
+
4
+ Pipeline:
5
+ 1. Load sparse attention cache + scGPT gene embeddings
6
+ 2. Sample (control, perturbed) pairs, compute sparse delta attention
7
+ 3. Project delta through gene_emb: delta_512d = sparse_delta @ gene_emb
8
+ 4. Center + PCA on 512D features -> first principal component v
9
+ 5. Compute combined weight: w = gene_emb @ v (5035, 1)
10
+ 6. Save as dict compatible with grn_svd format
11
+
12
+ Math: (Δ_attn @ gene_emb) @ v = Δ_attn @ (gene_emb @ v) = Δ_attn @ w
13
+ -> _sparse_project(W=w) gives (B, G, 1), same structure as SVD dict.
14
+
15
+ Usage:
16
+ python scripts/compute_pca_emb_dict.py \
17
+ --data-name norman --fold 1 --split-method additive \
18
+ --topk 30 --use-negative-edge \
19
+ --sparse-cache-path .../norman_attn_L11_sparse.h5 \
20
+ --scgpt-model-dir .../scGPT_pretrained \
21
+ --n-pairs-per-condition 50 \
22
+ --output-path cache/pca_emb_dict_norman_f1.pt
23
+ """
24
+
25
+ import sys
26
+ import os
27
+
28
+ _PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
29
+ sys.path.insert(0, _PROJECT_ROOT)
30
+
31
+ import _bootstrap_scdfm # noqa: F401
32
+
33
+ import argparse
34
+ import json
35
+ import numpy as np
36
+ import torch
37
+ import h5py
38
+ from sklearn.decomposition import PCA
39
+
40
+ from src.data.data import get_data_classes
41
+ from src.data.sparse_raw_cache import _read_sparse_batch
42
+
43
+ _REPO_ROOT = os.path.normpath(os.path.join(_PROJECT_ROOT, "..", "..", "transfer", "code"))
44
+
45
+
46
def load_scgpt_gene_embeddings(scgpt_model_dir, hvg_gene_names):
    """
    Load scGPT gene embeddings for HVG genes without importing the full scGPT package.

    Args:
        scgpt_model_dir: directory containing ``vocab.json``, ``args.json``,
            and ``best_model.pt`` (scGPT pretrained checkpoint layout).
        hvg_gene_names: gene symbols, in HVG order, to look up in the vocab.

    Returns:
        gene_emb: (G_full, 512) float32 tensor, zero for genes not in vocab
        valid_mask: (G_full,) bool — True for genes in scGPT vocab

    Raises:
        RuntimeError: if no embedding-like tensor is found in the checkpoint.
    """
    # Load vocab (maps gene symbol -> token id)
    vocab_path = os.path.join(scgpt_model_dir, "vocab.json")
    with open(vocab_path, "r") as f:
        scgpt_vocab = json.load(f)

    # Map HVG genes to scGPT token IDs; -1 marks genes absent from the vocab
    hvg_to_scgpt = []
    for gene in hvg_gene_names:
        hvg_to_scgpt.append(scgpt_vocab.get(gene, -1))
    hvg_to_scgpt = torch.tensor(hvg_to_scgpt, dtype=torch.long)
    valid_mask = hvg_to_scgpt >= 0

    # Load model args to get d_model
    with open(os.path.join(scgpt_model_dir, "args.json"), "r") as f:
        model_args = json.load(f)
    d_model = model_args.get("embsize", 512)

    # Load checkpoint — extract only embedding weights.
    # NOTE(review): weights_only=False unpickles arbitrary objects; only use
    # with a trusted checkpoint file.
    ckpt_path = os.path.join(scgpt_model_dir, "best_model.pt")
    ckpt = torch.load(ckpt_path, map_location="cpu", weights_only=False)
    # scGPT stores encoder as nn.Embedding; key is "encoder.embedding.weight".
    # Heuristic: take the first 2-D "encoder*weight" tensor whose second dim
    # equals d_model — assumes key iteration order puts the embedding first;
    # TODO confirm against the actual checkpoint.
    emb_weight = None
    for key in ckpt:
        if "encoder" in key and "weight" in key:
            if ckpt[key].dim() == 2 and ckpt[key].shape[1] == d_model:
                emb_weight = ckpt[key]
                print(f" Found embedding weights: key='{key}', shape={emb_weight.shape}")
                break
    if emb_weight is None:
        raise RuntimeError(f"Cannot find encoder embedding weights in {ckpt_path}")

    # Build gene_emb: (G_full, D), zero rows for genes missing from the vocab
    G_full = len(hvg_gene_names)
    gene_emb = torch.zeros(G_full, d_model)
    valid_ids = hvg_to_scgpt[valid_mask]
    gene_emb[valid_mask] = emb_weight[valid_ids].float()

    n_valid = valid_mask.sum().item()
    n_missing = G_full - n_valid
    print(f" Gene embeddings: {n_valid}/{G_full} valid, {n_missing} missing (zero)")

    return gene_emb, valid_mask.numpy()
96
+
97
+
98
def parse_args():
    """Parse CLI arguments for the PCA-emb dictionary computation.

    Returns:
        argparse.Namespace with the parsed options.

    Fix: ``--use-negative-edge`` previously combined ``action="store_true"``
    with ``default=True``, so the flag was a no-op and the option could never
    be disabled.  ``BooleanOptionalAction`` keeps the same default and still
    accepts ``--use-negative-edge`` (as the shell scripts pass), while also
    accepting ``--no-use-negative-edge`` to turn it off.
    """
    p = argparse.ArgumentParser(description="Compute PCA-emb dictionary for grn_svd (1D)")
    p.add_argument("--data-name", type=str, default="norman")
    p.add_argument("--sparse-cache-path", type=str, required=True)
    p.add_argument("--scgpt-model-dir", type=str, required=True,
                   help="Path to scGPT pretrained model dir (contains vocab.json, args.json, best_model.pt)")
    p.add_argument("--fold", type=int, default=1)
    p.add_argument("--split-method", type=str, default="additive")
    p.add_argument("--topk", type=int, default=30, help="GRN graph topk for scDFM process_data")
    p.add_argument("--use-negative-edge", action=argparse.BooleanOptionalAction, default=True)
    p.add_argument("--n-top-genes", type=int, default=5000)
    p.add_argument("--n-pairs-per-condition", type=int, default=50)
    p.add_argument("--delta-topk", type=int, default=30, help="Per-row top-K on delta")
    p.add_argument("--rows-per-pair", type=int, default=500,
                   help="Gene rows to sample per pair (0 = all)")
    p.add_argument("--output-path", type=str, required=True)
    return p.parse_args()
115
+
116
+
117
def main():
    """Compute and save the 1-D PCA-emb projection dictionary.

    Pipeline (see module docstring): sample (control, perturbed) cell pairs
    from the training split, build sparse delta-attention rows from the HDF5
    cache, project them through scGPT gene embeddings, fit a 1-component PCA,
    fold the PC into a single per-gene weight vector ``W``, rescale it so the
    projected latent has unit std, and save everything to ``args.output_path``.
    """
    args = parse_args()

    # === 1. Load scDFM data to get train/test split ===
    Data, PerturbationDataset, TrainSampler, TestDataset = get_data_classes()

    scdfm_data_path = os.path.join(_REPO_ROOT, "scDFM", "data")
    data_manager = Data(scdfm_data_path)
    data_manager.load_data(args.data_name)

    # Use gene symbols as var_names when the AnnData ships Ensembl IDs.
    if "gene_name" in data_manager.adata.var.columns and data_manager.adata.var_names[0].startswith("ENSG"):
        data_manager.adata.var_names = data_manager.adata.var["gene_name"].values
        data_manager.adata.var_names_make_unique()

    data_manager.process_data(
        n_top_genes=args.n_top_genes,
        split_method=args.split_method,
        fold=args.fold,
        use_negative_edge=args.use_negative_edge,
        k=args.topk,
    )
    # batch_size here is irrelevant — only the sampler's split metadata is used.
    train_sampler, _, _ = data_manager.load_flow_data(batch_size=32)

    train_conditions = train_sampler._perturbation_covariates
    adata = train_sampler.adata
    # Identify control cells; fall back to the 'condition' column when the
    # combined covariate label is absent.
    ctrl_mask = adata.obs["perturbation_covariates"] == "control+control"
    if ctrl_mask.sum() == 0:
        ctrl_mask = adata.obs["condition"].isin(["control", "ctrl"])
    ctrl_cell_ids = list(adata.obs_names[ctrl_mask])

    # Map each training condition to its member cell IDs.
    cond_to_cells = {}
    for cond in train_conditions:
        cond_mask = adata.obs["perturbation_covariates"] == cond
        cond_to_cells[cond] = list(adata.obs_names[cond_mask])

    print(f"Training conditions: {len(train_conditions)}")
    print(f"Control cells: {len(ctrl_cell_ids)}")

    # === 2. Load scGPT gene embeddings ===
    hvg_gene_names = list(data_manager.adata.var_names)
    print(f"\nLoading scGPT gene embeddings...")
    gene_emb, valid_gene_mask = load_scgpt_gene_embeddings(
        args.scgpt_model_dir, hvg_gene_names
    )
    D = gene_emb.shape[1]  # 512
    print(f" gene_emb shape: {gene_emb.shape}, D={D}")

    # === 3. Open HDF5 cache ===
    h5 = h5py.File(args.sparse_cache_path, "r")
    h5_values = h5["attn_values"]
    h5_indices = h5["attn_indices"]
    cell_names_all = h5["cell_names"].asstr()[:]
    name_to_idx = {name: i for i, name in enumerate(cell_names_all)}
    G_full = h5_values.shape[1]
    K_sparse = h5_values.shape[2]

    print(f"\nCache: {len(name_to_idx)} cells, G_full={G_full}, K_sparse={K_sparse}")

    # Only cells present in the attention cache can be paired.
    ctrl_in_cache = [c for c in ctrl_cell_ids if c in name_to_idx]
    print(f"Control cells in cache: {len(ctrl_in_cache)}")

    # === 4. Stratified sampling: collect delta_512d = sparse_delta @ gene_emb ===
    all_delta_512d = []  # list of (chunk_size, D) tensors
    delta_topk = args.delta_topk
    rows_per_pair = args.rows_per_pair if args.rows_per_pair > 0 else G_full
    rng = np.random.RandomState(42)  # fixed seed for reproducible sampling

    for cond_idx, cond in enumerate(train_conditions):
        pert_cell_ids = cond_to_cells.get(cond, [])
        pert_cell_ids = [c for c in pert_cell_ids if c in name_to_idx]
        if not pert_cell_ids or not ctrl_in_cache:
            continue

        # Random (control, perturbed) pairings, with replacement.
        n_pairs = min(args.n_pairs_per_condition, len(pert_cell_ids), len(ctrl_in_cache))
        src_sample = [ctrl_in_cache[i] for i in rng.choice(len(ctrl_in_cache), n_pairs, replace=True)]
        tgt_sample = [pert_cell_ids[i] for i in rng.choice(len(pert_cell_ids), n_pairs, replace=True)]

        # Optionally subsample gene rows to bound memory/compute per pair.
        if rows_per_pair < G_full:
            gene_idx = np.sort(rng.choice(G_full, rows_per_pair, replace=False))
        else:
            gene_idx = np.arange(G_full)

        sv, si, tv, ti = _read_sparse_batch(
            h5_values, h5_indices, name_to_idx,
            src_sample, tgt_sample, gene_idx)

        for p in range(n_pairs):
            # Process 100 gene rows at a time to cap the dense scratch size.
            for chunk_start in range(0, len(gene_idx), 100):
                chunk_end = min(chunk_start + 100, len(gene_idx))

                s_v = torch.from_numpy(sv[p, chunk_start:chunk_end].astype(np.float32))
                s_i = torch.from_numpy(si[p, chunk_start:chunk_end].astype(np.int64))
                t_v = torch.from_numpy(tv[p, chunk_start:chunk_end].astype(np.float32))
                t_i = torch.from_numpy(ti[p, chunk_start:chunk_end].astype(np.int64))

                c_len = chunk_end - chunk_start

                # Scatter to dense
                src_dense = torch.zeros(c_len, G_full)
                tgt_dense = torch.zeros(c_len, G_full)
                src_dense.scatter_(-1, s_i, s_v)
                tgt_dense.scatter_(-1, t_i, t_v)

                delta = tgt_dense - src_dense  # (c_len, G_full)

                # Per-row top-K (same sparsification as SVD dict)
                _, topk_idx = delta.abs().topk(delta_topk, dim=-1)
                topk_vals = delta.gather(-1, topk_idx)  # (c_len, delta_topk)

                # Project through gene_emb: delta_512d = sparse_delta @ gene_emb
                # Equivalent to: sum_k topk_vals[r,k] * gene_emb[topk_idx[r,k]]
                delta_512d = torch.zeros(c_len, D)
                for k in range(delta_topk):
                    col_idx = topk_idx[:, k]  # (c_len,)
                    val = topk_vals[:, k:k+1]  # (c_len, 1)
                    emb_k = gene_emb[col_idx]  # (c_len, D)
                    delta_512d = delta_512d + val * emb_k  # (c_len, D)

                all_delta_512d.append(delta_512d)

        if (cond_idx + 1) % 10 == 0:
            n_rows = sum(t.shape[0] for t in all_delta_512d)
            print(f" Processed {cond_idx + 1}/{len(train_conditions)} conditions, {n_rows} rows")

    h5.close()

    # === 5. Concatenate and fit PCA ===
    X = torch.cat(all_delta_512d, dim=0).numpy()  # (N_rows, 512)
    print(f"\nTotal delta_512d samples: {X.shape[0]} x {X.shape[1]}")

    print("Fitting PCA (with centering)...")
    pca = PCA(n_components=1, random_state=42)
    pca.fit(X)

    v = pca.components_[0]  # (512,) — first principal component
    explained = pca.explained_variance_ratio_[0]
    print(f" Explained variance ratio: {explained:.4f} ({explained * 100:.1f}%)")
    print(f" PC1 norm: {np.linalg.norm(v):.6f}")
    print(f" Data mean norm: {np.linalg.norm(pca.mean_):.4f}")

    # === 6. Compute combined projection weight: w = gene_emb @ v ===
    # (Δ_attn @ gene_emb) @ v == Δ_attn @ (gene_emb @ v), so a single per-gene
    # weight vector reproduces the PCA projection at training time.
    v_tensor = torch.from_numpy(v.astype(np.float32))  # (512,)
    w = (gene_emb @ v_tensor).unsqueeze(1)  # (G_full, 1)
    print(f"\n w = gene_emb @ v: shape={w.shape}")
    print(f" w stats: mean={w.mean():.6f}, std={w.std():.6f}, "
          f"range=[{w.min():.4f}, {w.max():.4f}]")

    # === 7. Global scalar scaling ===
    # Project all sampled data through w to compute global std
    # z_1d = delta_sparse @ w, but we already have delta_512d, so z_1d = X @ v
    z_1d = torch.from_numpy((X @ v).astype(np.float32))  # (N_rows,)

    z_std = z_1d.std().item()
    global_scale = 1.0 / z_std
    print(f"\n Pre-scaling z_1d stats: mean={z_1d.mean():.4f}, std={z_std:.4f}")

    # Apply scaling to W (same convention as compute_svd_dict.py)
    W_scaled = w * global_scale

    # Verify
    z_scaled = z_1d * global_scale
    print(f" Post-scaling z_1d stats: mean={z_scaled.mean():.4f}, std={z_scaled.std():.4f}")
    print(f" Post-scaling range: [{z_scaled.min():.2f}, {z_scaled.max():.2f}]")

    # Robust scaling if needed: if >1% of latents exceed |5|, shrink so the
    # 99th percentile lands at +/-3.
    extreme_ratio = (z_scaled.abs() > 5.0).float().mean().item()
    print(f" |z| > 5.0: {extreme_ratio:.4%}")

    if extreme_ratio > 0.01:
        q99 = z_scaled.abs().quantile(0.99).item()
        robust_factor = q99 / 3.0
        W_scaled = W_scaled / robust_factor
        global_scale = global_scale / robust_factor
        z_robust = z_1d * global_scale
        print(f" Robust scaling applied: 99th={q99:.2f} -> +/-3.0")
        print(f" After robust: std={z_robust.std():.4f}, "
              f"range=[{z_robust.min():.2f}, {z_robust.max():.2f}]")

    # Zero invalid gene rows (genes absent from the scGPT vocab)
    W_scaled[~torch.from_numpy(valid_gene_mask)] = 0.0

    # === 8. Save (compatible with grn_svd dict format) ===
    os.makedirs(os.path.dirname(args.output_path) or ".", exist_ok=True)
    save_dict = {
        "W": W_scaled,  # (G_full, 1) float32
        "global_scale": global_scale,  # float scalar
        "valid_gene_mask": valid_gene_mask,  # (G_full,) bool
        "explained_variance_ratio": pca.explained_variance_ratio_,  # (1,)
        "singular_values": np.sqrt(pca.explained_variance_),  # (1,)
        "n_components": 1,
        "delta_topk": args.delta_topk,
        "data_name": args.data_name,
        "fold": args.fold,
        "n_pairs_per_condition": args.n_pairs_per_condition,
        "n_rows": X.shape[0],
        # PCA-emb specific metadata
        "pca_component": v,  # (512,) PC direction
        "pca_mean": pca.mean_,  # (512,) centering vector
        "gene_emb_shape": list(gene_emb.shape),  # [5035, 512]
    }
    torch.save(save_dict, args.output_path)

    print(f"\nSaved PCA-emb dictionary to {args.output_path}")
    print(f" W: {W_scaled.shape}, global_scale: {global_scale:.6f}")
    print(f" Explained variance: {explained:.4f}")


if __name__ == "__main__":
    main()
train/CCFM/pca_emb/scripts/run_cascaded.py ADDED
@@ -0,0 +1,437 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Training and evaluation entry point for grn_svd.
3
+
4
+ Uses SVD-projected (B, G, 128) as latent target with SparseDeltaCache.
5
+ SVD dictionary W is loaded as a frozen register_buffer on GPU.
6
+ """
7
+
8
+ import sys
9
+ import os
10
+
11
+ # Set up paths
12
+ _PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
13
+ sys.path.insert(0, _PROJECT_ROOT)
14
+
15
+ # Bootstrap scDFM imports (must happen before any src imports)
16
+ import _bootstrap_scdfm # noqa: F401
17
+
18
+ import copy
19
+ import torch
20
+ import torch.nn as nn
21
+ import tyro
22
+ import tqdm
23
+ import numpy as np
24
+ import pandas as pd
25
+ import anndata as ad
26
+ import scanpy as sc
27
+ from torch.utils.data import DataLoader
28
+ from tqdm import trange
29
+ from accelerate import Accelerator, DistributedDataParallelKwargs
30
+ from torch.optim.lr_scheduler import LinearLR, CosineAnnealingLR, SequentialLR
31
+
32
+ from config.config_cascaded import CascadedFlowConfig as Config
33
+ from src.data.data import get_data_classes, GRNDatasetWrapper
34
+ from src.model.model import CascadedFlowModel
35
+ from src.data.sparse_raw_cache import SparseDeltaCache
36
+ from src.denoiser import CascadedDenoiser
37
+ from src.utils import (
38
+ save_checkpoint,
39
+ load_checkpoint,
40
+ pick_eval_score,
41
+ process_vocab,
42
+ set_requires_grad_for_p_only,
43
+ GeneVocab,
44
+ )
45
+
46
+ from cell_eval import MetricsEvaluator
47
+
48
+ # Resolve scDFM directory paths
49
+ _REPO_ROOT = os.path.normpath(os.path.join(_PROJECT_ROOT, "..", "..", "transfer", "code"))
50
+
51
+
52
@torch.inference_mode()
def test(data_sampler, denoiser, accelerator, config, vocab, data_manager,
         batch_size=8, path_dir="./"):
    """Evaluate: generate predictions and compute cell-eval metrics.

    For each held-out perturbation, samples up to 128 control cells, runs the
    model's ODE-based generation, assembles predicted vs. real AnnData
    objects (control cells included under the "control" label), and — on the
    main process only — writes metrics and h5ad files into ``path_dir``.

    Returns:
        float | None: the first available aggregate metric among
        ("mse", "pearson_delta", "pr_auc"); None on non-main processes or
        when every metric is NaN.
    """
    device = accelerator.device
    gene_ids_test = vocab.encode(list(data_sampler.adata.var_names))
    gene_ids_test = torch.tensor(gene_ids_test, dtype=torch.long, device=device)

    perturbation_name_list = data_sampler._perturbation_covariates
    control_data = data_sampler.get_control_data()
    # condition-id -> perturbation-name lookup for the 'crisper' path below
    inverse_dict = {v: str(k) for k, v in data_manager.perturbation_dict.items()}

    # Seed both prediction and target sets with the control cells so the
    # evaluator has a "control" reference population.
    all_pred_expressions = [control_data["src_cell_data"]]
    obs_perturbation_name_pred = ["control"] * control_data["src_cell_data"].shape[0]
    all_target_expressions = [control_data["src_cell_data"]]
    obs_perturbation_name_real = ["control"] * control_data["src_cell_data"].shape[0]

    print("perturbation_name_list:", len(perturbation_name_list))
    for perturbation_name in perturbation_name_list:
        perturbation_data = data_sampler.get_perturbation_data(perturbation_name)
        target = perturbation_data["tgt_cell_data"]
        perturbation_id = perturbation_data["condition_id"]
        source = control_data["src_cell_data"].to(device)
        perturbation_id = perturbation_id.to(device)

        if config.perturbation_function == "crisper":
            # Re-encode condition ids through the gene vocab so the model
            # receives gene-token ids rather than raw condition indices.
            perturbation_name_crisper = [
                inverse_dict[int(p_id)] for p_id in perturbation_id[0].cpu().numpy()
            ]
            perturbation_id = torch.tensor(
                vocab.encode(perturbation_name_crisper), dtype=torch.long, device=device
            )
            perturbation_id = perturbation_id.repeat(source.shape[0], 1)

        # Shuffle controls and evaluate on a fixed-size subset of 128 cells.
        idx = torch.randperm(source.shape[0])
        source = source[idx]
        N = 128
        source = source[:N]

        pred_expressions = []
        for i in trange(0, N, batch_size, desc=perturbation_name):
            batch_source = source[i : i + batch_size]
            batch_pert_id = perturbation_id[0].repeat(batch_source.shape[0], 1).to(device)

            # Get the underlying model for generation (unwrap DDP if needed)
            model = denoiser.module if hasattr(denoiser, "module") else denoiser

            pred = model.generate(
                batch_source,
                batch_pert_id,
                gene_ids_test,
                latent_steps=config.latent_steps,
                expr_steps=config.expr_steps,
                method=config.ode_method,
            )
            pred_expressions.append(pred)

        pred_expressions = torch.cat(pred_expressions, dim=0).cpu().numpy()
        all_pred_expressions.append(pred_expressions)
        all_target_expressions.append(target)
        obs_perturbation_name_pred.extend([perturbation_name] * pred_expressions.shape[0])
        obs_perturbation_name_real.extend([perturbation_name] * target.shape[0])

    # Build AnnData objects labeled by perturbation for the evaluator.
    all_pred_expressions = np.concatenate(all_pred_expressions, axis=0)
    all_target_expressions = np.concatenate(all_target_expressions, axis=0)
    obs_pred = pd.DataFrame({"perturbation": obs_perturbation_name_pred})
    obs_real = pd.DataFrame({"perturbation": obs_perturbation_name_real})
    pred_adata = ad.AnnData(X=all_pred_expressions, obs=obs_pred)
    real_adata = ad.AnnData(X=all_target_expressions, obs=obs_real)

    eval_score = None
    if accelerator.is_main_process:
        evaluator = MetricsEvaluator(
            adata_pred=pred_adata,
            adata_real=real_adata,
            control_pert="control",
            pert_col="perturbation",
            num_threads=32,
        )
        results, agg_results = evaluator.compute()
        results.write_csv(os.path.join(path_dir, "results.csv"))
        agg_results.write_csv(os.path.join(path_dir, "agg_results.csv"))
        pred_adata.write_h5ad(os.path.join(path_dir, "pred.h5ad"))
        real_adata.write_h5ad(os.path.join(path_dir, "real.h5ad"))
        df = agg_results.to_pandas()
        # Re-initialized defensively; pick the first metric that has a value.
        eval_score = None
        for _m in ("mse", "pearson_delta", "pr_auc"):
            if _m in df.columns and df[_m].notna().any():
                eval_score = float(df[_m].iloc[0])
                break
        if eval_score is not None:
            print(f"Current evaluation score: {eval_score:.4f}")
        else:
            print("Warning: no valid eval metric available (NaN in predictions)")

    return eval_score
148
+
149
+
150
if __name__ == "__main__":
    # Parse the dataclass-based Config from the command line.
    config = tyro.cli(Config)

    # find_unused_parameters=True: the cascaded model does not touch every
    # parameter on every step (expr vs latent branches), so DDP needs this.
    ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_kwargs])
    if accelerator.is_main_process:
        print(config)
        # NOTE(review): save_path is only bound on the main process here; it is
        # re-assigned unconditionally later (optimizer section) before any
        # non-main-process use — confirm that ordering is preserved.
        save_path = config.make_path()
        os.makedirs(save_path, exist_ok=True)
    device = accelerator.device

    # === Data loading (reuse scDFM) ===
    Data, PerturbationDataset, TrainSampler, TestDataset = get_data_classes()

    scdfm_data_path = os.path.join(_REPO_ROOT, "scDFM", "data")
    data_manager = Data(scdfm_data_path)
    data_manager.load_data(config.data_name)

    # Convert var_names from Ensembl IDs to gene symbols if needed.
    # Heuristic: only the first var_name is checked for the "ENSG" prefix.
    if "gene_name" in data_manager.adata.var.columns and data_manager.adata.var_names[0].startswith("ENSG"):
        data_manager.adata.var_names = data_manager.adata.var["gene_name"].values
        data_manager.adata.var_names_make_unique()
        if accelerator.is_main_process:
            print(f"Converted var_names to gene symbols, sample: {list(data_manager.adata.var_names[:5])}")

    # Preprocess (HVG selection, split, optional negative edges).
    data_manager.process_data(
        n_top_genes=config.n_top_genes,
        split_method=config.split_method,
        fold=config.fold,
        use_negative_edge=config.use_negative_edge,
        k=config.topk,
    )
    train_sampler, valid_sampler, _ = data_manager.load_flow_data(batch_size=config.batch_size)

    # === Build mask path ===
    # File name encodes fold / topk / split; "_negative_edge" variant when enabled.
    if config.use_negative_edge:
        mask_path = os.path.join(
            data_manager.data_path, data_manager.data_name,
            f"mask_fold_{config.fold}topk_{config.topk}{config.split_method}_negative_edge.pt",
        )
    else:
        mask_path = os.path.join(
            data_manager.data_path, data_manager.data_name,
            f"mask_fold_{config.fold}topk_{config.topk}{config.split_method}.pt",
        )

    # === Vocab ===
    # process_vocab appears to resolve paths relative to the scDFM checkout,
    # so temporarily chdir there and restore afterwards.
    orig_cwd = os.getcwd()
    os.chdir(os.path.join(_REPO_ROOT, "scDFM"))
    vocab = process_vocab(data_manager, config)
    os.chdir(orig_cwd)

    # Vocab-encoded gene ids for the full gene panel, kept on the device.
    gene_ids = vocab.encode(list(data_manager.adata.var_names))
    gene_ids = torch.tensor(gene_ids, dtype=torch.long, device=device)
205
    # === Build CascadedFlowModel ===
    # vf is the raw flow network; the denoiser below wraps it with the
    # training/inference logic.
    vf = CascadedFlowModel(
        ntoken=len(vocab),
        d_model=config.d_model,
        nhead=config.nhead,
        d_hid=config.d_hid,
        nlayers=config.nlayers,
        fusion_method=config.fusion_method,
        perturbation_function=config.perturbation_function,
        mask_path=mask_path,
        latent_dim=config.latent_dim,
        dh_depth=config.dh_depth,
    )

    # === Build SparseDeltaCache ===
    # Opens HDF5 metadata only at construction; per-worker handles are lazy.
    sparse_cache = SparseDeltaCache(config.sparse_cache_path, delta_top_k=config.delta_topk)

    # === DataLoader with GRNDatasetWrapper (sparse triplets from workers) ===
    # batch_size=1 because PerturbationDataset items are already full batches;
    # the training loop squeezes the extra leading dimension.
    base_dataset = PerturbationDataset(train_sampler, config.batch_size)
    train_dataset = GRNDatasetWrapper(base_dataset, sparse_cache, gene_ids.cpu(), config.infer_top_gene)
    dataloader = DataLoader(
        train_dataset, batch_size=1, shuffle=False,
        num_workers=8, pin_memory=True, persistent_workers=True,
    )

    # === Build CascadedDenoiser (loads SVD dict as register_buffer) ===
    denoiser = CascadedDenoiser(
        model=vf,
        sparse_cache=sparse_cache,
        svd_dict_path=config.svd_dict_path,
        choose_latent_p=config.choose_latent_p,
        latent_weight=config.latent_weight,
        noise_type=config.noise_type,
        use_mmd_loss=config.use_mmd_loss,
        gamma=config.gamma,
        poisson_alpha=config.poisson_alpha,
        poisson_target_sum=config.poisson_target_sum,
        t_sample_mode=config.t_sample_mode,
        t_expr_mean=config.t_expr_mean,
        t_expr_std=config.t_expr_std,
        t_latent_mean=config.t_latent_mean,
        t_latent_std=config.t_latent_std,
        noise_beta=config.noise_beta,
        use_variance_weight=config.use_variance_weight,
    )

    # === EMA model ===
    # Frozen exponential-moving-average copy of vf; checkpoints save this copy.
    ema_model = copy.deepcopy(vf).to(device)
    ema_model.eval()
    ema_model.requires_grad_(False)

    # === Optimizer & Scheduler (with warmup) ===
    # Re-bound here unconditionally so all ranks agree on save_path.
    save_path = config.make_path()
    optimizer = torch.optim.Adam(vf.parameters(), lr=config.lr)
    # Linear warmup followed by cosine decay; T_max clamped to >= 1 so a
    # warmup_steps >= steps configuration does not crash.
    warmup_scheduler = LinearLR(
        optimizer, start_factor=1e-3, end_factor=1.0, total_iters=config.warmup_steps,
    )
    cosine_scheduler = CosineAnnealingLR(
        optimizer, T_max=max(config.steps - config.warmup_steps, 1), eta_min=config.eta_min,
    )
    scheduler = SequentialLR(
        optimizer, [warmup_scheduler, cosine_scheduler], milestones=[config.warmup_steps],
    )

    # Optionally resume; the EMA copy is re-seeded from the loaded weights.
    start_iteration = 0
    if config.checkpoint_path != "":
        start_iteration, _ = load_checkpoint(config.checkpoint_path, vf, optimizer, scheduler)
        ema_model.load_state_dict(vf.state_dict())

    # === Prepare with accelerator ===
    denoiser = accelerator.prepare(denoiser)
    optimizer, scheduler, dataloader = accelerator.prepare(optimizer, scheduler, dataloader)

    # Reverse map: perturbation integer id -> perturbation name (as str).
    inverse_dict = {v: str(k) for k, v in data_manager.perturbation_dict.items()}
280
    # === Test-only mode ===
    # Runs evaluation once and exits; no training state is touched.
    if config.test_only:
        eval_path = os.path.join(save_path, "eval_only")
        os.makedirs(eval_path, exist_ok=True)
        if accelerator.is_main_process:
            print(f"Test-only mode. Saving results to {eval_path}")
        eval_score = test(
            valid_sampler, denoiser, accelerator, config, vocab, data_manager,
            batch_size=config.eval_batch_size, path_dir=eval_path,
        )
        # test() returns a score only on the main process (None elsewhere).
        if accelerator.is_main_process and eval_score is not None:
            print(f"Final evaluation score: {eval_score:.4f}")
        sys.exit(0)

    # === Loss logging (CSV + TensorBoard) ===
    import csv
    from torch.utils.tensorboard import SummaryWriter
    if accelerator.is_main_process:
        os.makedirs(save_path, exist_ok=True)
        csv_path = os.path.join(save_path, 'loss_curve.csv')
        # Append when resuming into an existing log; otherwise start fresh
        # and write the header row.
        if start_iteration > 0 and os.path.exists(csv_path):
            csv_file = open(csv_path, 'a', newline='')
            csv_writer = csv.writer(csv_file)
        else:
            csv_file = open(csv_path, 'w', newline='')
            csv_writer = csv.writer(csv_file)
            csv_writer.writerow(['iteration', 'loss', 'loss_expr', 'loss_latent', 'loss_mmd', 'lr'])
        tb_writer = SummaryWriter(log_dir=os.path.join(save_path, 'tb_logs'))
309
    # === Training loop ===
    pbar = tqdm.tqdm(total=config.steps, initial=start_iteration)
    iteration = start_iteration

    while iteration < config.steps:
        for batch_data in dataloader:
            # Sparse triplets from GRNDatasetWrapper (cache I/O done in worker).
            # squeeze(0) removes the DataLoader's batch_size=1 dimension.
            source_sub = batch_data["src_cell_data"].squeeze(0).to(device)    # (B, G_sub)
            target_sub = batch_data["tgt_cell_data"].squeeze(0).to(device)    # (B, G_sub)
            delta_values = batch_data["delta_values"].squeeze(0).to(device)   # (B, G_sub, K)
            delta_indices = batch_data["delta_indices"].squeeze(0).to(device) # (B, G_sub, K)
            gene_ids_sub = batch_data["gene_ids_sub"].squeeze(0).to(device)   # (G_sub,)
            input_gene_ids = batch_data["input_gene_ids"].squeeze(0)          # (G_sub,) stays on CPU
            perturbation_id = batch_data["condition_id"].squeeze(0).to(device)

            if config.perturbation_function == "crisper":
                # Re-encode perturbation ids through the gene vocab so the model
                # sees vocab-space tokens instead of dataset-local condition ids.
                perturbation_name = [
                    inverse_dict[int(p_id)] for p_id in perturbation_id[0].cpu().numpy()
                ]
                perturbation_id = torch.tensor(
                    vocab.encode(perturbation_name), dtype=torch.long, device=device
                )
                perturbation_id = perturbation_id.repeat(source_sub.shape[0], 1)

            # Get the underlying denoiser for train_step (unwrap DDP if present).
            base_denoiser = denoiser.module if hasattr(denoiser, "module") else denoiser
            base_denoiser.model.train()

            B = source_sub.shape[0]
            gene_input = gene_ids_sub.unsqueeze(0).expand(B, -1)  # (B, G_sub)

            loss_dict = base_denoiser.train_step(
                source_sub, target_sub, perturbation_id, gene_input,
                delta_values=delta_values, delta_indices=delta_indices,
                input_gene_ids=input_gene_ids,
            )

            loss = loss_dict["loss"]
            optimizer.zero_grad(set_to_none=True)
            accelerator.backward(loss)
            optimizer.step()
            scheduler.step()

            # === EMA update ===
            # lerp_(x, 1 - decay): ema <- decay * ema + (1 - decay) * model.
            with torch.no_grad():
                decay = config.ema_decay
                for ema_p, model_p in zip(ema_model.parameters(), vf.parameters()):
                    ema_p.lerp_(model_p.data, 1 - decay)

            if iteration % config.print_every == 0:
                save_path_ = os.path.join(save_path, f"iteration_{iteration}")
                os.makedirs(save_path_, exist_ok=True)
                # NOTE(review): indentation reconstructed from the diff — the
                # checkpoint write is assumed to be main-process-only, matching
                # the guard on the print; confirm against the original file.
                if accelerator.is_main_process:
                    print(f"Saving iteration {iteration} checkpoint...")
                    save_checkpoint(
                        model=ema_model,
                        optimizer=optimizer,
                        scheduler=scheduler,
                        iteration=iteration,
                        eval_score=None,
                        save_path=save_path_,
                        is_best=False,
                    )
                # (Evaluation moved to after training loop)

            # --- Per-iteration loss logging ---
            if accelerator.is_main_process:
                current_lr = scheduler.get_last_lr()[0]
                # CSV: every 100 steps
                if iteration % 100 == 0:
                    csv_writer.writerow([
                        iteration, loss.item(),
                        loss_dict["loss_expr"].item(),
                        loss_dict["loss_latent"].item(),
                        loss_dict["loss_mmd"].item(),
                        current_lr,
                    ])
                    csv_file.flush()
                # TensorBoard: every step
                tb_writer.add_scalar('loss/train', loss.item(), iteration)
                tb_writer.add_scalar('loss/expr', loss_dict["loss_expr"].item(), iteration)
                tb_writer.add_scalar('loss/latent', loss_dict["loss_latent"].item(), iteration)
                tb_writer.add_scalar('loss/mmd', loss_dict["loss_mmd"].item(), iteration)
                tb_writer.add_scalar('lr', current_lr, iteration)

            accelerator.wait_for_everyone()

            pbar.update(1)
            pbar.set_description(
                f"loss: {loss.item():.4f} (expr: {loss_dict['loss_expr'].item():.4f}, "
                f"latent: {loss_dict['loss_latent'].item():.4f}, "
                f"mmd: {loss_dict['loss_mmd'].item():.4f}), iter: {iteration}"
            )
            iteration += 1
            if iteration >= config.steps:
                break

    # === Final checkpoint + evaluation at end of training ===
    save_path_ = os.path.join(save_path, f"iteration_{iteration}")
    os.makedirs(save_path_, exist_ok=True)
    if accelerator.is_main_process:
        print(f"Saving final checkpoint at iteration {iteration}...")
        save_checkpoint(
            model=ema_model,
            optimizer=optimizer,
            scheduler=scheduler,
            iteration=iteration,
            eval_score=None,
            save_path=save_path_,
            is_best=False,
        )

    # Evaluate with the EMA weights: swap them into vf (which the prepared
    # denoiser wraps), run the test, then restore the raw training weights.
    orig_state = copy.deepcopy(vf.state_dict())
    vf.load_state_dict(ema_model.state_dict())

    eval_score = test(
        valid_sampler, denoiser, accelerator, config, vocab, data_manager,
        batch_size=config.eval_batch_size, path_dir=save_path_,
    )

    vf.load_state_dict(orig_state)

    if accelerator.is_main_process and eval_score is not None:
        tb_writer.add_scalar('eval/score', eval_score, iteration)

    # === Close logging ===
    if accelerator.is_main_process:
        csv_file.close()
        tb_writer.close()
train/CCFM/pca_emb/src/__init__.py ADDED
File without changes
train/CCFM/pca_emb/src/_scdfm_imports.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""
Central import hub for scDFM modules.

Re-exports the scDFM layers, blocks, flow-matching components, utils and
tokenizer under plain names so the rest of CCFM can do
``from ._scdfm_imports import GeneEncoder`` without knowing about the
``scdfm_src`` module-aliasing scheme.

Requires _bootstrap_scdfm to have been imported first (at script entry point).
"""

import sys

# Ensure bootstrap has run.
# NOTE(review): this fallback only *imports* _bootstrap_scdfm; whether that
# registers "scdfm_src" depends on the bootstrap module calling bootstrap()
# at import time — confirm against _bootstrap_scdfm's module tail.
if "scdfm_src" not in sys.modules:
    import os
    sys.path.insert(0, os.path.normpath(os.path.join(os.path.dirname(__file__), "..")))
    import _bootstrap_scdfm

# Bind the aliased scDFM modules (installed under the scdfm_src prefix).
import scdfm_src.models.origin.layers as _layers
import scdfm_src.models.origin.model as _model
import scdfm_src.flow_matching.path as _fm_path
import scdfm_src.flow_matching.path.scheduler.scheduler as _scheduler
import scdfm_src.utils.utils as _utils
import scdfm_src.tokenizer.gene_tokenizer as _tokenizer

# === scDFM Layers ===
GeneadaLN = _layers.GeneadaLN
ContinuousValueEncoder = _layers.ContinuousValueEncoder
GeneEncoder = _layers.GeneEncoder
BatchLabelEncoder = _layers.BatchLabelEncoder
TimestepEmbedder = _layers.TimestepEmbedder
ExprDecoder = _layers.ExprDecoder

# === scDFM Blocks ===
DifferentialTransformerBlock = _model.DifferentialTransformerBlock
PerceiverBlock = _model.PerceiverBlock
DiffPerceiverBlock = _model.DiffPerceiverBlock

# === scDFM Flow Matching ===
AffineProbPath = _fm_path.AffineProbPath
CondOTScheduler = _scheduler.CondOTScheduler

# === scDFM Utils ===
save_checkpoint = _utils.save_checkpoint
load_checkpoint = _utils.load_checkpoint
make_lognorm_poisson_noise = _utils.make_lognorm_poisson_noise
pick_eval_score = _utils.pick_eval_score
process_vocab = _utils.process_vocab
set_requires_grad_for_p_only = _utils.set_requires_grad_for_p_only
get_perturbation_emb = _utils.get_perturbation_emb

# === scDFM Tokenizer ===
GeneVocab = _tokenizer.GeneVocab

# === scDFM Data ===
# Data loading handled separately in CCFM (scDFM data module has heavy deps)
train/CCFM/pca_emb/src/data/__init__.py ADDED
File without changes
train/CCFM/pca_emb/src/data/data.py ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Data loading for grn_svd.
3
+ Imports scDFM Data/PerturbationDataset by temporarily swapping sys.modules
4
+ so that scDFM's 'src.*' packages are visible during import.
5
+ """
6
+
7
+ import sys
8
+ import os
9
+
10
+ import torch
11
+ from torch.utils.data import Dataset
12
+
13
# Absolute path of the sibling scDFM checkout (four levels up from this file,
# then transfer/code/scDFM).
_SCDFM_ROOT = os.path.normpath(
    os.path.join(os.path.dirname(__file__), "..", "..", "..", "..", "transfer", "code", "scDFM")
)

# Cache to avoid repeated imports (module-level, filled on first call).
_cached_classes = {}


def get_data_classes():
    """Lazily import scDFM data classes with proper module isolation.

    scDFM's code lives in its own top-level ``src`` package, which collides
    with CCFM's ``src``. This function temporarily removes CCFM's ``src.*``
    entries from sys.modules, imports scDFM's data classes, then restores
    everything — so both packages can coexist in one process.

    Returns:
        Tuple of (Data, PerturbationDataset, TrainSampler, TestDataset)
        classes imported from scDFM's src.data_process.data module.
    """
    # Fast path: classes already imported once in this process.
    if _cached_classes:
        return (
            _cached_classes["Data"],
            _cached_classes["PerturbationDataset"],
            _cached_classes["TrainSampler"],
            _cached_classes["TestDataset"],
        )

    # Save CCFM's src modules so scDFM's identically-named package can load.
    saved = {}
    for key in list(sys.modules.keys()):
        if key == "src" or key.startswith("src."):
            saved[key] = sys.modules.pop(key)

    # Ensure __init__.py exists for scDFM data_process so the dirs are packages.
    for d in ["src", "src/data_process", "src/utils", "src/tokenizer"]:
        init_path = os.path.join(_SCDFM_ROOT, d, "__init__.py")
        if not os.path.exists(init_path):
            os.makedirs(os.path.dirname(init_path), exist_ok=True)
            with open(init_path, "w") as f:
                f.write("# Auto-created by CCFM\n")

    sys.path.insert(0, _SCDFM_ROOT)
    try:
        from src.data_process.data import Data, PerturbationDataset, TrainSampler, TestDataset
        _cached_classes["Data"] = Data
        _cached_classes["PerturbationDataset"] = PerturbationDataset
        _cached_classes["TrainSampler"] = TrainSampler
        _cached_classes["TestDataset"] = TestDataset
    finally:
        # Remove scDFM's src.* entries so they can't shadow CCFM's package.
        # (The cached class objects keep their defining modules alive.)
        for key in list(sys.modules.keys()):
            if (key == "src" or key.startswith("src.")) and not key.startswith("scdfm_"):
                del sys.modules[key]

        # Restore CCFM's src modules.
        for key, mod in saved.items():
            sys.modules[key] = mod

        if _SCDFM_ROOT in sys.path:
            sys.path.remove(_SCDFM_ROOT)

    return Data, PerturbationDataset, TrainSampler, TestDataset
66
+
67
+
68
class GRNDatasetWrapper(Dataset):
    """
    Adapter around scDFM's PerturbationDataset that emits sparse delta
    triplets instead of a dense (B, G_sub, G_sub) target.

    Each item carries ``delta_values`` / ``delta_indices`` of shape
    (B, G_sub, K); the SVD projection of these triplets happens later on the
    GPU inside denoiser.train_step().
    """

    def __init__(self, base_dataset, sparse_cache, gene_ids_cpu, infer_top_gene):
        # Only references are stored; all cache I/O runs in __getitem__,
        # i.e. inside DataLoader worker processes.
        self.base = base_dataset          # scDFM PerturbationDataset
        self.sparse_cache = sparse_cache  # SparseDeltaCache (multi-process safe)
        self.gene_ids = gene_ids_cpu      # (G_full,) CPU tensor, vocab-encoded
        self.infer_top_gene = infer_top_gene

    def __len__(self):
        return len(self.base)

    def __getitem__(self, idx):
        batch = self.base[idx]

        # Draw a random subset of gene rows for this step.
        total_genes = batch["src_cell_data"].shape[-1]
        subset_rows = torch.randperm(total_genes)[: self.infer_top_gene]

        # Some collate paths wrap each cell id in a 1-element tuple/list;
        # unwrap both lists when the source ids look wrapped.
        src_ids = batch["src_cell_id"]
        tgt_ids = batch["tgt_cell_id"]
        if src_ids and isinstance(src_ids[0], (tuple, list)):
            src_ids = [entry[0] for entry in src_ids]
            tgt_ids = [entry[0] for entry in tgt_ids]

        # Sparse cache lookup — executed inside the worker process.
        values, indices = self.sparse_cache.lookup_delta(
            src_ids, tgt_ids, subset_rows, device=torch.device("cpu")
        )  # values: (B, G_sub, K) float32, indices: (B, G_sub, K) int16

        # Subset the expression matrices to the sampled gene rows.
        return {
            "src_cell_data": batch["src_cell_data"][:, subset_rows],  # (B, G_sub)
            "tgt_cell_data": batch["tgt_cell_data"][:, subset_rows],  # (B, G_sub)
            "condition_id": batch["condition_id"],                    # (B, 2)
            "delta_values": values,                                   # (B, G_sub, K)
            "delta_indices": indices,                                 # (B, G_sub, K) int16
            "gene_ids_sub": self.gene_ids[subset_rows],               # (G_sub,)
            "input_gene_ids": subset_rows,                            # (G_sub,)
        }
train/CCFM/pca_emb/src/data/sparse_raw_cache.py ADDED
@@ -0,0 +1,199 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ SparseDeltaCache — Returns sparse delta triplets (indices, values) for SVD projection.
3
+
4
+ Each gene row's delta attention is computed across ALL G_full=5035 columns,
5
+ then per-row top-K sparsification selects the K most important interactions.
6
+ The SVD projection (delta @ W) happens on GPU, not here.
7
+
8
+ Multi-process safe: each DataLoader worker lazily opens its own HDF5 handle.
9
+
10
+ HDF5 layout (from precompute_sparse_attn.py):
11
+ /attn_values (N, G_full, K) float16 — top-K attention values per row
12
+ /attn_indices (N, G_full, K) int16 — column indices in G_full space
13
+ /cell_names (N,) string
14
+ /valid_gene_mask (G_full,) bool
15
+ """
16
+
17
+ import os
18
+ import h5py
19
+ import numpy as np
20
+ import torch
21
+
22
+
23
+ def _read_sparse_batch(h5_values, h5_indices, name_to_idx,
24
+ src_cell_names, tgt_cell_names, gene_idx_np=None):
25
+ """
26
+ Shared HDF5 reading logic for sparse caches.
27
+
28
+ Returns:
29
+ src_vals, src_idxs, tgt_vals, tgt_idxs: numpy arrays (B, G_sub, K)
30
+ """
31
+ seen = {}
32
+ unique_names = []
33
+ for n in src_cell_names + tgt_cell_names:
34
+ if n not in seen:
35
+ seen[n] = len(unique_names)
36
+ unique_names.append(n)
37
+
38
+ unique_h5_idx = [name_to_idx[n] for n in unique_names]
39
+ sorted_order = np.argsort(unique_h5_idx)
40
+ sorted_h5_idx = [unique_h5_idx[i] for i in sorted_order]
41
+
42
+ raw_vals = h5_values[sorted_h5_idx]
43
+ raw_idxs = h5_indices[sorted_h5_idx]
44
+
45
+ unsort = np.argsort(sorted_order)
46
+ raw_vals = raw_vals[unsort]
47
+ raw_idxs = raw_idxs[unsort]
48
+
49
+ if gene_idx_np is not None:
50
+ raw_vals = raw_vals[:, gene_idx_np, :]
51
+ raw_idxs = raw_idxs[:, gene_idx_np, :]
52
+
53
+ src_map = [seen[n] for n in src_cell_names]
54
+ tgt_map = [seen[n] for n in tgt_cell_names]
55
+ return raw_vals[src_map], raw_idxs[src_map], raw_vals[tgt_map], raw_idxs[tgt_map]
56
+
57
+
58
class SparseDeltaCache:
    """
    Returns sparse delta triplets for GPU-side SVD projection.

    Lookup flow:
        1. Read src/tgt sparse attention: (G_full, K_sparse) values + indices
        2. Select gene subset rows
        3. Scatter to dense: (B, G_sub, G_full) — chunked to avoid OOM
        4. Delta = tgt_dense - src_dense (full G_full columns, NOT G_sub)
        5. Per-row top-K on G_full columns
        6. Return (delta_values, delta_indices) sparse triplets

    Multi-process safety: only light metadata is read in __init__ (fork-safe);
    each DataLoader worker lazily opens its own HDF5 handle, keyed by pid.
    """

    def __init__(self, h5_path: str, delta_top_k: int = 30):
        self.h5_path = h5_path
        self.delta_top_k = delta_top_k

        # Read metadata only, then close — safe for fork
        with h5py.File(h5_path, "r") as h5:
            self.G_full = h5["attn_values"].shape[1]
            self.K_sparse = h5["attn_values"].shape[2]
            cell_names = h5["cell_names"].asstr()[:]
            # Map cell name -> row index in the HDF5 datasets.
            self.name_to_idx = {name: i for i, name in enumerate(cell_names)}
            if "valid_gene_mask" in h5:
                self.valid_gene_mask = h5["valid_gene_mask"][:].astype(bool)
            else:
                # No mask stored: treat every gene as valid.
                self.valid_gene_mask = np.ones(self.G_full, dtype=bool)

        # Per-process HDF5 handle (lazily opened); _pid tracks the owner process.
        self._h5 = None
        self._attn_values = None
        self._attn_indices = None
        self._pid = None

        print(f" SparseDeltaCache: {len(self.name_to_idx)} cells, "
              f"G_full={self.G_full}, K_sparse={self.K_sparse}, delta_topk={self.delta_top_k}")
        print(f" valid genes: {self.valid_gene_mask.sum()}/{self.G_full}")

    def _ensure_h5_open(self) -> None:
        """Ensure current process has its own HDF5 file handle."""
        pid = os.getpid()
        # Re-open after fork: a handle inherited from the parent is unsafe.
        if self._h5 is None or self._pid != pid:
            if self._h5 is not None:
                try:
                    self._h5.close()
                except Exception:
                    pass  # best-effort close of an inherited/stale handle
            self._h5 = h5py.File(self.h5_path, "r")
            self._attn_values = self._h5["attn_values"]
            self._attn_indices = self._h5["attn_indices"]
            self._pid = pid

    def get_missing_gene_mask(self, gene_indices=None) -> torch.Tensor:
        """
        Return missing gene mask (True = missing/invalid).
        Pure numpy operation — no HDF5 I/O needed.
        """
        mask = torch.from_numpy(~self.valid_gene_mask)  # True = missing
        if gene_indices is not None:
            return mask[gene_indices.cpu()]
        return mask

    def lookup_delta(self, src_cell_names, tgt_cell_names, gene_indices, device=None):
        """
        Compute sparse delta attention triplets for SVD projection.

        Args:
            src_cell_names: list of str, control cell identifiers
            tgt_cell_names: list of str, perturbation cell identifiers
            gene_indices: (G_sub,) tensor, gene subset row indices
            device: target torch device (usually CPU for DataLoader workers)

        Returns:
            delta_values: (B, G_sub, delta_topk) float32 — top-K delta values per row
            delta_indices: (B, G_sub, delta_topk) int16 — column indices in G_full space
        """
        self._ensure_h5_open()

        if device is None:
            device = torch.device("cpu")

        B = len(src_cell_names)
        gene_idx_np = gene_indices.cpu().numpy()
        G_sub = len(gene_idx_np)
        K = self.delta_top_k

        # Read sparse data from HDF5 (uses per-process handle)
        # gene_idx_np selects ROWS only — we keep all G_full columns
        sv_np, si_np, tv_np, ti_np = _read_sparse_batch(
            self._attn_values, self._attn_indices, self.name_to_idx,
            src_cell_names, tgt_cell_names, gene_idx_np)

        # Promote stored float16/int16 to float32/int64 for scatter/gather.
        src_vals = torch.from_numpy(sv_np.astype(np.float32)).to(device)  # (B, G_sub, K_sparse)
        src_idxs = torch.from_numpy(si_np.astype(np.int64)).to(device)
        tgt_vals = torch.from_numpy(tv_np.astype(np.float32)).to(device)
        tgt_idxs = torch.from_numpy(ti_np.astype(np.int64)).to(device)

        # Output sparse triplets
        out_values = torch.zeros(B, G_sub, K, device=device)
        out_indices = torch.zeros(B, G_sub, K, dtype=torch.int16, device=device)

        # Process in chunks (100 rows per chunk) to limit memory:
        # each chunk materializes two (B, chunk, G_full) dense tensors.
        chunk_size = 100
        for c_start in range(0, G_sub, chunk_size):
            c_end = min(c_start + chunk_size, G_sub)

            sv = src_vals[:, c_start:c_end, :]  # (B, c_len, K_sparse)
            si = src_idxs[:, c_start:c_end, :]
            tv = tgt_vals[:, c_start:c_end, :]
            ti = tgt_idxs[:, c_start:c_end, :]
            c_len = c_end - c_start

            # Scatter sparse entries to dense attention rows: (B, c_len, G_full)
            src_dense = torch.zeros(B, c_len, self.G_full, device=device)
            tgt_dense = torch.zeros(B, c_len, self.G_full, device=device)
            src_dense.scatter_(-1, si, sv)
            tgt_dense.scatter_(-1, ti, tv)

            # Delta on FULL G_full columns (no column subsetting!)
            delta = tgt_dense - src_dense  # (B, c_len, G_full)

            # Per-row top-K (by magnitude) on G_full columns
            _, topk_idx = delta.abs().topk(K, dim=-1)  # (B, c_len, K)
            topk_vals = delta.gather(-1, topk_idx)     # (B, c_len, K)

            out_values[:, c_start:c_end, :] = topk_vals
            # int16 column indices — assumes G_full < 32768 so indices fit.
            out_indices[:, c_start:c_end, :] = topk_idx.short()

        return out_values, out_indices  # (B, G_sub, K) float32, (B, G_sub, K) int16

    def close(self) -> None:
        # Idempotent: safe to call multiple times / on a never-opened cache.
        if self._h5 is not None:
            try:
                self._h5.close()
            except Exception:
                pass  # interpreter may already be tearing down h5py state
            self._h5 = None
            self._attn_values = None
            self._attn_indices = None

    def __del__(self):
        self.close()
train/CCFM/pca_emb/src/denoiser.py ADDED
@@ -0,0 +1,375 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ CascadedDenoiser for grn_svd — uses SVD-projected (B, G, 128) as latent target.
3
+
4
+ Key changes from grn_att_only:
5
+ - SVD dictionary W (5035, 128) as frozen register_buffer on GPU
6
+ - _sparse_project(): GPU-based loop-of-K gather+accumulate
7
+ - 1D missing mask (not 2D)
8
+ - Simplified masked MSE loss (no non-zero reweighting)
9
+ - reconstruct_grn(): inverse projection for GRN visualization
10
+ """
11
+
12
+ import torch
13
+ import torch.nn as nn
14
+ import torchdiffeq
15
+
16
+ from ._scdfm_imports import AffineProbPath, CondOTScheduler, make_lognorm_poisson_noise
17
+ from .model.model import CascadedFlowModel
18
+ from .data.sparse_raw_cache import SparseDeltaCache
19
+
20
+
21
# Shared flow-matching probability path used by the denoiser: affine
# interpolation under a conditional-OT scheduler (module-level singleton).
flow_path = AffineProbPath(scheduler=CondOTScheduler())
23
+
24
+
25
def pairwise_sq_dists(X, Y):
    """Squared Euclidean distances between every row of X and every row of Y."""
    dists = torch.cdist(X, Y, p=2)
    return dists * dists
27
+
28
+
29
@torch.no_grad()
def median_sigmas(X, scales=(0.5, 1.0, 2.0, 4.0)):
    """
    Median-heuristic RBF bandwidths at several scales.

    Takes the median of the off-diagonal pairwise squared distances of X
    (clamped away from zero), multiplies it by each scale, and returns the
    square roots as plain floats.
    """
    sq_dists = torch.cdist(X, X, p=2) ** 2
    off_diag = ~torch.eye(sq_dists.size(0), dtype=bool, device=sq_dists.device)
    med = torch.median(sq_dists[off_diag]).clamp_min(1e-12)
    sigmas = torch.sqrt(torch.tensor(scales, device=X.device) * med)
    return [float(s.item()) for s in sigmas]
36
+
37
+
38
def mmd2_unbiased_multi_sigma(X, Y, sigmas):
    """
    Unbiased MMD^2 between samples X and Y, averaged over RBF bandwidths.

    Uses the U-statistic estimator: diagonal kernel entries are excluded from
    the within-sample terms, while the cross term is a plain mean.
    """
    m = X.size(0)
    n = Y.size(0)
    d_xx = torch.cdist(X, X, p=2) ** 2
    d_yy = torch.cdist(Y, Y, p=2) ** 2
    d_xy = torch.cdist(X, Y, p=2) ** 2

    estimates = []
    for sigma in sigmas:
        beta = 1.0 / (2.0 * (sigma ** 2) + 1e-12)
        k_xx = torch.exp(-beta * d_xx)
        k_yy = torch.exp(-beta * d_yy)
        k_xy = torch.exp(-beta * d_xy)
        # Off-diagonal means for the unbiased within-sample terms.
        within_x = (k_xx.sum() - k_xx.diag().sum()) / (m * (m - 1) + 1e-12)
        within_y = (k_yy.sum() - k_yy.diag().sum()) / (n * (n - 1) + 1e-12)
        cross = k_xy.mean()
        estimates.append(within_x + within_y - 2.0 * cross)
    return torch.stack(estimates).mean()
54
+
55
+
56
+ class CascadedDenoiser(nn.Module):
57
+ """
58
+ Cascaded denoiser with SVD-projected latent target.
59
+
60
+ Training: Cascaded time-step sampling, GPU-side SVD projection.
61
+ Inference: Two-stage cascaded generation in 128-dim latent space.
62
+ """
63
+
64
+ def __init__(
65
+ self,
66
+ model: CascadedFlowModel,
67
+ sparse_cache: SparseDeltaCache,
68
+ svd_dict_path: str,
69
+ choose_latent_p: float = 0.4,
70
+ latent_weight: float = 1.0,
71
+ noise_type: str = "Gaussian",
72
+ use_mmd_loss: bool = True,
73
+ gamma: float = 0.5,
74
+ poisson_alpha: float = 0.8,
75
+ poisson_target_sum: float = 1e4,
76
+ # Logit-normal time-step sampling
77
+ t_sample_mode: str = "logit_normal",
78
+ t_expr_mean: float = 0.0,
79
+ t_expr_std: float = 1.0,
80
+ t_latent_mean: float = 0.0,
81
+ t_latent_std: float = 1.0,
82
+ # Cascaded noise
83
+ noise_beta: float = 0.25,
84
+ # Variance-weighted latent loss
85
+ use_variance_weight: bool = False,
86
+ ):
87
+ super().__init__()
88
+ self.model = model
89
+ self.sparse_cache = sparse_cache
90
+ self.choose_latent_p = choose_latent_p
91
+ self.latent_weight = latent_weight
92
+ self.noise_type = noise_type
93
+ self.use_mmd_loss = use_mmd_loss
94
+ self.gamma = gamma
95
+ self.poisson_alpha = poisson_alpha
96
+ self.poisson_target_sum = poisson_target_sum
97
+ self.t_sample_mode = t_sample_mode
98
+ self.t_expr_mean = t_expr_mean
99
+ self.t_expr_std = t_expr_std
100
+ self.t_latent_mean = t_latent_mean
101
+ self.t_latent_std = t_latent_std
102
+ self.noise_beta = noise_beta
103
+ self.use_variance_weight = use_variance_weight
104
+
105
+ # Load SVD dictionary as frozen buffer (moves with model to GPU)
106
+ svd_dict = torch.load(svd_dict_path, map_location="cpu", weights_only=False)
107
+ self.register_buffer("W", svd_dict["W"].float()) # (G_full, 128)
108
+ self.register_buffer("global_scale_sq",
109
+ torch.tensor(svd_dict["global_scale"] ** 2).float()) # scalar
110
+ self.latent_dim = self.W.shape[1]
111
+
112
+ # Variance-weighted latent loss: w_i = evr_i / sum(evr)
113
+ if use_variance_weight:
114
+ evr = torch.from_numpy(svd_dict["explained_variance_ratio"]).float()
115
+ w = evr / evr.sum() # (latent_dim,) normalized to sum=1
116
+ self.register_buffer("latent_dim_weights", w)
117
+ print(f" Variance weighting ON: top={w[0]:.4f}, bot={w[-1]:.4f}, "
118
+ f"ratio={w[0]/w[-1]:.1f}x")
119
+
120
+ print(f" SVD dict loaded: W {self.W.shape}, "
121
+ f"global_scale={svd_dict['global_scale']:.6f}")
122
+
123
+ def _sparse_project(self, delta_values, delta_indices):
124
+ """
125
+ Sparse GPU projection: delta @ W via loop-of-K gather+accumulate.
126
+
127
+ Args:
128
+ delta_values: (B, G_sub, K) float32 — top-K delta values (on GPU)
129
+ delta_indices: (B, G_sub, K) int16 — column indices in G_full space (on GPU)
130
+
131
+ Returns:
132
+ z_target: (B, G_sub, latent_dim=128)
133
+ """
134
+ assert delta_values.device == self.W.device, \
135
+ f"Device mismatch: delta on {delta_values.device}, W on {self.W.device}"
136
+
137
+ B, G_sub, K = delta_values.shape
138
+ z_target = torch.zeros(B, G_sub, self.latent_dim, device=delta_values.device)
139
+
140
+ indices_long = delta_indices.long() # (B, G_sub, K)
141
+ for k in range(K):
142
+ col_idx = indices_long[:, :, k] # (B, G_sub)
143
+ val = delta_values[:, :, k:k+1] # (B, G_sub, 1)
144
+ w_k = self.W[col_idx] # (B, G_sub, latent_dim)
145
+ z_target = z_target + val * w_k # broadcast + accumulate
146
+
147
+ return z_target
148
+
149
def reconstruct_grn(self, z: torch.Tensor) -> torch.Tensor:
    """
    Inverse SVD projection: 128-dim latent back to an approximate sparse GRN
    (per-gene delta attention over the full gene set).

    Because the stored projection is W = V * s with V orthonormal and
    s = global_scale, the pseudo-inverse of the forward map z = delta @ W
    is delta ≈ z @ W^T / s^2.

    Args:
        z: (B, G, 128) latent produced by ODE generation
    Returns:
        (B, G, G_full) approximate delta attention per gene
    """
    back_projected = torch.matmul(z, self.W.transpose(0, 1))
    return back_projected / self.global_scale_sq
163
+
164
def sample_t(self, n: int, device: torch.device):
    """Sample cascaded timesteps for the two flows (dino_first_cascaded mode).

    Draws base timesteps for the latent and expression flows, then randomly
    routes each sample to one of two training regimes:
      * latent samples train the latent flow (t_expr forced to 0);
      * expression samples train the expression flow with the latent clock
        pinned near completion (t_latent in [1 - noise_beta, 1]).

    Returns:
        (t_expr, t_latent, w_expr, w_latent) — per-sample timesteps plus
        0/1 loss weights marking which regime each sample belongs to.
    """
    if self.t_sample_mode == "logit_normal":
        # Logit-normal sampling: sigmoid of a scaled/shifted Gaussian.
        base_latent = torch.sigmoid(torch.randn(n, device=device) * self.t_latent_std + self.t_latent_mean)
        base_expr = torch.sigmoid(torch.randn(n, device=device) * self.t_expr_std + self.t_expr_mean)
    else:
        base_latent = torch.rand(n, device=device)
        base_expr = torch.rand(n, device=device)

    is_latent_sample = torch.rand(n, device=device) < self.choose_latent_p

    # Expression samples see an almost-finished latent clock.
    near_done = torch.rand_like(base_latent) * self.noise_beta + (1.0 - self.noise_beta)
    t_latent = torch.where(is_latent_sample, base_latent, near_done)
    t_expr = torch.where(is_latent_sample, torch.zeros_like(base_expr), base_expr)

    return t_expr, t_latent, (~is_latent_sample).float(), is_latent_sample.float()
183
+
184
+ def _make_expr_noise(self, source: torch.Tensor) -> torch.Tensor:
185
+ """Create noise for expression flow."""
186
+ if self.noise_type == "Gaussian":
187
+ return torch.randn_like(source)
188
+ elif self.noise_type == "Poisson":
189
+ return make_lognorm_poisson_noise(
190
+ target_log=source,
191
+ alpha=self.poisson_alpha,
192
+ per_cell_L=self.poisson_target_sum,
193
+ )
194
+ else:
195
+ raise ValueError(f"Unknown noise_type: {self.noise_type}")
196
+
197
def train_step(
    self,
    source: torch.Tensor,  # (B, G_sub) — already subsetted
    target: torch.Tensor,  # (B, G_sub)
    perturbation_id: torch.Tensor,  # (B, 2)
    gene_input: torch.Tensor,  # (B, G_sub) vocab-encoded gene IDs
    delta_values: torch.Tensor,  # (B, G_sub, K) on GPU
    delta_indices: torch.Tensor,  # (B, G_sub, K) int16 on GPU
    input_gene_ids: torch.Tensor,  # (G_sub,) positional indices for missing mask
) -> dict:
    """Single training step with SVD-projected latent target.

    Builds two conditional-flow-matching paths (expression and SVD latent),
    runs the model once, and combines a weighted expression MSE, a masked
    latent MSE, and an optional MMD term.

    Returns:
        dict with "loss" (differentiable total) plus detached "loss_expr",
        "loss_latent" and "loss_mmd" scalars for logging.

    NOTE(review): relies on a module-level `flow_path` object and MMD
    helpers (`median_sigmas`, `mmd2_unbiased_multi_sigma`) defined outside
    this view — confirm they are in scope.
    """
    B = source.shape[0]
    device = source.device
    G_sub = source.shape[-1]

    # 1. GPU-side SVD projection: sparse → dense 128-dim
    z_target = self._sparse_project(delta_values, delta_indices)  # (B, G_sub, 128)

    # 2. Missing gene mask — 1D only
    missing = self.sparse_cache.get_missing_gene_mask(input_gene_ids)  # (G_sub,) bool
    missing_dev = missing.to(device)

    # 3. Cascaded time sampling
    t_expr, t_latent, w_expr, w_latent = self.sample_t(B, device)

    # 4. Expression flow path
    noise_expr = self._make_expr_noise(source)
    path_expr = flow_path.sample(t=t_expr, x_0=noise_expr, x_1=target)

    # 5. Latent flow path — (B, G_sub, 128)
    noise_latent = torch.randn_like(z_target)
    # 1D missing mask: zero entire gene rows
    noise_latent[:, missing_dev, :] = 0.0

    # flow_path.sample works on flat vectors, so fold (G_sub, latent_dim)
    # into one axis and restore the shape afterwards.
    z_flat = z_target.reshape(B, G_sub * self.latent_dim)
    noise_flat = noise_latent.reshape(B, G_sub * self.latent_dim)
    path_latent_flat = flow_path.sample(t=t_latent, x_0=noise_flat, x_1=z_flat)

    # Lightweight shim exposing .x_t / .dx_t with the original 3-D shape.
    class _LatentPath:
        pass
    path_latent = _LatentPath()
    path_latent.x_t = path_latent_flat.x_t.reshape(B, G_sub, self.latent_dim)
    path_latent.dx_t = path_latent_flat.dx_t.reshape(B, G_sub, self.latent_dim)

    # 6. Model forward
    pred_v_expr, pred_v_latent = self.model(
        gene_input, source, path_expr.x_t, path_latent.x_t,
        t_expr, t_latent, perturbation_id,
    )

    # 7. Losses
    # Expression loss (unchanged from grn_att_only); weighted so only
    # expression-regime samples (w_expr=1) contribute.
    loss_expr_per_sample = ((pred_v_expr - path_expr.dx_t) ** 2).mean(dim=-1)  # (B,)
    loss_expr = (loss_expr_per_sample * w_expr).sum() / w_expr.sum().clamp(min=1)

    # Latent loss — masked MSE over 128-dim SVD latent
    loss_elem = (pred_v_latent - path_latent.dx_t) ** 2  # (B, G_sub, 128)
    if self.use_variance_weight:
        loss_per_gene = (loss_elem * self.latent_dim_weights).sum(dim=-1)  # (B, G_sub)
    else:
        loss_per_gene = loss_elem.mean(dim=-1)  # (B, G_sub) — uniform over 128 dims
    loss_per_gene[:, missing_dev] = 0.0  # zero out missing genes
    n_valid = (~missing_dev).sum().clamp(min=1)
    loss_latent_per_sample = loss_per_gene.sum(dim=-1) / n_valid  # (B,)
    loss_latent = (loss_latent_per_sample * w_latent).sum() / w_latent.sum().clamp(min=1)

    loss = loss_expr + self.latent_weight * loss_latent

    # Optional MMD loss on expression (only over expression-regime samples)
    _mmd_loss = torch.tensor(0.0, device=device)
    if self.use_mmd_loss and w_expr.sum() > 0:
        expr_mask = w_expr > 0
        if expr_mask.any():
            # One-step extrapolation to t=1: x1_hat = x_t + v * (1 - t)
            x1_hat = (
                path_expr.x_t[expr_mask]
                + pred_v_expr[expr_mask] * (1 - t_expr[expr_mask]).unsqueeze(-1)
            )
            sigmas = median_sigmas(target[expr_mask], scales=(0.5, 1.0, 2.0, 4.0))
            _mmd_loss = mmd2_unbiased_multi_sigma(x1_hat, target[expr_mask], sigmas)
        loss = loss + _mmd_loss * self.gamma

    return {
        "loss": loss,
        "loss_expr": loss_expr.detach(),
        "loss_latent": loss_latent.detach(),
        "loss_mmd": _mmd_loss.detach(),
    }
284
+
285
@torch.no_grad()
def generate(
    self,
    source: torch.Tensor,  # (B, G)
    perturbation_id: torch.Tensor,  # (B, 2)
    gene_ids: torch.Tensor,  # (B, G) or (G,)
    latent_steps: int = 20,
    expr_steps: int = 20,
    method: str = "rk4",
) -> torch.Tensor:
    """
    Two-stage cascaded generation in 128-dim latent space.

    Stage 1 integrates the latent flow from noise to t=1 while the
    expression input is held at its noise state (t_expr=0); stage 2 then
    integrates the expression flow conditioned on the finished latent
    (t_latent=1). `method` selects torchdiffeq RK4 or a manual Euler loop.

    Returns: (B, G) generated expression values (clamped at 0)
    """
    B, G = source.shape
    device = source.device

    if gene_ids.dim() == 1:
        gene_ids = gene_ids.unsqueeze(0).expand(B, -1)

    # 1D missing mask
    # NOTE(review): mask is built on CPU here (torch.arange without a
    # device argument) while z_t lives on `device`; train_step moves its
    # mask with `.to(device)` — confirm this indexing works on CUDA.
    missing = self.sparse_cache.get_missing_gene_mask(torch.arange(G))

    # Initialize latent noise in 128-dim (NOT G×G!)
    z_t = torch.randn(B, G, self.latent_dim, device=device)
    if missing is not None:
        z_t[:, missing, :] = 0.0
    x_t = self._make_expr_noise(source)

    if method == "rk4":
        # === Stage 1: Latent generation (t_latent: 0->1, t_expr=0) ===
        t_zero = torch.zeros(B, device=device)
        t_one = torch.ones(B, device=device)

        def latent_vf(t, z):
            # Latent velocity field; expression input stays frozen at noise.
            v_expr, v_latent = self.model(
                gene_ids, source, x_t, z,
                t_zero, t.expand(B), perturbation_id,
            )
            if missing is not None:
                v_latent[:, missing, :] = 0.0
            return v_latent

        z_t = torchdiffeq.odeint(
            latent_vf, z_t,
            torch.linspace(0, 1, latent_steps + 1, device=device),
            method="rk4", atol=1e-4, rtol=1e-4,
        )[-1]

        # === Stage 2: Expression generation (t_expr: 0->1, t_latent=1) ===
        def expr_vf(t, x):
            # Expression velocity field; latent held at its final state z_t.
            v_expr, v_latent = self.model(
                gene_ids, source, x, z_t,
                t.expand(B), t_one, perturbation_id,
            )
            return v_expr

        x_t = torchdiffeq.odeint(
            expr_vf, x_t,
            torch.linspace(0, 1, expr_steps + 1, device=device),
            method="rk4", atol=1e-4, rtol=1e-4,
        )[-1]

    else:  # euler
        # Joint schedule: latent clock ramps 0->1 over the first
        # latent_steps, then holds at 1 while the expression clock ramps.
        t_latent_schedule = torch.cat([
            torch.linspace(0, 1, latent_steps + 1, device=device),
            torch.ones(expr_steps, device=device),
        ])
        t_expr_schedule = torch.cat([
            torch.zeros(latent_steps + 1, device=device),
            torch.linspace(0, 1, expr_steps + 1, device=device)[1:],
        ])

        for i in range(latent_steps + expr_steps):
            t_lat = t_latent_schedule[i]
            t_lat_next = t_latent_schedule[i + 1]
            t_exp = t_expr_schedule[i]
            t_exp_next = t_expr_schedule[i + 1]

            v_expr, v_latent = self.model(
                gene_ids, source, x_t, z_t,
                t_exp.expand(B), t_lat.expand(B), perturbation_id,
            )

            # Explicit Euler update for both flows.
            x_t = x_t + (t_exp_next - t_exp) * v_expr
            z_t = z_t + (t_lat_next - t_lat) * v_latent
            if missing is not None:
                z_t[:, missing, :] = 0.0

    return torch.clamp(x_t, min=0)
train/CCFM/pca_emb/src/model/__init__.py ADDED
File without changes
train/CCFM/pca_emb/src/model/layers.py ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Latent layers for grn_svd: LatentEmbedder, LatentDecoderBlock, LatentDecoder.
3
+ Adapted from GRN/grn_ccfm/src/model/layers.py for SVD-projected 128-dim latent space.
4
+ """
5
+
6
+ import torch
7
+ import torch.nn as nn
8
+
9
+
10
class LatentEmbedder(nn.Module):
    """
    Embeds the noised latent z_t (B, G, latent_dim) into the backbone width
    (B, G, d_model).

    Two regimes:
      * latent_dim == d_model: LayerNorm followed by a single Linear
        (near-identity projection);
      * latent_dim != d_model: Linear -> GELU -> Linear bottleneck with no
        LayerNorm, since normalizing over the latent axis would destroy
        the signal.
    """

    def __init__(self, latent_dim: int = 128, d_model: int = 128):
        super().__init__()
        if latent_dim == d_model:
            stages = [
                nn.LayerNorm(latent_dim),
                nn.Linear(latent_dim, d_model),
            ]
        else:
            stages = [
                nn.Linear(latent_dim, d_model),
                nn.GELU(),
                nn.Linear(d_model, d_model),
            ]
        self.proj = nn.Sequential(*stages)

    def forward(self, z: torch.Tensor) -> torch.Tensor:
        """Map (B, G, latent_dim) -> (B, G, d_model)."""
        return self.proj(z)
35
+
36
+
37
class LatentDecoderBlock(nn.Module):
    """
    Transformer block with adaptive LayerNorm (AdaLN) conditioning for the
    latent decoder head.

    The conditioning vector c is mapped to six modulation tensors —
    shift/scale/gate for the self-attention branch and for the MLP branch —
    in the DiT-style AdaLN layout.
    """

    def __init__(self, hidden_size: int, num_heads: int = 4, mlp_ratio: float = 4.0,
                 hidden_size_c: int = None):
        super().__init__()
        cond_dim = hidden_size_c or hidden_size
        # Affine-free norms: scale/shift come from the conditioning instead.
        self.norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.attn = nn.MultiheadAttention(hidden_size, num_heads, batch_first=True)
        self.norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        inner_dim = int(hidden_size * mlp_ratio)
        self.mlp = nn.Sequential(
            nn.Linear(hidden_size, inner_dim),
            nn.GELU(),
            nn.Linear(inner_dim, hidden_size),
        )
        self.adaLN_modulation = nn.Sequential(
            nn.SiLU(),
            nn.Linear(cond_dim, 6 * hidden_size, bias=True),
        )

    def forward(self, x: torch.Tensor, c: torch.Tensor) -> torch.Tensor:
        """
        x: (B, G, hidden_size) token sequence
        c: (B, hidden_size_c) — conditioning vector (t_expr + t_latent + pert_emb)
        """
        # Six (B, 1, hidden_size) modulation tensors, broadcast over tokens.
        shift_attn, scale_attn, gate_attn, shift_ffn, scale_ffn, gate_ffn = (
            m.unsqueeze(1) for m in self.adaLN_modulation(c).chunk(6, dim=1)
        )

        # Attention branch: modulated pre-norm, gated residual.
        branch = self.norm1(x) * (1 + scale_attn) + shift_attn
        branch, _ = self.attn(branch, branch, branch)
        x = x + gate_attn * branch

        # MLP branch, same modulation pattern.
        branch = self.norm2(x) * (1 + scale_ffn) + shift_ffn
        x = x + gate_ffn * self.mlp(branch)
        return x
81
+
82
+
83
class LatentDecoder(nn.Module):
    """
    Maps backbone features (B, G, d_model) to a latent velocity field
    (B, G, latent_dim), optionally refining them with a stack of
    AdaLN-conditioned transformer blocks driven by the conditioning vector.
    """

    def __init__(self, d_model: int = 128, latent_dim: int = 128,
                 dh_depth: int = 2, num_heads: int = 4,
                 hidden_size_c: int = None):
        super().__init__()
        cond_dim = hidden_size_c or d_model

        self.dh_proj = nn.Linear(d_model, d_model)

        # dh_depth == 0 yields an empty ModuleList (pure MLP head).
        self.dh_blocks = nn.ModuleList([
            LatentDecoderBlock(d_model, num_heads=num_heads, hidden_size_c=cond_dim)
            for _ in range(dh_depth)
        ])

        self.final = nn.Sequential(
            nn.LayerNorm(d_model),
            nn.Linear(d_model, d_model),
            nn.GELU(),
            nn.Linear(d_model, latent_dim),
        )

    def forward(self, x: torch.Tensor, c: torch.Tensor) -> torch.Tensor:
        """
        x: (B, G, d_model) — backbone output
        c: (B, d_model) — conditioning vector
        Returns: (B, G, latent_dim) — predicted latent velocity
        """
        feats = self.dh_proj(x)
        for refine in self.dh_blocks:
            feats = refine(feats, c)
        return self.final(feats)
train/CCFM/pca_emb/src/model/model.py ADDED
@@ -0,0 +1,219 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ CascadedFlowModel for grn_svd.
3
+
4
+ Key changes from grn_att_only:
5
+ - Latent target: SVD-projected (B, G, 128) instead of raw (B, G, G)
6
+ - Latent encoder: LatentEmbedder(LayerNorm+Linear) instead of z_t @ gene_emb
7
+ - Latent decoder: LatentDecoder(AdaLN, conditioned on c) instead of BilinearLatentDecoder
8
+ """
9
+
10
+ import torch
11
+ import torch.nn as nn
12
+ from torch import Tensor
13
+ from typing import Optional, Tuple
14
+
15
+ from .layers import LatentEmbedder, LatentDecoder
16
+ from .._scdfm_imports import (
17
+ GeneadaLN,
18
+ ContinuousValueEncoder,
19
+ GeneEncoder,
20
+ BatchLabelEncoder,
21
+ TimestepEmbedder,
22
+ ExprDecoder,
23
+ DifferentialTransformerBlock,
24
+ PerceiverBlock,
25
+ DiffPerceiverBlock,
26
+ )
27
+
28
+
29
class CascadedFlowModel(nn.Module):
    """
    Cascaded Flow Model with SVD-projected latent target.

    Inputs:
        gene_id: (B, G) gene token IDs
        cell_1: (B, G) source (control) expression
        x_t: (B, G) noised target expression (expression flow)
        z_t: (B, G, 128) noised SVD-projected latent (latent flow)
        t_expr: (B,) expression flow timestep
        t_latent: (B,) latent flow timestep
        perturbation_id: (B, 2) perturbation token IDs

    Outputs:
        pred_v_expr: (B, G) predicted expression velocity
        pred_v_latent: (B, G, 128) predicted latent velocity
    """

    def __init__(
        self,
        ntoken: int = 6000,
        d_model: int = 128,
        nhead: int = 8,
        d_hid: int = 512,  # NOTE(review): accepted but never used below — confirm it can be dropped
        nlayers: int = 4,
        dropout: float = 0.1,
        fusion_method: str = "differential_perceiver",
        perturbation_function: str = "crisper",
        use_perturbation_interaction: bool = True,
        mask_path: Optional[str] = None,
        latent_dim: int = 128,
        dh_depth: int = 2,
    ):
        super().__init__()
        self.d_model = d_model
        self.fusion_method = fusion_method
        self.perturbation_function = perturbation_function

        # === Timestep embedders (separate for expr and latent) ===
        self.t_expr_embedder = TimestepEmbedder(d_model)
        self.t_latent_embedder = TimestepEmbedder(d_model)

        # === Perturbation embedder ===
        self.perturbation_embedder = BatchLabelEncoder(ntoken, d_model)

        # === Expression stream (reused from scDFM) ===
        self.value_encoder_1 = ContinuousValueEncoder(d_model, dropout)
        self.value_encoder_2 = ContinuousValueEncoder(d_model, dropout)
        self.encoder = GeneEncoder(
            ntoken, d_model,
            use_perturbation_interaction=use_perturbation_interaction,
            mask_path=mask_path,
        )
        self.use_perturbation_interaction = use_perturbation_interaction
        # Fuses the noised-expression and source-expression token streams.
        self.fusion_layer = nn.Sequential(
            nn.Linear(2 * d_model, d_model),
            nn.GELU(),
            nn.Linear(d_model, d_model),
            nn.LayerNorm(d_model),
        )

        # === Latent stream: LatentEmbedder (LayerNorm + Linear) ===
        self.latent_embedder = LatentEmbedder(latent_dim, d_model)

        # === Shared backbone blocks ===
        if fusion_method == "differential_transformer":
            self.blocks = nn.ModuleList([
                DifferentialTransformerBlock(d_model, nhead, i, mlp_ratio=4.0)
                for i in range(nlayers)
            ])
        elif fusion_method == "differential_perceiver":
            self.blocks = nn.ModuleList([
                DiffPerceiverBlock(d_model, nhead, i, mlp_ratio=4.0)
                for i in range(nlayers)
            ])
        elif fusion_method == "perceiver":
            self.blocks = nn.ModuleList([
                PerceiverBlock(d_model, d_model, heads=nhead, mlp_ratio=4.0, dropout=0.1)
                for _ in range(nlayers)
            ])
        else:
            raise ValueError(f"Invalid fusion method: {fusion_method}")

        # === Per-layer gene AdaLN + adapter ===
        self.gene_adaLN = nn.ModuleList([
            GeneadaLN(d_model, dropout) for _ in range(nlayers)
        ])
        # Per-layer adapters that fold the perturbation embedding back into
        # the token stream before each backbone block.
        self.adapter_layer = nn.ModuleList([
            nn.Sequential(
                nn.Linear(2 * d_model, d_model),
                nn.LeakyReLU(),
                nn.Dropout(dropout),
                nn.Linear(d_model, d_model),
                nn.LeakyReLU(),
            )
            for _ in range(nlayers)
        ])

        # === Expression decoder head (reused from scDFM) ===
        self.final_layer = ExprDecoder(d_model, explicit_zero_prob=False, use_batch_labels=True)

        # === Latent decoder head: AdaLN + MLP -> (B, G, latent_dim) ===
        self.latent_decoder = LatentDecoder(
            d_model=d_model, latent_dim=latent_dim,
            dh_depth=dh_depth, num_heads=max(nhead // 2, 1),
            hidden_size_c=d_model,
        )

        self.initialize_weights()

    def initialize_weights(self):
        """Xavier-initialize every nn.Linear in the model (bias -> 0).

        Applied recursively via self.apply, so it also overwrites whatever
        initialization the submodules performed in their own constructors.
        """
        def _basic_init(module):
            if isinstance(module, nn.Linear):
                torch.nn.init.xavier_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
        self.apply(_basic_init)

    def get_perturbation_emb(
        self,
        perturbation_id: Optional[Tensor] = None,
        perturbation_emb: Optional[Tensor] = None,
        cell_1: Optional[Tensor] = None,
    ) -> Tensor:
        """Get perturbation embedding, replicating scDFM logic.

        Exactly one of perturbation_id / perturbation_emb may be given.
        IDs are embedded (via the gene encoder for "crisper", otherwise the
        dedicated perturbation embedder) and mean-pooled over the 2 tokens;
        a precomputed embedding is moved to cell_1's device/dtype, broadcast
        to the batch if needed, and re-normalized.
        """
        assert perturbation_emb is None or perturbation_id is None
        if perturbation_id is not None:
            if self.perturbation_function == "crisper":
                perturbation_emb = self.encoder(perturbation_id)
            else:
                perturbation_emb = self.perturbation_embedder(perturbation_id)
            perturbation_emb = perturbation_emb.mean(1)  # (B, d)
        elif perturbation_emb is not None:
            perturbation_emb = perturbation_emb.to(cell_1.device, dtype=cell_1.dtype)
            if perturbation_emb.dim() == 1:
                perturbation_emb = perturbation_emb.unsqueeze(0)
            if perturbation_emb.size(0) == 1:
                perturbation_emb = perturbation_emb.expand(cell_1.shape[0], -1).contiguous()
            perturbation_emb = self.perturbation_embedder.enc_norm(perturbation_emb)
        return perturbation_emb

    def forward(
        self,
        gene_id: Tensor,  # (B, G)
        cell_1: Tensor,  # (B, G) source expression
        x_t: Tensor,  # (B, G) noised expression
        z_t: Tensor,  # (B, G, 128) noised SVD-projected latent
        t_expr: Tensor,  # (B,)
        t_latent: Tensor,  # (B,)
        perturbation_id: Optional[Tensor] = None,  # (B, 2)
    ) -> Tuple[Tensor, Tensor]:
        """Joint forward pass for both flows.

        Returns (pred_v_expr, pred_v_latent): the predicted expression
        velocity (B, G) and latent velocity (B, G, latent_dim).
        """
        # Scalar timesteps (e.g. from an ODE solver) are broadcast to the batch.
        if t_expr.dim() == 0:
            t_expr = t_expr.repeat(cell_1.size(0))
        if t_latent.dim() == 0:
            t_latent = t_latent.repeat(cell_1.size(0))

        # === 1. Expression stream embedding (aligned with scDFM) ===
        gene_emb = self.encoder(gene_id)  # (B, G, d_model)
        val_emb_1 = self.value_encoder_1(x_t)
        val_emb_2 = self.value_encoder_2(cell_1) + gene_emb
        expr_tokens = self.fusion_layer(torch.cat([val_emb_1, val_emb_2], dim=-1)) + gene_emb

        # === 2. Latent stream: LatentEmbedder (LayerNorm + Linear) ===
        # z_t: (B, G, 128) — already in d_model space via SVD projection
        latent_tokens = self.latent_embedder(z_t)  # (B, G, d_model)

        # === 3. Element-wise addition ===
        x = expr_tokens + latent_tokens  # (B, G, d_model)

        # === 4. Conditioning vector ===
        t_expr_emb = self.t_expr_embedder(t_expr)
        t_latent_emb = self.t_latent_embedder(t_latent)
        pert_emb = self.get_perturbation_emb(perturbation_id, cell_1=cell_1)
        c = t_expr_emb + t_latent_emb + pert_emb

        # === 5. Shared backbone ===
        for i, block in enumerate(self.blocks):
            x = self.gene_adaLN[i](gene_emb, x)
            # Concatenate the (broadcast) perturbation embedding onto every
            # token, then project back to d_model with the per-layer adapter.
            pert_exp = pert_emb[:, None, :].expand(-1, x.size(1), -1)
            x = torch.cat([x, pert_exp], dim=-1)
            x = self.adapter_layer[i](x)
            x = block(x, val_emb_2, c)

        # === 6a. Expression decoder head ===
        x_with_pert = torch.cat([x, pert_emb[:, None, :].expand(-1, x.size(1), -1)], dim=-1)
        pred_v_expr = self.final_layer(x_with_pert)["pred"]  # (B, G)

        # === 6b. Latent decoder head (AdaLN, conditioned on c) ===
        pred_v_latent = self.latent_decoder(x, c)  # (B, G, 128)

        return pred_v_expr, pred_v_latent
train/CCFM/pca_emb/src/utils.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Re-export scDFM utility functions from the central import module.
3
+ """
4
+
5
+ from ._scdfm_imports import (
6
+ save_checkpoint,
7
+ load_checkpoint,
8
+ make_lognorm_poisson_noise,
9
+ pick_eval_score,
10
+ process_vocab,
11
+ set_requires_grad_for_p_only,
12
+ get_perturbation_emb,
13
+ GeneVocab,
14
+ )