"""
Training and evaluation entry point for CCFM (Cascaded Conditioned Flow Matching).
Based on scDFM's run.py with cascaded denoiser integration.
Conditioning signals: control expression + perturbation_id.
scGPT latent features are an auxiliary generation target (like DINO in LatentForcing),
generated from noise at inference — not a conditioning signal.
"""
import sys
import os
# Set up paths — CCFM project root must be on sys.path for config/ and src/ imports
_PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, _PROJECT_ROOT)
# Bootstrap scDFM imports (must happen before any CCFM src imports)
import _bootstrap_scdfm # noqa: F401
import copy
import torch
import torch.nn as nn
import tyro
import tqdm
import numpy as np
import pandas as pd
import anndata as ad
import scanpy as sc
from torch.utils.data import DataLoader
from tqdm import trange
from accelerate import Accelerator, DistributedDataParallelKwargs
from torch.optim.lr_scheduler import LinearLR, CosineAnnealingLR, SequentialLR
from config.config_cascaded import CascadedFlowConfig as Config
from src.data.data import get_data_classes
from src.model.model import CascadedFlowModel
from src.data.scgpt_extractor import FrozenScGPTExtractor
from src.data.scgpt_cache import ScGPTFeatureCache
from src.denoiser import CascadedDenoiser
from src.utils import (
save_checkpoint,
load_checkpoint,
pick_eval_score,
process_vocab,
set_requires_grad_for_p_only,
GeneVocab,
)
from cell_eval import MetricsEvaluator
# Resolve scDFM directory paths
_REPO_ROOT = os.path.dirname(_PROJECT_ROOT) # transfer/code/
@torch.inference_mode()
def test(data_sampler, denoiser, accelerator, config, vocab, data_manager,
batch_size=128, path_dir="./"):
"""Evaluate: generate predictions and compute cell-eval metrics."""
device = accelerator.device
gene_ids_test = vocab.encode(list(data_sampler.adata.var_names))
gene_ids_test = torch.tensor(gene_ids_test, dtype=torch.long, device=device)
perturbation_name_list = data_sampler._perturbation_covariates
control_data = data_sampler.get_control_data()
inverse_dict = {v: str(k) for k, v in data_manager.perturbation_dict.items()}
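    # inverse_dict maps integer condition ids back to perturbation names so
    # CRISPR-style conditions can be re-encoded through the gene vocab below.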
all_pred_expressions = [control_data["src_cell_data"]]
obs_perturbation_name_pred = ["control"] * control_data["src_cell_data"].shape[0]
all_target_expressions = [control_data["src_cell_data"]]
obs_perturbation_name_real = ["control"] * control_data["src_cell_data"].shape[0]
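    # Control cells are included in both the predicted and real populations so
    # cell-eval can use them as the reference (control_pert="control" below).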
print("perturbation_name_list:", len(perturbation_name_list))
for perturbation_name in perturbation_name_list:
perturbation_data = data_sampler.get_perturbation_data(perturbation_name)
target = perturbation_data["tgt_cell_data"]
perturbation_id = perturbation_data["condition_id"]
source = control_data["src_cell_data"].to(device)
perturbation_id = perturbation_id.to(device)
if config.perturbation_function == "crisper":
perturbation_name_crisper = [
inverse_dict[int(p_id)] for p_id in perturbation_id[0].cpu().numpy()
]
perturbation_id = torch.tensor(
vocab.encode(perturbation_name_crisper), dtype=torch.long, device=device
)
perturbation_id = perturbation_id.repeat(source.shape[0], 1)
idx = torch.randperm(source.shape[0])
source = source[idx]
        # Cap the number of control cells used for generation; guards against
        # datasets with fewer than 128 control cells.
        N = min(128, source.shape[0])
        source = source[:N]
pred_expressions = []
for i in trange(0, N, batch_size, desc=perturbation_name):
batch_source = source[i : i + batch_size]
batch_pert_id = perturbation_id[0].repeat(batch_source.shape[0], 1).to(device)
# Get the underlying model for generation
model = denoiser.module if hasattr(denoiser, "module") else denoiser
pred = model.generate(
batch_source,
batch_pert_id,
gene_ids_test,
latent_steps=config.latent_steps,
expr_steps=config.expr_steps,
method=config.ode_method,
)
pred_expressions.append(pred)
pred_expressions = torch.cat(pred_expressions, dim=0).cpu().numpy()
all_pred_expressions.append(pred_expressions)
all_target_expressions.append(target)
obs_perturbation_name_pred.extend([perturbation_name] * pred_expressions.shape[0])
obs_perturbation_name_real.extend([perturbation_name] * target.shape[0])
all_pred_expressions = np.concatenate(all_pred_expressions, axis=0)
all_target_expressions = np.concatenate(all_target_expressions, axis=0)
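    # Assemble AnnData pairs in the layout cell-eval expects: one obs column
    # naming the perturbation for each cell, with "control" as the reference.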
obs_pred = pd.DataFrame({"perturbation": obs_perturbation_name_pred})
obs_real = pd.DataFrame({"perturbation": obs_perturbation_name_real})
pred_adata = ad.AnnData(X=all_pred_expressions, obs=obs_pred)
real_adata = ad.AnnData(X=all_target_expressions, obs=obs_real)
eval_score = None
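    # Metrics are computed on the main process only; other ranks return None.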
if accelerator.is_main_process:
evaluator = MetricsEvaluator(
adata_pred=pred_adata,
adata_real=real_adata,
control_pert="control",
pert_col="perturbation",
num_threads=32,
)
results, agg_results = evaluator.compute()
results.write_csv(os.path.join(path_dir, "results.csv"))
agg_results.write_csv(os.path.join(path_dir, "agg_results.csv"))
pred_adata.write_h5ad(os.path.join(path_dir, "pred.h5ad"))
real_adata.write_h5ad(os.path.join(path_dir, "real.h5ad"))
eval_score = pick_eval_score(agg_results, "mse")
print(f"Current evaluation score: {eval_score:.4f}")
return eval_score
if __name__ == "__main__":
config = tyro.cli(Config)
ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
accelerator = Accelerator(kwargs_handlers=[ddp_kwargs])
if accelerator.is_main_process:
print(config)
save_path = config.make_path()
os.makedirs(save_path, exist_ok=True)
device = accelerator.device
# === Data loading (reuse scDFM) ===
Data, PerturbationDataset, TrainSampler, TestDataset = get_data_classes()
scdfm_data_path = os.path.join(_REPO_ROOT, "scDFM", "data")
data_manager = Data(scdfm_data_path)
data_manager.load_data(config.data_name)
# Convert var_names from Ensembl IDs to gene symbols if needed.
# scDFM vocab and perturbation encoding both expect gene symbols as var_names.
if "gene_name" in data_manager.adata.var.columns and data_manager.adata.var_names[0].startswith("ENSG"):
data_manager.adata.var_names = data_manager.adata.var["gene_name"].values
data_manager.adata.var_names_make_unique()
if accelerator.is_main_process:
print(f"Converted var_names to gene symbols, sample: {list(data_manager.adata.var_names[:5])}")
data_manager.process_data(
n_top_genes=config.n_top_genes,
split_method=config.split_method,
fold=config.fold,
use_negative_edge=config.use_negative_edge,
k=config.topk,
)
train_sampler, valid_sampler, _ = data_manager.load_flow_data(batch_size=config.batch_size)
train_dataset = PerturbationDataset(train_sampler, config.batch_size)
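    # PerturbationDataset yields pre-batched samples of size config.batch_size,
    # so the DataLoader uses batch_size=1 and the training loop squeezes the
    # leading dimension back out.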
dataloader = DataLoader(
train_dataset, batch_size=1, shuffle=False,
num_workers=8, pin_memory=True, persistent_workers=True,
)
# === Build mask path ===
if config.use_negative_edge:
mask_path = os.path.join(
data_manager.data_path, data_manager.data_name,
f"mask_fold_{config.fold}topk_{config.topk}{config.split_method}_negative_edge.pt",
)
else:
mask_path = os.path.join(
data_manager.data_path, data_manager.data_name,
f"mask_fold_{config.fold}topk_{config.topk}{config.split_method}.pt",
)
# === Vocab ===
orig_cwd = os.getcwd()
os.chdir(os.path.join(_REPO_ROOT, "scDFM"))
vocab = process_vocab(data_manager, config)
os.chdir(orig_cwd)
# Vocab is built from var_names (may be Ensembl IDs or gene symbols)
gene_ids = vocab.encode(list(data_manager.adata.var_names))
gene_ids = torch.tensor(gene_ids, dtype=torch.long, device=device)
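    # Token ids for the current gene panel, passed to every train_step call.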
# === Build CascadedFlowModel ===
vf = CascadedFlowModel(
ntoken=len(vocab),
d_model=config.d_model,
nhead=config.nhead,
d_hid=config.d_model * 4,
nlayers=config.nlayers,
fusion_method=config.fusion_method,
perturbation_function=config.perturbation_function,
mask_path=mask_path,
scgpt_dim=config.scgpt_dim,
bottleneck_dim=config.bottleneck_dim,
dh_depth=config.dh_depth,
)
# === Build FrozenScGPTExtractor ===
# var_names have been converted to gene symbols above, matching scGPT vocab.
hvg_gene_names = list(data_manager.adata.var_names)
scgpt_model_dir = os.path.join(
os.path.dirname(_REPO_ROOT), # transfer/
config.scgpt_model_dir.replace("transfer/", ""),
)
scgpt_extractor = FrozenScGPTExtractor(
model_dir=scgpt_model_dir,
hvg_gene_names=hvg_gene_names,
device=device,
max_seq_len=config.scgpt_max_seq_len,
target_std=config.target_std,
warmup_batches=config.warmup_batches,
)
scgpt_extractor = scgpt_extractor.to(device)
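    # The extractor stays frozen throughout training; it only supplies scGPT
    # latent targets for the auxiliary latent-generation branch.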
# === Build CascadedDenoiser ===
denoiser = CascadedDenoiser(
model=vf,
scgpt_extractor=scgpt_extractor,
choose_latent_p=config.choose_latent_p,
latent_weight=config.latent_weight,
noise_type=config.noise_type,
use_mmd_loss=config.use_mmd_loss,
gamma=config.gamma,
poisson_alpha=config.poisson_alpha,
poisson_target_sum=config.poisson_target_sum,
t_sample_mode=config.t_sample_mode,
t_expr_mean=config.t_expr_mean,
t_expr_std=config.t_expr_std,
t_latent_mean=config.t_latent_mean,
t_latent_std=config.t_latent_std,
noise_beta=config.noise_beta,
)
# === Load scGPT cache if configured ===
scgpt_cache = None
if config.scgpt_cache_path:
scgpt_cache = ScGPTFeatureCache(
config.scgpt_cache_path,
target_std=config.target_std,
)
if accelerator.is_main_process:
print(f"Using pre-extracted scGPT cache: {config.scgpt_cache_path}")
print(f" Cache shape: {scgpt_cache.features.shape}, cells: {len(scgpt_cache.name_to_idx)}")
# === EMA model (on same device as training model) ===
ema_model = copy.deepcopy(vf).to(device)
ema_model.eval()
ema_model.requires_grad_(False)
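    # The EMA copy tracks an exponential moving average of the training
    # weights; checkpoints save this copy and evaluation swaps it in.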
# === Optimizer & Scheduler (with warmup) ===
save_path = config.make_path()
optimizer = torch.optim.Adam(vf.parameters(), lr=config.lr)
warmup_scheduler = LinearLR(
optimizer, start_factor=1e-3, end_factor=1.0, total_iters=config.warmup_steps,
)
cosine_scheduler = CosineAnnealingLR(
optimizer, T_max=max(config.steps - config.warmup_steps, 1), eta_min=config.eta_min,
)
scheduler = SequentialLR(
optimizer, [warmup_scheduler, cosine_scheduler], milestones=[config.warmup_steps],
)
start_iteration = 0
if config.checkpoint_path != "":
start_iteration, _ = load_checkpoint(config.checkpoint_path, vf, optimizer, scheduler)
# Sync EMA with loaded weights
ema_model.load_state_dict(vf.state_dict())
# === Prepare with accelerator ===
denoiser = accelerator.prepare(denoiser)
optimizer, scheduler, dataloader = accelerator.prepare(optimizer, scheduler, dataloader)
inverse_dict = {v: str(k) for k, v in data_manager.perturbation_dict.items()}
# === Test-only mode ===
if config.test_only:
eval_path = os.path.join(save_path, "eval_only")
os.makedirs(eval_path, exist_ok=True)
if accelerator.is_main_process:
print(f"Test-only mode. Saving results to {eval_path}")
eval_score = test(
valid_sampler, denoiser, accelerator, config, vocab, data_manager,
batch_size=config.batch_size, path_dir=eval_path,
)
if accelerator.is_main_process and eval_score is not None:
print(f"Final evaluation score: {eval_score:.4f}")
sys.exit(0)
# === Loss logging (CSV + TensorBoard) ===
import csv
from torch.utils.tensorboard import SummaryWriter
if accelerator.is_main_process:
os.makedirs(save_path, exist_ok=True)
csv_path = os.path.join(save_path, 'loss_curve.csv')
if start_iteration > 0 and os.path.exists(csv_path):
csv_file = open(csv_path, 'a', newline='')
csv_writer = csv.writer(csv_file)
else:
csv_file = open(csv_path, 'w', newline='')
csv_writer = csv.writer(csv_file)
csv_writer.writerow(['iteration', 'loss', 'loss_expr', 'loss_latent', 'loss_mmd', 'lr'])
tb_writer = SummaryWriter(log_dir=os.path.join(save_path, 'tb_logs'))
# === Training loop ===
    pbar = tqdm.tqdm(
        total=config.steps, initial=start_iteration,
        disable=not accelerator.is_main_process,
    )
iteration = start_iteration
while iteration < config.steps:
for batch_data in dataloader:
source = batch_data["src_cell_data"].squeeze(0)
target = batch_data["tgt_cell_data"].squeeze(0)
perturbation_id = batch_data["condition_id"].squeeze(0).to(device)
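            # CRISPR-style conditions are re-encoded through the gene vocab,
            # mirroring the mapping used in test().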
if config.perturbation_function == "crisper":
perturbation_name = [
inverse_dict[int(p_id)] for p_id in perturbation_id[0].cpu().numpy()
]
perturbation_id = torch.tensor(
vocab.encode(perturbation_name), dtype=torch.long, device=device
)
perturbation_id = perturbation_id.repeat(source.shape[0], 1)
# Get the underlying denoiser for train_step
base_denoiser = denoiser.module if hasattr(denoiser, "module") else denoiser
base_denoiser.model.train()
if scgpt_cache is not None:
# Cache mode: sample gene subset here, look up pre-extracted features
# DataLoader collate wraps strings in tuples; unwrap them
tgt_cell_names = [n[0] if isinstance(n, (tuple, list)) else n for n in batch_data["tgt_cell_id"]]
input_gene_ids = torch.randperm(source.shape[-1], device=device)[:config.infer_top_gene]
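                # The same random gene subset is passed to both the cache
                # lookup and train_step so cached features stay aligned with
                # the genes used this step.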
cached_z_target = scgpt_cache.lookup(tgt_cell_names, input_gene_ids, device=device)
loss_dict = base_denoiser.train_step(
source, target, perturbation_id, gene_ids,
infer_top_gene=config.infer_top_gene,
cached_z_target=cached_z_target,
cached_gene_ids=input_gene_ids,
)
else:
loss_dict = base_denoiser.train_step(
source, target, perturbation_id, gene_ids,
infer_top_gene=config.infer_top_gene,
)
loss = loss_dict["loss"]
optimizer.zero_grad(set_to_none=True)
accelerator.backward(loss)
optimizer.step()
scheduler.step()
# === EMA update ===
with torch.no_grad():
decay = config.ema_decay
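                # lerp_ implements ema = decay * ema + (1 - decay) * model;
                # only parameters are averaged (buffers are left untouched).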
for ema_p, model_p in zip(ema_model.parameters(), vf.parameters()):
ema_p.lerp_(model_p.data, 1 - decay)
if iteration % config.print_every == 0:
save_path_ = os.path.join(save_path, f"iteration_{iteration}")
os.makedirs(save_path_, exist_ok=True)
if accelerator.is_main_process:
print(f"Saving iteration {iteration} checkpoint...")
# Save EMA model (used for inference) and training state
save_checkpoint(
model=ema_model,
optimizer=optimizer,
scheduler=scheduler,
iteration=iteration,
eval_score=None,
save_path=save_path_,
is_best=False,
)
                # Evaluate with EMA weights, but only at the first and the
                # final checkpoint.
if iteration == 0 or iteration + config.print_every >= config.steps:
# Swap EMA weights into denoiser for evaluation
orig_state = copy.deepcopy(vf.state_dict())
vf.load_state_dict(ema_model.state_dict())
eval_score = test(
valid_sampler, denoiser, accelerator, config, vocab, data_manager,
batch_size=config.batch_size, path_dir=save_path_,
)
# Restore training weights
vf.load_state_dict(orig_state)
if accelerator.is_main_process and eval_score is not None:
tb_writer.add_scalar('eval/score', eval_score, iteration)
# --- Per-iteration loss logging ---
if accelerator.is_main_process:
current_lr = scheduler.get_last_lr()[0]
csv_writer.writerow([
iteration, loss.item(),
loss_dict["loss_expr"].item(),
loss_dict["loss_latent"].item(),
loss_dict["loss_mmd"].item(),
current_lr,
])
if iteration % 100 == 0:
csv_file.flush()
tb_writer.add_scalar('loss/train', loss.item(), iteration)
tb_writer.add_scalar('loss/expr', loss_dict["loss_expr"].item(), iteration)
tb_writer.add_scalar('loss/latent', loss_dict["loss_latent"].item(), iteration)
tb_writer.add_scalar('loss/mmd', loss_dict["loss_mmd"].item(), iteration)
tb_writer.add_scalar('lr', current_lr, iteration)
accelerator.wait_for_everyone()
pbar.update(1)
pbar.set_description(
f"loss: {loss.item():.4f} (expr: {loss_dict['loss_expr'].item():.4f}, "
f"latent: {loss_dict['loss_latent'].item():.4f}, "
f"mmd: {loss_dict['loss_mmd'].item():.4f}), iter: {iteration}"
)
iteration += 1
if iteration >= config.steps:
break
# === Close logging ===
if accelerator.is_main_process:
csv_file.close()
tb_writer.close()