"""
Experiment script: Adaptive Prompt Selection vs. Random Baseline.

Runs both methods on openproblems_donor1.h5ad and compares predictions
against ground truth perturbed cells.

Usage:
    source stack_env/bin/activate
    python code/adaptive_prompt_selection/run_experiment.py \
        --checkpoint data/tutorial-pred-model/bc_large_aligned.ckpt \
        --data data/tutorial-pred-data/openproblems_donor1.h5ad \
        --genelist data/tutorial-pred-model/basecount_1000per_15000max.pkl \
        --output-dir data/adaptive_prompt_results \
        --show-progress
"""
|
|
| from __future__ import annotations |
|
|
| import argparse |
| import json |
| import logging |
| import sys |
| from pathlib import Path |
| from typing import Dict, List, Optional |
|
|
| import anndata as ad |
| import numpy as np |
| import pandas as pd |
| from scipy.sparse import issparse |
| from scipy.stats import pearsonr |
|
|
| |
# Make the project importable when this file is run as a script: put the
# vendored stack sources and this script's own directory on sys.path.
# REPO_ROOT is two directory levels above this file (code/<pkg>/run_experiment.py).
REPO_ROOT = Path(__file__).resolve().parents[2]
STACK_SRC = REPO_ROOT / "code" / "stack" / "src"
THIS_DIR = Path(__file__).resolve().parent
for p in [str(STACK_SRC), str(THIS_DIR)]:
    if p not in sys.path:
        # Prepend so these paths take precedence over any installed copies.
        sys.path.insert(0, p)

# These imports depend on the sys.path manipulation above, so they must
# stay below it rather than in the top-of-file import block.
from stack.model_loading import load_model_from_checkpoint
from adaptive_prompt import adaptive_prompt_selection, run_baseline

LOGGER = logging.getLogger("run_experiment")
|
|
|
|
def compute_metrics(
    pred_adata: ad.AnnData,
    real_adata: ad.AnnData,
) -> Dict[str, float]:
    """Compute evaluation metrics between predicted and real expression.

    Genes are aligned by var index (the intersection, indexed into one
    shared order for both matrices), then the per-gene *mean* expression
    profiles (pseudobulk across cells) are compared:

    - mean_pearson: Pearson correlation between the predicted and real
      per-gene mean expression profiles
    - mean_mse: mean squared error between the two mean profiles
    - mean_mae: mean absolute error between the two mean profiles

    The returned dict also carries the counts n_common_genes,
    n_pred_cells and n_real_cells. If the two objects share no genes,
    the three metrics are NaN.
    """
    pred_X = pred_adata.X
    if issparse(pred_X):
        pred_X = pred_X.toarray()
    pred_X = np.asarray(pred_X, dtype=np.float64)

    real_X = real_adata.X
    if issparse(real_X):
        real_X = real_X.toarray()
    real_X = np.asarray(real_X, dtype=np.float64)

    pred_genes = pred_adata.var_names
    real_genes = real_adata.var_names
    common_genes = pred_genes.intersection(real_genes)

    if len(common_genes) == 0:
        LOGGER.warning("No common genes found between predicted and real data")
        return {"mean_pearson": float("nan"), "mean_mse": float("nan"), "mean_mae": float("nan")}

    # Index both matrices into the SAME gene order. Boolean masking with
    # isin() would keep each object's own column order and silently
    # compare mismatched genes whenever the two var indexes order the
    # shared genes differently.
    pred_X = pred_X[:, pred_genes.get_indexer(common_genes)]
    real_X = real_X[:, real_genes.get_indexer(common_genes)]

    # Pseudobulk profiles: mean expression per gene across all cells.
    pred_mean = pred_X.mean(axis=0)
    real_mean = real_X.mean(axis=0)

    # Pearson correlation is undefined for a constant vector; guard
    # against zero variance on either side.
    if np.std(pred_mean) > 0 and np.std(real_mean) > 0:
        overall_pearson, _ = pearsonr(pred_mean, real_mean)
    else:
        overall_pearson = float("nan")

    mse = float(np.mean((pred_mean - real_mean) ** 2))
    mae = float(np.mean(np.abs(pred_mean - real_mean)))

    return {
        "mean_pearson": float(overall_pearson),
        "mean_mse": mse,
        "mean_mae": mae,
        "n_common_genes": len(common_genes),
        "n_pred_cells": pred_adata.n_obs,
        "n_real_cells": real_adata.n_obs,
    }
|
|
|
|
def run_experiment(
    model,
    full_adata: ad.AnnData,
    genelist_path: str,
    output_dir: Path,
    query_cell_types: List[str],
    perturbations: Optional[List[str]] = None,
    cell_type_col: str = "cell_type",
    perturbation_col: str = "sm_name",
    control_col: str = "control",
    control_name: str = "Dimethyl Sulfoxide",
    gene_name_col: Optional[str] = None,
    batch_size: int = 16,
    num_steps: int = 5,
    mode: str = "mdm",
    random_seed: int = 42,
    show_progress: bool = True,
    # Hyperparameters below are forwarded verbatim to adaptive_prompt_selection.
    n_clusters_per_type: int = 5,
    zoom_ratio: float = 0.25,
    top_ratio: float = 0.2,
    temperature: float = 0.06,
) -> pd.DataFrame:
    """Run adaptive vs baseline comparison across conditions.

    For every (query cell type, perturbation) pair:

    1. Select ground-truth cells (query type + perturbation) from
       ``full_adata``; pairs with no ground truth or no context pool are
       skipped with a warning.
    2. Run ``adaptive_prompt_selection`` and score its prediction against
       the ground truth with :func:`compute_metrics`; the prediction
       (.h5ad) and bandit details (.json) are written to ``output_dir``.
    3. Run ``run_baseline`` with context = drug-perturbed cells of the
       *other* cell types and query = control cells of the query type,
       and score it the same way.

    A failure in either arm is logged (with traceback) and recorded as
    NaN metrics so the sweep over remaining pairs continues. All rows
    are collected into a DataFrame that is also saved as
    ``comparison_results.csv`` in ``output_dir`` and returned.
    """
    output_dir.mkdir(parents=True, exist_ok=True)

    # Default: test every non-control perturbation present in the data.
    if perturbations is None:
        all_perts = full_adata.obs[perturbation_col].unique()
        perturbations = [p for p in all_perts if p != control_name]
        LOGGER.info("Auto-detected %d perturbations: %s", len(perturbations), perturbations)

    results_rows = []

    for query_ct in query_cell_types:
        for drug in perturbations:
            LOGGER.info("=" * 60)
            LOGGER.info("Experiment: query=%s, perturbation=%s", query_ct, drug)
            LOGGER.info("=" * 60)

            # Ground truth: drug-perturbed cells of the query type.
            obs = full_adata.obs
            gt_mask = (obs[cell_type_col] == query_ct) & (obs[perturbation_col] == drug)
            if gt_mask.sum() == 0:
                LOGGER.warning("No ground truth cells for %s + %s, skipping", query_ct, drug)
                continue
            ground_truth = full_adata[gt_mask].copy()

            # Context pool: cells treated with the drug but of OTHER cell
            # types — the query type's own perturbed cells are held out.
            perturbed_pool_mask = (obs[cell_type_col] != query_ct) & (obs[perturbation_col] == drug)
            if perturbed_pool_mask.sum() == 0:
                LOGGER.warning("No perturbed pool for %s (non-%s), skipping", drug, query_ct)
                continue

            # File-name-safe tag for the output artifacts of this pair.
            safe_name = f"{query_ct}_{drug}".replace(" ", "_")

            # --- Adaptive arm -------------------------------------------------
            try:
                adaptive_pred, bandit_details = adaptive_prompt_selection(
                    model=model,
                    full_adata=full_adata,
                    genelist_path=genelist_path,
                    query_cell_type=query_ct,
                    perturbation=drug,
                    control_name=control_name,
                    cell_type_col=cell_type_col,
                    perturbation_col=perturbation_col,
                    control_col=control_col,
                    n_clusters_per_type=n_clusters_per_type,
                    zoom_ratio=zoom_ratio,
                    top_ratio=top_ratio,
                    temperature=temperature,
                    batch_size=batch_size,
                    num_steps=num_steps,
                    mode=mode,
                    gene_name_col=gene_name_col,
                    random_seed=random_seed,
                    show_progress=show_progress,
                )
                adaptive_metrics = compute_metrics(adaptive_pred, ground_truth)
                LOGGER.info("Adaptive metrics: %s", adaptive_metrics)

                adaptive_pred.write_h5ad(output_dir / f"adaptive_{safe_name}.h5ad")

                # default=str stringifies anything json can't serialize
                # natively (e.g. numpy scalars) instead of raising.
                with open(output_dir / f"bandit_details_{safe_name}.json", "w") as f:
                    json.dump(bandit_details, f, indent=2, default=str)

            except Exception as e:
                # Keep sweeping the remaining pairs; record NaNs for this one.
                LOGGER.error("Adaptive failed for %s + %s: %s", query_ct, drug, e, exc_info=True)
                adaptive_metrics = {
                    "mean_pearson": float("nan"),
                    "mean_mse": float("nan"),
                    "mean_mae": float("nan"),
                }

            # --- Baseline arm -------------------------------------------------
            try:
                # Baseline uses the full (unselected) perturbed pool as context
                # and the query type's control cells as query.
                baseline_context = full_adata[perturbed_pool_mask].copy()
                # NOTE(review): assumes control_col holds booleans — confirm
                # against the dataset's obs schema.
                query_mask = (obs[cell_type_col] == query_ct) & (obs[control_col] == True)
                query_cells = full_adata[query_mask].copy()

                baseline_pred = run_baseline(
                    model=model,
                    context_adata=baseline_context,
                    query_adata=query_cells,
                    genelist_path=genelist_path,
                    batch_size=batch_size,
                    num_steps=num_steps,
                    mode=mode,
                    gene_name_col=gene_name_col,
                    random_seed=random_seed,
                    show_progress=show_progress,
                )
                baseline_metrics = compute_metrics(baseline_pred, ground_truth)
                LOGGER.info("Baseline metrics: %s", baseline_metrics)

                baseline_pred.write_h5ad(output_dir / f"baseline_{safe_name}.h5ad")

            except Exception as e:
                LOGGER.error("Baseline failed for %s + %s: %s", query_ct, drug, e, exc_info=True)
                baseline_metrics = {
                    "mean_pearson": float("nan"),
                    "mean_mse": float("nan"),
                    "mean_mae": float("nan"),
                }

            # One summary row per (cell type, drug) pair; counts (e.g.
            # n_common_genes) are deliberately not carried into the table.
            row = {
                "query_cell_type": query_ct,
                "perturbation": drug,
                "n_ground_truth": gt_mask.sum(),
            }
            for key in ["mean_pearson", "mean_mse", "mean_mae"]:
                row[f"adaptive_{key}"] = adaptive_metrics.get(key, float("nan"))
                row[f"baseline_{key}"] = baseline_metrics.get(key, float("nan"))
            results_rows.append(row)

    results_df = pd.DataFrame(results_rows)
    results_df.to_csv(output_dir / "comparison_results.csv", index=False)
    LOGGER.info("Results saved to %s", output_dir / "comparison_results.csv")
    return results_df
|
|
|
|
def build_parser() -> argparse.ArgumentParser:
    """Build the command-line argument parser for the experiment script."""
    p = argparse.ArgumentParser(
        description="Adaptive Prompt Selection experiment for Stack"
    )

    # Required paths.
    p.add_argument("--checkpoint", required=True, help="Path to Stack checkpoint (.ckpt)")
    p.add_argument("--data", required=True, help="Path to full AnnData (.h5ad)")
    p.add_argument("--genelist", required=True, help="Path to gene list pickle")
    p.add_argument("--output-dir", required=True, help="Output directory")

    # Dataset schema (obs column names and control condition).
    p.add_argument("--cell-type-col", default="cell_type")
    p.add_argument("--perturbation-col", default="sm_name")
    p.add_argument("--control-col", default="control")
    p.add_argument("--control-name", default="Dimethyl Sulfoxide")
    p.add_argument("--gene-name-col", default=None)

    # Experiment scope.
    p.add_argument(
        "--query-cell-types",
        nargs="*",
        default=None,
        help="Cell types to use as query (default: all non-T cell types)",
    )
    p.add_argument(
        "--perturbations",
        nargs="*",
        default=None,
        help="Perturbations to test (default: all non-control)",
    )

    # Inference settings.
    p.add_argument("--batch-size", type=int, default=16)
    p.add_argument("--num-steps", type=int, default=5)
    p.add_argument("--mode", default="mdm")
    p.add_argument("--random-seed", type=int, default=42)
    p.add_argument("--show-progress", action="store_true")

    # Adaptive-selection hyperparameters.
    p.add_argument("--n-clusters-per-type", type=int, default=5)
    p.add_argument("--zoom-ratio", type=float, default=0.25)
    p.add_argument("--top-ratio", type=float, default=0.2)
    p.add_argument("--temperature", type=float, default=0.06)

    return p
|
|
|
|
def main(args=None):
    """CLI entry point: load model and data, run the sweep, print a summary.

    Args:
        args: Optional argv list (for programmatic use/testing); when
            None, argparse falls back to ``sys.argv``.
    """
    parser = build_parser()
    parsed = parser.parse_args(args=args)
    logging.basicConfig(level=logging.INFO, format="%(asctime)s %(name)s %(levelname)s %(message)s")

    LOGGER.info("Loading model from %s", parsed.checkpoint)
    model = load_model_from_checkpoint(parsed.checkpoint, model_class="ICL_FinetunedModel")

    LOGGER.info("Loading data from %s", parsed.data)
    full_adata = ad.read_h5ad(parsed.data)

    # Default query cell types: every type whose lowercased name matches
    # none of the keywords below. NOTE(review): "nk" is also excluded,
    # although the --query-cell-types help text only mentions T cells —
    # confirm this is intended.
    if parsed.query_cell_types is None:
        all_types = full_adata.obs[parsed.cell_type_col].unique().tolist()

        t_cell_keywords = ["t cell", "t regulatory", "nk"]
        query_cell_types = [
            ct for ct in all_types
            if not any(kw in ct.lower() for kw in t_cell_keywords)
        ]
        if not query_cell_types:
            # Every type matched a keyword; fall back to the first type
            # so the experiment still runs on something.
            query_cell_types = all_types[:1]
        LOGGER.info("Auto-selected query cell types: %s", query_cell_types)
    else:
        query_cell_types = parsed.query_cell_types

    output_dir = Path(parsed.output_dir)

    results = run_experiment(
        model=model,
        full_adata=full_adata,
        genelist_path=parsed.genelist,
        output_dir=output_dir,
        query_cell_types=query_cell_types,
        perturbations=parsed.perturbations,
        cell_type_col=parsed.cell_type_col,
        perturbation_col=parsed.perturbation_col,
        control_col=parsed.control_col,
        control_name=parsed.control_name,
        gene_name_col=parsed.gene_name_col,
        batch_size=parsed.batch_size,
        num_steps=parsed.num_steps,
        mode=parsed.mode,
        random_seed=parsed.random_seed,
        show_progress=parsed.show_progress,
        n_clusters_per_type=parsed.n_clusters_per_type,
        zoom_ratio=parsed.zoom_ratio,
        top_ratio=parsed.top_ratio,
        temperature=parsed.temperature,
    )

    # Human-readable per-pair results table on stdout.
    print("\n" + "=" * 70)
    print("RESULTS SUMMARY")
    print("=" * 70)
    print(results.to_string(index=False))

    # Per-metric means over all (cell type, perturbation) pairs, plus the
    # adaptive-minus-baseline difference.
    if len(results) > 0:
        print("\n--- Aggregated ---")
        for metric in ["mean_pearson", "mean_mse", "mean_mae"]:
            a_col = f"adaptive_{metric}"
            b_col = f"baseline_{metric}"
            if a_col in results.columns and b_col in results.columns:
                a_mean = results[a_col].mean()
                b_mean = results[b_col].mean()
                print(f"  {metric}: adaptive={a_mean:.4f} baseline={b_mean:.4f} diff={a_mean - b_mean:+.4f}")


if __name__ == "__main__":
    main()
|
|