Datasets:
Formats:
parquet
Languages:
English
Size:
10M - 100M
Tags:
biology
chemistry
drug-discovery
clinical-trials
protein-protein-interaction
gene-essentiality
License:
File size: 7,330 Bytes
#!/usr/bin/env python
"""Train GE baseline models (XGBoost + MLP)."""
import argparse
import json
import logging
import sys
from pathlib import Path
import numpy as np
import pandas as pd
_PROJECT_ROOT = Path(__file__).resolve().parent.parent
logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
logger = logging.getLogger(__name__)
def main():
    """Train and evaluate a GE baseline model (XGBoost or MLP).

    Loads negatives (from the negbiodb DB or a synthetic control source) and
    essential-gene positives, builds the task dataset and feature matrix,
    splits, trains, evaluates on the test split, and writes a JSON metrics
    file to --output-dir.

    Exits with status 1 when no positives are available.
    """
    parser = argparse.ArgumentParser(description="Train GE baseline models")
    parser.add_argument("--db-path", type=str, default=None)
    parser.add_argument("--task", type=str, default="m1", choices=["m1", "m2"])
    parser.add_argument("--split", type=str, default="random")
    parser.add_argument("--neg-source", type=str, default="negbiodb",
                        choices=["negbiodb", "uniform_random", "degree_matched"])
    parser.add_argument("--model", type=str, default="xgboost", choices=["xgboost", "mlp"])
    parser.add_argument("--balanced", action="store_true")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--output-dir", type=str, default=str(_PROJECT_ROOT / "results" / "ge"))
    # Data files for positives
    parser.add_argument("--gene-effect-file", type=str, default=None)
    parser.add_argument("--dependency-file", type=str, default=None)
    args = parser.parse_args()

    # Deferred project imports: keeps `--help` fast and avoids import-time cost
    # when argument parsing fails. (Removed unused `export_ge_negatives`.)
    from negbiodb_depmap.depmap_db import get_connection
    from negbiodb_depmap.export import (
        apply_split_to_dataset,
        build_ge_m1,
        build_ge_m2,
        generate_degree_matched_negatives,
        generate_uniform_random_negatives,
        load_essential_positives,
    )
    from negbiodb_depmap.ge_features import build_feature_matrix

    db_path = Path(args.db_path) if args.db_path else _PROJECT_ROOT / "data" / "negbiodb_depmap.db"
    output_dir = Path(args.output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)

    conn = get_connection(db_path)
    try:
        # Query for DB-curated negatives; shared by the "negbiodb" branch and
        # as the template whose degree distribution "degree_matched" mimics.
        neg_query = """
            SELECT p.gene_id, p.cell_line_id, g.entrez_id, g.gene_symbol,
                   c.model_id, p.gene_degree, p.cell_line_degree,
                   p.mean_gene_effect, p.best_confidence
            FROM gene_cell_pairs p
            JOIN genes g ON p.gene_id = g.gene_id
            JOIN cell_lines c ON p.cell_line_id = c.cell_line_id
        """
        # Load negatives based on source type
        if args.neg_source == "negbiodb":
            neg_df = pd.read_sql_query(neg_query, conn)
        elif args.neg_source == "uniform_random":
            # Match the DB negative count so control and DB runs are comparable.
            n_db_neg = conn.execute("SELECT COUNT(*) FROM gene_cell_pairs").fetchone()[0]
            neg_df = generate_uniform_random_negatives(conn, n_samples=n_db_neg, seed=args.seed)
            logger.info("Generated %d uniform random control negatives", len(neg_df))
        else:  # degree_matched (argparse `choices` guarantees this is the only remaining value)
            db_neg_df = pd.read_sql_query(neg_query, conn)
            neg_df = generate_degree_matched_negatives(conn, db_neg_df, seed=args.seed)
            logger.info("Generated %d degree-matched control negatives", len(neg_df))

        # Load positives; without the input files there is nothing to train on.
        if args.gene_effect_file and args.dependency_file:
            pos_df = load_essential_positives(
                conn,
                Path(args.gene_effect_file),
                Path(args.dependency_file),
            )
        else:
            # Fixed misleading message: no synthetic positives are generated here —
            # the empty frame below triggers the error exit.
            logger.warning("No gene-effect/dependency files provided; no positives available.")
            pos_df = pd.DataFrame(columns=neg_df.columns)
        if len(pos_df) == 0:
            logger.error("No positives loaded. Provide --gene-effect-file and --dependency-file.")
            sys.exit(1)

        # Build dataset for the selected task (m1: binary, m2: multiclass).
        if args.task == "m1":
            dataset = build_ge_m1(
                conn, pos_df, neg_df,
                balanced=args.balanced, seed=args.seed,
            )
        else:
            dataset = build_ge_m2(conn, pos_df, neg_df, seed=args.seed)

        # Build features BEFORE split assignment.
        # NOTE(review): this assumes apply_split_to_dataset only adds a "split"
        # column and preserves row order/count, so the boolean masks below stay
        # aligned with X and y — confirm against its implementation.
        X = build_feature_matrix(conn, dataset)
        y = dataset["label"].values

        # Split
        dataset = apply_split_to_dataset(dataset, args.split, seed=args.seed)
        train_mask = dataset["split"] == "train"
        val_mask = dataset["split"] == "val"
        test_mask = dataset["split"] == "test"
        X_train, y_train = X[train_mask], y[train_mask]
        X_val, y_val = X[val_mask], y[val_mask]
        X_test, y_test = X[test_mask], y[test_mask]
        logger.info("Train: %d, Val: %d, Test: %d", len(y_train), len(y_val), len(y_test))

        # Train the selected model and predict on the held-out test split.
        if args.model == "xgboost":
            from negbiodb_depmap.models.xgboost_ge import train_xgboost_ge, predict_xgboost_ge
            task_type = "binary" if args.task == "m1" else "multiclass"
            model = train_xgboost_ge(X_train, y_train, X_val, y_val, task=task_type, seed=args.seed)
            y_pred, y_prob = predict_xgboost_ge(model, X_test, task=task_type)
        else:
            from negbiodb_depmap.models.mlp_features import train_mlp_ge
            n_classes = 2 if args.task == "m1" else 3
            model, history = train_mlp_ge(X_train, y_train, X_val, y_val, n_classes=n_classes)
            import torch
            # Bug fix: switch to inference mode so dropout/batch-norm layers
            # (if present) do not perturb test predictions.
            model.eval()
            with torch.no_grad():
                logits = model(torch.tensor(X_test, dtype=torch.float32))
                y_prob = torch.softmax(logits, dim=1).numpy()
            y_pred = np.argmax(y_prob, axis=1)

        # Evaluate
        from sklearn.metrics import accuracy_score, f1_score, matthews_corrcoef
        results = {
            "task": args.task,
            "model": args.model,
            "split": args.split,
            "neg_source": args.neg_source,
            "seed": args.seed,
            "n_train": int(len(y_train)),
            "n_val": int(len(y_val)),
            "n_test": int(len(y_test)),
            "accuracy": float(accuracy_score(y_test, y_pred)),
            "macro_f1": float(f1_score(y_test, y_pred, average="macro", zero_division=0)),
            "mcc": float(matthews_corrcoef(y_test, y_pred)),
        }
        # AUROC only for the binary task and only when per-class probabilities
        # (2-D) are available; roc_auc_score raises when the test split holds a
        # single class, which we record as null rather than crashing.
        if args.task == "m1" and y_prob.ndim > 1:
            from sklearn.metrics import roc_auc_score
            try:
                results["auroc"] = float(roc_auc_score(y_test, y_prob[:, 1]))
            except ValueError:
                results["auroc"] = None

        # Save metrics under a run name that encodes the full configuration.
        run_name = f"{args.task}_{args.model}_{args.split}_{args.neg_source}_s{args.seed}"
        result_path = output_dir / f"{run_name}.json"
        with open(result_path, "w") as f:
            json.dump(results, f, indent=2)
        logger.info("Results: %s", json.dumps(results, indent=2))
        logger.info("Saved to %s", result_path)
    finally:
        conn.close()
# Script entry point: run main() only when executed directly, not on import.
if __name__ == "__main__":
    main()