| |
| """ |
| Re-assigns cluster-based splits to account for mutation data counts per protein (via greedy partition). |
| |
| Reads in the previous split parquet file, and then generates a new split column |
| |
Outputs:
- A parquet file with the newly assigned split column.
| |
| Usage: |
| python 06_make_weighted_splits.py \ |
| --input ../data/fireprotdb_cluster_splits.parquet \ |
| --output ../data/fireprotdb_splits_balanced_ddg.parquet \ |
| --ratios 0.8,0.1,0.1 \ |
| --task ddg |
| Notes: |
| - Can balance splits based on the amount of different data types. Useful if the splits based on ddG result in large imbalances for other datatypes. Specified with --task [type] |
| """ |
| from __future__ import annotations |
|
|
| import argparse |
| import pandas as pd |
|
|
# Canonical split names; list order is also the tie-breaking priority when
# the greedy assigner finds equally attractive splits.
SPLITS = ["train", "validation", "test"]
|
|
def assign_weighted_splits(cluster_weights: pd.Series, ratios=(0.8, 0.1, 0.1)) -> pd.DataFrame:
    """Greedily partition clusters into train/validation/test by weight.

    Clusters are visited from heaviest to lightest; each is placed into the
    split whose relative fullness after adding it, ``(current + weight) /
    target``, is smallest. Placing large clusters first leaves the small
    ones free to fine-tune the balance toward the requested ratios.

    Args:
        cluster_weights: Series indexed by cluster id whose values are the
            per-cluster weights (e.g. number of task rows in the cluster).
        ratios: Target (train, validation, test) fractions of the total
            weight; exactly three non-negative numbers.

    Returns:
        DataFrame with columns ``cluster_id`` and ``split``.

    Raises:
        ValueError: If ``ratios`` is not exactly three non-negative numbers.
    """
    if len(ratios) != 3 or any(r < 0 for r in ratios):
        raise ValueError(f"ratios must be 3 non-negative numbers, got {ratios!r}")

    total = float(cluster_weights.sum())
    # Dict insertion order doubles as the tie-breaking priority in min() below
    # (train preferred over validation, validation over test).
    targets = {
        "train": total * ratios[0],
        "validation": total * ratios[1],
        "test": total * ratios[2],
    }
    current = {s: 0.0 for s in targets}
    assignment: dict = {}

    def fullness(split: str, weight: float) -> float:
        """Relative fullness of *split* if *weight* were added to it."""
        # Guard against a zero target (e.g. a 0.0 ratio): divide by 1.0 so the
        # score stays finite; such splits are then heavily penalized.
        target = targets[split] if targets[split] > 0 else 1.0
        return (current[split] + weight) / target

    # Heaviest clusters first.
    for cid, weight in cluster_weights.sort_values(ascending=False).items():
        weight = float(weight)
        chosen = min(targets, key=lambda s: fullness(s, weight))
        assignment[cid] = chosen
        current[chosen] += weight

    out = pd.DataFrame({"cluster_id": list(assignment.keys()), "split": list(assignment.values())})
    print("Target totals:", {k: round(v) for k, v in targets.items()})
    print("Achieved totals:", {k: round(v) for k, v in current.items()})
    return out
|
|
def main():
    """Rebalance cluster-based splits so per-task row counts match target ratios.

    Reads a parquet file with a ``cluster_id`` column, weights each cluster by
    its number of rows carrying the selected task's measurement, greedily
    assigns clusters to train/validation/test, and writes the full table back
    with a fresh ``split`` column.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("--input", required=True, help="Path to the cluster-split parquet file")
    ap.add_argument("--output", required=True, help="Path for the rebalanced output parquet file")
    ap.add_argument("--ratios", default="0.8,0.1,0.1",
                    help="Comma-separated train,validation,test fractions")
    ap.add_argument("--task", choices=["dg", "ddg", "tm", "dtm", "fitness", "binary"],
                    default="ddg", help="Measurement used to weight clusters")
    args = ap.parse_args()

    ratios = tuple(float(x) for x in args.ratios.split(","))

    df = pd.read_parquet(args.input)
    if "cluster_id" not in df.columns:
        raise ValueError("Input must contain cluster_id")

    # Discard any previous assignment; the merge below installs the new one.
    if "split" in df.columns:
        df = df.drop(columns=["split"])

    # Column whose non-null values define the selected task's rows.
    # "binary" is the stabilizing/destabilizing classification task.
    task_column = {
        "dg": "dg",
        "ddg": "ddg",
        "tm": "tm",
        "dtm": "dtm",
        "fitness": "fitness",
        "binary": "stabilizing",
    }
    has_mut = df["mutation"].notna()
    df_task = df[has_mut & df[task_column[args.task]].notna()].copy()

    # Normalize cluster ids to strings; unclustered rows share one sentinel bucket.
    df_task["cluster_id"] = df_task["cluster_id"].astype("string").fillna("NA_CLUSTER")

    # Cluster weight = number of task rows in the cluster.
    w = df_task.groupby("cluster_id").size()

    print(f"Task={args.task} rows: {len(df_task):,}")
    print(f"Task clusters: {len(w):,}")
    print("Top 10 clusters by rows:")
    print(w.sort_values(ascending=False).head(10))

    assign = assign_weighted_splits(w, ratios=ratios)

    # Apply the assignment to every row; clusters that contributed no task
    # rows (and thus got no assignment) default to train.
    df["cluster_id"] = df["cluster_id"].astype("string").fillna("NA_CLUSTER")
    df = df.merge(assign, on="cluster_id", how="left")
    df["split"] = df["split"].fillna("train")

    df.to_parquet(args.output, index=False)
    print("Wrote:", args.output)

    # Report the split distribution over the task rows that were balanced.
    df_task_out = df_task.merge(assign, on="cluster_id", how="left")
    df_task_out["split"] = df_task_out["split"].fillna("train")
    print("Task rows by split:")
    print(df_task_out["split"].value_counts())
|
|
# Run the CLI only when executed as a script (not when imported as a module).
if __name__ == "__main__":
    main()
|
|