"""Regenerate ML artifacts inside Docker to ensure pickle compatibility."""
import numpy as np
import pandas as pd
import json
import joblib
import os
import sys
import warnings
warnings.filterwarnings("ignore")
np.random.seed(42)
_script_dir = os.path.dirname(os.path.abspath(__file__)) if "__file__" in dir() else os.getcwd()
sys.path.insert(0, _script_dir)
from app.services.generators import GENERATORS
from app.services.feature_engine import engineer_features
from app.config import FEATURE_COLS, START_DATE, DAYS
from sklearn.preprocessing import RobustScaler
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.metrics import silhouette_score
from xgboost import XGBClassifier
import umap
ARTIFACTS_DIR = os.path.join(_script_dir, "app", "artifacts")
os.makedirs(ARTIFACTS_DIR, exist_ok=True)
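# All fitted transformers, models, and metadata below are written to app/artifacts/,
# presumably so the running service can load them at startup instead of retraining.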
# 1. Generate dataset
print("Generating dataset...")
counts = {
"normal_salaried_employee": 600, "normal_freelancer": 350, "normal_student": 450,
"normal_retiree": 350, "normal_small_business": 300, "normal_high_net_worth": 200,
"normal_young_professional": 400, "normal_family_household": 350,
"mule_rapid_passthrough": 130, "mule_structuring_smurfing": 100,
"mule_funnel_collector": 90, "mule_dormant_burst": 110,
"mule_recruit_escalation": 120, "mule_round_trip": 100,
"mule_crypto_cashout": 120, "mule_layering_chain": 110,
"mule_micro_structuring": 130, "mule_ghost_payroll": 140,
"mule_onboarding_burst": 120, "mule_device_mule": 110,
}
all_records = []
for btype, count in counts.items():
print(f" {btype}: {count}")
all_records += GENERATORS[btype](count)
df = pd.DataFrame(all_records)
df["timestamp"] = pd.to_datetime(df["timestamp"])
df = df.sort_values("timestamp").reset_index(drop=True)
df["day_of_week"] = df["timestamp"].dt.dayofweek
df["hour"] = df["timestamp"].dt.hour
df["is_weekend"] = df["day_of_week"].isin([5, 6]).astype(int)
df["category"] = df["label"].apply(lambda x: "mule" if x.startswith("mule_") else "normal")
print(f" Total: {len(df):,} txns, {df['account_id'].nunique():,} accounts")
# 2. Feature engineering
print("Engineering features...")
features_df = engineer_features(df)
label_cat = df.groupby("account_id").agg(label=("label", "first"), category=("category", "first"))
features_df = features_df.join(label_cat)
feature_cols = [c for c in FEATURE_COLS if c in features_df.columns]
X = features_df[feature_cols].fillna(0).values
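# Build the account-level feature matrix in the fixed FEATURE_COLS order; missing
# engineered values are zero-filled so the scaler and models never see NaNs.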
# 3. Scaler
print("Fitting scaler...")
scaler = RobustScaler()
X_scaled = scaler.fit_transform(X)
X_scaled = np.nan_to_num(X_scaled, nan=0.0, posinf=0.0, neginf=0.0)
joblib.dump(scaler, os.path.join(ARTIFACTS_DIR, "scaler.joblib"))
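# RobustScaler centers each feature on its median and scales by the IQR, so heavy-tailed
# transaction features don't dominate; any residual NaN/inf was clamped to 0 above
# before the scaled matrix feeds the projection and clustering steps.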
# 4. PCA
print("Fitting PCA...")
pca2 = PCA(n_components=2)
pca2.fit(X_scaled)
joblib.dump(pca2, os.path.join(ARTIFACTS_DIR, "pca2.joblib"))
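# The 2-component PCA is persisted alongside UMAP, presumably as a cheap linear
# projection the service can apply to new accounts without re-running UMAP.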
# 5. UMAP
print("Fitting UMAP...")
reducer = umap.UMAP(n_components=2, n_neighbors=30, min_dist=0.3, random_state=42)
X_umap = reducer.fit_transform(X_scaled)
joblib.dump(reducer, os.path.join(ARTIFACTS_DIR, "umap_reducer.joblib"))
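# n_neighbors=30 and min_dist=0.3 favour a smoother global layout over tight local
# clumps; fixing random_state keeps the embedding reproducible (recent umap-learn
# versions force single-threaded fitting when a seed is set).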
# 6. KMeans
print("Fitting KMeans...")
K_range = range(2, 16)
sil_scores = []
for k in K_range:
    km = KMeans(n_clusters=k, n_init=10, random_state=42)
    labs = km.fit_predict(X_scaled)
    sil_scores.append(silhouette_score(X_scaled, labs))
best_k = list(K_range)[np.argmax(sil_scores)]
print(f" Best k = {best_k}")
kmeans = KMeans(n_clusters=best_k, n_init=10, random_state=42)
kmeans.fit(X_scaled)
joblib.dump(kmeans, os.path.join(ARTIFACTS_DIR, "kmeans.joblib"))
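# k is chosen as the value in 2..15 with the highest silhouette score on the scaled
# matrix; KMeans is then refit at that k and persisted.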
# 7. XGBoost classifier
print("Training XGBoost classifier...")
y_binary = (features_df["category"] == "mule").astype(int).values
classifier = XGBClassifier(
    n_estimators=300, max_depth=5, learning_rate=0.1,
    subsample=0.8, colsample_bytree=0.8,
    scale_pos_weight=sum(y_binary == 0) / max(sum(y_binary == 1), 1),
    random_state=42, use_label_encoder=False, eval_metric="logloss",
)
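# scale_pos_weight = (#normal / #mule) counteracts the class imbalance; the max(..., 1)
# guards against division by zero if no mule accounts were generated.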
classifier.fit(X, y_binary)
print(f" Train accuracy: {classifier.score(X, y_binary):.3f}")
joblib.dump(classifier, os.path.join(ARTIFACTS_DIR, "classifier.joblib"))
joblib.dump(classifier, os.path.join(ARTIFACTS_DIR, "surrogate_model.joblib"))
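# The same fitted booster is saved under both names; surrogate_model.joblib is
# presumably the file the explanation layer expects to load.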
bg_indices = np.random.RandomState(42).choice(len(X), size=min(200, len(X)), replace=False)
np.save(os.path.join(ARTIFACTS_DIR, "shap_background.npy"), X[bg_indices])
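# A fixed random sample of up to 200 (unscaled) feature rows is stored as the SHAP
# background dataset, i.e. the reference distribution used for expected values.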
# 8. Cluster metadata
print("Computing metadata...")
cluster_labels = kmeans.predict(X_scaled)
features_df["cluster"] = cluster_labels
features_df["umap_1"] = X_umap[:, 0]
features_df["umap_2"] = X_umap[:, 1]
normal_mask = features_df["category"] == "normal"
normal_centroid = X_scaled[normal_mask.values].mean(axis=0).tolist()
normal_distances = np.linalg.norm(X_scaled[normal_mask.values] - np.array(normal_centroid), axis=1)
max_normal_distance = float(np.percentile(normal_distances, 95))
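# Distances are measured from the centroid of normal accounts in scaled feature space;
# the 95th percentile is stored so downstream scoring can presumably treat anything
# beyond this typical "normal radius" as anomalous.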
clusters_meta = {}
for c in range(best_k):
    c_mask = features_df["cluster"] == c
    c_data = features_df[c_mask]
    mule_pct = float((c_data["category"] == "mule").mean())
    clusters_meta[str(c)] = {"size": int(c_mask.sum()), "mule_pct": round(mule_pct, 4),
                             "dominant": "mule" if mule_pct > 0.5 else "normal"}
cluster_metadata = {"best_k": best_k, "clusters": clusters_meta,
"normal_centroid_scaled": normal_centroid,
"max_normal_distance": max_normal_distance, "feature_cols": feature_cols}
with open(os.path.join(ARTIFACTS_DIR, "cluster_metadata.json"), "w") as f:
    json.dump(cluster_metadata, f, indent=2)
# 9. Baseline
normal_features = features_df[normal_mask][feature_cols]
baseline = {"means": normal_features.mean().to_dict(), "stds": normal_features.std().fillna(0).to_dict(),
"mins": features_df[feature_cols].min().to_dict(), "maxs": features_df[feature_cols].max().to_dict()}
for key in baseline:
    baseline[key] = {k: float(v) for k, v in baseline[key].items()}
with open(os.path.join(ARTIFACTS_DIR, "baseline_features.json"), "w") as f:
    json.dump(baseline, f, indent=2)
# 10. UMAP coordinates
umap_points = [{"x": round(float(row["umap_1"]), 4), "y": round(float(row["umap_2"]), 4),
"category": row["category"], "label": row["label"]}
for _, row in features_df.iterrows()]
with open(os.path.join(ARTIFACTS_DIR, "existing_umap_coords.json"), "w") as f:
    json.dump(umap_points, f)
# 11. Transactions CSV
print("Saving CSV...")
df.to_csv(os.path.join(ARTIFACTS_DIR, "synthetic_transactions.csv"), index=False)
print("Done!")