erinkhoo committed
Commit 3bfc8f1 · verified
1 Parent(s): 1fdfeda

Complete benchmark_quick.py with 5 benchmark suites

Files changed (1)
  1. benchmark_quick.py +437 -1
benchmark_quick.py CHANGED
@@ -1 +1,437 @@
- xxx
+ #!/usr/bin/env python3
+ """
+ Quick benchmark: HyperOpt-GBT (Python) vs XGBoost vs LightGBM vs CatBoost
+
+ Runs on synthetic datasets to validate accuracy and speed of the core
+ innovations: GOSS, quantile sketch binning, and histogram-based splits.
+
+ Usage:
+     pip install hyperopt-gbt xgboost lightgbm catboost scikit-learn
+     python benchmark_quick.py
+ """
+
+ import time
+ import warnings
+ import numpy as np
+ from sklearn.datasets import make_classification, make_regression
+ from sklearn.model_selection import train_test_split
+ from sklearn.metrics import roc_auc_score, root_mean_squared_error
+
+ warnings.filterwarnings("ignore")
+
+ # ── Helpers ──────────────────────────────────────────────────────────────────
+
+ def timer(func):
+     """Time a callable, return (result, elapsed_seconds)."""
+     t0 = time.perf_counter()
+     result = func()
+     return result, time.perf_counter() - t0
+
+
+ def print_table(headers, rows):
+     """Pretty-print a markdown-style table."""
+     widths = [max(len(h), max((len(str(r[i])) for r in rows), default=0))
+               for i, h in enumerate(headers)]
+     fmt = " | ".join(f"{{:<{w}}}" for w in widths)
+     sep = "-|-".join("-" * w for w in widths)
+     print(fmt.format(*headers))
+     print(sep)
+     for row in rows:
+         print(fmt.format(*[str(c) for c in row]))
+     print()
+
+
+ # ── Optional imports (graceful degradation) ──────────────────────────────────
+
+ def try_import(name):
+     try:
+         return __import__(name)
+     except ImportError:
+         return None
+
+ xgb = try_import("xgboost")
+ lgb = try_import("lightgbm")
+ cb = try_import("catboost")
+
+ # Always import our library
+ from hyperopt_gbt import HyperOptGradientBoostedClassifier, HyperOptGradientBoostedRegressor
+
+ # Try Rust backend
+ try:
+     import rust_gbt as rgbt
+     HAS_RUST = True
+ except ImportError:
+     HAS_RUST = False
+
+
+ # =============================================================================
+ # BENCHMARK 1: Large-scale binary classification
+ # =============================================================================
+
+ def benchmark_classification(n_train=80_000, n_test=20_000, n_features=30,
+                              n_trees=50, seed=42):
+     print("=" * 72)
+     print(f"BENCHMARK 1: Binary Classification ({n_train:,} train / {n_test:,} test / "
+           f"{n_features} features / {n_trees} trees)")
+     print("=" * 72)
+
+     rng = np.random.RandomState(seed)
+
+     # Nonlinear synthetic data
+     X = rng.randn(n_train + n_test, n_features)
+     signal = (X[:, 0] * X[:, 1]
+               + np.sin(X[:, 2]) * 2
+               + (X[:, 3] > 0).astype(float) * 1.5
+               + rng.randn(n_train + n_test) * 0.5)
+     y = (signal > np.median(signal)).astype(float)
+
+     X_train, X_test = X[:n_train], X[n_train:]
+     y_train, y_test = y[:n_train], y[n_train:]
+
+     rows = []
+
+     # ── HyperOpt-GBT (Python, GOSS) ─────────────────────────────────────────
+     clf = HyperOptGradientBoostedClassifier(
+         n_estimators=n_trees, learning_rate=0.1, max_depth=6,
+         use_goss=True, goss_a=0.2, goss_b=0.1,
+         n_bins=255, binning="uniform", random_state=seed,
+     )
+     _, train_time = timer(lambda: clf.fit(X_train, y_train))
+     proba, pred_time = timer(lambda: clf.predict_proba(X_test)[:, 1])
+     auc = roc_auc_score(y_test, proba)
+     rows.append(["HyperOpt-GBT (GOSS)", f"{auc:.4f}", f"{train_time:.2f}s", f"{pred_time*1e3:.0f}ms"])
+
+     # ── HyperOpt-GBT (Python, no GOSS) ──────────────────────────────────────
+     clf2 = HyperOptGradientBoostedClassifier(
+         n_estimators=n_trees, learning_rate=0.1, max_depth=6,
+         use_goss=False, n_bins=255, binning="uniform", random_state=seed,
+     )
+     _, train_time = timer(lambda: clf2.fit(X_train, y_train))
+     proba2, pred_time = timer(lambda: clf2.predict_proba(X_test)[:, 1])
+     auc2 = roc_auc_score(y_test, proba2)
+     rows.append(["HyperOpt-GBT (no GOSS)", f"{auc2:.4f}", f"{train_time:.2f}s", f"{pred_time*1e3:.0f}ms"])
+
+     # ── HyperOpt-GBT (quantile sketch) ──────────────────────────────────────
+     clf3 = HyperOptGradientBoostedClassifier(
+         n_estimators=n_trees, learning_rate=0.1, max_depth=6,
+         use_goss=True, goss_a=0.2, goss_b=0.1,
+         n_bins=255, binning="quantile_sketch", random_state=seed,
+     )
+     _, train_time = timer(lambda: clf3.fit(X_train, y_train))
+     proba3, pred_time = timer(lambda: clf3.predict_proba(X_test)[:, 1])
+     auc3 = roc_auc_score(y_test, proba3)
+     rows.append(["HyperOpt-GBT (quantile)", f"{auc3:.4f}", f"{train_time:.2f}s", f"{pred_time*1e3:.0f}ms"])
+
+     # ── Rust backend ─────────────────────────────────────────────────────────
+     if HAS_RUST:
+         model = rgbt.PyRustGBT()
+         _, train_time = timer(lambda: model.fit(
+             X_train, y_train, n_estimators=n_trees, learning_rate=0.1,
+             max_depth=6, n_bins=255, use_goss=True, goss_a=0.2, goss_b=0.1,
+             task="classification", verbose=False,
+         ))
+         proba_r, pred_time = timer(lambda: model.predict_proba(X_test))
+         auc_r = roc_auc_score(y_test, np.asarray(proba_r))
+         rows.append(["Rust-GBT (GOSS)", f"{auc_r:.4f}", f"{train_time:.2f}s", f"{pred_time*1e3:.0f}ms"])
+
+     # ── XGBoost ──────────────────────────────────────────────────────────────
+     if xgb:
+         xgb_clf = xgb.XGBClassifier(
+             n_estimators=n_trees, learning_rate=0.1, max_depth=6,
+             tree_method="hist", random_state=seed, verbosity=0,
+         )
+         _, train_time = timer(lambda: xgb_clf.fit(X_train, y_train))
+         proba_x, pred_time = timer(lambda: xgb_clf.predict_proba(X_test)[:, 1])
+         auc_x = roc_auc_score(y_test, proba_x)
+         rows.append(["XGBoost (hist)", f"{auc_x:.4f}", f"{train_time:.2f}s", f"{pred_time*1e3:.0f}ms"])
+
+     # ── LightGBM ─────────────────────────────────────────────────────────────
+     if lgb:
+         lgb_clf = lgb.LGBMClassifier(
+             n_estimators=n_trees, learning_rate=0.1, max_depth=6,
+             random_state=seed, verbose=-1,
+         )
+         _, train_time = timer(lambda: lgb_clf.fit(X_train, y_train))
+         proba_l, pred_time = timer(lambda: lgb_clf.predict_proba(X_test)[:, 1])
+         auc_l = roc_auc_score(y_test, proba_l)
+         rows.append(["LightGBM", f"{auc_l:.4f}", f"{train_time:.2f}s", f"{pred_time*1e3:.0f}ms"])
+
+     # ── CatBoost ─────────────────────────────────────────────────────────────
+     if cb:
+         cb_clf = cb.CatBoostClassifier(
+             iterations=n_trees, learning_rate=0.1, depth=6,
+             random_seed=seed, verbose=0,
+         )
+         _, train_time = timer(lambda: cb_clf.fit(X_train, y_train))
+         proba_c, pred_time = timer(lambda: cb_clf.predict_proba(X_test)[:, 1])
+         auc_c = roc_auc_score(y_test, proba_c)
+         rows.append(["CatBoost", f"{auc_c:.4f}", f"{train_time:.2f}s", f"{pred_time*1e3:.0f}ms"])
+
+     print()
+     print_table(["Library", "AUC", "Train Time", "Predict Time"], rows)
+
+
+ # =============================================================================
+ # BENCHMARK 2: GOSS ablation
+ # =============================================================================
+
+ def benchmark_goss_ablation(n_train=80_000, n_test=20_000, n_features=30,
+                             n_trees=50, seed=42):
+     print("=" * 72)
+     print("BENCHMARK 2: GOSS Ablation")
+     print("=" * 72)
+
+     rng = np.random.RandomState(seed)
+     X = rng.randn(n_train + n_test, n_features)
+     signal = (X[:, 0] * X[:, 1]
+               + np.sin(X[:, 2]) * 2
+               + (X[:, 3] > 0).astype(float) * 1.5
+               + rng.randn(n_train + n_test) * 0.5)
+     y = (signal > np.median(signal)).astype(float)
+     X_train, X_test = X[:n_train], X[n_train:]
+     y_train, y_test = y[:n_train], y[n_train:]
+
+     configs = [
+         ("Full data (no GOSS)", False, 0.0, 0.0, "100%"),
+         ("GOSS a=0.3, b=0.1", True, 0.3, 0.1, "40%"),
+         ("GOSS a=0.2, b=0.1", True, 0.2, 0.1, "30%"),
+         ("GOSS a=0.1, b=0.05", True, 0.1, 0.05, "15%"),
+     ]
+
+     baseline_time = None
+     rows = []
+
+     for name, use_goss, a, b, data_pct in configs:
+         clf = HyperOptGradientBoostedClassifier(
+             n_estimators=n_trees, learning_rate=0.1, max_depth=6,
+             use_goss=use_goss, goss_a=a, goss_b=b,
+             n_bins=255, random_state=seed,
+         )
+         _, train_time = timer(lambda: clf.fit(X_train, y_train))
+         proba = clf.predict_proba(X_test)[:, 1]
+         auc = roc_auc_score(y_test, proba)
+
+         if baseline_time is None:
+             baseline_time = train_time
+         speedup = baseline_time / train_time if train_time > 0 else float("inf")
+
+         rows.append([name, data_pct, f"{auc:.4f}", f"{train_time:.2f}s", f"{speedup:.1f}x"])
+
+     print()
+     print_table(["Configuration", "Data Used", "AUC", "Train Time", "Speedup"], rows)
+
+
+ # =============================================================================
+ # BENCHMARK 3: Quantile sketch vs uniform on skewed data
+ # =============================================================================
+
+ def benchmark_quantile_sketch(n_train=40_000, n_test=10_000, n_trees=50, seed=42):
+     print("=" * 72)
+     print("BENCHMARK 3: Quantile Sketch vs Uniform Binning (Skewed Data)")
+     print("=" * 72)
+
+     rng = np.random.RandomState(seed)
+     n_total = n_train + n_test
+     n_features = 10
+
+     # Create highly skewed features: 85% in [0, 0.5], 15% outliers at ~50-100
+     X = np.zeros((n_total, n_features))
+     for f in range(n_features):
+         mask = rng.rand(n_total) < 0.85
+         X[mask, f] = rng.exponential(0.1, mask.sum())
+         X[~mask, f] = rng.uniform(50, 100, (~mask).sum())
+
+     # Target depends on the dense region
+     signal = X[:, 0] * 3 + np.sin(X[:, 1] * 10) + (X[:, 2] > 0.3).astype(float) * 2
+     y = (signal > np.median(signal)).astype(float)
+
+     X_train, X_test = X[:n_train], X[n_train:]
+     y_train, y_test = y[:n_train], y[n_train:]
+
+     rows = []
+     for n_bins in [31, 63, 127, 255]:
+         # Uniform
+         clf_u = HyperOptGradientBoostedClassifier(
+             n_estimators=n_trees, learning_rate=0.1, max_depth=6,
+             n_bins=n_bins, binning="uniform", use_goss=False, random_state=seed,
+         )
+         clf_u.fit(X_train, y_train)
+         auc_u = roc_auc_score(y_test, clf_u.predict_proba(X_test)[:, 1])
+
+         # Quantile sketch
+         clf_q = HyperOptGradientBoostedClassifier(
+             n_estimators=n_trees, learning_rate=0.1, max_depth=6,
+             n_bins=n_bins, binning="quantile_sketch", use_goss=False, random_state=seed,
+         )
+         clf_q.fit(X_train, y_train)
+         auc_q = roc_auc_score(y_test, clf_q.predict_proba(X_test)[:, 1])
+
+         gain = auc_q - auc_u
+         rows.append([str(n_bins), f"{auc_u:.4f}", f"{auc_q:.4f}", f"{gain:+.4f}"])
+
+     print()
+     print_table(["Bins", "Uniform AUC", "Quantile AUC", "Gain"], rows)
+
+
+ # =============================================================================
+ # BENCHMARK 4: Regression (California Housing style)
+ # =============================================================================
+
+ def benchmark_regression(n_train=20_000, n_test=5_000, n_features=8,
+                          n_trees=100, seed=42):
+     print("=" * 72)
+     print(f"BENCHMARK 4: Regression ({n_train:,} train / {n_test:,} test)")
+     print("=" * 72)
+
+     X, y = make_regression(
+         n_samples=n_train + n_test,
+         n_features=n_features,
+         n_informative=6,
+         noise=10.0,
+         random_state=seed,
+     )
+     X_train, X_test, y_train, y_test = train_test_split(
+         X, y, test_size=n_test, random_state=seed
+     )
+
+     rows = []
+
+     # HyperOpt-GBT
+     reg = HyperOptGradientBoostedRegressor(
+         n_estimators=n_trees, learning_rate=0.1, max_depth=6,
+         use_goss=True, goss_a=0.2, goss_b=0.1,
+         n_bins=255, random_state=seed,
+     )
+     _, train_time = timer(lambda: reg.fit(X_train, y_train))
+     pred, pred_time = timer(lambda: reg.predict(X_test))
+     rmse = root_mean_squared_error(y_test, pred)
+     rows.append(["HyperOpt-GBT (GOSS)", f"{rmse:.2f}", f"{train_time:.2f}s", f"{pred_time*1e3:.0f}ms"])
+
+     # HyperOpt-GBT quantile
+     reg_q = HyperOptGradientBoostedRegressor(
+         n_estimators=n_trees, learning_rate=0.1, max_depth=6,
+         use_goss=True, goss_a=0.2, goss_b=0.1,
+         n_bins=255, binning="quantile_sketch", random_state=seed,
+     )
+     _, train_time = timer(lambda: reg_q.fit(X_train, y_train))
+     pred_q, pred_time = timer(lambda: reg_q.predict(X_test))
+     rmse_q = root_mean_squared_error(y_test, pred_q)
+     rows.append(["HyperOpt-GBT (quantile)", f"{rmse_q:.2f}", f"{train_time:.2f}s", f"{pred_time*1e3:.0f}ms"])
+
+     if xgb:
+         xgb_reg = xgb.XGBRegressor(
+             n_estimators=n_trees, learning_rate=0.1, max_depth=6,
+             tree_method="hist", random_state=seed, verbosity=0,
+         )
+         _, train_time = timer(lambda: xgb_reg.fit(X_train, y_train))
+         pred_x, pred_time = timer(lambda: xgb_reg.predict(X_test))
+         rmse_x = root_mean_squared_error(y_test, pred_x)
+         rows.append(["XGBoost", f"{rmse_x:.2f}", f"{train_time:.2f}s", f"{pred_time*1e3:.0f}ms"])
+
+     if lgb:
+         lgb_reg = lgb.LGBMRegressor(
+             n_estimators=n_trees, learning_rate=0.1, max_depth=6,
+             random_state=seed, verbose=-1,
+         )
+         _, train_time = timer(lambda: lgb_reg.fit(X_train, y_train))
+         pred_l, pred_time = timer(lambda: lgb_reg.predict(X_test))
+         rmse_l = root_mean_squared_error(y_test, pred_l)
+         rows.append(["LightGBM", f"{rmse_l:.2f}", f"{train_time:.2f}s", f"{pred_time*1e3:.0f}ms"])
+
+     if cb:
+         cb_reg = cb.CatBoostRegressor(
+             iterations=n_trees, learning_rate=0.1, depth=6,
+             random_seed=seed, verbose=0,
+         )
+         _, train_time = timer(lambda: cb_reg.fit(X_train, y_train))
+         pred_c, pred_time = timer(lambda: cb_reg.predict(X_test))
+         rmse_c = root_mean_squared_error(y_test, pred_c)
+         rows.append(["CatBoost", f"{rmse_c:.2f}", f"{train_time:.2f}s", f"{pred_time*1e3:.0f}ms"])
+
+     print()
+     print_table(["Library", "RMSE", "Train Time", "Predict Time"], rows)
+
+
+ # =============================================================================
+ # BENCHMARK 5: Inference engine comparison
+ # =============================================================================
+
+ def benchmark_inference_engines(n_train=20_000, n_test=50_000, n_trees=50, seed=42):
+     print("=" * 72)
+     print(f"BENCHMARK 5: Inference Engine Comparison ({n_test:,} test samples)")
+     print("=" * 72)
+
+     from hyperopt_gbt.inference import (
+         compile_inference_engine,
+         NaiveEngine,
+         FlatTreeEngine,
+         BatchedSIMDEngine,
+         QuickScorerEngine,
+     )
+
+     rng = np.random.RandomState(seed)
+     X = rng.randn(n_train + n_test, 20)
+     signal = X[:, 0] * X[:, 1] + np.sin(X[:, 2]) * 2 + rng.randn(n_train + n_test) * 0.3
+     y = (signal > np.median(signal)).astype(float)
+     X_train, X_test = X[:n_train], X[n_train:]
+     y_train, y_test = y[:n_train], y[n_train:]
+
+     clf = HyperOptGradientBoostedClassifier(
+         n_estimators=n_trees, learning_rate=0.1, max_depth=6,
+         n_bins=255, random_state=seed,
+     )
+     clf.fit(X_train, y_train)
+
+     # Bin test data
+     X_test_binned = clf._transform_to_bins(X_test)
+
+     rows = []
+     engines = [
+         ("Naive", NaiveEngine(clf.trees_)),
+         ("Flat Tree", FlatTreeEngine(clf.trees_, clf.n_bins)),
+         ("Batched SIMD", BatchedSIMDEngine(clf.trees_, clf.n_bins)),
+         ("QuickScorer", QuickScorerEngine(clf.trees_, clf.n_bins)),
+     ]
+
+     for name, engine in engines:
+         # Warmup
+         _ = engine.predict(X_test_binned[:100])
+
+         _, elapsed = timer(lambda: engine.predict(X_test_binned))
+         throughput = n_test / elapsed
+         rows.append([name, f"{elapsed*1e3:.1f}ms", f"{throughput:,.0f} samples/s"])
+
+     # sklearn predict for reference
+     _, elapsed = timer(lambda: clf.predict_proba(X_test))
+     throughput = n_test / elapsed
+     rows.append(["sklearn predict_proba", f"{elapsed*1e3:.1f}ms", f"{throughput:,.0f} samples/s"])
+
+     print()
+     print_table(["Engine", "Latency", "Throughput"], rows)
+
+
+ # =============================================================================
+ # MAIN
+ # =============================================================================
+
+ if __name__ == "__main__":
+     print()
+     print("╔══════════════════════════════════════════════════════════════════════╗")
+     print("║ HyperOpt-GBT — Quick Benchmark Suite ║")
+     print("╠══════════════════════════════════════════════════════════════════════╣")
+     print(f"║ Rust backend: {'AVAILABLE' if HAS_RUST else 'not found (pip install maturin && cd rust_gbt && maturin develop --release)':55s} ║")
+     print(f"║ XGBoost: {'AVAILABLE' if xgb else 'not installed':55s} ║")
+     print(f"║ LightGBM: {'AVAILABLE' if lgb else 'not installed':55s} ║")
+     print(f"║ CatBoost: {'AVAILABLE' if cb else 'not installed':55s} ║")
+     print("╚══════════════════════════════════════════════════════════════════════╝")
+     print()
+
+     benchmark_classification()
+     benchmark_goss_ablation()
+     benchmark_quantile_sketch()
+     benchmark_regression()
+     benchmark_inference_engines()
+
+     print("=" * 72)
+     print("All benchmarks complete.")
+     print("=" * 72)