Oguzz07 committed
Commit 8744a77 · verified · 1 Parent(s): 2269411

Update to GBM-500 best model (70.5% top-3 hit rate)

Files changed (1)
  1. improve_model.py +248 -0
improve_model.py ADDED
@@ -0,0 +1,248 @@
"""
Data augmentation and model improvement:
1. Variable subsampling: drop random subsets of variables from existing datasets
2. Hyperparameter tuning for the meta-learner
3. Pairwise ranking approach (not yet wired into the pipeline; sketched below)
"""
import os
import sys
import numpy as np
import pandas as pd
import logging
import warnings

warnings.filterwarnings('ignore')
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s')
logger = logging.getLogger(__name__)

sys.path.insert(0, '/app')
from causal_selection.data.generator import (
    load_bn_model, get_true_dag_adjmat, dag_to_cpdag, sample_dataset,
    ALL_NETWORKS, get_network_tier
)
from causal_selection.discovery.algorithms import run_algorithm
from causal_selection.discovery.evaluator import evaluate_algorithm_result
from causal_selection.features.extractor import extract_all_features, FEATURE_NAMES
from causal_selection.meta_learner.trainer import (
    load_meta_dataset, train_meta_learner, evaluate_lono_cv,
    get_feature_importance, save_model, ALGO_NAMES, RESULTS_DIR
)

def augment_variable_subsampling(networks=None, n_augments_per_net=3,
                                 drop_frac=0.3, n_samples=1000, seed_base=100):
    """Create augmented datasets by dropping random subsets of variables.

    Each draw yields a new 'virtual network' with different structural
    properties. Networks with fewer than 8 variables are skipped (not enough
    remaining variables). Note that the CPDAG of the induced sub-DAG is only
    a proxy ground truth: marginalizing out variables can introduce latent
    confounding that the subgraph does not capture.
    """
    if networks is None:
        networks = [n for n in ALL_NETWORKS if n not in ['cancer', 'earthquake', 'survey']]  # skip tiny nets

    augmented_features = []
    augmented_shds = []
    augmented_nshds = []
    augmented_configs = []

    for net_name in networks:
        try:
            model = load_bn_model(net_name)
            true_dag, node_names = get_true_dag_adjmat(model)
            n_vars = len(node_names)

            if n_vars < 8:
                logger.info(f"Skipping {net_name} ({n_vars} vars): too few for subsampling")
                continue

            # e.g. an 8-variable network with drop_frac=0.3 keeps max(5, int(8 * 0.7)) = 5 variables
            n_to_keep = max(5, int(n_vars * (1 - drop_frac)))
            tier = get_network_tier(net_name)
            timeout = {'small': 60, 'medium': 120, 'large': 180}[tier]

            for aug_idx in range(n_augments_per_net):
                rng = np.random.RandomState(seed_base + aug_idx)

                # Select a random subset of variables to keep
                keep_idx = sorted(rng.choice(n_vars, n_to_keep, replace=False))

                # Subsample the DAG and recompute the CPDAG ground truth
                sub_dag = true_dag[np.ix_(keep_idx, keep_idx)]
                sub_cpdag = dag_to_cpdag(sub_dag)
                sub_names = [node_names[i] for i in keep_idx]

                # Sample from the full network, then keep only the selected columns
                df_full = sample_dataset(model, n_samples, seed=seed_base + aug_idx)
                df_sub = df_full[sub_names].copy()
                df_sub.columns = [f'X{i}' for i in range(len(sub_names))]

                logger.info(f"  Augment {net_name} #{aug_idx}: {n_vars}->{n_to_keep} vars")

                # Extract meta-features from the subsampled dataset
                features = extract_all_features(df_sub, n_probe_triplets=50)

                # Run every algorithm on the subsampled data
                shd_row = {}
                nshd_row = {}

                for algo_name in ALGO_NAMES:
                    result = run_algorithm(algo_name, df_sub, timeout_sec=timeout)
                    metrics = evaluate_algorithm_result(result, sub_cpdag)
                    shd_row[algo_name] = metrics['shd']
                    nshd_row[algo_name] = metrics['normalized_shd']

                    s = metrics['status']
                    if s == 'success':
                        logger.info(f"    {algo_name:12s}: SHD={metrics['shd']:3d} t={metrics['runtime']:.1f}s")
                    else:
                        logger.info(f"    {algo_name:12s}: {s}")

                feat_row = {name: features.get(name, 0.0) for name in FEATURE_NAMES}
                augmented_features.append(feat_row)
                augmented_shds.append(shd_row)
                augmented_nshds.append(nshd_row)
                augmented_configs.append({
                    'network': f'{net_name}_sub{aug_idx}',
                    'n_samples': n_samples,
                    'seed': seed_base + aug_idx,
                    'n_variables': n_to_keep,
                    'n_true_edges': int(((sub_cpdag + sub_cpdag.T) > 0).sum() // 2),
                })

        except Exception:
            logger.exception(f"Augmentation failed for {net_name}")

    return augmented_features, augmented_shds, augmented_nshds, augmented_configs

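
# Docstring item 3 (pairwise ranking) is not implemented in this file. Below is
# a minimal, untested sketch of one way it could work: reduce ranking to one
# binary classifier per algorithm pair ("does algorithm i beat algorithm j on
# this dataset?") and score each algorithm by its expected number of pairwise
# wins. `pairwise_rank_scores` is a hypothetical helper, not part of the trained
# pipeline; it only assumes X_* are feature DataFrames and Y_train holds
# per-algorithm normalized SHDs (lower is better), as elsewhere in this file.
def pairwise_rank_scores(X_train, Y_train, X_test, n_estimators=200):
    """Score algorithms on X_test by predicted pairwise wins (sketch only)."""
    from itertools import combinations
    from sklearn.ensemble import GradientBoostingClassifier

    algos = list(Y_train.columns)
    wins = np.zeros((len(X_test), len(algos)))
    for i, j in combinations(range(len(algos)), 2):
        # Label 1 where algorithm i has strictly lower normalized SHD than j
        y_pair = (Y_train.iloc[:, i] < Y_train.iloc[:, j]).astype(int)
        if y_pair.nunique() < 2:
            # One side wins on every training row; award the win directly
            wins[:, i if y_pair.iloc[0] == 1 else j] += 1.0
            continue
        clf = GradientBoostingClassifier(n_estimators=n_estimators)
        clf.fit(X_train, y_pair)
        p = clf.predict_proba(X_test)[:, 1]  # P(algorithm i beats algorithm j)
        wins[:, i] += p
        wins[:, j] += 1.0 - p
    # Higher expected win count = better predicted rank
    return pd.DataFrame(wins, columns=algos)
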
def hyperparameter_sweep():
    """Try different meta-learner configs and evaluate each with leave-one-network-out CV."""
    X, Y_shd, Y_nshd, configs = load_meta_dataset()

    print(f"Data: {X.shape[0]} samples, {X.shape[1]} features, {Y_nshd.shape[1]} algorithms")
    print(f"Networks: {sorted(configs.network.unique())}")

    model_configs = [
        ('RF-200', 'rf', {'n_estimators': 200}),
        ('RF-500', 'rf', {'n_estimators': 500}),
        ('RF-200-d10', 'rf', {'n_estimators': 200, 'max_depth': 10}),
        ('RF-200-d5', 'rf', {'n_estimators': 200, 'max_depth': 5}),
        ('RF-200-leaf5', 'rf', {'n_estimators': 200, 'min_samples_leaf': 5}),
        ('GBM-200', 'gbm', {'n_estimators': 200, 'max_depth': 5, 'learning_rate': 0.1}),
        ('GBM-500', 'gbm', {'n_estimators': 500, 'max_depth': 3, 'learning_rate': 0.05}),
        ('GBM-200-lr01', 'gbm', {'n_estimators': 200, 'max_depth': 4, 'learning_rate': 0.01}),
    ]

    print(f"\n{'Model':20s} {'Top3 Hit':>10s} {'NDCG@3':>8s} {'Regret':>8s} {'Overlap':>8s}")
    print("-" * 60)

    best_hit = -1.0
    best_name = None
    best_type = None
    best_kwargs = None

    for name, mtype, kwargs in model_configs:
        # Leave-one-network-out CV: each fold holds out all datasets from one network
        results = evaluate_lono_cv(X, Y_nshd, configs, model_type=mtype, k=3, **kwargs)
        o = results['overall']
        print(f"{name:20s} {o['top_k_hit_rate']:10.3f} {o['ndcg_at_k']:8.3f} "
              f"{o['mean_regret']:8.4f} {o['top_k_overlap_rate']:8.3f}")

        if o['top_k_hit_rate'] > best_hit:
            best_hit = o['top_k_hit_rate']
            best_name = name
            best_type = mtype
            best_kwargs = kwargs

    print(f"\nBest model: {best_name} (hit rate={best_hit:.3f})")

    # Retrain the best config on all data and save it
    model, scaler = train_meta_learner(X, Y_nshd, model_type=best_type, **best_kwargs)
    save_model(model, scaler)

    avg_imp, _ = get_feature_importance(model)
    print("\nTop 10 features (best model):")
    for feat, imp in sorted(avg_imp.items(), key=lambda x: -x[1])[:10]:
        print(f"  {feat:30s}: {imp:.4f}")

    return best_name, best_type, best_kwargs

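
# Hedged sketch of how the saved meta-learner would be applied to a fresh
# dataset: extract the same meta-features, scale them, predict per-algorithm
# normalized SHD, and return the k algorithms with the lowest predictions.
# `recommend_algorithms` is illustrative only; it assumes `scaler` exposes a
# StandardScaler-like .transform() and that `model.predict` returns one row of
# per-algorithm nSHD predictions, matching how the model is trained above.
def recommend_algorithms(model, scaler, df, k=3):
    feats = extract_all_features(df, n_probe_triplets=50)
    x = np.array([[feats.get(name, 0.0) for name in FEATURE_NAMES]])
    if scaler is not None:
        x = scaler.transform(x)
    pred_nshd = model.predict(x)[0]  # predicted normalized SHD per algorithm
    order = np.argsort(pred_nshd)    # ascending: lowest predicted nSHD first
    return [ALGO_NAMES[i] for i in order[:k]]
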
def run_augmentation():
    """Run variable subsampling, merge with the existing meta-dataset, and save."""
    feats, shds, nshds, cfgs = augment_variable_subsampling(
        networks=['asia', 'sachs', 'alarm', 'child', 'insurance', 'water'],
        n_augments_per_net=2, drop_frac=0.3, n_samples=1000
    )

    # Merge with the existing meta-dataset
    X_orig, Y_shd_orig, Y_nshd_orig, configs_orig = load_meta_dataset()

    X_aug = pd.DataFrame(feats, columns=FEATURE_NAMES)
    Y_shd_aug = pd.DataFrame(shds, columns=ALGO_NAMES)
    Y_nshd_aug = pd.DataFrame(nshds, columns=ALGO_NAMES)
    configs_aug = pd.DataFrame(cfgs)

    X_all = pd.concat([X_orig, X_aug], ignore_index=True)
    Y_shd_all = pd.concat([Y_shd_orig, Y_shd_aug], ignore_index=True)
    Y_nshd_all = pd.concat([Y_nshd_orig, Y_nshd_aug], ignore_index=True)
    configs_all = pd.concat([configs_orig, configs_aug], ignore_index=True)

    # Save the merged dataset back to the shared results directory
    X_all.to_csv(os.path.join(RESULTS_DIR, 'meta_features.csv'), index=False)
    Y_shd_all.to_csv(os.path.join(RESULTS_DIR, 'shd_matrix.csv'), index=False)
    Y_nshd_all.to_csv(os.path.join(RESULTS_DIR, 'normalized_shd_matrix.csv'), index=False)
    configs_all.to_csv(os.path.join(RESULTS_DIR, 'configs.csv'), index=False)

    print(f"\nAugmented dataset: {len(configs_all)} total configs "
          f"({len(configs_orig)} original + {len(configs_aug)} augmented)")


if __name__ == '__main__':
    mode = sys.argv[1] if len(sys.argv) > 1 else 'sweep'

    if mode == 'augment':
        run_augmentation()

    elif mode == 'sweep':
        hyperparameter_sweep()

    elif mode == 'all':
        # First augment, then sweep
        print("=" * 80)
        print("STEP 1: DATA AUGMENTATION")
        print("=" * 80)
        run_augmentation()

        print("\n" + "=" * 80)
        print("STEP 2: HYPERPARAMETER SWEEP")
        print("=" * 80)
        hyperparameter_sweep()
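
# Usage (assumes the container layout above, with /app on sys.path):
#   python improve_model.py augment   # build and save the augmented meta-dataset
#   python improve_model.py sweep     # evaluate model configs and save the best
#   python improve_model.py all       # augment first, then sweep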