Wuhuwill committed · Commit 766d56c · verified · 1 Parent(s): 066b64c

Upload ProDiff/test.py with huggingface_hub

Files changed (1)
  1. ProDiff/test.py +272 -0
ProDiff/test.py ADDED
@@ -0,0 +1,272 @@
+ import os
+ import torch
+ import numpy as np
+ from tqdm import tqdm
+ import torch.nn.functional as F
+
+ from utils.metric import *
+ from dataset.data_util import MinMaxScaler
+ from utils.utils import mask_data_general, ddp_setup, continuous_mask_data, continuous_time_based_mask, mask_multiple_segments
+ # The Diffusion model is imported directly in the main script that calls this test function.
+ # from diffProModel.Diffusion import Diffusion  # not imported here; the model instance is passed in as an argument
+
+ def test_model(test_dataloader, diffusion_model, short_samples_model, config, epoch,
+                prototypes, device, logger, exp_dir):
+     """
+     Test the unified Diffusion model (DDPM or DDIM) on the test dataset.
+
+     Args:
+         test_dataloader: DataLoader for test data.
+         diffusion_model: The unified diffusion model (instance of diffProModel.Diffusion.Diffusion).
+         short_samples_model: Trajectory transformer model for feature extraction.
+         config: Configuration object.
+         epoch: Current epoch number (or identifier for the test run).
+         prototypes: Prototype vectors (e.g., from TrajectoryTransformer or K-Means).
+         device: Device to run the model on (already determined by the caller).
+         logger: Logger object.
+         exp_dir: Experiment directory path.
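+
+     Returns:
+         dict: On rank 0, a dict containing the aggregated "mean_mtd" and "mean_mppe"
+             values for this test run; an empty dict on all other ranks.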
+     """
+     # Determine distributed status and local_rank first
+     distributed = config.training.dis_gpu
+     local_rank = 0
+     if distributed:
+         # If DDP is active, LOCAL_RANK should be set by the environment.
+         # ddp_setup should have been called by the parent process (e.g., train_main or main for DDP launch);
+         # test_model itself typically does not re-initialize DDP.
+         try:
+             local_rank = int(os.environ.get('LOCAL_RANK', 0))
+         except ValueError:
+             if logger: logger.warning("LOCAL_RANK environment variable not a valid integer. Defaulting to 0.")
+             local_rank = 0
+     # The 'device' argument passed to this function should be the correct one to use.
+
+     thresholds = list(range(1000, 11000, 1000))  # Thresholds for TC metric
+     # Initialize lists to store metrics for each batch
+     mtd_list, mppe_list, maepp_list, maeps_list, aptc_list, avg_aptc_list, max_td_list = [], [], [], [], [], [], []
+
+     # Get sampling parameters from config (assuming they are in config.sampling)
+     sampling_type = getattr(config.sampling, 'type', 'ddpm')  # Default to ddpm if not specified
+     ddim_steps = getattr(config.sampling, 'ddim_steps', 50)
+     ddim_eta = getattr(config.sampling, 'ddim_eta', 0.0)
+     debug_mode = getattr(config, 'debug', False)  # General debug flag
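+     # Assumed layout of the sampling config fields read above (illustrative, inferred from the getattr defaults):
+     #   config.sampling.type       -> 'ddpm' (default) or 'ddim'
+     #   config.sampling.ddim_steps -> number of DDIM steps, e.g. 50
+     #   config.sampling.ddim_eta   -> DDIM eta, e.g. 0.0 for deterministic sampling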
+
+     if logger and local_rank == 0:  # Ensure logger operations happen on rank 0 if distributed
+         logger.info(f"Testing with sampling_type: {sampling_type} for epoch {epoch}")
+         if sampling_type == 'ddim':
+             logger.info(f"DDIM steps: {ddim_steps}, DDIM eta: {ddim_eta}")
+
+     diffusion_model.eval()  # Ensure diffusion model is in eval mode
+     short_samples_model.eval()  # Ensure feature extractor is in eval mode
+
+     pbar_desc = f"Epoch {epoch} Test Progress ({sampling_type.upper()})"
+     for batch_idx, (abs_time, lat, lng) in enumerate(tqdm(test_dataloader, desc=pbar_desc, disable=(local_rank != 0))):
+
+         if debug_mode and logger and local_rank == 0:
+             logger.info(f"Batch {batch_idx} - Input shapes: abs_time {abs_time.shape}, lat {lat.shape}, lng {lng.shape}")
+             logger.info(f"Input data stats - abs_time: min={abs_time.min().item():.4f}, max={abs_time.max().item():.4f}, " +
+                         f"lat: min={lat.min().item():.4f}, max={lat.max().item():.4f}, " +
+                         f"lng: min={lng.min().item():.4f}, max={lng.max().item():.4f}")
+
+         if torch.isnan(abs_time).any() or torch.isnan(lat).any() or torch.isnan(lng).any():
+             if logger and local_rank == 0: logger.error(f"Batch {batch_idx} - NaN detected in input data!")
+             continue
+
+         # Prepare input tensor (ground truth for start/end points and for scaling)
+         # This testx_raw is used for scaler fitting and as test_x0 for diffusion model
+         testx_raw = torch.stack([abs_time, lat, lng], dim=-1).to(device)
+
+         # Use global normalization parameters for consistency
+         scaler = MinMaxScaler(global_params_file='./data/robust_normalization_params.json')
+         scaler.fit(testx_raw)  # This does nothing for global scaler, but maintains interface
+         testx_scaled = scaler.transform(testx_raw)  # Scale data
+
+         if debug_mode and logger and local_rank == 0:
+             logger.info(f"Scaler min: {scaler.min_val.flatten().cpu().numpy()}, max: {scaler.max_val.flatten().cpu().numpy()}")
+
+         if torch.isnan(testx_scaled).any():
+             if logger and local_rank == 0:
+                 logger.error(f"Batch {batch_idx} - NaN detected after scaling!")
+                 if torch.any(scaler.max_val == scaler.min_val):
+                     logger.error("Division by zero in scaler possible: max_val equals min_val for some features.")
+             continue
+
+         # Permute for diffusion model input: (batch_size, num_features, traj_length)
+         testx_scaled_permuted = testx_scaled.permute(0, 2, 1)
+
+         # Apply masking
+         if config.masking_strategy == 'general':
+             masked_condition_permuted = mask_data_general(testx_scaled_permuted)
+         elif config.masking_strategy == 'continuous':
+             masked_condition_permuted = continuous_mask_data(testx_scaled_permuted, config.mask_ratio)
+         elif config.masking_strategy == 'time_based':
+             masked_condition_permuted = continuous_time_based_mask(testx_scaled_permuted, points_to_mask=config.mask_points_per_hour)
+         elif config.masking_strategy == 'multi_segment':
+             masked_condition_permuted = mask_multiple_segments(testx_scaled_permuted, points_per_segment=config.mask_segments)
+         else:
+             raise ValueError(f"Unknown masking strategy: {config.masking_strategy}")
+
+         masked_condition = masked_condition_permuted.permute(0, 2, 1)
+
+         with torch.no_grad():
+             _, query_features = short_samples_model(masked_condition)
+
+         if torch.isnan(query_features).any():
+             if logger and local_rank == 0: logger.error(f"Batch {batch_idx} - NaN detected in query_features!")
+             continue
+         if torch.isnan(prototypes).any():
+             if logger and local_rank == 0: logger.error(f"Batch {batch_idx} - NaN detected in provided prototypes!")
+             continue
+
+         # Match query features with prototypes (e.g., via cosine similarity and softmax attention)
+         # This logic should align with how matched_prototypes are generated during training
+         cos_sim = F.cosine_similarity(query_features.unsqueeze(1), prototypes.unsqueeze(0), dim=-1)
+         if torch.isnan(cos_sim).any():
+             if logger and local_rank == 0: logger.error(f"Batch {batch_idx} - NaN detected in cos_sim!")
+             continue
+
+         # Using the same attention-weighted sum as in the unified training script
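+         # (softmax(cos_sim / sqrt(d_k)) yields attention weights over the prototype bank;
+         #  each matched prototype is the corresponding weighted average of all prototypes)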
+         d_k = query_features.size(-1)
+         scaled_cos_sim = F.softmax(cos_sim / np.sqrt(d_k), dim=-1)
+         matched_prototypes_for_diffusion = torch.matmul(scaled_cos_sim, prototypes).to(device)
+
+         if torch.isnan(matched_prototypes_for_diffusion).any():
+             if logger and local_rank == 0: logger.error(f"Batch {batch_idx} - NaN detected in matched_prototypes!")
+             continue
+
+         if debug_mode and logger and local_rank == 0:
+             logger.info(f"Sampling with type: {sampling_type}, DDIM steps: {ddim_steps}, eta: {ddim_eta}")
+             logger.info(f"Input to diffusion model (testx_scaled_permuted) shape: {testx_scaled_permuted.shape}, "
+                         f"masked condition (masked_condition_permuted) shape: {masked_condition_permuted.shape}, "
+                         f"matched prototypes shape: {matched_prototypes_for_diffusion.shape}")
+
+         try:
+             pred_x0_scaled = diffusion_model.sample(
+                 test_x0=testx_scaled_permuted,               # Ground truth (scaled) for start/end points and reference
+                 attr=masked_condition_permuted,              # Masked data for conditional U-Net input (GuideNet attr)
+                 prototype=matched_prototypes_for_diffusion,  # Matched prototypes for GuideNet
+                 sampling_type=sampling_type,
+                 ddim_num_steps=ddim_steps,
+                 ddim_eta=ddim_eta
+             )
+
+             if torch.isnan(pred_x0_scaled).any():
+                 if logger and local_rank == 0: logger.error(f"Batch {batch_idx} - NaN detected in Diffusion model output!")
+                 continue
+
+         except Exception as e:
+             if logger and local_rank == 0: logger.error(f"Exception during Diffusion model sampling: {str(e)}")
+             import traceback
+             if logger and local_rank == 0: logger.error(traceback.format_exc())
+             continue
+
+         # pred_x0_scaled is (batch_size, num_features, traj_length)
+         pred_x0_scaled_unpermuted = pred_x0_scaled.permute(0, 2, 1)
+
+         if debug_mode and logger and local_rank == 0:
+             logger.info(f"pred_x0_scaled_unpermuted stats before inverse_transform: min={pred_x0_scaled_unpermuted.min().item():.4f}, max={pred_x0_scaled_unpermuted.max().item():.4f}")
+
+         if (pred_x0_scaled_unpermuted < 0).any() or (pred_x0_scaled_unpermuted > 1).any():
+             if logger and local_rank == 0:
+                 logger.warning(f"Batch {batch_idx} - Values outside [0,1] in pred_x0_scaled: min={pred_x0_scaled_unpermuted.min().item():.4f}, max={pred_x0_scaled_unpermuted.max().item():.4f}. Clamping.")
+             pred_x0_scaled_unpermuted = torch.clamp(pred_x0_scaled_unpermuted, 0, 1)
+
+         # Inverse transform to original data scale - ensure this happens on the correct device
+         pred_x0_final = scaler.inverse_transform(pred_x0_scaled_unpermuted)
+
+         ground_truth_final = testx_raw.cpu()
+
+         if torch.isnan(pred_x0_final).any() or torch.isnan(ground_truth_final).any():
+             if logger and local_rank == 0: logger.error(f"Batch {batch_idx} - NaN detected after inverse transform!")
+             continue
+
+         # Move to CPU before converting to NumPy for metric calculation
+         pred_x0_np = pred_x0_final.cpu().numpy()
+         ground_truth_np = ground_truth_final.numpy()
+
+         if debug_mode and logger and local_rank == 0:
+             logger.info(f"Shapes for metrics: pred_x0_np {pred_x0_np.shape}, ground_truth_np {ground_truth_np.shape}")
+             logger.info(f"pred_x0_np stats: min={np.min(pred_x0_np):.4f}, max={np.max(pred_x0_np):.4f}")
+             logger.info(f"ground_truth_np stats: min={np.min(ground_truth_np):.4f}, max={np.max(ground_truth_np):.4f}")
+
+         try:
+             mtd_list.append(mean_trajectory_deviation(pred_x0_np, ground_truth_np))
+             mppe_list.append(mean_point_to_point_error(pred_x0_np, ground_truth_np))
+             maepp_list.append(mean_absolute_error_per_point(pred_x0_np[:, :, 0], ground_truth_np[:, :, 0]))
+             maeps_list.append(mean_absolute_error_per_sample(pred_x0_np[:, :, 0], ground_truth_np[:, :, 0]))
+             aptc_result, avg_aptc_result = trajectory_coverage(pred_x0_np, ground_truth_np, thresholds)
+             aptc_list.append(aptc_result)
+             avg_aptc_list.append(avg_aptc_result)
+             max_td_list.append(max_trajectory_deviation(pred_x0_np, ground_truth_np))
+         except Exception as e:
+             if logger and local_rank == 0: logger.error(f"Exception during metric calculation in batch {batch_idx}: {str(e)}")
+             if debug_mode and logger and local_rank == 0: import traceback; logger.error(traceback.format_exc())
+             continue
+
+         if debug_mode and batch_idx == 0 and os.environ.get('PROJECT_DEBUG_MODE', '0') == '1':  # Use a distinct env var for this specific break
+             if logger and local_rank == 0: logger.info("Project debug mode: Breaking after first test batch")
+             break
+
+     # Aggregate and log metrics (only on rank 0 if distributed)
+     if local_rank == 0:
+         mean_mtd = np.mean(mtd_list) if mtd_list else float('nan')
+         mean_mppe = np.mean(mppe_list) if mppe_list else float('nan')
+         mean_maepp = np.mean(maepp_list) if maepp_list else float('nan')
+         mean_maeps = np.mean(maeps_list) if maeps_list else float('nan')
+         mean_avg_aptc = np.mean(avg_aptc_list) if avg_aptc_list else float('nan')
+         mean_max_td = np.max(max_td_list) if max_td_list else float('nan')  # MaxTD is max over all samples
+         mean_aptc_thresholds = {k: np.mean([d[k] for d in aptc_list if k in d]) for k in aptc_list[0]} if aptc_list else {f'TC@{thr}': float('nan') for thr in thresholds}
+
+         if logger:
+             logger.info(f"--- Test Results for Epoch {epoch} ({sampling_type.upper()}) ---")
+             logger.info(f"Mean MTD: {mean_mtd:.4f}")
+             logger.info(f"Mean MPPE: {mean_mppe:.4f}")
+             logger.info(f"Mean MAEPP (time): {mean_maepp:.4f}")
+             logger.info(f"Mean MAEPS (time): {mean_maeps:.4f}")
+             logger.info(f"Mean AVG_TC: {mean_avg_aptc:.4f}")
+             logger.info(f"Overall MaxTD: {mean_max_td:.4f}")
+             for threshold_val, tc_val in mean_aptc_thresholds.items():
+                 logger.info(f"Mean {threshold_val}: {tc_val:.4f}")
+             if sampling_type == 'ddim':
+                 logger.info(f"DDIM sampling with {ddim_steps} steps, eta: {ddim_eta:.2f}")
+             else:
+                 logger.info(f"DDPM sampling with {config.diffusion.num_diffusion_timesteps} steps")
+
+         # Save results to .npy files
+         results_dir = exp_dir / 'results'
+         os.makedirs(results_dir, exist_ok=True)
+         sampling_prefix = f"{sampling_type.upper()}_"
+
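+         # Each metric is stored in its own .npy file as a {epoch: value} dict (a pickled
+         # object via np.save), so successive test runs extend the same metric history.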
+         def save_metric_npy(metric_name, value, current_epoch):
+             file_path = results_dir / f"{sampling_prefix}Test_mean_{metric_name}.npy"
+             if np.isnan(value): return  # Don't save if NaN
+             if os.path.exists(file_path):
+                 try:
+                     existing_data = np.load(file_path, allow_pickle=True).item()
+                 except Exception:  # Handle empty or corrupted file
+                     existing_data = {}
+                 existing_data[current_epoch] = value
+             else:
+                 existing_data = {current_epoch: value}
+             np.save(file_path, existing_data)
+
+         save_metric_npy('mtd', mean_mtd, epoch)
+         save_metric_npy('mppe', mean_mppe, epoch)
+         save_metric_npy('maepp', mean_maepp, epoch)
+         save_metric_npy('maeps', mean_maeps, epoch)
+         save_metric_npy('avg_aptc', mean_avg_aptc, epoch)
+         save_metric_npy('max_td', mean_max_td, epoch)
+         for threshold_key, tc_value in mean_aptc_thresholds.items():
+             metric_key_name = threshold_key.replace('@', '_at_')  # Sanitize for filename
+             save_metric_npy(f"tc_{metric_key_name}", tc_value, epoch)
+
+         if logger: logger.info(f"Saved test metrics to {results_dir}")
+
+     # Ensure all processes finish if in DDP, though testing is usually single-process or rank 0 handles results
+     if torch.distributed.is_initialized():
+         torch.distributed.barrier()  # Wait for all processes if any were involved
+
+     return {  # Return main metrics, could be useful for main script
+         "mean_mtd": mean_mtd,
+         "mean_mppe": mean_mppe
+     } if local_rank == 0 else {}
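
For context, the sketch below shows one way the uploaded test_model function might be driven from a separate evaluation script. It is illustrative only: the helper names (build_test_dataloader, Diffusion, TrajectoryTransformer), the prototype checkpoint path, and the config.exp_dir field are assumptions, not APIs defined or verified in this repository.

import torch
from pathlib import Path

from ProDiff.test import test_model

def run_evaluation(config, logger, epoch=0):
    # All builder names below are hypothetical placeholders for project-specific code.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    test_dataloader = build_test_dataloader(config)                            # hypothetical helper
    diffusion_model = Diffusion(config).to(device)                             # assumed constructor
    short_samples_model = TrajectoryTransformer(config).to(device)             # assumed constructor
    prototypes = torch.load('checkpoints/prototypes.pt', map_location=device)  # assumed path

    exp_dir = Path(config.exp_dir)                                             # assumed config field
    metrics = test_model(test_dataloader, diffusion_model, short_samples_model,
                         config, epoch, prototypes, device, logger, exp_dir)
    if metrics:  # non-empty only on rank 0
        logger.info(f"mean_mtd={metrics['mean_mtd']:.4f}, mean_mppe={metrics['mean_mppe']:.4f}")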