Wuhuwill committed · Commit 3a521ca · verified · 1 Parent(s): d1e92f6

Upload ProDiff/main.py with huggingface_hub

Files changed (1)
  1. ProDiff/main.py +275 -0
ProDiff/main.py ADDED
@@ -0,0 +1,275 @@
+ import os
+ import torch
+ import datetime
+ import shutil
+ from pathlib import Path
+ import argparse
+ from types import SimpleNamespace
+ import sys
+ import numpy as np
+
+
+ from conf import config as config_module  # Corrected alias
+ from utils.logger import Logger, log_info
+ from utils.utils import set_seed, ddp_setup, destroy_process_group, get_data_paths
+ from dataset.data_util import TrajectoryDataset
+ from torch.utils.data import DataLoader
+ from diffProModel.Diffusion import Diffusion
+ from diffProModel.protoTrans import TrajectoryTransformer
+
+
+ from train import train_main
+ from test import test_model
+
+ def setup_experiment_environment(base_exp_dir, exp_name_with_timestamp, config_to_save, files_to_copy=None):
+     """Sets up the experiment directory structure and saves essential files."""
+     exp_dir = base_exp_dir / exp_name_with_timestamp
+     results_dir = exp_dir / 'results'
+     models_dir = exp_dir / 'models'  # Unified models dir, not a timestamped sub-dir by default here
+     logs_dir = exp_dir / 'logs'
+     code_save_dir = exp_dir / 'code_snapshot'
+
+     os.makedirs(results_dir, exist_ok=True)
+     os.makedirs(models_dir, exist_ok=True)
+     os.makedirs(logs_dir, exist_ok=True)
+     os.makedirs(code_save_dir, exist_ok=True)
+
+     # Save configuration
+     # (Convert SimpleNamespace to dict for easier saving if needed, or save as text)
+     with open(exp_dir / 'config_used.txt', 'w') as f:
+         import json
+         # Convert SimpleNamespace to dict for JSON serialization
+         def ns_to_dict(ns):
+             if isinstance(ns, SimpleNamespace):
+                 return {k: ns_to_dict(v) for k, v in ns.__dict__.items()}
+             elif isinstance(ns, dict):
+                 return {k: ns_to_dict(v) for k, v in ns.items()}
+             elif isinstance(ns, list):
+                 return [ns_to_dict(i) for i in ns]
+             return ns
+         config_dict = ns_to_dict(config_to_save)
+         json.dump(config_dict, f, indent=4)
+
+     # Copy essential code files
+     if files_to_copy:
+         for file_path_str in files_to_copy:
+             try:
+                 file_path = Path(file_path_str)
+                 if file_path.exists():
+                     shutil.copy(file_path, code_save_dir)
+                 else:
+                     print(f"Warning: File to copy not found: {file_path_str}")  # Use logger if available
+             except Exception as e:
+                 print(f"Warning: Could not copy file {file_path_str}: {e}")
+
+     return exp_dir, models_dir, logs_dir, results_dir
+
+ def main():
+     parser = argparse.ArgumentParser(description='Unified Trajectory Interpolation - Training with Periodic Validation')
+     parser.add_argument('--sampling_type', type=str, default='ddpm', choices=['ddpm', 'ddim'],
+                         help='Diffusion sampling type (ddpm or ddim) - influences periodic validation if DDIM is chosen, and experiment naming.')
+     parser.add_argument('--config_module_path', type=str, default='conf.config',
+                         help='Python module path for base configuration (e.g., conf.config)')
+     parser.add_argument('--exp_name', type=str, default='traj_interp_exp',
+                         help='Base name for the experiment directory')
+     parser.add_argument('--seed', type=int, default=42, help='Random seed')
+     parser.add_argument('--device_id', type=int, default=0, help='CUDA device ID to use')
+     parser.add_argument('--distributed', action='store_true', help='Enable distributed training (DDP)')
+
+     parser.add_argument('--ddim_steps', type=int, default=50, help='Number of DDIM sampling steps for periodic validation')
+     parser.add_argument('--ddim_eta', type=float, default=0.0,
+                         help='DDIM stochasticity parameter for periodic validation (0=deterministic, 1=DDPM-like)')
+
+     parser.add_argument('--debug', action='store_true', help='Enable debug mode for more detailed logs')
+
+     # Test-mode arguments
+     parser.add_argument('--mode', type=str, default='train', choices=['train', 'test'],
+                         help='Run mode: train or test')
+     parser.add_argument('--model_path', type=str, default=None,
+                         help='In test mode, path of the experiment directory containing the model to load')
+     parser.add_argument('--model_epoch', type=int, default=None,
+                         help='In test mode, epoch of the model checkpoint to load')
+
+     args = parser.parse_args()
+
+     # --- Basic Setup ---
+     if args.distributed:
+         ddp_setup(args.distributed)  # Sets LOCAL_RANK env var if not already set by launcher
+         local_rank = int(os.environ.get('LOCAL_RANK', 0))
+     else:
+         local_rank = 0
+
+     if not args.distributed or local_rank == 0:  # Setup master process first or if not distributed
+         print(f"Running on device: cuda:{args.device_id}" if torch.cuda.is_available() else "Running on CPU")
+
+     if torch.cuda.is_available():
+         torch.cuda.set_device(args.device_id if not args.distributed else local_rank)
+
+     set_seed(args.seed + local_rank)  # Ensure different seeds for different processes in DDP for some operations
+
+     # --- Load Configuration ---
+     try:
+         base_config_dict = config_module.load_config()  # from conf.config import load_config
+     except Exception as e:
+         print(f"Error loading base configuration from {args.config_module_path}: {e}")
+         sys.exit(1)
+
+     # Handle both dict and SimpleNamespace return from load_config()
+     if isinstance(base_config_dict, dict):
+         cfg_ns = {k: SimpleNamespace(**v) for k, v in base_config_dict.items()}
+         config = SimpleNamespace(**cfg_ns)
+     else:
+         # load_config() already returned a SimpleNamespace
+         config = base_config_dict
+
+     # Update config with command-line arguments
+     config.debug = args.debug
+     config.training.dis_gpu = args.distributed
+     config.sampling.type = args.sampling_type
+     config.sampling.ddim_steps = args.ddim_steps
+     config.sampling.ddim_eta = args.ddim_eta
+     config.device_id = args.device_id  # Pass device_id for train_main
+     # Ensure other necessary fields exist in config (add defaults if not in config.py)
+     if not hasattr(config, 'model'): config.model = SimpleNamespace()
+     if not hasattr(config.model, 'loss_type'): config.model.loss_type = 'l1'  # Default
+     if not hasattr(config.training, 'learning_rate'): config.training.learning_rate = 2e-4
+     if not hasattr(config.training, 'warmup_epochs'): config.training.warmup_epochs = 10
+     if not hasattr(config.training, 'contrastive_margin'): config.training.contrastive_margin = 1.0
+     if not hasattr(config.training, 'use_amp'): config.training.use_amp = True
+     if not hasattr(config.training, 'kmeans_memory_size'): config.training.kmeans_memory_size = 10  # Batches
+     if not hasattr(config.training, 'ce_loss_weight'): config.training.ce_loss_weight = 0.1
+     if not hasattr(config.training, 'diffusion_loss_weight'): config.training.diffusion_loss_weight = 1.0
+     if not hasattr(config.training, 'contrastive_loss_weight'): config.training.contrastive_loss_weight = 1.0
+
+     # --- Setup Experiment Environment (only on rank 0 if DDP) ---
+     timestamp_str = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
+     # Include sampling type in experiment name for clarity
+     exp_name_ts = f"{args.exp_name}_{config.data.dataset}_len{config.data.traj_length}_{args.sampling_type}_{timestamp_str}"
+
+     exp_dir, models_save_dir, logs_dir, results_dir = Path("."), Path("."), Path("."), Path(".")  # Defaults for non-rank0
+     if local_rank == 0:
+         root_dir = Path(__file__).resolve().parent  # Project root
+         base_experiment_path = root_dir / "Experiments"  # Changed from "Backups"
+
+         files_to_copy_snapshot = [
+             'main.py', 'train.py', 'test.py', 'conf/config.py',
+             'diffProModel/Diffusion.py', 'diffProModel/protoTrans.py', 'diffProModel/loss.py',
+             'utils/utils.py', 'utils/logger.py', 'utils/metric.py', 'dataset/data_util.py'
+         ]
+         exp_dir, models_save_dir, logs_dir, results_dir = setup_experiment_environment(
+             base_experiment_path, exp_name_ts, config, files_to_copy_snapshot
+         )
+
+     # Logger setup (after exp_dir is known by all processes if DDP, or just for rank 0)
+     logger = None
+     if local_rank == 0:
+         log_file_path = logs_dir / f"log_{timestamp_str}.txt"
+         logger = Logger(
+             name=exp_name_ts,
+             log_path=log_file_path,
+             colorize=True,
+             level="debug" if args.debug else "info"
+         )
+         logger.info(f"Experiment directory: {exp_dir}")
+         log_info(config, logger)  # Log the configuration details
+         logger.info(f"Using sampling type for periodic validation: {args.sampling_type}")
+         if args.sampling_type == 'ddim':
+             logger.info(f"DDIM Steps for validation: {args.ddim_steps}, Eta for validation: {args.ddim_eta}")
+
+     # Barrier to ensure exp_dir is created by rank 0 before other ranks proceed if DDP
+     if args.distributed:
+         torch.distributed.barrier()
+
+     # Set up device
+     device = torch.device(f"cuda:{args.device_id}" if torch.cuda.is_available() else "cpu")
+
+     if args.mode == 'train':
+         # --- Main Execution: Call Training (which includes periodic validation) ---
+         if logger and local_rank == 0:
+             logger.info("Starting training with periodic validation...")
+
+         train_main(config, logger, exp_dir, timestamp_str)
+     else:  # Test mode
+         if logger and local_rank == 0:
+             logger.info("Starting model testing...")
+             logger.info(f"Loading model from: {args.model_path}")
+             logger.info(f"Using epoch: {args.model_epoch}")
+
+         # Load test data
+         test_dataset = TrajectoryDataset(
+             file_paths=get_data_paths(config.data, for_train=False),
+             traj_length=config.data.traj_length
+         )
+         test_dataloader = DataLoader(
+             test_dataset,
+             batch_size=config.sampling.batch_size,
+             shuffle=False,
+             num_workers=config.data.num_workers if isinstance(config.data.num_workers, int) else 4,
+             pin_memory=True
+         )
+
+         # Initialize models
+         diffusion_model = Diffusion(
+             loss_type=config.model.loss_type,
+             config=config,
+             clip_denoised=True,
+             predict_epsilon=True
+         ).to(device)
+         short_samples_model = TrajectoryTransformer(
+             input_dim=config.trans.input_dim,
+             embed_dim=config.trans.embed_dim,
+             num_layers=config.trans.num_layers,
+             num_heads=config.trans.num_heads,
+             forward_dim=config.trans.forward_dim,
+             seq_len=config.data.traj_length,
+             n_cluster=config.trans.N_CLUSTER,
+             dropout=config.trans.dropout
+         ).to(device)
+
+         # Load model weights - automatically detect the latest timestamp directory
+         models_base_dir = Path(args.model_path) / "models"
+         # List all subdirectories under the models directory and pick the most recent timestamp directory
+         timestamp_dirs = [d for d in models_base_dir.iterdir() if d.is_dir()]
+         if not timestamp_dirs:
+             raise FileNotFoundError(f"No model timestamp directories found in {models_base_dir}")
+
+         # Sort by timestamp and take the latest
+         latest_timestamp_dir = sorted(timestamp_dirs, key=lambda x: x.name)[-1]
+         model_dir = latest_timestamp_dir
+
+         diffusion_model_path = model_dir / f"diffusion_model_epoch_{args.model_epoch}.pt"
+         transformer_model_path = model_dir / f"transformer_epoch_{args.model_epoch}.pt"
+         prototypes_path = model_dir / f"prototypes_transformer_epoch_{args.model_epoch}.npy"
+
+         if logger and local_rank == 0:
+             logger.info(f"Loading diffusion model from: {diffusion_model_path}")
+             logger.info(f"Loading transformer model from: {transformer_model_path}")
+             logger.info(f"Loading prototypes from: {prototypes_path}")
+
+         diffusion_model.load_state_dict(torch.load(diffusion_model_path, map_location=device))
+         short_samples_model.load_state_dict(torch.load(transformer_model_path, map_location=device))
+         prototypes = torch.from_numpy(np.load(prototypes_path)).float().to(device)
+
+         # Run testing
+         with torch.no_grad():
+             test_model(
+                 test_dataloader=test_dataloader,
+                 diffusion_model=diffusion_model,
+                 short_samples_model=short_samples_model,
+                 config=config,
+                 epoch=args.model_epoch,
+                 prototypes=prototypes,
+                 device=device,
+                 logger=logger,
+                 exp_dir=exp_dir
+             )
+
+     if args.distributed:
+         if torch.distributed.is_initialized():
+             destroy_process_group()
+
+     if local_rank == 0 and logger:
+         logger.info("Main script execution finished.")
+
+ if __name__ == "__main__":
+     main()
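
Note: main.py expects a nested config object from conf.config.load_config(), which is not part of this commit. The snippet below is a hypothetical, minimal stand-in whose field names are inferred only from the attributes main.py reads (config.data.*, config.sampling.*, config.trans.*, config.model.*, config.training.*); the values are placeholders, not the project's real defaults.

# Hypothetical sketch of the config shape main.py consumes; values are illustrative only.
from types import SimpleNamespace

def load_config():
    return SimpleNamespace(
        data=SimpleNamespace(dataset="example_dataset", traj_length=120, num_workers=4),
        training=SimpleNamespace(dis_gpu=False),  # main() fills in learning_rate, use_amp, loss weights, etc.
        sampling=SimpleNamespace(type="ddpm", batch_size=64, ddim_steps=50, ddim_eta=0.0),
        trans=SimpleNamespace(input_dim=2, embed_dim=128, num_layers=4, num_heads=8,
                              forward_dim=512, N_CLUSTER=10, dropout=0.1),
        model=SimpleNamespace(loss_type="l1"),
    )

With a config of this shape available, the argument parser above selects the run mode, e.g. "--mode train --sampling_type ddim --ddim_steps 50" for training with DDIM-based periodic validation, or "--mode test --model_path <experiment dir> --model_epoch <N>" to evaluate a saved checkpoint.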