#!/usr/bin/env python3
"""
GPU并行训练启动脚本
使用方法: python train_with_gpu.py --config your_config.json
"""

import argparse
import json
import os
import subprocess
import sys
from pathlib import Path

import torch

# 添加项目路径
sys.path.append(str(Path(__file__).parent))

from gpu_config import setup_gpu_parallel, get_gpu_config, monitor_gpu_memory
from embedtrack.scripts.train_all import main as train_main

def parse_args():
    """Parse and return command-line options for the GPU training launcher."""
    p = argparse.ArgumentParser(description='GPU并行训练启动脚本')
    p.add_argument('--config', type=str, required=True, help='配置文件路径')
    p.add_argument(
        '--gpu-config',
        type=str,
        default='auto',
        choices=['auto', 'single_gpu', 'multi_gpu', 'high_memory'],
        help='GPU配置类型',
    )
    p.add_argument('--workers', type=int, default=None, help='手动设置workers数量')
    p.add_argument(
        '--batch-size-multiplier',
        type=float,
        default=None,
        help='批次大小倍数',
    )
    p.add_argument(
        '--monitor-memory',
        action='store_true',
        help='是否监控GPU内存使用',
    )
    return p.parse_args()

def _apply_gpu_settings(config, gpu_config):
    """Mutate *config* in place with the batch size and dataloader settings
    taken from *gpu_config*.

    Expects ``gpu_config`` to provide ``batch_size_multiplier``, ``workers``,
    ``pin_memory`` and ``persistent_workers`` (as produced by
    ``get_gpu_config`` — TODO confirm the preset always includes all four).
    """
    # Scale the training batch size; 4 is the fallback when the config
    # file does not specify one.
    if 'train_dict' in config:
        original_batch_size = config['train_dict'].get('train_batch_size', 4)
        new_batch_size = int(original_batch_size * gpu_config['batch_size_multiplier'])
        config['train_dict']['train_batch_size'] = new_batch_size
        print(f"  批次大小: {original_batch_size} -> {new_batch_size}")

    # DataLoader-related knobs are stored at the top level of the config.
    config['gpu_workers'] = gpu_config['workers']
    config['pin_memory'] = gpu_config['pin_memory']
    config['persistent_workers'] = gpu_config['persistent_workers']
    print(f"  Workers: {gpu_config['workers']}")
    print(f"  Pin Memory: {gpu_config['pin_memory']}")


def _optimized_config_path(config_path):
    """Return the sibling path '<stem>_gpu_optimized.json' for *config_path*.

    BUG FIX: the previous ``str.replace('.json', ...)`` silently returned the
    ORIGINAL path (and would have overwritten the input file on save) when the
    filename did not end in '.json', and also replaced '.json' occurrences in
    the middle of the path. Path arithmetic handles both cases correctly.
    """
    p = Path(config_path)
    return str(p.with_name(p.stem + '_gpu_optimized.json'))


def main():
    """Entry point: set up GPU parallelism, tune the training config for the
    detected hardware, save the tuned copy, and launch the training script.

    Raises:
        subprocess.CalledProcessError: if the training script exits non-zero.
    """
    args = parse_args()

    print("=" * 60)
    print("GPU并行训练启动")
    print("=" * 60)

    # 1. Prepare the GPU environment (process/env-level setup).
    print("\n1. 设置GPU并行环境...")
    setup_gpu_parallel()

    # 2. Pick a GPU configuration preset (auto-detected unless overridden).
    print("\n2. 获取GPU配置...")
    gpu_config = get_gpu_config(args.gpu_config)
    print(f"GPU配置: {gpu_config}")

    # 3. Explicit CLI flags take precedence over the preset.
    if args.workers is not None:
        gpu_config['workers'] = args.workers
    if args.batch_size_multiplier is not None:
        gpu_config['batch_size_multiplier'] = args.batch_size_multiplier

    # 4. Load the user's training configuration.
    print(f"\n3. 加载配置文件: {args.config}")
    with open(args.config, 'r', encoding='utf-8') as f:
        config = json.load(f)

    # 5. Fold the GPU settings into the training configuration.
    print("\n4. 应用GPU配置...")
    _apply_gpu_settings(config, gpu_config)

    # 6. Persist the optimized copy next to the original config.
    config_backup = _optimized_config_path(args.config)
    with open(config_backup, 'w', encoding='utf-8') as f:
        json.dump(config, f, indent=2)
    print(f"\n5. 保存优化配置到: {config_backup}")

    # 7. Optional GPU memory snapshot before training starts.
    if args.monitor_memory:
        print("\n6. GPU内存监控:")
        monitor_gpu_memory()

    # 8. Launch the training script as a child process.
    print("\n7. 启动训练...")
    print("=" * 60)

    try:
        # BUG FIX: ``os.system`` with an f-string broke on paths containing
        # spaces or quotes, was shell-injectable, and silently ignored the
        # child's exit status. ``subprocess.run`` with an argument list avoids
        # the shell entirely; ``check=True`` surfaces failures as exceptions,
        # and ``sys.executable`` guarantees the same interpreter is used.
        subprocess.run(
            [sys.executable, "embedtrack/scripts/train_all.py",
             "--config", config_backup],
            check=True,
        )
    except KeyboardInterrupt:
        print("\n训练被用户中断")
    except Exception as e:
        print(f"\n训练出错: {e}")
        raise
    finally:
        # Always report the final memory state, even on failure/interrupt.
        if args.monitor_memory:
            print("\n最终GPU内存状态:")
            monitor_gpu_memory()


if __name__ == "__main__":
    main()