import argparse
from setup_training import setup_and_train

# Function to parse command-line arguments for ULTRA FAST training
def _parse_size(value):
    """Parse a size argument like ``"224,224"`` (or ``"224x224"``) into an int tuple.

    argparse applies ``type`` to the raw CLI string, so the previous
    ``type=tuple`` split the string into individual characters
    (``tuple("224,224") == ('2','2','4',',','2','2','4')``). This helper
    restores the intended (height, width) pair. A non-string default
    (already a tuple) is passed through unchanged.
    """
    if isinstance(value, tuple):
        return value
    return tuple(int(part) for part in value.replace('x', ',').split(','))


# Function to parse command-line arguments for ULTRA FAST training
def parse_args():
    """Parse and return the command-line configuration for ULTRA FAST training.

    Returns:
        argparse.Namespace: every training option (data paths, batch size,
        optimizer/scheduler hyperparameters, model selection, output dirs).
    """
    parser = argparse.ArgumentParser(description='Configuration for ULTRA FAST training')

    # Data settings
    parser.add_argument('--data_dir', type=str, default=r'../split_data/webinat5000_train', 
                       help='Directory for split data')

    # Training settings - ULTRA OPTIMIZED FOR SPEED
    parser.add_argument('--batch_size', type=int, default=512, 
                       help='Ultra large batch size for maximum GPU utilization')
    parser.add_argument('--epochs', type=int, default=200, 
                       help='Reduced epochs for faster training')

    # Model settings
    # Bug fix: was type=tuple, which mangled CLI input char-by-char; see _parse_size.
    parser.add_argument('--input_size', type=_parse_size, default=(224, 224), 
                       help='Input size for the model')
    parser.add_argument('--num_classes', type=int, default=5000, 
                       help='Number of classes')

    # Other settings
    parser.add_argument('--random_seed', type=int, default=43, 
                       help='Random seed for reproducibility')

    # Loss function settings
    parser.add_argument('--loss_function', type=str, default='CrossEntropyLoss', 
                       help='选择损失函数，例如：CrossEntropyLoss')

    # Training strategy settings - ULTRA OPTIMIZED
    parser.add_argument('--optimizer', type=str, default='AdamW', 
                       help='AdamW optimizer for better performance')
    parser.add_argument('--learning_rate', type=float, default=0.012, 
                       help='High learning rate for ultra large batch size')
    parser.add_argument('--weight_decay', type=float, default=1e-4, 
                       help='Weight decay for optimizer')

    # Scheduler settings - ULTRA OPTIMIZED
    # Only the options matching --scheduler are expected to be used downstream.
    parser.add_argument('--scheduler', type=str, default='CosineAnnealingLR', 
                       help='CosineAnnealingLR for better convergence')
    parser.add_argument('--T_max', type=int, default=200, 
                       help='Maximum number of iterations for CosineAnnealingLR')
    parser.add_argument('--eta_min', type=float, default=0.0001, 
                       help='Minimum learning rate for CosineAnnealingLR')
    parser.add_argument('--step_size', type=int, default=30, 
                       help='Step size for StepLR')
    parser.add_argument('--gamma', type=float, default=0.1, 
                       help='Gamma for StepLR and ExponentialLR')
    parser.add_argument('--factor', type=float, default=0.9, 
                       help='Factor for ReduceLROnPlateau')
    parser.add_argument('--patience', type=int, default=10, 
                       help='Patience for ReduceLROnPlateau')

    # YAML configuration file for augmentation
    parser.add_argument('--augmentation_config', type=str, default='augmentation_config.yml', 
                       help='路径到数据增广配置的YAML文件')

    # Model selection
    parser.add_argument('--model_name', type=str, default='SwinTransformer', 
                       help='选择模型的类名，例如：resnet101')
    parser.add_argument('--pre_trained_weights_path', type=str, default="pre-weight/best_accuracy_epoch144_acc43.01.pth",
                       help='预训练权重的路径，如果为None则不加载')
    # NOTE(review): default is the literal string "None", not Python None —
    # downstream presumably compares against the string; confirm in setup_training.
    parser.add_argument('--load_previous_weight_path', type=str, default="None",
                       help='已训练模型权重的路径，用于继续训练，如果为None则不加载')

    # Directory settings
    parser.add_argument('--log_dir', type=str, default='logs', 
                       help='日志保存目录')
    parser.add_argument('--weight_dir', type=str, default='weights', 
                       help='模型权重保存目录')

    return parser.parse_args()

# Example usage
# Script entry point: parse the CLI configuration, echo every setting,
# then hand off to the training routine.
if __name__ == "__main__":
    args = parse_args()
    separator = "=" * 60
    print("🚀 ULTRA FAST Training Configuration:")
    print(separator)
    # Dump each parsed option as "name: value" for a quick sanity check.
    for name, value in vars(args).items():
        print(f"{name}: {value}")
    print(separator)
    print("⚡ Starting ULTRA FAST training with maximum optimization...")
    print("📊 Expected speed: 15-25 it/s (3-5x faster than current)")
    setup_and_train(args)
