#!/usr/bin/env python3
"""
Swin-Transformer-V2 配置文件
使用 Swin-Transformer-V2-B 模型进行训练
"""

import argparse
from setup_training import setup_and_train

def parse_args(argv=None):
    """Build and parse the command-line configuration for Swin-Transformer-V2 training.

    Args:
        argv: Optional list of argument strings to parse. Defaults to ``None``,
            in which case argparse reads ``sys.argv[1:]``. Accepting an explicit
            list keeps the parser usable from tests and other scripts.

    Returns:
        argparse.Namespace holding all training hyper-parameters.
        ``input_size`` is always a ``tuple`` of two ints.
    """
    parser = argparse.ArgumentParser(description='Swin-Transformer-V2 训练配置')

    # Data settings
    parser.add_argument('--data_dir', type=str, default=r'../split_data/webinat5000_train',
                       help='Directory for split data')

    # Training settings
    parser.add_argument('--batch_size', type=int, default=64, help='Batch size for training (Tiny模型可以用更大batch)')
    parser.add_argument('--epochs', type=int, default=400, help='Number of training epochs')

    # Model settings
    # BUG FIX: the original used ``type=tuple``, which would turn a CLI value
    # such as "224,224" into a tuple of single characters. Two ints (H W) are
    # expected; the parsed list is normalized to a tuple after parsing below.
    parser.add_argument('--input_size', type=int, nargs=2, default=(224, 224),
                       help='Input size for the model')
    parser.add_argument('--num_classes', type=int, default=5000, help='Number of classes')

    # Other settings
    parser.add_argument('--random_seed', type=int, default=43, help='Random seed for reproducibility')

    # Loss function settings
    parser.add_argument('--loss_function', type=str, default='CrossEntropyLoss', help='选择损失函数')

    # Training strategy settings
    parser.add_argument('--optimizer', type=str, default='AdamW', help='选择优化器 (推荐AdamW)')
    parser.add_argument('--learning_rate', type=float, default=0.001, help='Learning rate (Tiny模型可以用更大学习率)')
    parser.add_argument('--weight_decay', type=float, default=1e-4, help='Weight decay for optimizer')

    # Scheduler settings (factor/patience apply to ReduceLROnPlateau-style
    # schedulers; T_max/eta_min to CosineAnnealingLR; step_size/gamma to StepLR)
    parser.add_argument('--scheduler', type=str, default='CosineAnnealingLR', help='学习率调度器')
    parser.add_argument('--factor', type=float, default=0.8, help='学习率衰减因子')
    parser.add_argument('--patience', type=int, default=20, help='等待轮数')
    parser.add_argument('--T_max', type=int, default=400, help='Maximum number of iterations for CosineAnnealingLR')
    parser.add_argument('--eta_min', type=float, default=1e-6, help='Minimum learning rate for CosineAnnealingLR')
    parser.add_argument('--step_size', type=int, default=30, help='Step size for StepLR')
    parser.add_argument('--gamma', type=float, default=0.1, help='Gamma for StepLR and ExponentialLR')

    # Warmup settings (tuned for the Tiny model)
    # BUG FIX: ``action='store_true'`` combined with ``default=True`` made this
    # flag impossible to disable from the command line. BooleanOptionalAction
    # (Python 3.9+) keeps ``--enable_warmup`` valid and adds ``--no-enable_warmup``.
    parser.add_argument('--enable_warmup', action=argparse.BooleanOptionalAction, default=True, help='启用预热')
    parser.add_argument('--warmup_epochs', type=int, default=10, help='Tiny模型预热期')
    parser.add_argument('--warmup_start_lr', type=float, default=1e-6, help='预热起始学习率')
    parser.add_argument('--warmup_type', type=str, default='linear', help='预热类型 (使用线性预热)')

    # YAML configuration file for augmentation
    parser.add_argument('--augmentation_config', type=str, default='updated_augmentation_config.yml',
                       help='路径到数据增广配置的YAML文件')

    # Model selection - Swin-Transformer-V2
    # NOTE(review): the string "None" (not the None object) is presumably what
    # setup_and_train expects for "no weights" — confirm against that module.
    parser.add_argument('--model_name', type=str, default='SwinTransformerV2', help='使用Swin-Transformer-V2')
    parser.add_argument('--pre_trained_weights_path', type=str, default="None",
                        help='预训练权重的路径')
    parser.add_argument('--load_previous_weight_path', type=str, default="None",
                        help='已训练模型权重的路径')

    # Weight initialization settings
    parser.add_argument('--weight_init_type', type=str, default='xavier_uniform',
                       help='权重初始化方法')

    # Gradient clipping settings
    parser.add_argument('--grad_clip_norm', type=float, default=1.0, help='梯度裁剪的最大范数')
    # BUG FIX: same store_true/default=True contradiction as --enable_warmup.
    parser.add_argument('--check_nan_loss', action=argparse.BooleanOptionalAction, default=True, help='检查NaN损失')

    # Mixed precision settings
    # BUG FIX: same store_true/default=True contradiction; now disable-able
    # via --no-use_mixed_precision.
    parser.add_argument('--use_mixed_precision', action=argparse.BooleanOptionalAction, default=True, help='Tiny模型可以不用混合精度')

    # Directory settings
    parser.add_argument('--log_dir', type=str, default='logs', help='日志保存目录')
    parser.add_argument('--weight_dir', type=str, default='weights', help='模型权重保存目录')

    args = parser.parse_args(argv)
    # nargs=2 yields a list when supplied on the CLI; normalize so downstream
    # code always sees the same type as the default.
    args.input_size = tuple(args.input_size)
    return args

if __name__ == "__main__":
    # Parse the CLI configuration, echo every option for the run log,
    # then hand the namespace to the training pipeline.
    config = parse_args()
    print("当前配置参数:")
    for name, value in vars(config).items():
        print(f"{name}: {value}")
    setup_and_train(config)
