# _base_ = [
#     './schedule_3x.py'
# ]

# optimizer = dict(
#     _delete_=True,
#     type='AdamW',
#     lr=0.0001,
#     betas=(0.9, 0.999),
#     weight_decay=0.05,
#     paramwise_cfg=dict(
#         custom_keys={
#             'absolute_pos_embed': dict(decay_mult=0.),
#             'relative_position_bias_table': dict(decay_mult=0.),
#             'norm': dict(decay_mult=0.)
#         }))

# lr_config = dict(
#     policy='step',
#     warmup='linear',
#     warmup_iters=500,
#     warmup_ratio=1.0 / 3,
#     step=[72, 90])

# runner = dict(type='EpochBasedRunner', max_epochs=100)


# AdamW optimizer for transformer-style backbones. Positional embeddings,
# the relative position bias table, and normalization parameters are
# exempted from weight decay (decay_mult=0), per common transformer practice.
optimizer = {
    'type': 'AdamW',
    'lr': 0.0001,  # lowered learning rate for training stability
    'betas': (0.9, 0.999),
    'weight_decay': 0.01,  # reduced weight decay to avoid under-fitting
    'paramwise_cfg': {
        'custom_keys': {
            'absolute_pos_embed': {'decay_mult': 0.0},
            'relative_position_bias_table': {'decay_mult': 0.0},
            'norm': {'decay_mult': 0.0},
        },
    },
}

# Clip gradients by global L2 norm to guard against exploding gradients,
# which AdamW training of transformers is prone to early on.
optimizer_config = {'grad_clip': {'max_norm': 35, 'norm_type': 2}}

# Cosine-annealing learning-rate schedule with a linear warmup phase.
lr_config = {
    'policy': 'CosineAnnealing',
    'min_lr': 1e-6,             # floor for the annealed learning rate
    'warmup': 'linear',
    'warmup_iters': 1000,       # extended warmup length (in iterations)
    'warmup_ratio': 1.0 / 10,   # warmup starts at 10% of the base lr
}

# Epoch-based training loop running for 100 epochs.
runner = {'type': 'EpochBasedRunner', 'max_epochs': 100}

# Optional: alternative lr scheduler config kept for reference (currently a
# duplicate of the active cosine-annealing schedule above).
# lr_config = dict(
#     policy='CosineAnnealing',
#     min_lr=1e-6,
#     warmup='linear',
#     warmup_iters=1000,
#     warmup_ratio=1.0 / 10,
# )

# Run mAP evaluation and save a checkpoint at the end of every epoch.
evaluation = {'interval': 1, 'metric': 'mAP'}
checkpoint_config = {'interval': 1}
