# This config inherits the RTMDet-m base config and overrides only the
# settings required for training on the custom (COCO-format) dataset.
_base_ = '../rtmdet_m_8xb32-300e_coco.py'


# The dataset follows the COCO annotation format.

# Class names and the per-class bounding-box colours (RGB triplets)
# used when visualising detections.
metainfo = dict(
    classes=('gy', 'hh', 'yh', 'yhtp', 'None'),
    palette=[
        (220, 20, 60),
        (119, 11, 32),
        (0, 0, 142),
        (0, 0, 230),
        (106, 0, 228),
    ],
)

# Dataset-related settings. The data lives under ``data/custom`` relative
# to the project root.
data_root = 'data/custom/'

# Training dataloader: per-GPU batch size, worker count and dataset paths.
train_dataloader = dict(
    batch_size=2,   # batch size of a single GPU
    num_workers=2,  # number of dataloader worker processes
    # Keep worker processes alive between epochs to speed up training.
    persistent_workers=True,
    dataset=dict(
        data_root=data_root,
        metainfo=metainfo,
        ann_file='annotations/train.json',  # COCO annotation file for training
        data_prefix=dict(img='images/'),    # directory containing training images
    ),
)

# Validation dataloader mirrors training but reads the validation split.
val_dataloader = dict(
    dataset=dict(
        data_root=data_root,
        metainfo=metainfo,
        ann_file='annotations/val.json',
        data_prefix=dict(img='images/'),
    ),
)

# Testing reuses the validation dataloader unchanged.
test_dataloader = val_dataloader

# Evaluators score predictions against the validation annotations.
val_evaluator = dict(ann_file=data_root + 'annotations/val.json')
test_evaluator = val_evaluator

# Optimizer settings.
optim_wrapper = dict(  # Optimizer wrapper config; switch to AmpOptimWrapper for mixed precision.
    type='OptimWrapper',
    optimizer=dict(
        # AdamW optimizer (torch.optim.AdamW). NOTE: unlike SGD, AdamW has
        # no ``momentum`` argument — the original config passed
        # ``momentum=0.9``, which raises ``TypeError`` when MMEngine builds
        # the optimizer from the merged dict, so only AdamW-compatible keys
        # are given here.
        type='AdamW',
        lr=0.001,  # base learning rate
        weight_decay=0.0001),  # decoupled weight-decay coefficient
    clip_grad=None,  # Set to a dict to enable gradient clipping; None disables it.
)

# Learning-rate schedule.
param_scheduler = [
    # Warm-up: scale the LR linearly from ``lr * 0.001`` up to ``lr`` over
    # the first 500 iterations (iteration-based, not epoch-based).
    dict(
        type='LinearLR',
        start_factor=0.001,
        by_epoch=False,
        begin=0,
        end=500,
    ),
    # Main schedule: multiply the LR by ``gamma`` (0.1) at epochs 8 and 11;
    # active for the whole 12-epoch run, stepped per epoch.
    dict(
        type='MultiStepLR',
        by_epoch=True,
        begin=0,
        end=12,
        milestones=[8, 11],
        gamma=0.1,
    ),
]

# Loop configuration for training, validation and testing.
train_cfg = dict(
    type='EpochBasedTrainLoop',  # epoch-based training loop (mmengine runner)
    max_epochs=12,               # total number of training epochs
    val_interval=1,              # run validation after every epoch
)
val_cfg = dict(type='ValLoop')    # validation loop type
test_cfg = dict(type='TestLoop')  # testing loop type


# Initialise from the COCO-pre-trained RTMDet-m checkpoint to obtain higher
# performance (the original comment said "Mask RCNN", but the URL below is
# an RTMDet-m checkpoint). Comment this out to train from scratch.
load_from = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/rtmdet_m_8xb32-300e_coco/rtmdet_m_8xb32-300e_coco_20220719_112220-229f527c.pth'
