# Inherit from the base config below and override fields by redefining them.
# Typically only the fields in this file need to be overridden.
_base_ = '../yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py'

# Dataset settings
data_root = '/data/linjiahui/datasets/fire/'
class_name = ('fire', )
num_classes = len(class_name)
# metainfo must be passed into the dataloader configs below, otherwise it has
# no effect.
# palette holds the per-class display colors used during visualization.
# palette must be at least as long as classes.
# The actual colors are arbitrary — pick anything.
metainfo = dict(classes=class_name, palette=[(20, 220, 60)])


# Anchor boxes
# NOTE(review): despite the original comment, these are the stock YOLOv5 COCO
# anchors, not ones fitted to this dataset — run
# tools/analysis_tools/optimize_anchors.py to recompute them if desired.
anchors = [
    [(10, 13), (16, 30), (33, 23)],  # P3/8
    [(30, 61), (64, 45), (59, 119)],  # P4/16
    [(116, 90), (156, 198), (373, 326)]  # P5/32
]


# Training schedule
max_epochs = 100
train_batch_size_per_gpu = 32
# Scale base_lr to your GPU setup: base_lr_default * (your_bs / default_bs),
# where the base config uses default_bs=16.
base_lr = 0.02
# max_epochs = 300  # Maximum training epochs
# On Linux:
train_num_workers = 4
# On Windows use instead:
# train_num_workers = 0
# persistent_workers = False


# COCO pre-trained weights (uncomment to fine-tune from them)
# load_from = 'https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco/yolov5_s-v61_syncbn_fast_8xb16-300e_coco_20220918_084700-86e02187.pth'  # noqa


# The scaling factor that controls the depth of the network structure
deepen_factor = 0.33
# The scaling factor that controls the width of the network structure
widen_factor = 0.5
# model
# Model: replace the YOLOv5 backbone with YOLOv6's EfficientRep while keeping
# the YOLOv5 neck/head inherited from _base_.
# BUG FIX: the original `model = dict(` was never closed — the single `)` at
# the end only closed `bbox_head=dict(`, so the file raised a SyntaxError.
model = dict(
    # To freeze the whole backbone instead of swapping it, use:
    # backbone=dict(frozen_stages=4),
    backbone=dict(
        _delete_=True,  # drop every backbone key inherited from _base_
        type='YOLOv6EfficientRep',
        deepen_factor=deepen_factor,
        widen_factor=widen_factor,
        norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),
        act_cfg=dict(type='ReLU', inplace=True)),
    bbox_head=dict(
        head_module=dict(num_classes=num_classes),
        prior_generator=dict(base_sizes=anchors),
        # loss_cls is normally rescaled with num_classes, but with
        # num_classes == 1 it would be constantly 0, so the base-config
        # weight is left untouched:
        # loss_cls=dict(loss_weight=0.5 *
        #             (num_classes / 80 * 3 / _base_.num_det_layers)),
    ))


# 数据加载 include: train、test、val
# Dataloaders: train / val / test
train_dataloader = dict(
    batch_size=train_batch_size_per_gpu,
    num_workers=train_num_workers,
    dataset=dict(
        # With very little data, RepeatDataset can repeat the dataset n times
        # within each epoch; e.g. times=5 repeats it 5 times:
        # type='RepeatDataset',
        # times=5,
        data_root=data_root,
        metainfo=metainfo,     # class names + palette configured above
        ann_file='annotations/train.json',
        data_prefix=dict(img='images/')))

# NOTE(review): validation reuses the test-split annotations — there is no
# separate val set; confirm this is intended.
val_dataloader = dict(
    dataset=dict(
        metainfo=metainfo,
        data_root=data_root,
        ann_file='annotations/test.json',
        data_prefix=dict(img='images/')))

test_dataloader = val_dataloader

# Tell the optimizer's LR auto-scaling the actual per-GPU batch size.
_base_.optim_wrapper.optimizer.batch_size_per_gpu = train_batch_size_per_gpu
# FIX: base_lr was computed above (default 0.01 scaled by bs 32/16 per the
# author's own comment) but was never applied — without this line the base
# config's learning rate would have been used unchanged.
_base_.optim_wrapper.optimizer.lr = base_lr

# Point the COCO-style evaluators at the same annotation file as val/test.
val_evaluator = dict(ann_file=data_root + 'annotations/test.json')
test_evaluator = val_evaluator

default_hooks = dict(
    # Save a checkpoint every 10 epochs, keeping at most 2 on disk;
    # also automatically keep the best model found during evaluation.
    checkpoint=dict(interval=10, max_keep_ckpts=2, save_best='auto'),
    # warmup_mim_iter (sic — presumably this misspelling is the actual
    # parameter name in MMYOLO's YOLOv5ParamSchedulerHook; verify against the
    # installed version) is critical here: the dataset is tiny, and the
    # default minimum of 1000 warmup iterations would keep the learning rate
    # too small for too long.
    param_scheduler=dict(max_epochs=max_epochs, warmup_mim_iter=10),
    # Log every 5 iterations.
    logger=dict(type='LoggerHook', interval=5))


# Run validation every 10 epochs.
train_cfg = dict(max_epochs=max_epochs, val_interval=10)
# visualizer = dict(vis_backends = [dict(type='LocalVisBackend'), dict(type='WandbVisBackend')]) # noqa
