[net]
# Testing
#batch=1
#subdivisions=1
# Training
batch=64
subdivisions=8
width=512
height=512
channels=3
momentum=0.949
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1

learning_rate=0.00261
burn_in=1000
max_batches = 500500
policy=steps
steps=400000,450000
scales=.1,.1

#cutmix=1
mosaic=1


# ============ Backbone ============ #

# Stem

# 0
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=silu

# P1

# Downsample

[convolutional]
batch_normalize=1
filters=64
size=3
stride=2
pad=1
activation=silu

# Residual Block

[convolutional]
batch_normalize=1
filters=32
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=silu

# 4 (previous+1+3k)
[shortcut]
from=-3
activation=linear

# P2

# Downsample

[convolutional]
batch_normalize=1
filters=128
size=3
stride=2
pad=1
activation=silu

# Split

[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=silu

[route]
layers = -2

[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=silu

# Residual Block

[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

# Transition first

[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=silu

# Merge [-1, -(3k+4)]

[route]
layers = -1,-10

# Transition last

# 17 (previous+7+3k)
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

# P3

# Downsample

[convolutional]
batch_normalize=1
filters=256
size=3
stride=2
pad=1
activation=silu

# Split

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[route]
layers = -2

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

# Residual Block

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

# Transition first

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

# Merge [-1 -(4+3k)]

[route]
layers = -1,-28

# Transition last

# 48 (previous+7+3k)
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

# P4

# Downsample

[convolutional]
batch_normalize=1
filters=512
size=3
stride=2
pad=1
activation=silu

# Split

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[route]
layers = -2

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

# Residual Block

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

# Transition first

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

# Merge [-1 -(3k+4)]

[route]
layers = -1,-28

# Transition last

# 79 (previous+7+3k)
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=silu

# P5

# Downsample

[convolutional]
batch_normalize=1
filters=1024
size=3
stride=2
pad=1
activation=silu

# Split

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=silu

[route]
layers = -2

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=silu

# Residual Block

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=silu

[shortcut]
from=-3
activation=linear

# Transition first

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=silu

# Merge [-1 -(3k+4)]

[route]
layers = -1,-16

# Transition last

# 98 (previous+7+3k)
[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=silu

# ============ End of Backbone ============ #

# ============ Neck ============ #

# CSPSPP

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=silu

[route]
layers = -2

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=silu

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=silu

### SPP ###
[maxpool]
stride=1
size=5

[route]
layers=-2

[maxpool]
stride=1
size=9

[route]
layers=-4

[maxpool]
stride=1
size=13

[route]
layers=-1,-3,-5,-6
### End SPP ###

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=silu

[route]
layers = -1, -13

# 113 (previous+6+5+2k)
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=silu
# End of CSPSPP

# FPN-4

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[upsample]
stride=2

[route]
layers = 79

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[route]
layers = -1, -3

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

# Split

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[route]
layers = -2

# Plain Block

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=silu

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=silu

# Merge [-1, -(2k+2)]

[route]
layers = -1, -6

# Transition last

# 127 (previous+6+4+2k)
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

# FPN-3

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[upsample]
stride=2

[route]
layers = 48

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[route]
layers = -1, -3

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

# Split

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[route]
layers = -2

# Plain Block

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=128
activation=silu

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=128
activation=silu

# Merge [-1, -(2k+2)]

[route]
layers = -1, -6

# Transition last

# 141 (previous+6+4+2k)
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=silu

# PAN-4

[convolutional]
batch_normalize=1
size=3
stride=2
pad=1
filters=256
activation=silu

[route]
layers = -1, 127

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

# Split

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[route]
layers = -2

# Plain Block

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=silu

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=silu

[route]
layers = -1,-6

# Transition last

# 152 (previous+3+4+2k)
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=silu

# PAN-5

[convolutional]
batch_normalize=1
size=3
stride=2
pad=1
filters=512
activation=silu

[route]
layers = -1, 113

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=silu

# Split

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=silu

[route]
layers = -2

# Plain Block

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=silu

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=silu

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=silu

[route]
layers = -1,-6

# Transition last

# 163 (previous+3+4+2k)
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=silu

# ============ End of Neck ============ #

# 164
[implicit_add]
filters=256

# 165
[implicit_add]
filters=512

# 166
[implicit_add]
filters=1024

# 167
[implicit_mul]
filters=255

# 168
[implicit_mul]
filters=255

# 169
[implicit_mul]
filters=255

# ============ Head ============ #

# YOLO-3

[route]
layers = 141

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=silu

[shift_channels]
from=164

[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear

[control_channels]
from=167

[yolo]
mask = 0,1,2
anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
classes=80
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
scale_x_y = 1.05
iou_thresh=0.213
cls_normalizer=1.0
iou_normalizer=0.07
iou_loss=ciou
nms_kind=greedynms
beta_nms=0.6

# YOLO-4

[route]
layers = 152

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=silu

[shift_channels]
from=165

[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear

[control_channels]
from=168

[yolo]
mask = 3,4,5
anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
classes=80
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
scale_x_y = 1.05
iou_thresh=0.213
cls_normalizer=1.0
iou_normalizer=0.07
iou_loss=ciou
nms_kind=greedynms
beta_nms=0.6

# YOLO-5

[route]
layers = 163

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=silu

[shift_channels]
from=166

[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear

[control_channels]
from=169

[yolo]
mask = 6,7,8
anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
classes=80
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1
scale_x_y = 1.05
iou_thresh=0.213
cls_normalizer=1.0
iou_normalizer=0.07
iou_loss=ciou
nms_kind=greedynms
beta_nms=0.6
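The route layers in the neck and head refer to absolute layer indices (48, 79, 113, 127, 141, 152, 163) that correspond to the index comments embedded above. The following is a minimal sketch, not part of the config, of how one could parse a Darknet-style .cfg into per-layer dicts and print each layer's index to verify those references; the function name parse_cfg and the filename yolor-csp.cfg are only assumed examples.

# Minimal sketch (assumption: the config above is saved as "yolor-csp.cfg").
# Parses each [section] into a dict of key/value strings and prints the
# 0-based layer index that [route]/comment references use ([net] is excluded).

def parse_cfg(path):
    sections = []
    with open(path) as f:
        for raw in f:
            line = raw.split('#', 1)[0].strip()   # drop comments and blanks
            if not line:
                continue
            if line.startswith('[') and line.endswith(']'):
                sections.append({'type': line[1:-1]})   # start a new section
            elif '=' in line and sections:
                key, value = (s.strip() for s in line.split('=', 1))
                sections[-1][key] = value
    return sections

if __name__ == '__main__':
    sections = parse_cfg('yolor-csp.cfg')
    net, layers = sections[0], sections[1:]   # [net] holds hyperparameters only
    for idx, layer in enumerate(layers):      # layer 0 is the stem convolution
        print(idx, layer['type'])

Running this against the config should show, for example, index 79 on the 512-filter transition at the end of P4 and index 113 on the final CSPSPP convolution, matching the comments the [route] layers point at.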