# --------------------------------------------------------
# InternImage
# Copyright (c) 2022 OpenGVLab
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
# _base_ = [
#     '../_base_/datasets/coco_detection.py',
#     '../_base_/default_runtime.py',
#     '../_base_/schedules/schedule_1x.py',
# ]
# Only the shared runtime defaults are inherited; dataset, pipeline and
# schedule settings (normally pulled from the commented-out bases above)
# are defined inline later in this file.
_base_ = [
    '../_base_/default_runtime.py',
]
# ImageNet-1k pretrained InternImage-T weights used to initialise the backbone.
pretrained = 'https://huggingface.co/OpenGVLab/InternImage/resolve/main/internimage_t_1k_224.pth'

# Local COCO-finetuned DINO + InternImage-T checkpoint.
checkpoint = "/mnt/pde/algorithm/user/qxu/data/ckpt/InternImg/dino_4scale_internimage_t_1x_coco.pth"

# Shared InternImage-T backbone definition used by every task in this config.
backbone = {
    'type': 'InternImage',
    'core_op': 'DCNv3',            # deformable-convolution core operator
    'channels': 64,                # stem width; stages double it per level
    'depths': [4, 4, 18, 4],       # blocks per stage
    'groups': [4, 8, 16, 32],      # DCNv3 groups per stage
    'mlp_ratio': 4.,
    'drop_path_rate': 0.2,
    'norm_layer': 'LN',
    'layer_scale': 1.0,
    'offset_scale': 1.0,
    'post_norm': False,
    'with_cp': True,               # activation checkpointing to save memory
    'out_indices': (0, 1, 2, 3),   # expose all four stages
    'init_cfg': {'type': 'Pretrained', 'checkpoint': pretrained},
}

# Global settings shared by every task in this config.
# Active task list; add 'cls' back to enable the classification branch.
# NOTE(review): 'segmention' looks like a typo for 'segmentation', but the
# consumer presumably matches this exact string, so it is left unchanged —
# confirm against the code that reads `task` before renaming.
# task = ['cls', 'detection', 'segmention']
task = ['detection', 'segmention']
# Output directory for logs/checkpoints; reused as `output` in the per-task
# configs further down.
work_dir = "/mnt/pde/algorithm/user/qxu/tmp/test_plane"
# Distributed-training placeholders; -1 / False mean "not initialised yet".
LOCAL_RANK = -1
WORLD_SIZE = -1
DISTRIBUTED = False 


## Classification model settings: head size plus mixing-augmentation and
## loss options (all mixing augmentations are disabled here).
cls_model = {
    'num_classes': 1000,        # ImageNet-1k head
    'aug': {
        'mixup': 0.0,           # mixup disabled
        'cutmix': 0.0,          # cutmix disabled
        'reprob': 0.0,          # random-erase disabled
    },
    'loss': {
        'label_smoothing': 0.3,
    },
}

# detection_model config: a DINO (DETR-family) detector head that sits on
# top of the shared `backbone` defined above.
detection_model = dict(
    # Projects the listed backbone feature maps to a uniform 256-channel
    # pyramid; num_outs=4 requests one more output level than inputs
    # (presumably an extra downsampled level — confirm in the neck impl).
    neck=dict(
        # type='ChannelMapper',
        in_channels=[128, 256, 512],
        kernel_size=1,
        out_channels=256,
        act_cfg=None,
        norm_cfg=dict(type='GN', num_groups=32),
        num_outs=4),
    bbox_head=dict(
        type='DINOHead',
        num_query=900,          # number of object queries
        num_classes=80,         # COCO class count
        in_channels=2048,
        sync_cls_avg_factor=True,
        as_two_stage=True,      # two-stage proposal generation
        with_box_refine=True,   # iterative box refinement across decoder layers
        # Contrastive denoising query generator (the DINO training trick).
        dn_cfg=dict(
            type='CdnQueryGenerator',
            noise_scale=dict(label=0.5, box=1.0),
            group_cfg=dict(dynamic=True, num_groups=None, num_dn_queries=100)),
        transformer=dict(
            type='DinoTransformer',
            two_stage_num_proposals=900,
            encoder=dict(
                type='DetrTransformerEncoder',
                num_layers=6,
                transformerlayers=dict(
                    type='BaseTransformerLayer',
                    attn_cfgs=dict(
                        type='MultiScaleDeformableAttention',
                        embed_dims=256,
                        dropout=0.0),
                    feedforward_channels=2048,
                    ffn_dropout=0.0,  # 0.1 for DeformDETR
                    operation_order=('self_attn', 'norm', 'ffn', 'norm'))),
            decoder=dict(
                type='DinoTransformerDecoder',
                num_layers=6,
                return_intermediate=True,  # keep per-layer outputs for aux losses
                transformerlayers=dict(
                    type='DetrTransformerDecoderLayer',
                    # Self-attention over queries, then deformable
                    # cross-attention into the image features.
                    attn_cfgs=[
                        dict(
                            type='MultiheadAttention',
                            embed_dims=256,
                            num_heads=8,
                            dropout=0.0),
                        dict(
                            type='MultiScaleDeformableAttention',
                            embed_dims=256,
                            dropout=0.0),
                    ],
                    feedforward_channels=2048,
                    ffn_dropout=0.0,
                    operation_order=('self_attn', 'norm', 'cross_attn', 'norm',
                                     'ffn', 'norm')))),
        positional_encoding=dict(
            type='SinePositionalEncoding',
            num_feats=128,
            temperature=20,
            normalize=True),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=5.0),
        loss_iou=dict(type='GIoULoss', loss_weight=2.0)),
    # training and testing settings
    # Hungarian matching costs mirror the loss weights above (2.0 / 5.0 / 2.0).
    train_cfg=dict(
        assigner=dict(
            type='HungarianAssigner',
            cls_cost=dict(type='FocalLossCost', weight=2.0),
            reg_cost=dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'),
            iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0))),
    test_cfg=dict(max_per_img=100))  # cap detections kept per image

# Segmentation model settings; the following values target the ADE20K
# dataset (150 classes): a UPerHead decoder plus an FCN auxiliary head.
segmentation_model_norm_cfg = {'type': 'SyncBN', 'requires_grad': True}
segmentation_model = {
    'decode_head': {
        'type': 'UPerHead',
        'in_channels': [64, 128, 256, 512],   # all four backbone stages
        'in_index': [0, 1, 2, 3],
        'pool_scales': (1, 2, 3, 6),          # PPM pooling scales
        'channels': 512,
        'dropout_ratio': 0.1,
        'num_classes': 150,
        'norm_cfg': segmentation_model_norm_cfg,
        'align_corners': False,
        'loss_decode': {
            'type': 'CrossEntropyLoss', 'use_sigmoid': False,
            'loss_weight': 1.0},
    },
    # Auxiliary head on stage 2, weighted 0.4 relative to the main loss.
    'auxiliary_head': {
        'type': 'FCNHead',
        'in_channels': 256,
        'in_index': 2,
        'channels': 256,
        'num_convs': 1,
        'concat_input': False,
        'dropout_ratio': 0.1,
        'num_classes': 150,
        'norm_cfg': segmentation_model_norm_cfg,
        'align_corners': False,
        'loss_decode': {
            'type': 'CrossEntropyLoss', 'use_sigmoid': False,
            'loss_weight': 0.4},
    },
    # model training and testing settings
    'train_cfg': {},
    'test_cfg': {'mode': 'whole'},   # whole-image (non-sliding) inference
}


# Image normalisation constants shared by the detection pipelines below
# (ImageNet channel statistics on the 0-255 scale; to_rgb converts BGR input).
img_norm_cfg = {
    'mean': [123.675, 116.28, 103.53],
    'std': [58.395, 57.12, 57.375],
    'to_rgb': True,
}

# Training data pipelines, keyed by task.
train_pipeline = dict(
    detection=[
        dict(type='LoadImageFromFile'),
        dict(type='LoadAnnotations', with_bbox=True),
        dict(type='RandomFlip', flip_ratio=0.5),
        # AutoAugment picks one of the two policies per sample:
        #   1) plain multi-scale resize, or
        #   2) resize -> random crop -> multi-scale resize again.
        dict(
            type='AutoAugment',
            policies=[
                [
                    dict(
                        type='Resize',
                        img_scale=[(480, 1333), (512, 1333), (544, 1333),
                                (576, 1333), (608, 1333), (640, 1333),
                                (672, 1333), (704, 1333), (736, 1333),
                                (768, 1333), (800, 1333)],
                        multiscale_mode='value',  # pick one scale from the list
                        keep_ratio=True)
                ],
                [
                    dict(
                        type='Resize',
                        img_scale=[(400, 4200), (500, 4200), (600, 4200)],
                        multiscale_mode='value',
                        keep_ratio=True),
                    dict(
                        type='RandomCrop',
                        crop_type='absolute_range',
                        crop_size=(384, 600),
                        allow_negative_crop=False),
                    dict(
                        type='Resize',
                        img_scale=[(480, 1333), (512, 1333), (544, 1333),
                                (576, 1333), (608, 1333), (640, 1333),
                                (672, 1333), (704, 1333), (736, 1333),
                                (768, 1333), (800, 1333)],
                        multiscale_mode='value',
                        override=True,  # replace the scale set by the first Resize
                        keep_ratio=True)
                ]
            ]),
        dict(type='Normalize', **img_norm_cfg),
        dict(type='Pad', size_divisor=32),  # pad H/W to a multiple of 32
        dict(type='DefaultFormatBundle'),
        dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
    ],
    cls = None  # no shared classification pipeline is defined in this config
)

## Evaluation/inference data pipelines, keyed by task.
test_pipeline = dict(
    detection=[
        dict(type='LoadImageFromFile'),
        # Single-scale test at (1333, 800) with flipping disabled; the inner
        # transforms are applied per scale/flip combination.
        dict(
            type='MultiScaleFlipAug',
            img_scale=(1333, 800),
            flip=False,
            transforms=[
                dict(type='Resize', keep_ratio=True),
                dict(type='RandomFlip'),
                dict(type='Normalize', **img_norm_cfg),
                dict(type='Pad', size_divisor=32),
                dict(type='ImageToTensor', keys=['img']),
                dict(type='Collect', keys=['img']),
            ])
    ],
    cls=None,  # no shared classification pipeline is defined in this config
    
)




# Classification training configuration (ImageNet-1k).
cls_config = dict(
    data=dict(
        batch_size=128,   # per-GPU batch size
        img_size=224,
        data_path='/mnt/pde/algorithm/dataset/ImageNet/',
        interpolation='bicubic',
        cache_mode='no',
        # NOTE(review): 'pip_memory' looks like a typo for 'pin_memory'; the
        # key is kept unchanged because the dataloader code presumably reads
        # this exact name — rename both sides together.
        pip_memory=True,
        num_workers=8,
        img_on_memory=True),
    train=dict(
        start_epoch = 0,
        epochs = 10,
        warmup_epochs=1,
        weight_decay=0.05,
        base_lr=0.0005,
        warmup_lr=5e-05,
        min_lr=5e-06,
        # NOTE(review): 'cilp_grad' looks like a typo for 'clip_grad'; kept
        # unchanged for the same reason as 'pip_memory' above.
        cilp_grad=5.0,
        auto_resume=True,
        # model_resume = "/mnt/pde/algorithm/user/qxu/data/ckpt/InternImg/internimage_t_1k_224.pth",
        model_resume = "/mnt/pde/algorithm/user/qxu/data/ckpt/InternImg/dino_4scale_internimage_t_1x_coco.pth",
        accumulation_steps=1,   # gradient accumulation steps
        use_checkpoint=False
        ),
    lr_scheduler=dict(
        name='cosine',
        decay_epochs=30,
        decay_rate=0.1
    ),
    optimizer=dict(
        name="adamw",
        eps=1e-08,
        betas=(0.9, 0.999),
        momentum=0.9,   # presumably for an SGD option; AdamW itself uses betas — confirm
        use_zero=False,
        freeze_backbone=None,
        dcn_lr_mul=None
    ),
    ema=dict(
        enable=True,
        decay=0.9999,
    ),
    lr_layer_decay=False,
    lr_layer_decay_ratio=0.875,
    rand_init_ft_head=False,
    aug=dict(color_jitter=0.4),
    # Standard ImageNet channel statistics on the 0-1 scale.
    # Fix: the first mean entry was 0.484, a typo for the canonical 0.485
    # (the std tuple already matches the standard values exactly).
    mean = (0.485, 0.456, 0.406),
    std = (0.229, 0.224, 0.225),
    test=dict(
        crop=True,
        sequential=False,
    ),
    # Fix: apex AMP optimisation levels are 'O0'..'O3' with a capital letter
    # O; the original '01' (zero-one) is rejected by apex.amp.initialize.
    amp_opt_level='O1',
    output=work_dir,
    tag = 'default',
    save_freq=1,     # checkpoint save interval (epochs)
    print_freq=10,   # logging interval (iterations)
    eval_freq=1,     # evaluation interval (epochs)
    seed=0,
    eval_model=False,
    throughput_mode=False,
    local_rank=0,
    amp_type='float16',
    save_ckpt_num=1,  # number of checkpoints to keep
)


# Detection training configuration: COCO 2017 data, schedule, optimiser and
# runtime settings in one dict.
detection_data_root ='/mnt/pde/algorithm/user/qxu/data/detection/coco2017/'
detection_config = dict(
    data=dict(
        samples_per_gpu=2,
        base_batch_size=2, # base batch size for per GPU
        workers_per_gpu=2,
        # One validation pass after every training epoch.
        workflow = [('train', 1), ('val', 1)],
        train=dict(
            type='CocoDataset',
            ann_file= detection_data_root + 'annotations/instances_train2017.json',
            img_prefix=detection_data_root + 'train2017/',
            pipeline=train_pipeline['detection']),
        val=dict(
            type='CocoDataset',
            ann_file=detection_data_root + 'annotations/instances_val2017.json',
            img_prefix=detection_data_root + 'val2017/',
            pipeline=test_pipeline['detection']),
        # Test split reuses the val2017 annotations.
        test=dict(
            type='CocoDataset',
            ann_file=detection_data_root + 'annotations/instances_val2017.json',
            img_prefix=detection_data_root + 'val2017/',
            pipeline=test_pipeline['detection']),
        ),
    
    # Evaluation settings: COCO bbox mAP with per-class breakdown.
    test=dict(
        evaluation=dict(interval=1, metric='bbox', classwise=True),
        eval = "bbox",
        show = None,
        show_dir = None,     
        out = "/tmp/result.json" # output json save path
    ),
    
    train=dict(
        start_epoch = 0,
        epochs = 12,
        # NOTE(review): warmup_epochs=11 of 12 total epochs looks unusually
        # long — confirm this is intentional and not a typo for 1.
        warmup_epochs=11,
        weight_decay=0.0001,
        base_lr=None,  # None here; the optimizer dict below carries the lr
        warmup_lr=5e-07,
        min_lr=5e-07,
        # NOTE(review): 'cilp_grad' looks like a typo for 'clip_grad'; the
        # consumer presumably reads this exact key, so it is left unchanged.
        cilp_grad=5.0,
        auto_resume=True,
        model_resume = "/mnt/pde/algorithm/user/qxu/data/ckpt/InternImg/dino_4scale_internimage_t_1x_coco.pth",
        use_checkpoint=False
        ),
    # Step decay at epoch 5 with linear warmup over the first 500 iterations.
    lr_scheduler=dict(
        name='step',
        decay_epochs=5,
        decay_rate=0.1,
        auto_scale_lr = True,
        warmup='linear',
        warmup_iters=500,
        warmup_ratio=0.0001,
        step=[5],
        
    ),
    # AdamW with layer-wise lr decay over the 30 backbone layers
    # (matches the backbone depths [4, 4, 18, 4]).
    optimizer=dict(
        type="AdamW",lr=0.000001, weight_decay=0.0001,
        constructor='CustomLayerDecayOptimizerConstructor',
        paramwise_cfg=dict(num_layers=30, layer_decay_rate=0.9,
                       depths=[4, 4, 18, 4]),
    ),
    
    ema=dict(
        enable=True,
        decay=0.9999,
    ),
    
    lr_layer_decay=False,
    lr_layer_decay_ratio=0.875,
    rand_init_ft_head=False,
    aug=dict(color_jitter=0.4),
    # Duplicate of the module-level img_norm_cfg (0-255 ImageNet statistics).
    img_norm_cfg = dict(
        mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True
        ),

    output=work_dir,
    tag = 'default',
    save_freq=1,     # checkpoint save interval (epochs)
    print_freq=10,   # logging interval (iterations)
    eval_freq=1,     # evaluation interval (epochs)
    seed=0,
    eval_model=False,
    throughput_mode=False,
    local_rank=0,
    amp_type='float16',
    save_ckpt_num=1,  # number of checkpoints to keep
)


