_base_ = ["../custom_import.py"]

img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# img_norm_cfg = dict(
#     mean=[122.771, 116.746, 104.094], std=[68.501, 66.632, 70.323], to_rgb=True)
# dataset settings
dataset_type = "PascalVOCDataset20"
data_root = "./data/VOCdevkit/VOC2012"
# NOTE(review): a first test_pipeline definition previously lived here, but it
# was fully shadowed by the test_pipeline reassignment below and therefore had
# no effect; the dead definition has been removed. Only the img_scale constant
# (a module-level config key) is kept.
img_scale = (2048, 448)  # aligned with the AAAI version
# Evaluation pipeline: single scale, no flip augmentation.
test_pipeline = [
    dict(type="LoadImageFromFile"),
    dict(
        type="MultiScaleFlipAug",
        img_scale=(2048, 336),
        flip=False,
        transforms=[
            dict(type="Resize", keep_ratio=True),
            dict(type="Normalize", **img_norm_cfg),
            dict(type="ImageToTensor", keys=["img"]),
            dict(
                type="Collect",
                keys=["img"],
                meta_keys=["ori_shape", "img_shape", "pad_shape", "flip", "img_info"],
            ),
        ],
    ),
]
# Data loaders: only the evaluation (test) split is configured in this file.
data = dict(
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir="JPEGImages",
        ann_dir="SegmentationClass",
        split="ImageSets/Segmentation/val.txt",
        pipeline=test_pipeline,
    ),
)

# Sliding-window inference aligned with the AAAI version: 448x448 crops with a
# 224-pixel stride (50% overlap between adjacent windows).
# NOTE(review): an earlier test_cfg assignment here was fully shadowed by this
# one and has been removed; this value is the one that was actually in effect.
test_cfg = dict(mode="slide", stride=(224, 224), crop_size=(448, 448))