_base_ = ["../custom_import.py"]
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)

# Test-time image scale used by MultiScaleFlipAug below.
# The (2048, 448) variant was superseded; only the second assignment ever took
# effect, so the dead one is kept as a comment for reference.
# img_scale = (2048, 448)
img_scale = (2048, 336)  # aligned with the AAAI version


# dataset settings
# Registry type name of the dataset and local root of the PASCAL VOC 2010
# (Context) data tree.
dataset_type = "PascalContextDataset59"
data_root = "./data/VOCdevkit/VOC2010"
# NOTE(review): this test_pipeline is dead code — it is unconditionally
# overwritten by the second test_pipeline assignment below, so none of these
# settings (ToRGB, ImageToTensorV2, the extended meta_keys, the disabled
# Normalize) ever take effect. Presumably kept as a reference variant;
# consider removing it.
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='ToRGB'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=img_scale,
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            # dict(type='Normalize', **img_norm_cfg),
            dict(type="ImageToTensorV2", keys=["img"]),
            # dict(type='ImageToTensor', keys=['img']),
            # dict(type='Collect', keys=['img']),
            dict(type="Collect", keys=["img"], meta_keys=['ori_shape', 'img_shape', 'pad_shape', 'flip', 'img_info']),
        ])
]
# Effective test pipeline (this assignment overrides the earlier variant):
# load, multi-scale wrapper at a single fixed scale with no flipping, then
# aspect-preserving resize, normalization, tensor conversion, and collection.
test_pipeline = [
    {"type": "LoadImageFromFile"},
    {
        "type": "MultiScaleFlipAug",
        "img_scale": img_scale,
        # "img_ratios": [0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        "flip": False,
        "transforms": [
            {"type": "Resize", "keep_ratio": True},
            # {"type": "RandomFlip"},
            {"type": "Normalize", **img_norm_cfg},
            {"type": "ImageToTensor", "keys": ["img"]},
            {"type": "Collect", "keys": ["img"]},
        ],
    },
]
# Data config: only the test split is defined in this file.
data = {
    "test": {
        "type": dataset_type,
        "data_root": data_root,
        "img_dir": "JPEGImages",
        "ann_dir": "SegmentationClassContext",
        "split": "ImageSets/SegmentationContext/val.txt",
        "pipeline": test_pipeline,
    },
}

# Sliding-window inference: 224x224 crops advanced with a 112x112 stride,
# aligned with the AAAI version. Earlier alternatives kept for reference:
# test_cfg = dict(mode="slide", stride=(224, 224), crop_size=(112, 112))
# test_cfg = dict(mode="whole")
test_cfg = {"mode": "slide", "stride": (112, 112), "crop_size": (224, 224)}
