# Image normalization statistics (ImageNet mean/std, 0-255 scale);
# to_rgb=True converts loaded BGR images to RGB before normalizing.
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    to_rgb=True,
)
# Ground-truth boxes with a side shorter than this many pixels are filtered
# out by the datasets below (passed as their `min_size`).
min_size = 16

# Training-time data pipeline: load image + boxes, apply photometric jitter,
# random expand / IoU-constrained crop, resize, flip, then normalize, pad to
# a multiple of 32 and collect the tensors the detector consumes.
train_pipeline = [
    dict(type="LoadImageFromFile", to_float32=True),
    dict(type="LoadAnnotations", with_bbox=True),
    dict(
        type="PhotoMetricDistortion",
        contrast_range=(0.8, 1.2),
        saturation_range=(0.8, 1.2),
        hue_delta=10,
    ),
    dict(
        type="Expand",
        mean=img_norm_cfg["mean"],
        to_rgb=img_norm_cfg["to_rgb"],
        ratio_range=(1, 2),
    ),
    dict(
        type="MinIoURandomCrop",
        min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
        min_crop_size=0.3,
    ),
    dict(type="Resize", img_scale=(1000, 600), keep_ratio=True),
    dict(type="RandomFlip", flip_ratio=0.5),
    dict(type="Normalize", **img_norm_cfg),
    dict(type="Pad", size_divisor=32),
    dict(type="DefaultFormatBundle"),
    dict(type="Collect", keys=["img", "gt_bboxes", "gt_labels"]),
]
# Inference-time pipeline: single scale (1000, 600), no flip augmentation;
# the same normalize/pad settings as training are applied inside
# MultiScaleFlipAug.
test_pipeline = [
    dict(type="LoadImageFromFile"),
    dict(
        type="MultiScaleFlipAug",
        img_scale=(1000, 600),
        flip=False,
        transforms=[
            dict(type="Resize", keep_ratio=True),
            dict(type="RandomFlip"),
            dict(type="Normalize", **img_norm_cfg),
            dict(type="Pad", size_divisor=32),
            dict(type="ImageToTensor", keys=["img"]),
            dict(type="Collect", keys=["img"]),
        ],
    ),
]

# SCUT-HEAD dataset. Training currently uses Part B only; the Part A entries
# are kept commented out as a deliberate toggle.
scut_dataset_type = "SCUTDataset"
scut_data_root = "data/SCUT/"
scut_train = dict(
    type=scut_dataset_type,
    ann_file=[
        # scut_data_root + "SCUT_HEAD_Part_A/ImageSets/Main/train.txt",
        scut_data_root + "SCUT_HEAD_Part_B/ImageSets/Main/train.txt",
    ],
    img_prefix=[
        # scut_data_root + "SCUT_HEAD_Part_A/",
        scut_data_root + "SCUT_HEAD_Part_B/",
    ],
    pipeline=train_pipeline,
    min_size=min_size,
)
# SCUT-HEAD validation split: both Part A and Part B.
scut_val = dict(
    type=scut_dataset_type,
    ann_file=[
        scut_data_root + "SCUT_HEAD_Part_A/ImageSets/Main/val.txt",
        scut_data_root + "SCUT_HEAD_Part_B/ImageSets/Main/val.txt",
    ],
    img_prefix=[
        scut_data_root + "SCUT_HEAD_Part_A/",
        scut_data_root + "SCUT_HEAD_Part_B/",
    ],
    pipeline=test_pipeline,
    min_size=min_size,
)
# SCUT-HEAD test split: both Part A and Part B.
scut_test = dict(
    type=scut_dataset_type,
    ann_file=[
        scut_data_root + "SCUT_HEAD_Part_A/ImageSets/Main/test.txt",
        scut_data_root + "SCUT_HEAD_Part_B/ImageSets/Main/test.txt",
    ],
    img_prefix=[
        scut_data_root + "SCUT_HEAD_Part_A/",
        scut_data_root + "SCUT_HEAD_Part_B/",
    ],
    pipeline=test_pipeline,
    min_size=min_size,
)

# Brainwash head dataset: one root directory with VOC-style split files.
brainwash_dataset_type = "BrainwashDataset"
brainwash_data_root = "data/brainwash/"
brainwash_train = dict(
    type=brainwash_dataset_type,
    ann_file=brainwash_data_root + "ImageSets/Main/train.txt",
    img_prefix=brainwash_data_root,
    pipeline=train_pipeline,
    min_size=min_size,
)
brainwash_val = dict(
    type=brainwash_dataset_type,
    ann_file=brainwash_data_root + "ImageSets/Main/val.txt",
    img_prefix=brainwash_data_root,
    pipeline=test_pipeline,
    min_size=min_size,
)
brainwash_test = dict(
    type=brainwash_dataset_type,
    ann_file=brainwash_data_root + "ImageSets/Main/test.txt",
    img_prefix=brainwash_data_root,
    pipeline=test_pipeline,
    min_size=min_size,
)

# HollywoodHeads dataset: one root directory with VOC-style split files.
hollywood_dataset_type = "HollywoodDataset"
hollywood_data_root = "data/HollywoodHeads/"
hollywood_train = dict(
    type=hollywood_dataset_type,
    ann_file=hollywood_data_root + "ImageSets/Main/train.txt",
    img_prefix=hollywood_data_root,
    pipeline=train_pipeline,
    min_size=min_size,
)
hollywood_val = dict(
    type=hollywood_dataset_type,
    ann_file=hollywood_data_root + "ImageSets/Main/val.txt",
    img_prefix=hollywood_data_root,
    pipeline=test_pipeline,
    min_size=min_size,
)
hollywood_test = dict(
    type=hollywood_dataset_type,
    ann_file=hollywood_data_root + "ImageSets/Main/test.txt",
    img_prefix=hollywood_data_root,
    pipeline=test_pipeline,
    min_size=min_size,
)

# PSDB head dataset.
# FIX: the original assignments each ended with a trailing comma after the
# closing parenthesis (`dict(...),`), which made psdb_train / psdb_val /
# psdb_test one-element *tuples* rather than config dicts. The ConcatDataset
# entries in `data` below expect plain dataset dicts, so the stray commas
# are removed.
psdb_dataset_type = "PSDBDataset"
psdb_data_root = "data/psdb/"
psdb_train = dict(
    type=psdb_dataset_type,
    ann_file=psdb_data_root + "ImageSets/Main/train.txt",
    img_prefix=psdb_data_root,
    pipeline=train_pipeline,
    min_size=min_size,
)
psdb_val = dict(
    type=psdb_dataset_type,
    ann_file=psdb_data_root + "ImageSets/Main/val.txt",
    img_prefix=psdb_data_root,
    pipeline=test_pipeline,
    min_size=min_size,
)
psdb_test = dict(
    type=psdb_dataset_type,
    ann_file=psdb_data_root + "ImageSets/Main/test.txt",
    img_prefix=psdb_data_root,
    pipeline=test_pipeline,
    min_size=min_size,
)

# CrowdHuman dataset (ODGT annotation format; val split doubles as test).
# FIX: the original assignments each ended with a trailing comma after the
# closing parenthesis (`dict(...),`), which made crowdhuman_train /
# crowdhuman_val / crowdhuman_test one-element *tuples* rather than config
# dicts. The ConcatDataset entries in `data` below expect plain dataset
# dicts, so the stray commas are removed.
crowdhuman_dataset_type = "CrowdHumanDataset"
crowdhuman_data_root = "data/CrowdHuman/"
crowdhuman_train = dict(
    type=crowdhuman_dataset_type,
    ann_file=crowdhuman_data_root + "annotation_train.odgt",
    img_prefix=crowdhuman_data_root + "train",
    pipeline=train_pipeline,
)
crowdhuman_val = dict(
    type=crowdhuman_dataset_type,
    ann_file=crowdhuman_data_root + "annotation_val.odgt",
    img_prefix=crowdhuman_data_root + "val",
    pipeline=test_pipeline,
)
crowdhuman_test = dict(
    type=crowdhuman_dataset_type,
    ann_file=crowdhuman_data_root + "annotation_val.odgt",
    img_prefix=crowdhuman_data_root + "val",
    pipeline=test_pipeline,
)

# Dataloader configuration. Training concatenates SCUT + PSDB + CrowdHuman;
# val/test additionally include Brainwash and HollywoodHeads.
# FIX: the original test entry also carried `pipeline=test_pipeline` at the
# ConcatDataset level. Each child dataset already declares its own pipeline,
# and a ConcatDataset-level `pipeline` key is dead configuration there
# (presumably ignored by the dataset builder — confirm against the mmdet
# version in use), so the misleading key is dropped.
data = dict(
    samples_per_gpu=16,
    workers_per_gpu=2,
    train=dict(
        type="ConcatDataset",
        datasets=[scut_train, psdb_train, crowdhuman_train],
    ),
    val=dict(
        type="ConcatDataset",
        datasets=[
            scut_val,
            brainwash_val,
            psdb_val,
            hollywood_val,
            crowdhuman_val,
        ],
    ),
    test=dict(
        type="ConcatDataset",
        datasets=[
            scut_test,
            brainwash_test,
            psdb_test,
            hollywood_test,
            crowdhuman_test,
        ],
    ),
)
