# -*- coding: utf-8 -*-
# @Time    : 2023/7/5 10:09
# @Author  : Pan
# @Software: PyCharm
# @Project : VisualFramework
# @FileName: UNet


# Spatial size (height, width) used for padding/cropping during training.
image_size = (512, 512)
# Total number of optimization steps for the whole run.
max_steps = 160000

# Training configuration for an image-to-image UNet pipeline.
# The "type" strings (e.g. "Image2ImageDataset", "WarmupCosineLR") are
# presumably resolved by name through the framework's registry — confirm
# against the framework's builder code.
config = {
    "type": "Image2Image",
    "base_info": {
        "step": max_steps,
        "dot": 50,                 # presumably a print/log interval — TODO confirm
        "save_iters": 1000,        # presumably checkpoint every N iterations — TODO confirm
        "pretrained": None,        # path to pretrained weights; None = train from scratch
        "save_path": "output/",
        "log_dir": "log_dir/",
    },
    "train_dataset": {
        "type": "Image2ImageDataset",
        "batch_size": 8,
        "shuffle": True,
        "num_workers": 2,
        "dt_root": "data/train/image",        # input (data) images
        "gt_root": "data/train/groundtruth",  # target (ground-truth) images
        "transforms": [
            {
                "type": "LoadData",
                "keys": ["img_1", "img_2"],
                "func": "cv2"
            },
            {
                # Random short-side resize in [512, 1024) for scale augmentation.
                # list(range(...)) replaces the redundant identity comprehension
                # [i for i in range(512, 1024, 1)] — same values, idiomatic form.
                "type": "ResizeByShort",
                "keys": ["img_1", "img_2"],
                "short": list(range(512, 1024)),
                "inter": ["bilinear"]
            },
            {
                "type": "RandPaddingCrop",
                "keys": ["img_1", "img_2"],
                "pad_size": image_size,
                "crop_size": image_size
            },
            {
                "type": "ToTensor",
                "keys": ["img_1", "img_2"],
            },
            {
                "type": "Normalize",
                "keys": ["img_1", "img_2"],
                "mean": 0.5,
                "std": 0.5
            }
        ]
    },
    "val_dataset": {
        "type": "Image2ImageDataset",
        "batch_size": 4,
        # NOTE(review): shuffling validation data is unusual — confirm this is
        # intended (metrics should not depend on order, but it hurts
        # reproducibility of per-batch logs).
        "shuffle": True,
        "num_workers": 2,
        "dt_root": "data/val/image",
        "gt_root": "data/val/groundtruth",
        "transforms": [
            {
                "type": "LoadData",
                "keys": ["img_1", "img_2"],
                "func": "cv2"
            },
            {
                # Deterministic resize for validation: fixed short side of 512.
                "type": "ResizeByShort",
                "keys": ["img_1", "img_2"],
                "short": [512],
                "inter": ["bilinear"]
            },
            # Cropping intentionally disabled for validation; kept for easy re-enable.
            # {
            #     "type": "RandPaddingCrop",
            #     "keys": ["img_1", "img_2"],
            #     "pad_size": image_size,
            #     "crop_size": image_size
            # },
            {
                "type": "ToTensor",
                "keys": ["img_1", "img_2"],
            },
            {
                "type": "Normalize",
                "keys": ["img_1", "img_2"],
                "mean": 0.5,
                "std": 0.5
            }
        ]
    },
    "optimizer": {
        "type": "sgd",
        "lr_scheduler": {
            # Warmup: LR ramps up from a small value, then cosine-decays to end_lr.
            "type": "WarmupCosineLR",
            "learning_rate": 0.001,
            "total_steps": max_steps,
            "warmup_steps": 500,
            "warmup_start_lr": 1e-7,
            "end_lr": 1e-7
        },
        "decay": {
            "type": "l2_decay",       # L2 weight decay (weight regularization)
            "coeff": 0.00001
        }
    },
    "network": {
        "type": "image2image",
        "network": {
            "type": "UNet"
        }
    },
    "loss": {
        # Single L1 reconstruction loss with weight 1.
        "loss_list": [
            {
                "type": "L1Loss"
            }
        ],
        "loss_coef": [1]
    },
    "metric": [
        {
            "type": "PSNR",
        },
        {
            "type": "SSIM",
        },
        {
            "type": "MSSSIM",
        }
    ],
    # Automatic mixed precision — disabled; uncomment to enable.
    # "amp": {
    #     "scale": 1024
    # }
}
