class Path_Hyperparameter:
    """Central hyper-parameter configuration for the change-detection pipeline.

    All settings live as class-level attributes; ``state_dict`` exports the
    public, non-callable ones as a plain dict (e.g. for experiment logging).
    """

    random_seed = 42  # global RNG seed for reproducibility

    # experiments
    tags: str = 'paper'

    # dataset hyper-parameter
    root_dir = '/mnt/data/datasets/'
    dataset_name = 'LEVIR-CD-256'
    dataset_format: str = 'new_version'  # new_version:A, B, label, list; old_version: train, val, test
    split: str = 'train'  # choice('trainval', 'train')
    split_val: str = 'val'  # choice('val', 'test')
    IMG_FOLDER_NAME: str = "A"
    IMG_POST_FOLDER_NAME: str = 'B'
    LIST_FOLDER_NAME: str = 'list'
    ANNOT_FOLDER_NAME: str = "label"
    label_suffix: str = '.png'  # jpg for gan dataset, others : png
    default: bool = True
    multi_run: bool = False
    # Per-dataset channel statistics: {name: [[mean_A, std_A], [mean_B, std_B]]}
    # (pre/post-change image pairs each get their own mean/std triplets).
    dataset_mean_std = {'LEVIR-CD2+': [
                            [[0.44647954, 0.44253507, 0.37772247], [0.17393848, 0.16415723, 0.15207397]],
                            [[0.34149348, 0.33470663, 0.28560712], [0.12841433, 0.12489144, 0.11756364]]
                        ],
                        'LEVIR-CD256': [
                            [[0.45026044, 0.44666811, 0.38134658], [0.17456748, 0.16490024, 0.15318057]],
                            [[0.34552285, 0.33819558, 0.28881546], [0.12937804, 0.12601846, 0.1187869 ]]
                        ],
                        'LEVIR-CD-256': [
                            [[0.45045675, 0.44693024, 0.38149951], [0.17456373, 0.16488312, 0.15323941]],
                            [[0.34605959, 0.33865141, 0.28926112], [0.12950791, 0.12611583, 0.11890717]]
                        ]}
    default_batch_size: int = 64

    # training hyper-parameter
    epochs: int = 250  # Number of epochs
    batch_size: int = 64  # Batch size
    inference_ratio = 2  # batch_size in val and test equal to batch_size*inference_ratio
    learning_rate: float = 1e-3  # Learning rate
    factor = 0.1  # learning rate decreasing factor
    patience = 12  # scheduler patience
    warm_up_step = 500  # warm up step
    weight_decay: float = 1e-3  # AdamW optimizer weight decay
    amp: bool = True  # if use mixed precision or not
    # NOTE(review): annotated ``str`` but defaults to ``False`` — False means
    # "don't load"; a checkpoint path string enables loading. Left as-is for
    # backward compatibility; confirm before tightening the annotation.
    load: str = False  # Load model and/or optimizer from a .pth file for testing or continuing training
    max_norm: float = 20  # gradient clip max norm

    # evaluate hyper-parameter
    evaluate_epoch: int = 30  # start evaluate after training for evaluate epochs
    stage_epoch = [0, 0, 0, 0, 0]  # adjust learning rate after every stage epoch
    save_checkpoint: bool = True  # if save checkpoint of model or not
    save_interval: int = 10  # save checkpoint every interval epoch
    save_best_model: bool = True  # if save best model or not

    # log wandb hyper-parameter
    log_wandb_project: str = 'dpcd_paper'  # wandb project name

    # data transform hyper-parameter
    # noise_p: float = 0.8  # probability of adding noise
    noise_p: float = 0.3  # probability of adding noise LEVIR-CD

    # model hyper-parameter
    # dropout_p: float = 0.3  # probability of dropout
    dropout_p: float = 0.0  # probability of dropout LEVIR-CD
    patch_size: int = 256  # size of input image

    y = 2  # ECA-net parameter
    b = 1  # ECA-net parameter

    # inference parameter
    log_path = './log_feature/'

    def state_dict(self):
        """Return all public, non-callable hyper-parameters as a dict.

        Fix: the original version filtered only on the leading-underscore
        convention, so it also exported the bound ``state_dict`` method
        itself; callables are now excluded. Iterates keys directly instead
        of discarding ``.items()`` values.
        """
        return {
            k: getattr(self, k)
            for k in Path_Hyperparameter.__dict__
            if not k.startswith('_') and not callable(getattr(self, k))
        }


# Module-level shared config instance; presumably imported as ``ph`` by the
# rest of the project — verify against callers before renaming.
ph = Path_Hyperparameter()
