from yacs.config import CfgNode as CN

_C = CN()

################ general config ##############

# the name of the config file
_C.NAME = 'default_config'

# the root path to record logs and checkpoints
_C.ROOT = 'experiments'

# the few-shot algorithm to run (e.g. 'protonet')
_C.ALGORITHM = 'protonet'

################ the config of model   #############
_C.MODEL = CN()
# backbone network: 'conv4' or 'resnet12'
_C.MODEL.BACKBONE = 'conv4'
# path to a pre-trained checkpoint; empty string means no pre-trained weights
_C.MODEL.PRE_TRAINED_PATH = ''
# number of input image channels
_C.MODEL.INPUT_CHANNEL = 3

################ the config of training #############
_C.TRAIN = CN()
# record infos every SAVE_BATCH batches
_C.TRAIN.SAVE_BATCH = 10
# the number of episodes per training epoch
_C.TRAIN.EPISODE_SIZE = 2000
# N-way K-shot settings for a training episode
_C.TRAIN.N_WAY = 5
_C.TRAIN.N_SHOT = 5
# number of query images per class in an episode
_C.TRAIN.N_QUERY = 5
_C.TRAIN.BATCH_SIZE = 16
_C.TRAIN.EPOCH = 30

# whether to use automatic mixed precision (AMP): True or False
_C.TRAIN.AMP = False

# the config of the optimizer ('adam' or 'sgd')
_C.TRAIN.OPTIMIZER = 'adam'
_C.TRAIN.WEIGHT_DECAY = 0.0
_C.TRAIN.LR = 1e-3
# only used for 'sgd'
_C.TRAIN.MOMENTUM = 0.0

# stop training after this many epochs without improvement (early stopping)
_C.TRAIN.EARLY_STOP = 5

# the transform operators applied to the images
# NOTE: the order of the transforms matters
_C.TRAIN.TRANSFORM_OPERATORS = ['shorter_resize_for_crop',
                                'random_crop',
                                'random_horizontal_flip',
                                'to_tensor',
                                'normalize']

###### the config of ddp #####
_C.TRAIN.DDP = CN()
# _C.TRAIN.DDP.OFF = False
# the number of machines (nodes)
_C.TRAIN.DDP.NODES = 1
# the rank of the current machine among the nodes
_C.TRAIN.DDP.NR = 0
# communication backend: 'nccl' for linux, 'gloo' for windows
_C.TRAIN.DDP.BACKEND = 'gloo'
# GPU ids to use on this machine, e.g. (gpu_num1, gpu_num2, ...)
_C.TRAIN.DDP.DEVICES = (0,)
_C.TRAIN.DDP.MASTER_ADDR = 'localhost'
_C.TRAIN.DDP.MASTER_PORT = 9999
_C.TRAIN.DDP.FIND_UNUSED_PARAMETERS = False

################ the config of validating #############
_C.VALIDATE = CN()
# episode/way/shot/query settings for validation (mirrors TRAIN)
_C.VALIDATE.EPISODE_SIZE = 2000
_C.VALIDATE.N_WAY = 5
_C.VALIDATE.N_SHOT = 5
_C.VALIDATE.N_QUERY = 5
# deterministic transforms only (no random augmentation) for validation
_C.VALIDATE.TRANSFORM_OPERATORS = ['shorter_resize_for_crop',
                                   'center_crop',
                                   'to_tensor',
                                   'normalize']
_C.VALIDATE.BATCH_SIZE = 16

################ the config of testing #############
_C.TEST = CN()
_C.TEST.EPISODE_SIZE = 2000
_C.TEST.N_WAY = 5
_C.TEST.N_SHOT = 5
_C.TEST.N_QUERY = 5
# whether to use automatic mixed precision (AMP) at test time
_C.TEST.AMP = False
# path of the checkpoint to evaluate; empty string means unset
_C.TEST.MODEL_PATH = ''
_C.TEST.BATCH_SIZE = 16
# deterministic transforms only (no random augmentation) for testing
_C.TEST.TRANSFORM_OPERATORS = ['shorter_resize_for_crop',
                               'center_crop',
                               'to_tensor',
                               'normalize']
###################### config of finetune ###################
# settings for per-episode finetuning during evaluation
_C.TEST.FINETUNE = CN()
_C.TEST.FINETUNE.LR = 1e-3
_C.TEST.FINETUNE.EPOCH = 100
_C.TEST.FINETUNE.OPTIMIZER = 'adam'
_C.TEST.FINETUNE.WEIGHT_DECAY = 0.0
# only used for 'sgd'
_C.TEST.FINETUNE.MOMENTUM = 0.0
_C.TEST.FINETUNE.DAMPENING = 0.0
_C.TEST.FINETUNE.BATCH_SIZE = 4

###################### config of ddp ########################
_C.TEST.DDP = CN()
# _C.TEST.DDP.OFF = False
# the number of machines (nodes)
_C.TEST.DDP.NODES = 1
# the rank of the current machine among the nodes
_C.TEST.DDP.NR = 0
# communication backend: 'nccl' for linux, 'gloo' for windows
_C.TEST.DDP.BACKEND = 'gloo'
# GPU ids to use on this machine, e.g. (gpu_num1, gpu_num2, ...)
_C.TEST.DDP.DEVICES = (0,)
_C.TEST.DDP.MASTER_ADDR = 'localhost'
_C.TEST.DDP.MASTER_PORT = 9999
_C.TEST.DDP.FIND_UNUSED_PARAMETERS = False

################ the config of dataset ###############
# the config of dataset
_C.DATASET = CN()
# the chosen dataset for the experiment
# supported: ['cub200']
_C.DATASET.NAME = 'cub200'
# the root directory of the dataset
_C.DATASET.DATAPATH = 'D:/dataset/CUB_200_2011'
# the (height, width) of images fed into the net
_C.DATASET.IMAGE_SIZE = (84, 84)
# the default path of split json files if they are newly created
# _C.DATASET.DEFAULT_SPLITPATH = 'split'
# number of categories used for training
_C.DATASET.TRAIN_CLASS_NUM = 100
# number of categories used for validating
_C.DATASET.VALID_CLASS_NUM = 50
# number of categories used for testing
_C.DATASET.TEST_CLASS_NUM = 50
# benchmark note: reading 2000 episodes takes 43.519 seconds with multiple
# threads and 148.033 seconds without them

# configs of dataloader
_C.DATASET.DROP_LAST = False
# must be False because DDP is used; otherwise an error is raised
_C.DATASET.SHUFFLE = False
_C.DATASET.PIN_MEMORY = True
_C.DATASET.N_WORKERS = 0

################ the config of transform ###############
# per-operator parameters for the transforms named in *_TRANSFORM_OPERATORS
_C.TRANSFORM = CN()
# the size of input images
# if a tuple, the image will be resized to this
# if an int, the shorter edge will be resized to this

_C.TRANSFORM.RANDOM_RESIZED_CROP = CN()
_C.TRANSFORM.RANDOM_RESIZED_CROP.SCALE = (0.08, 1.0)
_C.TRANSFORM.RANDOM_RESIZED_CROP.RATIO = (3.0 / 4.0, 4.0 / 3.0)

# channel-wise mean/std (ImageNet statistics)
_C.TRANSFORM.NORMALIZE = CN()
_C.TRANSFORM.NORMALIZE.MEAN = [0.485, 0.456, 0.406]
_C.TRANSFORM.NORMALIZE.STD = [0.229, 0.224, 0.225]

_C.TRANSFORM.RANDOM_CROP = CN()
_C.TRANSFORM.RANDOM_CROP.PADDING = 0

_C.TRANSFORM.IMAGE_JITTER = CN()
_C.TRANSFORM.IMAGE_JITTER.BRIGHTNESS = 0.4
_C.TRANSFORM.IMAGE_JITTER.CONTRAST = 0.4
_C.TRANSFORM.IMAGE_JITTER.COLOR = 0.4

_C.TRANSFORM.COLOR_JITTER = CN()
_C.TRANSFORM.COLOR_JITTER.BRIGHTNESS = 0.4
_C.TRANSFORM.COLOR_JITTER.CONTRAST = 0.4
_C.TRANSFORM.COLOR_JITTER.SATURATION = 0.4
_C.TRANSFORM.COLOR_JITTER.HUE = 0.4

############  the config of logger ################
_C.LOG = CN()
# the name of the logger; you can use your own name
_C.LOG.LOGGER_NAME = 'tbf'


def update_config(cfg, args):
    """Overlay command-line settings onto *cfg* in place.

    Unfreezes the config node, merges an optional YAML file given by
    ``args.config_file``, then merges any ``KEY VALUE`` override pairs in
    ``args.opts``, and finally freezes the node again.

    Args:
        cfg: a yacs ``CfgNode`` to update.
        args: parsed arguments exposing ``config_file`` and ``opts``.
    """
    cfg.defrost()
    config_file = args.config_file
    if config_file:
        cfg.merge_from_file(config_file)
    overrides = args.opts
    if overrides:
        cfg.merge_from_list(overrides)
    cfg.freeze()

# cfg_str = _C.dump()
#
# with open('../../config/default.yaml', 'w') as f:
#     f.write(cfg_str)
