from pickle import FALSE
from torchvision import transforms
from datasets.dataset_tdmaterials import TDMaterials_dataset
from datasets.random_generator import RandomGenerator
from datasets.image_transform import down_sample

from utils import *

from datetime import datetime

class setting_config:
    """
    Training configuration for the VM-UNet / TDMaterials segmentation run.

    Construction copies the CLI arguments, then `_prepare()` derives the
    model config, dataset, loss, data transforms, optimizer and scheduler
    hyper-parameters as plain attributes read by the training script.
    """

    def __init__(self, args):
        """Copy CLI arguments onto the config and derive everything else.

        Args:
            args: parsed argparse namespace providing `material`,
                `num_classes`, `max_epochs`, `batch_size`, `img_size`,
                `seed`, `pretrained_path` and `cost`.
        """
        self.material = args.material
        self.num_classes = args.num_classes
        self.epochs = args.max_epochs
        self.batch_size = args.batch_size
        self.input_size_h = args.img_size
        self.input_size_w = args.img_size
        self.seed = args.seed
        self.pretrained_path = args.pretrained_path

        self.cost = args.cost

        self._prepare()

    def _set_criterion(self, cost):
        """Select the loss function by name.

        Args:
            cost: one of 'ce-dice', 'ce-focal', 'ce-tversky',
                'ce-jaccard', 'ce-lovasz'.

        Raises:
            ValueError: if `cost` is not a supported loss name.
                (ValueError subclasses Exception, so callers catching the
                previous broad Exception still work.)
        """
        if cost == 'ce-dice':
            self.criterion = CeDiceLoss(self.num_classes, self.loss_weight)
        elif cost == 'ce-focal':
            self.criterion = CeFocalLoss(self.loss_weight)
        elif cost == 'ce-tversky':
            self.criterion = CeTverskyLoss(self.loss_weight)
        elif cost == 'ce-jaccard':
            self.criterion = CeJaccardLoss(self.loss_weight)
        elif cost == 'ce-lovasz':
            self.criterion = CeLovaszLoss(self.loss_weight)
        else:
            raise ValueError('cost is not right!')

    def _prepare(self):
        """Derive model, dataset, transform, loss, optimizer and scheduler
        settings from the values copied in `__init__`.

        Raises:
            ValueError: if the (hard-coded) dataset name is unsupported.
        """
        self.network = 'vmunet'
        self.model_config = {
            'num_classes': self.num_classes,
            'input_channels': 3,
            # ----- VM-UNet ----- #
            'depths': [2, 2, 2, 2],
            'depths_decoder': [2, 2, 2, 1],
            'drop_path_rate': 0.2,
            'load_ckpt_path': './pre_trained_weights/vmamba_small_e238_ema.pth',
        }
        self.datasets_name = 'TDMaterials'
        if self.datasets_name == 'TDMaterials':
            self.data_path = '../../data/DL_2DMaterials/Dataset_DL_2DMaterials_npz/'
            self.datasets = TDMaterials_dataset
        else:
            raise ValueError('datasets is not right!')

        self.loss_weight = [0.5, 0.5]
        self.z_spacing = 1
        self.input_channels = 3

        # Single-GPU defaults; the training script may override these.
        self.distributed = False
        self.local_rank = -1
        self.num_workers = 16
        self.world_size = None
        self.rank = None
        self.amp = False

        # Timestamped per-run output directory.
        self.work_dir = 'results/' + self.network + '_' + self.datasets_name + '_' + datetime.now().strftime('%A_%d_%B_%Y_%Hh_%Mm_%Ss') + '/'
        self.print_interval = 1
        self.val_interval = 20
        self.save_interval = 20
        self.test_weights_path = ''

        # Binarization threshold for predictions.
        # NOTE(review): the 'ReduceLROnPlateau' branch of _set_sch also
        # assigns self.threshold (to 1e-4) and would clobber this value if
        # that scheduler were selected — confirm before enabling it.
        self.threshold = 0.5

        # Downsample by 2x, then random-crop/augment to the model input size.
        self.train_transformer = transforms.Compose([
            lambda data: down_sample(data, factor=2),
            RandomGenerator(output_size=[self.input_size_h, self.input_size_w], clip=True)
        ])
        # Test pipeline intentionally reuses the training transforms.
        self.test_transformer = self.train_transformer

        self._set_criterion(self.cost)
        self._set_opt()
        self._set_sch()

    def _set_opt(self, opt='SGD'):
        """Set optimizer name and its hyper-parameters as attributes.

        Args:
            opt: optimizer name; one of the torch.optim optimizers below.

        Raises:
            ValueError: if `opt` is unsupported. (Previously an `assert`,
            which is silently stripped under `python -O`.)
        """
        if opt not in ['Adadelta', 'Adagrad', 'Adam', 'AdamW', 'Adamax', 'ASGD', 'RMSprop', 'Rprop', 'SGD']:
            raise ValueError('Unsupported optimizer!')
        self.opt = opt
        if opt == 'Adadelta':
            self.lr = 0.01  # default: 1.0 – coefficient that scale delta before it is applied to the parameters
            self.rho = 0.9  # default: 0.9 – coefficient used for computing a running average of squared gradients
            self.eps = 1e-6  # default: 1e-6 – term added to the denominator to improve numerical stability
            self.weight_decay = 0.05  # default: 0 – weight decay (L2 penalty)
        elif opt == 'Adagrad':
            self.lr = 0.01  # default: 0.01 – learning rate
            self.lr_decay = 0  # default: 0 – learning rate decay
            self.eps = 1e-10  # default: 1e-10 – term added to the denominator to improve numerical stability
            self.weight_decay = 0.05  # default: 0 – weight decay (L2 penalty)
        elif opt == 'Adam':
            self.lr = 0.0001  # default: 1e-3 – learning rate
            self.betas = (0.9, 0.999)  # default: (0.9, 0.999) – coefficients for computing running averages of gradient and its square
            self.eps = 1e-8  # default: 1e-8 – term added to the denominator to improve numerical stability
            self.weight_decay = 0.05  # default: 0 – weight decay (L2 penalty)
            self.amsgrad = False  # default: False – whether to use the AMSGrad variant
        elif opt == 'AdamW':
            self.lr = 0.001  # default: 1e-3 – learning rate
            self.betas = (0.9, 0.999)  # default: (0.9, 0.999) – coefficients for computing running averages of gradient and its square
            self.eps = 1e-8  # default: 1e-8 – term added to the denominator to improve numerical stability
            self.weight_decay = 1e-2  # default: 1e-2 – weight decay coefficient
            self.amsgrad = False  # default: False – whether to use the AMSGrad variant
        elif opt == 'Adamax':
            self.lr = 2e-3  # default: 2e-3 – learning rate
            self.betas = (0.9, 0.999)  # default: (0.9, 0.999) – coefficients for computing running averages of gradient and its square
            self.eps = 1e-8  # default: 1e-8 – term added to the denominator to improve numerical stability
            self.weight_decay = 0  # default: 0 – weight decay (L2 penalty)
        elif opt == 'ASGD':
            self.lr = 0.01  # default: 1e-2 – learning rate
            self.lambd = 1e-4  # default: 1e-4 – decay term
            self.alpha = 0.75  # default: 0.75 – power for eta update
            self.t0 = 1e6  # default: 1e6 – point at which to start averaging
            self.weight_decay = 0  # default: 0 – weight decay
        elif opt == 'RMSprop':
            self.lr = 1e-2  # default: 1e-2 – learning rate
            self.momentum = 0  # default: 0 – momentum factor
            self.alpha = 0.99  # default: 0.99 – smoothing constant
            self.eps = 1e-8  # default: 1e-8 – term added to the denominator to improve numerical stability
            self.centered = False  # default: False – if True, normalize the gradient by its variance estimation
            self.weight_decay = 0  # default: 0 – weight decay (L2 penalty)
        elif opt == 'Rprop':
            self.lr = 1e-2  # default: 1e-2 – learning rate
            self.etas = (0.5, 1.2)  # default: (0.5, 1.2) – multiplicative increase and decrease factors
            self.step_sizes = (1e-6, 50)  # default: (1e-6, 50) – minimal and maximal allowed step sizes
        elif opt == 'SGD':
            self.lr = 0.01  # – learning rate
            self.momentum = 0.9  # default: 0 – momentum factor
            self.weight_decay = 0.0001  # default: 0 – weight decay (L2 penalty)
            self.dampening = 0  # default: 0 – dampening for momentum
            self.nesterov = False  # default: False – enables Nesterov momentum

    def _set_sch(self, sch='CosineAnnealingLR'):
        """Set LR-scheduler name and its hyper-parameters as attributes.

        Args:
            sch: scheduler name; one of the torch.optim.lr_scheduler
                schedulers below, or the warm-up variants 'WP_MultiStepLR'
                and 'WP_CosineLR'.

        Raises:
            ValueError: if `sch` is unsupported (previously unknown names
            were silently accepted, leaving the scheduler unconfigured).
        """
        if sch not in ['StepLR', 'MultiStepLR', 'ExponentialLR', 'CosineAnnealingLR', 'ReduceLROnPlateau',
                       'CosineAnnealingWarmRestarts', 'WP_MultiStepLR', 'WP_CosineLR']:
            raise ValueError('Unsupported scheduler!')
        self.sch = sch
        if sch == 'StepLR':
            self.step_size = self.epochs // 5  # – Period of learning rate decay.
            self.gamma = 0.5  # – Multiplicative factor of learning rate decay. Default: 0.1
            self.last_epoch = -1  # – The index of last epoch. Default: -1.
        elif sch == 'MultiStepLR':
            self.milestones = [60, 120, 150]  # – List of epoch indices. Must be increasing.
            self.gamma = 0.1  # – Multiplicative factor of learning rate decay. Default: 0.1.
            self.last_epoch = -1  # – The index of last epoch. Default: -1.
        elif sch == 'ExponentialLR':
            self.gamma = 0.99  # – Multiplicative factor of learning rate decay.
            self.last_epoch = -1  # – The index of last epoch. Default: -1.
        elif sch == 'CosineAnnealingLR':
            self.T_max = 100  # – Maximum number of iterations. Cosine function period.
            self.eta_min = 0.00001  # – Minimum learning rate. Default: 0.
            self.last_epoch = -1  # – The index of last epoch. Default: -1.
        elif sch == 'ReduceLROnPlateau':
            # NOTE(review): this branch overwrites self.threshold set in
            # _prepare (0.5 → 1e-4); rename one of them if this scheduler
            # is ever enabled.
            self.mode = 'min'  # – One of min, max. In min mode, lr is reduced when the monitored quantity stops decreasing. Default: 'min'.
            self.factor = 0.1  # – Factor by which the learning rate will be reduced. new_lr = lr * factor. Default: 0.1.
            self.patience = 10  # – Number of epochs with no improvement after which learning rate will be reduced. Default: 10.
            self.threshold = 0.0001  # – Threshold for measuring the new optimum. Default: 1e-4.
            self.threshold_mode = 'rel'  # – One of rel, abs. Default: 'rel'.
            self.cooldown = 0  # – Number of epochs to wait before resuming normal operation after lr has been reduced. Default: 0.
            self.min_lr = 0  # – Lower bound on the learning rate of all param groups. Default: 0.
            self.eps = 1e-08  # – Minimal decay applied to lr; smaller updates are ignored. Default: 1e-8.
        elif sch == 'CosineAnnealingWarmRestarts':
            self.T_0 = 50  # – Number of iterations for the first restart.
            self.T_mult = 2  # – A factor increases T_{i} after a restart. Default: 1.
            self.eta_min = 1e-6  # – Minimum learning rate. Default: 0.
            self.last_epoch = -1  # – The index of last epoch. Default: -1.
        elif sch == 'WP_MultiStepLR':
            self.warm_up_epochs = 10
            self.gamma = 0.1
            self.milestones = [125, 225]
        elif sch == 'WP_CosineLR':
            self.warm_up_epochs = 20
