import os

# from models.UNet3D import UNet3D
from torch import optim
from torch.optim import lr_scheduler

from models.AttentionUNet.AttentionUNetOfficial import AttentionUNet as AttentionUNet3D
from models.BRAVENet import BRAVENet
from torch.nn.modules.loss import CrossEntropyLoss
from models.Modified3DUNet import Modified3DUNet
from models.UNETR import UNETR
from models.UNet3DOfficial.UNet_3D import ResidualUNet3D
from models.VoxResNet import VoxResNet_V0
from models.loss import binary_dice_loss, binary_focal_loss


class UNet3DConfigure:
    """Base training configuration for the 3D-UNet family of models.

    All settings are class attributes so they can be overridden by
    subclassing (see the per-model configure classes below). Interval
    attributes derived from ``epoch_size`` must be refreshed by calling
    :meth:`set_interval` whenever ``epoch_size`` changes after class
    creation (it is 0 here and presumably set by the training script —
    TODO confirm).
    """

    threshold = 0.45  # probability cutoff for binarizing predictions
    class_num = 1  # single foreground class (binary segmentation)
    crop_method = 4  # 1 is random crop   2 is direct crop  3 is cropping to 1 patch  4 is no crop
    if_debug = False

    # Debug runs skip writing training logs.
    # NOTE: do not re-assign if_record_train_log below — a later
    # unconditional assignment would silently override this toggle.
    if if_debug:
        if_record_train_log = False
    else:
        if_record_train_log = True

    model = ResidualUNet3D(1, class_num)

    if_data_augmentation = True
    train_valid_shuffle = True
    aug_probability = 0.3  # chance each augmentation is applied
    # Log / weight directories live next to the project root.
    log_root_dir = os.path.join(os.path.abspath(".."), "run_logs")
    weight_root_dir = os.path.join(os.path.abspath(".."), "weights")

    old_weights_dir = None
    # old_weights_dir = r"C:\gs\code\parse2022-draft\weights\Training dataResidualUNet3DJul08_13-49-58\epoch_36"
    # Resume epoch counter from a checkpoint dir named like ".../epoch_36".
    if old_weights_dir is not None:
        old_epoch = int(os.path.basename(old_weights_dir).split("_")[1])
    else:
        old_epoch = 0
    sample_prefix = "sample"
    # Smaller batch on the (memory-limited) development machine.
    if os.environ.get("COMPUTERNAME") == "DESKTOP-16QO4BT":
        batch_size = 1
        train_data_root_path = r"D:\dataset\vessel\xunfei_challenge_mri\Trainingdata"
    else:
        batch_size = 3
        train_data_root_path = r"D:\dataset\vessel\xunfei_challenge_mri\Trainingdata"
    epoch_size = 0  # number of iterations per epoch; set externally, then call set_interval()
    valid_batch_size = 2
    learning_rate = 5e-4
    # learning_rate = 0.0033786452453014168
    training_epochs = 2000
    if_valid = True
    if_cuda = True
    if_lazy_load = True
    # Intervals derived from epoch_size (0 until set_interval() is called
    # with a real epoch_size).
    save_model_interval = epoch_size * 3
    lr_schedule_interval = epoch_size
    valid_interval = epoch_size * 2
    recrop_data_interval = epoch_size * 5
    train_val_rate = 0.9  # train / validation split ratio
    if_shuffle = True
    if_mask = False
    channel_number = 1  # single-channel (grayscale) volumes
    patch_size = (64, 128, 128)
    # patch_shape = (16, 64, 64)  # 256 256 128 is too big for some box
    group_size = 4
    loss_function = [binary_dice_loss, binary_focal_loss, ]

    # loss_function = [DiceLoss(), FocalLoss()]

    @classmethod
    def set_interval(cls):
        """Recompute all epoch_size-derived intervals.

        Must be called after ``cls.epoch_size`` is updated, otherwise the
        interval attributes keep their stale (initially zero) values.
        """
        cls.save_model_interval = cls.epoch_size * 3
        cls.lr_schedule_interval = cls.epoch_size
        cls.valid_interval = cls.epoch_size * 2
        cls.recrop_data_interval = cls.epoch_size * 5


class ModifiedUNet3DConfigure(UNet3DConfigure):
    """Configuration overrides for the Modified3DUNet model."""

    if_debug = False
    # Large in-plane patches, so the batch is kept small.
    batch_size = 2
    patch_size = (32, 448, 448)
    model = Modified3DUNet(1, UNet3DConfigure.class_num)


class VoxResNetConfigure(UNet3DConfigure):
    """Configuration overrides for the VoxResNet (V0) model."""

    # VoxResNet is lighter, so a larger batch fits in memory.
    batch_size = 6
    model = VoxResNet_V0(1, UNet3DConfigure.class_num)


class BRAVENetConfigure(UNet3DConfigure):
    """Configuration overrides for the BRAVENet model."""

    if_debug = True
    threshold = 0.4
    batch_size = 12
    # patch_size must be defined before model: it is passed to the constructor.
    patch_size = (64, 256, 256)
    model = BRAVENet(1, UNet3DConfigure.class_num, patch_size)
    # It isn't associated with the number of loss function but the number of outputs
    loss_weights = [0.5, 0.25, 0.25]


class AttentionUNetConfigure(UNet3DConfigure):
    """Configuration overrides for the Attention U-Net model."""

    if_debug = True
    threshold = 0.4
    batch_size = 3
    patch_size = (64, 128, 128)
    learning_rate = 1e-04
    # Optimizer / scheduler classes (not instances) — instantiated by the trainer.
    optimizer = optim.Adam
    scheduler = lr_scheduler.StepLR
    model = AttentionUNet3D(in_channels=1, n_classes=UNet3DConfigure.class_num)


class UNetTransformerConfigure(UNet3DConfigure):
    """Configuration overrides for the UNETR (transformer U-Net) model."""

    if_debug = False
    batch_size = 4
    learning_rate = 0.01
    loss_function = [binary_dice_loss, binary_focal_loss]
    # patch_size must be defined before model: it is passed to the constructor.
    patch_size = (128, 128, 128)
    model = UNETR(1, UNet3DConfigure.class_num, patch_size)

    # Transformer / optimizer hyper-parameters.
    mlp_dim = 3072
    dropout_rate = 0.2
    max_epochs_num = 5000
    optim_lr = 5e-4
    optim_name = 'adam'
    momentum = 0.99
    reg_weight = 1e-5

    # Smoothing terms for the dice loss denominator / numerator.
    dice_loss_smooth_dr = 1e-6
    dice_loss_smooth_nr = 0.0
