import argparse
from utils.pretrain_data_utils import get_loader
import torch
import os
import torch.nn.parallel
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.utils.data.distributed
import numpy as np
from medical.optimizers.lr_scheduler import LinearWarmupCosineAnnealingLR
from utils.utils import AverageMeter, distributed_all_gather
import time
from utils.losses import forward_loss_reconstruct_mask, forward_loss_reconstruct, forward_constrast_loss, forward_loss_mask_region_patch, forward_loss_mask, forward_loss_mask_position
from tensorboardX import SummaryWriter
from pretrain_models.local_rec import LocalRecHead
from pretrain_models.swinunetr import SwinUNETR

# ---------------------------------------------------------------------------
# Command-line interface. The resulting namespace (``args``) is passed through
# the whole pipeline: data loading, model construction, optimization and
# distributed setup all read their configuration from it.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='Swin UNETR segmentation pipeline for BRATS Challenge')
# -- model / checkpoint options --
parser.add_argument('--model_name', default="swinunetr", help='the model will be trained')
parser.add_argument('--checkpoint', default=None, help='start training from saved checkpoint')
parser.add_argument('--logdir', default='test', type=str, help='directory to save the tensorboard logs')
parser.add_argument('--fold', default=0, type=int, help='data fold')
parser.add_argument('--pretrained_model_name', default='model.pt', type=str, help='pretrained model name')
# -- dataset locations --
parser.add_argument('--data_dir', default='/mnt/datasets/brats2020/MICCAI_BraTS2020_TrainingData', type=str, help='dataset directory')
parser.add_argument('--json_list', default='./brats2020_datajson.json', type=str, help='dataset json file')
# -- training schedule / optimization --
parser.add_argument('--max_epochs', default=300, type=int, help='max number of training epochs')
parser.add_argument('--batch_size', default=2, type=int, help='number of batch size')
parser.add_argument('--sw_batch_size', default=4, type=int, help='number of sliding window batch size')
parser.add_argument('--optim_lr', default=1e-4, type=float, help='optimization learning rate')
parser.add_argument('--optim_name', default='adamw', type=str, help='optimization algorithm')
parser.add_argument('--reg_weight', default=1e-5, type=float, help='regularization weight')
parser.add_argument('--momentum', default=0.99, type=float, help='momentum')
parser.add_argument('--val_every', default=10, type=int, help='validation frequency')
# -- distributed training --
parser.add_argument('--distributed', action='store_true', help='start distributed training')
parser.add_argument('--world_size', default=1, type=int, help='number of nodes for distributed training')
parser.add_argument('--rank', default=0, type=int, help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://127.0.0.1:23456', type=str, help='distributed url')
parser.add_argument('--dist-backend', default='nccl', type=str, help='distributed backend')
# -- model architecture --
parser.add_argument('--norm_name', default='instance', type=str, help='normalization name')
parser.add_argument('--workers', default=8, type=int, help='number of workers')
parser.add_argument('--feature_size', default=48, type=int, help='feature size')
parser.add_argument('--in_channels', default=4, type=int, help='number of input channels')
parser.add_argument('--out_channels', default=3, type=int, help='number of output channels')
parser.add_argument('--cache_dataset', action='store_true', help='use monai Dataset class')
# -- intensity normalization and spacing (preprocessing transforms) --
parser.add_argument('--a_min', default=-175.0, type=float, help='a_min in ScaleIntensityRanged')
parser.add_argument('--a_max', default=250.0, type=float, help='a_max in ScaleIntensityRanged')
parser.add_argument('--b_min', default=0.0, type=float, help='b_min in ScaleIntensityRanged')
parser.add_argument('--b_max', default=1.0, type=float, help='b_max in ScaleIntensityRanged')
parser.add_argument('--space_x', default=1.0, type=float, help='spacing in x direction')
parser.add_argument('--space_y', default=1.0, type=float, help='spacing in y direction')
parser.add_argument('--space_z', default=1.0, type=float, help='spacing in z direction')
# -- region-of-interest (crop) size fed to the network --
parser.add_argument('--roi_x', default=128, type=int, help='roi size in x direction')
parser.add_argument('--roi_y', default=128, type=int, help='roi size in y direction')
parser.add_argument('--roi_z', default=128, type=int, help='roi size in z direction')
# -- regularization / augmentation probabilities --
parser.add_argument('--dropout_rate', default=0.0, type=float, help='dropout rate')
parser.add_argument('--dropout_path_rate', default=0.0, type=float, help='drop path rate')
parser.add_argument('--RandFlipd_prob', default=0.2, type=float, help='RandFlipd aug probability')
parser.add_argument('--RandRotate90d_prob', default=0.2, type=float, help='RandRotate90d aug probability')
parser.add_argument('--RandScaleIntensityd_prob', default=0.1, type=float, help='RandScaleIntensityd aug probability')
parser.add_argument('--RandShiftIntensityd_prob', default=0.1, type=float, help='RandShiftIntensityd aug probability')
parser.add_argument('--infer_overlap', default=0.25, type=float, help='sliding window inference overlap')
# -- learning-rate schedule --
parser.add_argument('--lrschedule', default='warmup_cosine', type=str, help='type of learning rate scheduler')
parser.add_argument('--warmup_epochs', default=50, type=int, help='number of warmup epochs')
parser.add_argument('--resume_ckpt', action='store_true', help='resume training from pretrained checkpoint')

def save_checkpoint(model,
                    args,
                    filename='model.pt'):
    """Save the model's weights (state dict only) under ``args.logdir``.

    Args:
        model: the network to serialize; if ``args.distributed`` it is a
            DistributedDataParallel wrapper and the inner module is unwrapped
            so the saved weights load into a plain (non-DDP) model.
        args: parsed CLI namespace; uses ``distributed`` and ``logdir``.
        filename: file name (not path) of the checkpoint inside ``args.logdir``.
    """
    state_dict = model.module.state_dict() if args.distributed else model.state_dict()
    # Fix: create the log directory if it does not exist yet, so saving does
    # not depend on train_epoch having created it first.
    os.makedirs(args.logdir, exist_ok=True)
    filename = os.path.join(args.logdir, filename)
    torch.save(state_dict, filename)
    print('Saving checkpoint', filename)

def train_epoch(model,
                loader,
                optimizer,
                epoch,
                args,
                ):
    """Run one self-supervised pretraining epoch; return the average loss.

    The model's forward pass returns a dict of heads; this function combines
    a reconstruction loss, a masked-region prediction loss and a contrastive
    loss with fixed weights 1.0 / 0.1 / 0.1 (see the ``loss = ...`` line).

    Args:
        model: network whose forward returns a dict with keys 'logits',
            'images', 'mask', 'pred_mask_region', 'mask_labels',
            'contrast_pred_1' and 'contrast_pred_2'.
        loader: training data loader yielding dict batches with an 'image'
            entry; in distributed mode its sampler is assumed to expose a
            ``valid_length`` attribute — TODO confirm against get_loader.
        optimizer: optimizer stepped once per batch.
        epoch: current epoch index (used for logging only).
        args: parsed CLI namespace; uses rank, distributed, batch_size,
            world_size, max_epochs, logdir.

    Returns:
        float: running average of the combined loss over this epoch.
    """
    model.train()
    start_time = time.time()
    run_loss = AverageMeter()
    for idx, batch in enumerate(loader):
        # Move tensor entries to this rank's GPU; metadata-only keys (fold,
        # label, MONAI meta dicts, crop coords, transform records) are dropped.
        batch = {
            x: batch[x].to(torch.device('cuda', args.rank))
            for x in batch if x not in ['fold', 'label', 'image_meta_dict', 'label_meta_dict', 'foreground_start_coord', 'foreground_end_coord', 'image_transforms', 'label_transforms']
        }

        image = batch["image"]
        # Equivalent of optimizer.zero_grad(set_to_none=True): clear grads
        # before the forward/backward pass.
        for param in model.parameters(): param.grad = None
        model_out = model(image)
        x_rec = model_out["logits"]           # reconstruction prediction
        labels = model_out['images']          # reconstruction target
        mask = model_out["mask"]              # currently unused (position loss disabled below)
        pred_mask_region = model_out["pred_mask_region"]
        # pred_mask_position_region = model_out["pred_mask_position_region"]
        # random_patches = model_out["random_patches"]
        mask_labels = model_out["mask_labels"]
        contrast_pred_1 = model_out["contrast_pred_1"]
        contrast_pred_2 = model_out["contrast_pred_2"]
        loss_rec = forward_loss_reconstruct(x_rec, labels)
        loss_mask_region = forward_loss_mask(pred_mask_region, mask_labels)
        loss_contrast = forward_constrast_loss(contrast_pred_1, contrast_pred_2)
        # loss_mask_position = forward_loss_mask_position(pred_mask_position_region, mask)

        # if args.rank == 0:
        #     print(f"mask is {mask[0]}, mask num is {mask[0].sum(dim=0)}, mask num pred is {pred_mask_region[0].argmax(dim=-1)}, mask position pred is {(torch.sigmoid(pred_mask_position_region[0]) > 0.5).float()}")

        # Per-batch loss breakdown: only rank 0 prints in distributed mode.
        if args.distributed:
            if args.rank == 0:
                print(f"loss_rec is {loss_rec}, loss_mask_num is {loss_mask_region}, loss_cl is {loss_contrast}")
        else :
            print(f"loss_rec is {loss_rec}")

        # Combined objective: reconstruction dominant, auxiliary heads at 0.1.
        loss = loss_rec + 0.1 * loss_mask_region + 0.1 * loss_contrast
        loss.backward()
        optimizer.step()
        if args.distributed:
            # Gather the per-rank losses and average them so every rank's
            # running meter reflects the global batch.
            loss_list = distributed_all_gather([loss],
                                               out_numpy=True,
                                               is_valid=idx < loader.sampler.valid_length)
            run_loss.update(np.mean(np.mean(np.stack(loss_list, axis=0), axis=0), axis=0),
                            n=args.batch_size * args.world_size)
        else:
            run_loss.update(loss.item(), n=args.batch_size)
        if args.rank == 0:
            print('Epoch {}/{} {}/{}'.format(epoch, args.max_epochs, idx, len(loader)),
                  'loss: {:.4f}'.format(run_loss.avg),
                  'time {:.2f}s'.format(time.time() - start_time))
        # Reset the timer each iteration so the print above shows per-batch time.
        start_time = time.time()
    # Release gradients before returning (frees GPU memory between epochs).
    for param in model.parameters() : param.grad = None
    logpath = os.path.join(args.logdir, "log.txt")
    os.makedirs(args.logdir, exist_ok=True)

    # NOTE(review): because start_time is reset every iteration, the 'time'
    # written here covers only the interval since the last batch, not the
    # whole epoch — confirm whether full-epoch timing was intended.
    with open(logpath, "a+") as f:
        f.write('Epoch {}/{}'.format(epoch, args.max_epochs) + "\n"+
                'loss: {:.4f}'.format(run_loss.avg) + "\n" +
                'time {:.2f}s'.format(time.time() - start_time) + "\n"
                )

    return run_loss.avg

def pretrain(model,
             epoches,
             train_loader,
             optimizer,
             args,
             scheduler=None,
             ):
    """Run the pretraining loop for ``epoches`` epochs.

    Each epoch: train via ``train_epoch``, then (on rank 0) save a checkpoint
    to ``args.logdir`` and log the epoch loss to TensorBoard, then step the
    LR scheduler if one is given.

    Args:
        model: the network being pretrained.
        epoches: total number of training epochs.
        train_loader: training data loader passed to ``train_epoch``.
        optimizer: optimizer passed to ``train_epoch``.
        args: parsed CLI namespace; uses ``logdir`` and ``rank``.
        scheduler: optional LR scheduler, stepped once per epoch.
    """
    # Fix: removed a stray dead `pass` statement and a redundant nested
    # `if args.rank == 0` (the branch is already rank-0 guarded).
    writer = None
    if args.logdir is not None and args.rank == 0:
        writer = SummaryWriter(log_dir=args.logdir)
        print('Writing Tensorboard logs to ', args.logdir)

    for epoch in range(epoches):
        train_loss = train_epoch(model,
                                 train_loader,
                                 optimizer,
                                 epoch,
                                 args)
        # Checkpoint every epoch (overwrites the same file) on rank 0 only.
        if args.rank == 0 and args.logdir is not None:
            save_checkpoint(model, args, filename="pretrain_model.pt")
            print(f"pretrain model has been saved in {args.logdir}")
        if args.rank == 0 and writer is not None:
            writer.add_scalar('train_loss', train_loss, epoch)
        if scheduler is not None:
            scheduler.step()

def main():
    """Parse CLI arguments and launch training.

    Single-GPU runs call ``main_worker`` directly; distributed runs spawn one
    worker process per visible GPU via ``torch.multiprocessing.spawn``.
    """
    args = parser.parse_args()
    # All logs/checkpoints live under ./runs/<logdir>.
    args.logdir = './runs/' + args.logdir
    if not args.distributed:
        main_worker(gpu=0, args=args)
        return
    # One process per local GPU; world_size becomes the global process count.
    args.ngpus_per_node = torch.cuda.device_count()
    print('Found total gpus', args.ngpus_per_node)
    args.world_size = args.ngpus_per_node * args.world_size
    mp.spawn(main_worker, nprocs=args.ngpus_per_node, args=(args,))

def main_worker(gpu, args):
    """Per-process training entry point (one process per GPU when distributed).

    Sets up the (optional) process group, builds the data loaders, the
    SwinUNETR model in pretrain mode, the optimizer and LR scheduler, then
    hands off to ``pretrain``.

    Args:
        gpu: local GPU index for this process (0 in single-GPU mode).
        args: parsed CLI namespace; mutated here (gpu, rank, test_mode).
    """

    if args.distributed:
        # NOTE(review): forcing 'fork' inside an mp.spawn worker is unusual —
        # presumably for DataLoader worker processes; confirm intent.
        torch.multiprocessing.set_start_method('fork', force=True)
    np.set_printoptions(formatter={'float': '{: 0.3f}'.format}, suppress=True)
    args.gpu = gpu
    if args.distributed:
        # Global rank = node rank * GPUs per node + local GPU index.
        args.rank = args.rank * args.ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size,
                                rank=args.rank)
    torch.cuda.set_device(args.gpu)
    # Fixed input sizes: let cuDNN autotune convolution algorithms.
    torch.backends.cudnn.benchmark = True
    # Pretraining always builds the training pipeline, never the test one.
    args.test_mode = False

    train_loader, val_loader = get_loader(args)

    print(args.rank, ' gpu', args.gpu)
    if args.rank == 0:
        print('Batch size is:', args.batch_size, 'epochs', args.max_epochs)
    inf_size = [args.roi_x, args.roi_y, args.roi_z]

    # SwinUNETR in pretrain mode: forward returns the dict of self-supervised
    # heads consumed by train_epoch (reconstruction / mask / contrastive).
    model = SwinUNETR(inf_size,
                      in_channels=args.in_channels,
                      out_channels=args.out_channels,
                      drop_rate=0.1,
                      feature_size=args.feature_size,
                      pretrain=True)

    pytorch_total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print('Total parameters count', pytorch_total_params)

    model.cuda(args.gpu)

    if args.distributed:
        torch.cuda.set_device(args.gpu)
        # Only batch norm needs synchronization across ranks; instance norm
        # (the default) is per-sample and needs none.
        if args.norm_name == 'batch':
            model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
        model.cuda(args.gpu)
        # find_unused_parameters=True: presumably some pretrain heads do not
        # contribute to every backward pass — confirm, as it adds overhead.
        model = torch.nn.parallel.DistributedDataParallel(model,
                                                          device_ids=[args.gpu],
                                                          output_device=args.gpu,
                                                          find_unused_parameters=True,
                                                          )
    # Optimizer selection from CLI; all share lr and weight decay settings.
    if args.optim_name == 'adam':
        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=args.optim_lr,
                                     weight_decay=args.reg_weight)
    elif args.optim_name == 'adamw':
        optimizer = torch.optim.AdamW(model.parameters(),
                                      lr=args.optim_lr,
                                      weight_decay=args.reg_weight)
    elif args.optim_name == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=args.optim_lr,
                                    momentum=args.momentum,
                                    nesterov=True,
                                    weight_decay=args.reg_weight)
    else:
        raise ValueError('Unsupported Optimization Procedure: ' + str(args.optim_name))

    # LR schedule: linear warmup + cosine decay, plain cosine, or none.
    if args.lrschedule == 'warmup_cosine':
        scheduler = LinearWarmupCosineAnnealingLR(optimizer,
                                                  warmup_epochs=args.warmup_epochs,
                                                  max_epochs=args.max_epochs)
    elif args.lrschedule == 'cosine_anneal':
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,
                                                               T_max=args.max_epochs)
    else:
        scheduler = None

    pretrain(model,
             epoches=args.max_epochs,
             train_loader=train_loader,
             optimizer=optimizer,
             args=args,
             scheduler=scheduler)

if __name__ == '__main__':
    main()
