import argparse, os
import torch
import random
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
# from lapsrn import Net, L1_Charbonnier_loss
# from lapsrn_unet import LapNet, L1_Charbonnier_loss
# from lapsrn_unet import Net, L1_Charbonnier_loss
# from lapsrn_unet2 import Net, L1_Charbonnier_loss
# from lapsrn_unet3 import Net, L1_Charbonnier_loss
# from lapsrn_unet4_2 import Net, L1_Charbonnier_loss
# from myNet.lapsrn_unet4_2 import Net, L1_Charbonnier_loss
from myNet.lapsrn_unet4_3_DC import Net, L1_Charbonnier_loss

# from dataset import DatasetFromHdf5
# from dataset_MRI3 import DatasetFromHdf5
from DataLoader.dataset_MRI3 import DatasetFromHdf5
import numpy as np

# Training settings.
# NOTE(review): --cuda has default=True despite action="store_true", so CUDA is
# enabled even when the flag is omitted; pass no flag to disable is impossible
# without editing this default.
# NOTE(review): --step, --momentum and --weight-decay are parsed but not used
# anywhere in this file (adjust_learning_rate hard-codes a 50-epoch decay and
# the optimizer is plain Adam) — TODO confirm before removing.
parser = argparse.ArgumentParser(description="PyTorch LapSRN")
parser.add_argument("--batchSize", type=int, default=6, help="training batch size")
parser.add_argument("--nEpochs", type=int, default=100, help="number of epochs to train for")
parser.add_argument("--lr", type=float, default=1e-4, help="Learning Rate. Default=1e-4")
parser.add_argument("--step", type=int, default=20, help="Sets the learning rate to the initial LR decayed by momentum every n epochs, Default: n=20")
parser.add_argument("--cuda", action="store_true", default=True, help="Use cuda?")

parser.add_argument("--gpus", default="1", type=str, help="gpu ids (default: 1)")

parser.add_argument("--resume", default="", type=str, help="Path to checkpoint (default: none)")
parser.add_argument("--start-epoch", default=1, type=int, help="Manual epoch number (useful on restarts)")
parser.add_argument("--threads", type=int, default=0, help="Number of threads for data loader to use, Default: 0")
parser.add_argument("--momentum", default=0.9, type=float, help="Momentum, Default: 0.9")
parser.add_argument("--weight-decay", "--wd", default=1e-4, type=float, help="weight decay, Default: 1e-4")
parser.add_argument("--pretrained", default="", type=str, help="path to pretrained model (default: none)")


def main():
    """Entry point: parse CLI options, build Net + Charbonnier loss, optionally
    resume or load pretrained weights, then train for opt.nEpochs epochs,
    saving a checkpoint after every epoch.

    ``opt`` and ``model`` are published as module globals because
    ``train``/``adjust_learning_rate`` read ``opt`` directly.
    """

    global opt, model
    opt = parser.parse_args()
    print(opt)

    cuda = opt.cuda
    if cuda:
        print("=> use gpu id: '{}'".format(opt.gpus))
        # Must be set before the first CUDA call so only the chosen GPUs are
        # visible to torch.
        os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpus
        if not torch.cuda.is_available():
                raise Exception("No GPU found or Wrong gpu id, please run without --cuda")
#     if cuda and not torch.cuda.is_available():
#         raise Exception("No GPU found, please run without --cuda")

    # Fresh random seed each run; seed both CPU and GPU RNGs for repeatability
    # within the run (the seed itself is printed so a run can be reproduced).
    opt.seed = random.randint(1, 10000)
    print("Random Seed: ", opt.seed)
    torch.manual_seed(opt.seed)
    if cuda:
        torch.cuda.manual_seed(opt.seed)

    # Let cuDNN auto-tune conv algorithms (beneficial when input sizes are
    # fixed across iterations).
    cudnn.benchmark = True

    print("===> Loading datasets")
    # Hard-coded HDF5 training set path — presumably T1/T2/downsampled-T2 MRI
    # slice triples; verify against DataLoader.dataset_MRI3.DatasetFromHdf5.
    train_set = DatasetFromHdf5("/home/wks/users/lxj/MRI/pytorch-LapSRN-master/data/S08.Sub(2-5).Slice3.T2-T1-T2Dn8.lapSRN_NoInterleaved.h5")
#     train_set = DatasetFromHdf5("/home/wks/users/lyw/lxx/S08.Sub(2-5).Slice3.T2-T1-T2Dn8.lapSRN2.h5")
#     train_set = DatasetFromHdf5("./data/lap_pry_x4_small.h5")
    training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=True)

    print("===> Building model")
    model = Net()
    criterion = L1_Charbonnier_loss()
#     criterion = nn.MSELoss()

    print("===> Setting GPU")
    if cuda:
        model = model.cuda()
        criterion = criterion.cuda()
    else:
        model = model.cpu()

    # optionally resume from a checkpoint
    # NOTE(review): checkpoints store the whole model object (see
    # save_checkpoint), hence checkpoint["model"].state_dict() here.
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            # Resume from the epoch after the one stored in the checkpoint.
            opt.start_epoch = checkpoint["epoch"] + 1
            model.load_state_dict(checkpoint["model"].state_dict())
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    # optionally copy weights from a checkpoint (weights only; epoch counter
    # is not restored, unlike --resume above)
    if opt.pretrained:
        if os.path.isfile(opt.pretrained):
            print("=> loading model '{}'".format(opt.pretrained))
            weights = torch.load(opt.pretrained)
            model.load_state_dict(weights['model'].state_dict())
        else:
            print("=> no model found at '{}'".format(opt.pretrained))

    print("===> Setting Optimizer")
    optimizer = optim.Adam(model.parameters(), lr=opt.lr)

    print("===> Training")
    # Checkpoint after every epoch so any epoch can be resumed.
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        train(training_data_loader, optimizer, model, criterion, epoch)
        save_checkpoint(model, epoch)

def adjust_learning_rate(optimizer, epoch):
    """Return the current learning rate, decaying the global ``opt.lr``
    by a factor of 10 every 50 epochs.

    (The previous docstring claimed a 10-epoch decay; the code decays
    every 50 epochs, and ``opt.step`` is not consulted at all.)

    Args:
        optimizer: unused; kept for signature compatibility with the caller.
        epoch: 0-based epoch index (``train`` passes ``epoch - 1``).

    Returns:
        The (possibly freshly decayed) learning rate now stored in ``opt.lr``.
    """
    # No decay at epoch 0 or at epochs that are not multiples of 50.
    if epoch == 0 or epoch % 50 != 0:
        return opt.lr
    # Positive multiple of 50: decay in place so the new value persists
    # across subsequent calls (opt is a module global).
    opt.lr = opt.lr / 10.0
    return opt.lr

def train(training_data_loader, optimizer, model, criterion, epoch):
    """Run one training epoch.

    Args:
        training_data_loader: yields (T1, T2Dn, label_x2, label_x4) batches.
        optimizer: optimizer over ``model``'s parameters; its LR is refreshed
            from the decay schedule at the start of the epoch.
        model: network called as ``model(T1, T2Dn, mask)`` returning the 2x
            and 4x super-resolved outputs.
        criterion: per-scale reconstruction loss (Charbonnier).
        epoch: 1-based epoch index; drives the LR decay and log messages.
    """
    # Refresh the LR (10x decay every 50 epochs, see adjust_learning_rate)
    # and push it into every parameter group.
    lr = adjust_learning_rate(optimizer, epoch - 1)
    for param_group in optimizer.param_groups:
        param_group["lr"] = lr

    print("Epoch={}, lr={}".format(epoch, optimizer.param_groups[0]["lr"]))

    model.train()

    # Rows 148..189 along dim 2 are marked 1 in the binary mask — presumably
    # the sampled central k-space band; TODO confirm axis semantics against
    # the Net/data layout. Hoisted out of the loop: it never changes.
    masked_rows = range(148, 190)

    for iteration, batch in enumerate(training_data_loader, 1):

        T1, T2Dn, label_x2, label_x4 = (
            Variable(batch[0]),
            Variable(batch[1]),
            Variable(batch[2]),
            Variable(batch[3], requires_grad=False),
        )

        # Build the mask to match this batch's shape (the final batch may be
        # smaller, so it cannot be built once outside the loop), then append
        # a duplicated trailing axis of size 2 — presumably the real/imag
        # pair expected by the data-consistency layer; verify in Net.
        mask = np.zeros(T2Dn.shape, dtype=np.float32)
        mask[:, :, masked_rows] = 1
        mask_v = Variable(torch.from_numpy(mask))
        mask_v = mask_v.unsqueeze(-1)
        mask_v2 = torch.cat((mask_v, mask_v), -1)

        if opt.cuda:
            T1 = T1.cuda()
            T2Dn = T2Dn.cuda()
            label_x2 = label_x2.cuda()
            label_x4 = label_x4.cuda()
            mask_v2 = mask_v2.cuda()

        HR_2x, HR_4x = model(T1, T2Dn, mask_v2)

        loss_x2 = criterion(HR_2x, label_x2)
        loss_x4 = criterion(HR_4x, label_x4)
        loss = loss_x2 + loss_x4

        optimizer.zero_grad()
        # Single backward on the summed loss: by linearity this yields exactly
        # the same gradients as backpropagating each term separately, but the
        # graph is traversed once and retain_graph=True is no longer needed.
        loss.backward()
        optimizer.step()

        if iteration % 20 == 0:
            print("===> Epoch[{}]({}/{}): Loss: {:.10f}".format(epoch, iteration, len(training_data_loader), loss.data))

def save_checkpoint(model, epoch):
    """Pickle the whole model object plus the epoch number to disk.

    The entire module (not just its state_dict) is saved because the resume
    path in main() reads ``checkpoint["model"].state_dict()`` — do not switch
    to state_dict-only without updating that code.

    Args:
        model: the network to serialize.
        epoch: current epoch number, stored alongside and embedded in the
            output filename.
    """
    model_folder = "lapsrn_unet4.3_DC_0.0001_50/"
    model_out_path = model_folder + "lapsrn_UNet_model_epoch_{}.pth".format(epoch)
    state = {"epoch": epoch, "model": model}
    # exist_ok avoids the check-then-create race of the original
    # os.path.exists/os.makedirs pair.
    os.makedirs(model_folder, exist_ok=True)

    torch.save(state, model_out_path)

    print("Checkpoint saved to {}".format(model_out_path))

# Run training only when executed as a script (not on import).
if __name__ == "__main__":
    main()
