# -*- coding: utf-8 -*-

from __future__ import print_function

import logging
import argparse
import shutil
import os
import glob
import nibabel as nib

import torch
import torch.nn as nn

import torch.backends.cudnn as cudnn
import torch.utils.data as Data
import numpy as np

from Unet import UNet


# Command-line interface for the training script.
parser = argparse.ArgumentParser(description='PyTorch Thorax Training')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                    help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=100, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--patches', default=1000, type=int, metavar='N',
                    help='number of segmentation patches')
parser.add_argument('-b', '--batch-size', default=10, type=int,
                    metavar='N', help='mini-batch size (default: 10)')
parser.add_argument('--lr', '--learning-rate', default=1e-1, type=float,
                    metavar='LR', help='initial learning rate')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')

# Best validation loss seen so far; updated in main() after each epoch.
best_loss = float('inf')

# Log to a file in the current working directory and mirror to the console.
log_file = "train_log.txt"
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s', filename=log_file)

console = logging.StreamHandler()
console.setLevel(logging.INFO)
console.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
logging.getLogger('').addHandler(console)


def main():
    """Entry point: build the model, wire up data loaders, and run the
    train/validate loop, checkpointing after every epoch."""
    global args, best_loss
    args = parser.parse_args()
    args.start_epoch = 0
    # os.environ['CUDA_VISIBLE_DEVICES'] = '2,3'

    # create model: 4 input channels, 6 output classes (see UNet(4, 6) below)
    model = UNet(4, 6).cuda()
    model = nn.DataParallel(model, device_ids=[0])

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            # NOTE(review): torch.load replaces the whole DataParallel wrapper,
            # so the checkpoint must have been saved by save_checkpoint() below.
            model = torch.load(args.resume)
            # Recover the epoch number from the filename. Assumes the
            # 'model_epoch_<N>.pkl' pattern produced below — TODO confirm;
            # any other path format will raise ValueError here.
            modelName = args.resume
            s = modelName.split('_')
            s2 = s[-1].split('.')
            args.start_epoch = int(s2[0])
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Directory for per-epoch checkpoints.
    ckpts = 'ckpts'
    if not os.path.exists(ckpts): os.makedirs(ckpts)

    args.arch = 'CT_Seg'

    # Collect paired img/gt paths. Pairing relies on both globs walking the
    # same directories in the same order — presumably each */* dir holds one
    # img.nii.gz and one gt.nii.gz; verify against the dataset layout.
    # NOTE(review): the [:5] slices cap each list at 5 files — looks like a
    # leftover debugging limit; remove for a full training run.
    trainImgPath = glob.glob(os.path.abspath('.') + '/TrainingData2D/*/*/img.nii.gz')[:5]
    trainGtPath = glob.glob(os.path.abspath('.') + '/TrainingData2D/*/*/gt.nii.gz')[:5]
    testImgPath = glob.glob(os.path.abspath('.') + '/newTestData2D/*/*/img.nii.gz')[:5]
    testGtPath = glob.glob(os.path.abspath('.') + '/newTestData2D/*/*/gt.nii.gz')[:5]

    print("Total: ", len(trainImgPath), "train data paths")
    print("Total: ", len(testImgPath), "test data paths" )
    print(len(trainGtPath))

    # The Dataset yields path pairs; actual NIfTI loading happens in
    # train()/validate().
    train_loader = Data.DataLoader(
        MyDataset(trainImgPath, trainGtPath),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.workers
    )

    valid_loader = Data.DataLoader(
        MyDataset(testImgPath, testGtPath),
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.workers
    )

    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.RMSprop(model.parameters(),args.lr,
                                   momentum=0.6,
                                   weight_decay=1e-5)

    logging.info('-------------- New training session, LR = %f ----------------' % (args.lr, ))

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train_loss = train(train_loader, model, criterion, optimizer, epoch)
        # evaluate on validation set
        valid_loss = validate(valid_loader, model, criterion, epoch)

        # remember best validation loss and save checkpoint (every epoch;
        # the best one is additionally copied to 'model_best.pth.pkl')
        is_best = valid_loss < best_loss
        best_loss = min(valid_loss, best_loss)
        file_name = os.path.join(ckpts, 'model_epoch_%d.pkl' % (epoch + 1, ))
        save_checkpoint(model, is_best, filename=file_name)

        msg = 'Epoch: {0:02d} Train loss {1:.4f} Valid loss {2:.4f}'.format(epoch+1, train_loss, valid_loss)
        logging.info(msg)


def train(train_loader, model, criterion, optimizer, epoch):
    """Run one training epoch and return the average cross-entropy loss.

    The loader yields batches of (image_path, label_path) string pairs;
    the 4-channel 512x512 volumes are loaded from disk here.
    """
    losses = AverageMeter()

    # switch to train mode
    model.train()
    for imgPath, targetPath in train_loader:
        x = np.zeros((len(imgPath), 4, 512, 512), dtype='float32')
        label = np.zeros((len(targetPath), 512, 512))
        for i in range(len(imgPath)):
            niiImg = nib.load(imgPath[i])
            niiTarget = nib.load(targetPath[i])
            # get_fdata() replaces get_data(), which is deprecated and
            # removed in nibabel 5.
            x[i] = niiImg.get_fdata()
            label[i] = niiTarget.get_fdata()

        x = torch.autograd.Variable(torch.Tensor(x).float()).cuda()
        target = torch.autograd.Variable(torch.Tensor(label).long()).cuda()
        optimizer.zero_grad()
        output = model(x)  # presumably (N, 6, 512, 512) logits — per the view below
        # Flatten to (N*512*512, 6) logits against (N*512*512,) class indices
        # for per-pixel CrossEntropyLoss.
        output = output.view(-1, 6, 512 ** 2).permute(0, 2, 1).contiguous()
        output = output.view(-1, 6)
        target = target.view(-1)
        loss = criterion(output, target)
        # .item() replaces loss.data[0], which raises on 0-dim loss tensors
        # in PyTorch >= 0.4.
        losses.update(loss.item(), target.size(0))
        loss.backward()
        optimizer.step()

    return losses.avg


def validate(valid_loader, model, criterion, epoch):
    """Evaluate the model on the validation loader; return the average loss.

    Mirrors train() minus the backward/step, and disables gradient
    tracking to avoid building an unused autograd graph.
    """
    losses = AverageMeter()

    # switch to evaluate mode
    model.eval()
    # no_grad: evaluation needs no gradients; saves memory and time.
    with torch.no_grad():
        for imgPath, targetPath in valid_loader:
            x = np.zeros((len(imgPath), 4, 512, 512), dtype='float32')
            label = np.zeros((len(targetPath), 512, 512))
            for i in range(len(imgPath)):
                niiImg = nib.load(imgPath[i])
                niiTarget = nib.load(targetPath[i])
                # get_fdata() replaces get_data(), deprecated and removed
                # in nibabel 5.
                x[i] = niiImg.get_fdata()
                label[i] = niiTarget.get_fdata()

            x = torch.autograd.Variable(torch.Tensor(x).float()).cuda()
            target = torch.autograd.Variable(torch.Tensor(label).long()).cuda()

            output = model(x)  # presumably (N, 6, 512, 512) logits — per the view below
            # Flatten to (N*512*512, 6) logits against (N*512*512,) indices.
            output = output.view(-1, 6, 512 ** 2).permute(0, 2, 1).contiguous()
            output = output.view(-1, 6)
            target = target.view(-1)
            loss = criterion(output, target)

            # .item() replaces loss.data[0], which raises on 0-dim tensors
            # in PyTorch >= 0.4.
            losses.update(loss.item(), target.size(0))

    return losses.avg

class MyDataset(Data.Dataset):
    """Dataset over parallel lists of image/label file paths.

    Indexing yields the (image_path, label_path) pair of strings; the
    actual NIfTI files are loaded later inside the train/validate loops.
    """

    def __init__(self, images, labels):
        self.images = images
        self.labels = labels

    def __getitem__(self, index):
        return self.images[index], self.labels[index]

    def __len__(self):
        return len(self.images)

def adjust_learning_rate(optimizer, epoch):
    """Halve the LR of every param group on epochs 1, 11, 21, ... (epoch % 10 == 1)."""
    if epoch % 10 != 1:
        return
    for group in optimizer.param_groups:
        group['lr'] *= 0.5


class AverageMeter(object):
    """Tracks the most recent value and a count-weighted running average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all accumulated statistics."""
        self.val, self.sum, self.count, self.avg = 0, 0, 0, 0

    def update(self, val, n=1):
        """Fold in *val* observed *n* times and refresh the average."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count


def save_checkpoint(model, is_best, filename='checkpoint.pth.pkl'):
    """Serialize the whole model object to *filename*; when *is_best*,
    also mirror it to 'model_best.pth.pkl' in the working directory."""
    torch.save(model, filename)
    if not is_best:
        return
    shutil.copyfile(filename, 'model_best.pth.pkl')


# Script entry point: parse CLI args and launch the training loop.
if __name__ == "__main__":
    main()
