#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2018/12/25 11:48
# @USER    : Connor
# @File    : submit_macLV.py
# @Software: PyCharm
# @Version  : Python-3.6
# @TASK:
import pydicom, cv2, re
import os, fnmatch, sys
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras import backend as K
from keras import optimizers
import UnetModel as models
from UnetModel import loss

from helpers import center_crop, lr_poly_decay, get_SAX_SERIES

seed = 1234
np.random.seed(seed)  # fixed seed so the shuffle/split and augmentation are reproducible

# Case-name -> series-id mapping from helpers.get_SAX_SERIES; used in
# read_contour to build DICOM file names (see 'IM-%s-%04d.dcm' below).
SAX_SERIES = get_SAX_SERIES()
# Local root of the Sunnybrook / MICCAI 2009 cardiac MR dataset.
SUNNYBROOK_ROOT_PATH = '/media/heshengji/Ubuntu/CardiacData/MICCAI2009'

# Expert-drawn training contours (text files of polygon coordinates).
TRAIN_CONTOUR_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                            'Sunnybrook Cardiac MR Database ContoursPart3',
                            'TrainingDataContours')
# Corresponding DICOM training images, one sub-directory per case.
TRAIN_IMG_PATH = os.path.join(SUNNYBROOK_ROOT_PATH,
                        'challenge_training')


def shrink_case(case):
    """Canonicalize a case name: drop leading zeros from numeric '-'-tokens.

    E.g. 'SC-HF-I-01' -> 'SC-HF-I-1'; non-numeric tokens pass through.
    """
    def _normalize(token):
        # Round-trip numeric tokens through int to strip leading zeros;
        # leave anything non-numeric untouched.
        try:
            return str(int(token))
        except ValueError:
            return token

    return '-'.join(_normalize(tok) for tok in case.split('-'))


class Contour(object):
    """Handle to one manual-contour file.

    Parses the contour file path to recover the case name (normalized via
    shrink_case) and the image number within the series.
    """

    def __init__(self, ctr_path):
        """Parse a path of the form
        .../<case>/contours-manual/IRCCI-expert/IM-0001-<nnnn>-...

        Raises:
            ValueError: if the path does not match the expected layout
                (previously this crashed with an opaque AttributeError
                on match.group when re.search returned None).
        """
        self.ctr_path = ctr_path
        match = re.search(r'/([^/]*)/contours-manual/IRCCI-expert/IM-0001-(\d{4})-.*', ctr_path)
        if match is None:
            raise ValueError('Unexpected contour path layout: %s' % ctr_path)
        self.case = shrink_case(match.group(1))
        self.img_no = int(match.group(2))

    def __str__(self):
        return '<Contour for case %s, image %d>' % (self.case, self.img_no)

    __repr__ = __str__


def read_contour(contour, data_path):
    """Load the DICOM image and rasterize the ground-truth mask for a contour.

    Args:
        contour: Contour instance providing .case, .img_no and .ctr_path.
        data_path: root directory holding one sub-directory per case.

    Returns:
        (img, mask): image as int array and binary uint8 mask (1 inside the
        contour polygon), each with a trailing channel axis (H, W, 1).
    """
    filename = 'IM-%s-%04d.dcm' % (SAX_SERIES[contour.case], contour.img_no)
    full_path = os.path.join(data_path, contour.case, filename)
    # dcmread is the supported pydicom API; read_file was deprecated and has
    # been removed in pydicom 3.0.
    f = pydicom.dcmread(full_path)
    img = f.pixel_array.astype('int')
    mask = np.zeros_like(img, dtype='uint8')
    coords = np.loadtxt(contour.ctr_path, delimiter=' ').astype('int')
    cv2.fillPoly(mask, [coords], 1)
    if img.ndim < 3:
        # Add a channel axis so downstream code can assume (H, W, 1).
        img = img[..., np.newaxis]
        mask = mask[..., np.newaxis]

    return img, mask


def map_all_contours(contour_path, contour_type, shuffle=True):
    """Collect every manual contour file of the given type under contour_path.

    Walks the directory tree for files matching
    'IM-0001-*-<contour_type>contour-manual.txt', optionally shuffles the
    path list in place, and wraps each path in a Contour.
    """
    pattern = 'IM-0001-*-' + contour_type + 'contour-manual.txt'
    contours = []
    for dirpath, dirnames, files in os.walk(contour_path):
        for f in fnmatch.filter(files, pattern):
            contours.append(os.path.join(dirpath, f))
    if shuffle:
        print('Shuffling data')
        np.random.shuffle(contours)
    print('Number of examples: {:d}'.format(len(contours)))
    contours = [Contour(path) for path in contours]

    return contours


def export_all_contours(contours, data_path, crop_size):
    """Convert Contour records into stacked, center-cropped arrays.

    Returns:
        (images, masks): float arrays of shape (N, crop_size, crop_size, 1).
    """
    print('Processing {:d} images and labels ...'.format(len(contours)))
    shape = (len(contours), crop_size, crop_size, 1)
    images = np.zeros(shape)
    masks = np.zeros(shape)
    for idx, contour in enumerate(contours):
        raw_img, raw_mask = read_contour(contour, data_path)
        images[idx] = center_crop(raw_img, crop_size=crop_size)
        masks[idx] = center_crop(raw_mask, crop_size=crop_size)

    return images, masks


if __name__ == '__main__':
    # CLI: argv[1] = contour type flag (usage string says <i/o>; presumably
    # inner/outer contour -- confirm), argv[2] = optional GPU id exported
    # via CUDA_VISIBLE_DEVICES.
    if len(sys.argv) < 2:
        sys.exit('Usage: python %s <i/o> <gpu_id>' % sys.argv[0])
    elif len(sys.argv) < 3:
        contour_type = sys.argv[1]
    else:
        contour_type = sys.argv[1]
        os.environ['CUDA_VISIBLE_DEVICES'] = sys.argv[2]

    # Side length of the square center crop fed to the network.
    # crop_size = 100
    crop_size = 128

    print('Mapping ground truth '+contour_type+' contours to images in train...')
    train_ctrs = map_all_contours(TRAIN_CONTOUR_PATH, contour_type, shuffle=True)
    print('Done mapping training set')

    # Hold out the first 10% of the (already shuffled) contours as a dev set.
    split = int(0.1*len(train_ctrs))
    dev_ctrs = train_ctrs[0:split]
    train_ctrs = train_ctrs[split:]

    print('Building Train dataset ...')
    img_train, mask_train = export_all_contours(train_ctrs,
                                                TRAIN_IMG_PATH,
                                                crop_size=crop_size)
    print('Building Dev dataset ...')
    img_dev, mask_dev = export_all_contours(dev_ctrs,
                                            TRAIN_IMG_PATH,
                                            crop_size=crop_size)

    # model = fcn_model(input_shape, num_classes, weights=weights)
    print("Building model...")
    # input_shape = (crop_size, crop_size, 1)
    num_classes = 2
    model_select = 'unet'
    weights = None  # path to pre-trained weights; None trains from scratch
    # if contour_type == 'i':
    #     weights = 'weights/' + model_select + 'sunnybrook_i.h5'
    # elif contour_type == 'o':
    #     weights = 'weights/' + model_select + 'sunnybrook_o.h5'
    # else:
    #     sys.exit('\ncontour type "%s" not recognized\n' % contour_type)

    # Dispatch table: model name -> constructor in UnetModel.
    string_to_model = {
        "unet": models.unet,
        "dilated-unet": models.dilated_unet,
        "dilated-densenet": models.dilated_densenet,
        "dilated-densenet2": models.dilated_densenet2,
        "dilated-densenet3": models.dilated_densenet3,
    }
    model = string_to_model[model_select]
    m = model(height=crop_size, width=crop_size, channels=1, classes=num_classes,
              features=64, depth=4, padding='same',
              temperature=1.0, batchnorm=False,
              dropout=0)

    m.summary()     # print model setting

    if weights is not None:
        m.load_weights(weights)

    # select loss function: pixel-wise crossentropy, soft dice or soft
    # jaccard coefficient
    loss_select = 'pixel'
    # Per-class loss weights; presumably (background, foreground) order --
    # confirm against UnetModel.loss.
    loss_weight = [0.5, 0.5]
    if loss_select == 'pixel':
        def lossfunc(y_true, y_pred):
            return loss.weighted_categorical_crossentropy(
                y_true, y_pred, loss_weight)
    elif loss_select == 'dice':
        def lossfunc(y_true, y_pred):
            return loss.sorensen_dice_loss(y_true, y_pred, loss_weight)
    elif loss_select == 'jaccard':
        def lossfunc(y_true, y_pred):
            return loss.jaccard_loss(y_true, y_pred, loss_weight)
    else:
        raise Exception("Unknown loss ({})".format(loss_select))

    def dice(y_true, y_pred):
        # Batch-mean Dice; index [1] picks the second (foreground) class.
        batch_dice_coefs = loss.sorensen_dice(y_true, y_pred, axis=[1, 2])
        dice_coefs = K.mean(batch_dice_coefs, axis=0)
        return dice_coefs[1]    # HACK for 2-class case

    def jaccard(y_true, y_pred):
        # Batch-mean Jaccard; index [1] picks the second (foreground) class.
        batch_jaccard_coefs = loss.jaccard(y_true, y_pred, axis=[1, 2])
        jaccard_coefs = K.mean(batch_jaccard_coefs, axis=0)
        return jaccard_coefs[1] # HACK for 2-class case

    metrics = ['accuracy', dice, jaccard]

    m.compile(optimizer=optimizers.Adam(), loss=lossfunc, metrics=metrics)

    # m.compile(optimizer=optimizers.Adam(), loss=dice_coef_loss,
    #               metrics=['accuracy', dice_coef, jaccard_coef])

    # Augmentation config shared by the image and mask generators so both
    # receive identical geometric transforms.
    kwargs = dict(
        rotation_range=180,
        zoom_range=0.0,
        width_shift_range=0.0,
        height_shift_range=0.0,
        horizontal_flip=True,
        vertical_flip=True,
    )
    image_datagen = ImageDataGenerator(**kwargs)
    mask_datagen = ImageDataGenerator(**kwargs)

    epochs = 1000
    mini_batch_size = 5

    # shuffle=False plus the shared seed keeps each augmented image aligned
    # with its augmented mask across the two generators.
    image_generator = image_datagen.flow(img_train, shuffle=False,
                                    batch_size=mini_batch_size, seed=seed)
    mask_generator = mask_datagen.flow(mask_train, shuffle=False,
                                    batch_size=mini_batch_size, seed=seed)
    train_generator = zip(image_generator, mask_generator)

    # Polynomial learning-rate decay scheduled over the full training run.
    max_iter = (len(train_ctrs) // mini_batch_size) * epochs
    curr_iter = 0
    base_lr = K.eval(m.optimizer.lr)
    lrate = lr_poly_decay(m, base_lr, curr_iter, max_iter, power=0.5)

    import datetime

    # Checkpoint threshold: save weights once dev accuracy exceeds this.
    op_acc = 0.98
    for e in range(epochs):
        starttime = datetime.datetime.now()
        print('\nMain Epoch {:d}'.format(e + 1))
        print('Learning rate: {:6f}'.format(lrate))
        train_result = []
        for iteration in range(len(img_train) // mini_batch_size):
            img, mask = next(train_generator)
            res = m.train_on_batch(img, mask)
            curr_iter += 1
            lrate = lr_poly_decay(m, base_lr, curr_iter,
                                  max_iter, power=0.5)
            train_result.append(res)
        train_result = np.asarray(train_result)
        # train_result = np.nan_to_num(train_result)
        # print(train_result)
        train_result = np.mean(train_result, axis=0).round(decimals=10)
        print('Train result {}: {}'.format(m.metrics_names, train_result))
        print('\nEvaluating dev set ...')
        result = m.evaluate(img_dev, mask_dev, batch_size=32)
        result = np.round(result, decimals=10)
        print('Dev set result {}:  {}'.format(m.metrics_names, result))

        if not os.path.exists('model_logs'):
            os.makedirs('model_logs')

        # result[1] is the first compiled metric ('accuracy' per the metrics
        # list above). Save when it beats the running best, and always on the
        # last epoch.
        if result[1] > op_acc or e == epochs - 1:
            op_acc = round(result[1], 4)

            # NOTE(review): '_'.join with embedded '_' tokens yields double
            # underscores, e.g. 'sunnybrook__unet__i_epoch...'; likely
            # unintended but preserved here.
            save_file = '_'.join(['sunnybrook_', model_select, '_', contour_type,
                                  'epoch', str(e+1), str(op_acc)]) + '.h5'
            save_path = os.path.join('model_logs', save_file)
            print('\nSaving model weights to {}\n'.format(save_path))
            m.save_weights(save_path)

        endtime = datetime.datetime.now()
        print('time:{:d}s\n'.format((endtime - starttime).seconds))
