import os
import argparse
import time
import shutil

import numpy as np
import tensorflow as tf
import keras.backend as K
from keras.optimizers import SGD, RMSprop, Adam

from VGG13_v15 import *


def log_loss(y_true, y_pred):
    """Mean multiclass cross-entropy (log loss) over a batch.

    Parameters
    ----------
    y_true : ndarray, shape (n_samples, n_classes)
        One-hot (or probabilistic) ground-truth labels.
    y_pred : ndarray, shape (n_samples, n_classes)
        Predicted class probabilities.

    Returns
    -------
    float
        Mean over samples of ``-sum(y_true * log(y_pred))``.  Predictions
        are clipped to [1e-15, 1] so log(0) never occurs.
    """
    # Fix: the original bound the per-sample losses to a local named
    # `log_loss`, shadowing the function itself inside its own body.
    clipped = np.clip(y_pred, 1e-15, 1)
    per_sample = -np.sum(y_true * np.log(clipped), axis=1)
    return np.mean(per_sample)


def build_model():
    """Assemble the two-output training model and a feature extractor.

    Returns a tuple of:
      * a Keras ``Model`` producing the backbone's class prediction plus
        the output of the layer named 'feature' (fed to the center loss),
      * a backend function mapping (input batch, learning-phase flag) to
        that same 'feature' tensor.
    """
    backbone = vgg13_shortcuts_v2(
        vgg13_config.input_shape,
        vgg13_config.base_nb_filters,
        vgg13_config.weight_decay,
        init='he_normal',
        drop_rate=0.5,
    )
    input_tensor = backbone.inputs[0]
    feature_tensor = backbone.get_layer(name='feature').output

    trainer = Model(input=input_tensor, output=[backbone.outputs[0], feature_tensor])
    extract_features = K.function([input_tensor, K.learning_phase()], [feature_tensor])
    return trainer, extract_features


if __name__ == '__main__':
    # Command-line training driver: jointly optimizes a softmax head and a
    # center-loss (feature / MSE-to-center) head.  The code targets the
    # Keras 1.x-era API throughout (tf.ConfigProto, K.learning_phase(),
    # Model(input=..., output=...), predict_generator(..., max_q_size=...)).
    parser = argparse.ArgumentParser(description="vgg13_v9 with center loss")

    parser.add_argument("-r", "--lr",
                        type=float, dest="lr", metavar="lr",
                        default=vgg13_config.lr, required=False,
                        help="learning rate")

    parser.add_argument("-g", "--gpu", required=True,
                        dest="gpu", metavar="gpu",
                        help="gpu device to train")

    parser.add_argument("-f", "--fold", required=True,
                        type=int, dest="valid_fold", metavar="valid fold index",
                        help="fold hold out for validation")

    parser.add_argument("-i", "--include", required=True,
                        type=int, dest="include_test", metavar="include test flag",
                        help="flag whether to include kaggle testset")

    parser.add_argument("-o", "--output", required=True,
                        dest="train_dir", metavar="train dir",
                        help="train dir")

    parser.add_argument("-w", "--weights", required=True,
                        dest="weights_path", metavar="model weights",
                        help="model weights path")

    parser.add_argument("-d", "--hdf5", required=True,
                        dest="hdf5_path", metavar="candidates volume hdf5",
                        help="candidates volume path")

    parser.add_argument("-p", "--pkl", required=True,
                        dest="candidates_path", metavar="candidates info path",
                        help="candidates info pkl path")

    args = parser.parse_args()

    # Pin TensorFlow to the requested GPU and grow memory on demand instead
    # of grabbing the whole device up front.
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    tf_config = tf.ConfigProto()
    tf_config.log_device_placement = False
    tf_config.gpu_options.allow_growth = True
    K.set_session(tf.Session(config=tf_config))

    # build the model
    model, get_feature = build_model()

    # NOTE(review): --weights is declared required=True above, so this
    # None check can never be False here; the guard looks vestigial.
    if args.weights_path is not None:
        model.load_weights(args.weights_path, by_name=True)
        print('successfully load weights from %s' % args.weights_path)

    # Two losses: categorical cross-entropy on the class output plus MSE
    # between features and their running class centers (center loss),
    # weighted by vgg13_config.center_weight.  Metrics are keyed by layer
    # name — presumably 'activation_19' is the softmax output layer of the
    # backbone; verify against the model summary.
    optimizer = Adam(lr=args.lr)
    model.compile(optimizer=optimizer,
                  loss=['categorical_crossentropy', 'mean_squared_error'],
                  loss_weights=[1.0, vgg13_config.center_weight],
                  metrics={'activation_19':'categorical_accuracy'}
                  )
    model.summary()

    # Data pipeline: the Generator holds the candidate volumes (HDF5), the
    # candidate metadata (pkl), the held-out validation fold, and the
    # per-class feature centers updated during training.
    gen = Generator(args.hdf5_path, args.candidates_path, args.valid_fold, (args.include_test!=0))
    train_generator = gen.train_generate(vgg13_config.batch_size)
    gen.generate_val_labels()

    # main
    # train
    train_dir = os.path.join(args.train_dir, 'fold_%d' % args.valid_fold)
    if not os.path.exists(train_dir):
        os.makedirs(train_dir)
    # Snapshot the config alongside the run for reproducibility.
    shutil.copy('VGG13_v15/config.py', os.path.join(train_dir, 'config.py'))

    for epoch in range(vgg13_config.max_epoch):
        print('Epoch:%d' % epoch)
        epoch_dir = os.path.join(train_dir, 'epoch_%d' % epoch)
        train_accs = list()

        if not os.path.exists(epoch_dir):
            os.makedirs(epoch_dir)

        for batch in range(int(vgg13_config.sample_per_epoch/vgg13_config.batch_size)):
            # Each batch yields sub-volumes, 3-way labels, and the current
            # center targets; tri2bi collapses the 3-way labels to the
            # binary targets used by the softmax head.
            subvols, tri_labels, centers = next(train_generator)
            bi_labels = tri2bi(tri_labels)

            # NOTE(review): class_weight is passed as a positional list
            # [malign_weight, 1] — old Keras accepted list-by-class-index;
            # confirm index 0 is the class meant to be up-weighted.
            train_hist = model.train_on_batch(subvols, [bi_labels, centers], class_weight=[vgg13_config.malign_weight, 1])
            print('Batch:%d' % batch, train_hist)
            # Last entry of train_on_batch's return is the categorical
            # accuracy metric registered in compile().
            train_accs.append(train_hist[-1])

            # Extract features for the just-trained batch and move the class
            # centers toward them.  np.True_ is the learning-phase flag, so
            # features are computed in training mode (dropout active) —
            # presumably intentional to match the training distribution;
            # TODO confirm.
            features = get_feature([subvols, np.True_])[0]
            gen.update_center(features, tri_labels)

        print('centers:', np.mean(gen.centers, axis=1))

        # Validation: predict on the held-out fold; [0] selects the
        # classification-head output (the second output is the feature
        # tensor used only for center loss).
        t = time.time()
        val_generator = gen.val_generate(32)
        preds = model.predict_generator(val_generator, len(gen.val_candidates), max_q_size=4)[0]
        val_generator.close()
        print('predict', time.time() - t)

        val_loss = log_loss(gen.val_labels, preds)
        preds = np.argmax(preds, axis=1)
        val_labels = np.argmax(gen.val_labels, axis=1)
        # NOTE(review): class index 0 is treated as the positive class and
        # index 1 as negative here — confirm against the label encoding in
        # tri2bi / the Generator.
        pos_acc = np.sum(np.logical_and(preds == val_labels, val_labels == 0))/gen.val_pos
        neg_acc = np.sum(np.logical_and(preds == val_labels, val_labels == 1)) / gen.val_neg

        print('validate:', val_loss, pos_acc, neg_acc)
        # Checkpoint name encodes mean train accuracy and per-class
        # validation accuracies for quick eyeballing of runs.
        model.save_weights(os.path.join(epoch_dir, '%.2f_%.2f_%.2f.hdf5' %(np.mean(train_accs), pos_acc, neg_acc)))
        # Exponential LR decay: multiply by 0.98 each epoch.
        lr = K.get_value(optimizer.lr)
        print('lr:', lr)
        K.set_value(optimizer.lr, lr*0.98)

    K.clear_session()