from keras.callbacks import Callback, ModelCheckpoint
import numpy as np
from train_config import get_config
import logging
import os
import time
import datetime
from easy_io import write_pkl_file
import global_config
import model_zoo


from keras.backend.tensorflow_backend import set_session
import tensorflow as tf
# TF 1.x session setup: let the GPU allocator grow memory on demand instead of
# grabbing the whole device up front, then install this session as the Keras
# backend session so all Keras ops run under it.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
set_session(tf.Session(config=config))


class Statistician(Callback):
    """Keras callback that evaluates the model on the validation generator at
    the end of every epoch, pickles the raw prediction array to disk, and logs
    one formatted line of statistics.

    Parameters mirror the training configuration: a (presumably infinite)
    ``valid_generator`` yielding input batches, the total ``nb_valid_samples``
    to consume per epoch, a ``statistical_function`` mapping the stacked
    predictions to a dict of named statistics, a ``result_saveto`` path
    template with an ``{epoch}`` field, a ``logging_str`` template, and an
    optional ``logger`` (falls back to ``print``).
    """

    def __init__(self, valid_generator, nb_valid_samples, statistical_function,
                 result_saveto, logging_str, logger=None):
        super().__init__()
        self._valid_generator = valid_generator
        self._nb_valid_samples = nb_valid_samples
        self._statistical_function = statistical_function
        self._result_saveto = result_saveto
        self._logging_str = logging_str
        # Route output through the logger when one is supplied.
        self._print = logger.info if logger else print
        self._start_time = None

    def on_epoch_begin(self, epoch, logs=None):
        # Remember when the epoch started so the log line can report elapsed time.
        self._start_time = time.time()

    def on_epoch_end(self, epoch, logs=None):
        batch_probs = []
        seen = 0
        # Pull batches until exactly nb_valid_samples have been consumed.
        # NOTE(review): assumes the generator yields plain input batches whose
        # len() is the batch size, and that nb_valid_samples is an exact
        # multiple of it — otherwise the assert below fires. Confirm upstream.
        while seen < self._nb_valid_samples:
            batch = next(self._valid_generator)
            batch_probs.append(self.model.predict_on_batch(batch))
            seen += len(batch)
        assert seen == self._nb_valid_samples
        stacked = np.concatenate(batch_probs, axis=0)
        # Persist raw predictions for this epoch, then log derived statistics.
        write_pkl_file(self._result_saveto.format(epoch=epoch), stacked)
        stats = self._statistical_function(stacked)
        self._print(self._logging_str.format(
            epoch=epoch,
            now=datetime.datetime.today(),
            time=int(time.time() - self._start_time),
            **stats, **logs))


class WarmPrinter(Callback):
    """Keras callback that prints a start-up notice when training begins and
    announces the first batch that completes (and only the first)."""

    def __init__(self):
        super().__init__()
        # Armed until the first batch-end fires, then permanently disarmed.
        self._notice = True

    def on_train_begin(self, logs=None):
        print('Process begin...')

    def on_batch_end(self, batch, logs=None):
        if not self._notice:
            return
        self._notice = False
        print('Batch {} finished...'.format(batch))


def train_model(name, candidate_file, candidate_vol_file, info_file, modelname,
                valid_folds, batchsize, crop_shape, shift_range, optimizer, lr, nb_epoches, seed, loadfrom, callbacks,
                **kwargs):
    """Configure logging, build a model from ``model_zoo``, and train it.

    ``name`` keys the log file and the model/result output folders.
    ``modelname`` selects a constructor from ``model_zoo``; extra ``**kwargs``
    are forwarded to it (plus an ``input_shape`` taken from the data config).
    ``optimizer`` is a Keras optimizer class, instantiated with ``lr``.
    ``loadfrom``, if truthy, is a weights file to resume from. ``callbacks``
    is a list of extra Keras callbacks appended after the built-in ones.
    Training stops cleanly on KeyboardInterrupt.
    """
    # locals() must be the very first statement so it captures exactly the
    # call arguments; **kwargs is flattened into the dict for logging below.
    args = locals()
    args.update(args.pop('kwargs'))

    # Log to both console and a per-run file under the global log folder.
    # NOTE(review): handlers are appended to the root logger on every call;
    # invoking train_model twice in one process would duplicate every log
    # line — confirm this is only called once per process.
    logger = logging.getLogger('')
    logger.setLevel(level=logging.DEBUG)
    console = logging.StreamHandler()
    console.setLevel(level=logging.INFO)
    os.makedirs(global_config.log_folder, exist_ok=True)
    file = logging.FileHandler(os.path.join(global_config.log_folder, name + '.log'))
    file.setLevel(level=logging.INFO)

    formatter = logging.Formatter(fmt='%(message)s')
    console.setFormatter(formatter)
    file.setFormatter(formatter)

    logger.addHandler(console)
    logger.addHandler(file)

    # Record the full argument set at the top of the run's log, sorted for
    # reproducible diffs between runs.
    logger.info('')
    logger.info('*' * 20)
    for k in sorted(args.keys()):
        logger.info('{k}: {v}'.format(k=k, v=args[k]))
    logger.info('*' * 20)
    logger.info('')

    # Data pipeline: generators, sample counts, per-epoch statistics hook and
    # its log-line template all come from the project-level config builder.
    config_dict = get_config(candidate_file, candidate_vol_file, info_file,
                             valid_folds, batchsize, crop_shape, shift_range, seed, logger)
    train_generator = config_dict['train_generator']
    nb_train_samples = config_dict['nb_train_samples']
    valid_generator = config_dict['valid_generator']
    nb_valid_samples = config_dict['nb_valid_samples']
    statistical_function = config_dict['statistical_function']
    logging_str = config_dict['logging_str']

    # Seed numpy and TF before model construction so weight init is reproducible.
    np.random.seed(seed)
    tf.set_random_seed(seed)

    # Build the model by name from model_zoo, optionally resuming weights.
    kwargs['input_shape'] = config_dict['input_shape']
    model = getattr(model_zoo, modelname)(**kwargs)
    if loadfrom:
        model.load_weights(loadfrom)
    model.compile(
        optimizer=optimizer(lr=lr),
        loss='categorical_crossentropy',
        metrics=['accuracy']
    )

    # Per-epoch checkpoint and prediction-dump paths (epoch number templated in).
    os.makedirs(os.path.join(global_config.model_folder, name), exist_ok=True)
    model_saveto = os.path.join(global_config.model_folder, name, 'epoch-{epoch:04d}.hdf5')
    os.makedirs(os.path.join(global_config.result_folder, name), exist_ok=True)
    result_saveto = os.path.join(global_config.result_folder, name, 'epoch-{epoch:04d}.pkl')
    try:
        # NOTE(review): samples_per_epoch / nb_epoch are Keras 1-style
        # fit_generator arguments; this pins the project to that API version.
        model.fit_generator(
            generator=train_generator,
            samples_per_epoch=nb_train_samples,
            nb_epoch=nb_epoches,
            verbose=0,
            callbacks=[
                ModelCheckpoint(model_saveto),
                Statistician(valid_generator, nb_valid_samples, statistical_function,
                             result_saveto, logging_str, logger),
                WarmPrinter(),
            ] + callbacks,
        )
    except KeyboardInterrupt:
        # Allow manual interruption without a traceback; checkpoints saved so far remain on disk.
        pass
