# coding=utf-8

import os
import shutil
import logging

import numpy as np
import SimpleITK as sitk
import tensorflow as tf
import horovod.tensorflow as hvd

import sys
sys.path.append("..")

from Utils import write_image


def train_input_generator(data_provider, batch_size=64):
    """Infinite generator yielding successive training batches.

    :param data_provider: object exposing ``get_data(i, batch_size=...)``
    :param batch_size: number of samples per batch
    :yields: whatever ``data_provider.get_data`` returns for step ``i``,
        with ``i`` incremented after every yielded batch
    """
    # BUG FIX: original read `int i = 0;` (C syntax) — a Python SyntaxError.
    i = 0
    while True:
        yield data_provider.get_data(i, batch_size=batch_size)
        i += 1


class Trainer(object):
    """Trains a unet instance with Horovod-distributed data parallelism.

    :param config: dict with at least ``'batch_size'`` (int) and
        ``'iterations'`` (int, total steps across all workers)
    :param net: the unet instance to train; must expose ``cost``,
        ``inputs``, ``label``, ``distance``, ``training`` and ``keep_prob``
        tensors/placeholders
    """

    # Directory where predictions would be written (see Utils.write_image).
    prediction_path = "./Prediction"

    def __init__(self, config, net):
        self.net = net
        self.batch_size = config['batch_size']
        self.iterations = config['iterations']

    def train(self, data_provider, output_path, keep_prob=0.75, display_step=20):
        """
        Launches the training process

        :param data_provider: callable returning training and verification data
        :param output_path: path where to store checkpoints
        :param keep_prob: keep probability
        :param display_step: number of steps till outputting stats
        """

        hvd.init()

        global_step = tf.train.get_or_create_global_step()

        # Horovod: scale the learning rate by the number of workers, then wrap
        # the optimizer so gradients are averaged across ranks each step.
        opt = tf.train.AdamOptimizer(learning_rate=1e-5 * hvd.size())
        opt = hvd.DistributedOptimizer(opt)

        # Ensure batch-norm (and similar) update ops run before each step.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            # BUG FIX: original called `raw_op.minimize(...)` but `raw_op` was
            # never defined (NameError); the distributed optimizer `opt` is
            # the object that must build the train op.
            train_op = opt.minimize(self.net.cost, global_step=global_step)

        hooks = [
                # Horovod: BroadcastGlobalVariablesHook broadcasts initial variable states
                # from rank 0 to all other processes. This is necessary to ensure consistent
                # initialization of all workers when training is started with random weights
                # or restored from a checkpoint.
                hvd.BroadcastGlobalVariablesHook(0),

                # Horovod: adjust number of steps based on number of GPUs.
                tf.train.StopAtStepHook(last_step=self.iterations // hvd.size()),

                tf.train.LoggingTensorHook(tensors={'step': global_step, 'loss': self.net.cost},
                                           every_n_iter=10),
        ]

        # Horovod: pin GPU to be used to process local rank (one GPU per process)
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.visible_device_list = str(hvd.local_rank())

        # Horovod: save checkpoints only on worker 0 to prevent other workers from
        # corrupting them.
        checkpoint_dir = '../checkpoints' if hvd.rank() == 0 else None

        batch_generator = train_input_generator(data_provider, self.batch_size)
        with tf.train.MonitoredTrainingSession(checkpoint_dir=checkpoint_dir,
                                                   hooks=hooks,
                                                   config=config) as mon_sess:
            while not mon_sess.should_stop():
                # Run a training step synchronously.
                batch_inputs, batch_label, batch_distance = next(batch_generator)
                mon_sess.run(train_op,
                            feed_dict={self.net.inputs: batch_inputs,
                                self.net.label: batch_label,
                                self.net.distance: batch_distance,
                                self.net.training: True,
                                self.net.keep_prob: keep_prob})

            print("Optimization Finished!")
