# -*- coding: utf-8 -*-

from read_data import generate_image_batch
from vgg import vgg_network
import tensorflow as tf
import time


def inference_loss(predict, ground_truth):
    """Per-pixel sigmoid cross-entropy, summed per example, averaged over batch.

    Args:
        predict: logits tensor, NHWC, with a fully defined static shape.
        ground_truth: target tensor with the same static shape as ``predict``.

    Returns:
        A scalar loss tensor.
    """
    # Flatten each example to one row so the loss can be summed per example.
    flat_predict = tf.reshape(predict, [predict.get_shape().as_list()[0], -1])
    flat_truth = tf.reshape(ground_truth, [ground_truth.get_shape().as_list()[0], -1])
    # NOTE: positional (logits, targets) order matches the TF 0.x API used
    # throughout this file (scalar_summary, initialize_all_variables, ...).
    per_pixel_loss = tf.nn.sigmoid_cross_entropy_with_logits(flat_predict, flat_truth)
    per_example_loss = tf.reduce_sum(per_pixel_loss, reduction_indices=1)
    return tf.reduce_mean(per_example_loss)


def main():
    """Train the VGG-based vehicle-segmentation network.

    Builds a queue-based input pipeline over ``folder`` (TF 0.x reader
    queues; ``num_epochs`` ends training by raising ``OutOfRangeError``),
    feeds the dequeued batches through placeholders into ``vgg_network``,
    and minimizes sigmoid cross-entropy plus regularization with
    SGD+momentum.  Summaries go to ./log, the final checkpoint to ./model.
    """
    folder = "/home/lijun/Dataset/vehicle_segment"
    batch_size = 32

    with tf.Graph().as_default():
        image_batch_op, mask_image_batch_op = generate_image_batch(folder, num_epochs=10, batch_size=batch_size)
        # Fixed-size inputs: 180x320 RGB images and single-channel masks.
        train_x_input_op = tf.placeholder(dtype=tf.float32, shape=[batch_size, 180, 320, 3])
        train_y_input_op = tf.placeholder(dtype=tf.float32, shape=[batch_size, 180, 320, 1])
        vgg_net, train_y_predict_op = vgg_network(train_x_input_op, batch_size=batch_size)
        # Total loss = data term + any weight-decay terms the network registered.
        reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        train_loss_op = inference_loss(train_y_predict_op, train_y_input_op) + sum(reg_losses)
        tf.scalar_summary("train loss", train_loss_op)

        global_step = tf.Variable(0, trainable=False)
        # NOTE(review): 1e-8 is an unusually small starting learning rate and it
        # decays 10x every 500 steps — confirm this is intended.
        learning_rate = tf.train.exponential_decay(learning_rate=1.0e-8, global_step=global_step,
                                                   decay_steps=500, decay_rate=0.1, staircase=True)
        momentum_opt = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.95)
        opt_op = momentum_opt.minimize(train_loss_op, global_step=global_step)
        coord = tf.train.Coordinator()
        saver = tf.train.Saver()
        merged_summary = tf.merge_all_summaries()

        with tf.Session() as session:
            session.run(tf.initialize_all_variables())
            # Local variables hold the epoch counters of the input queues.
            session.run(tf.initialize_local_variables())
            train_summary_writer = tf.train.SummaryWriter("./log", session.graph)
            # Warm-start from pretrained VGG16 weights; layers absent from the
            # checkpoint are skipped (ignore_missing=True).
            vgg_net.load("/home/lijun/data/models/VGG16.npy", session, ignore_missing=True)
            threads = tf.train.start_queue_runners(sess=session, coord=coord)
            try:
                while not coord.should_stop():
                    start = time.time()
                    image_batch, mask_image_batch = session.run([image_batch_op, mask_image_batch_op])
                    # Fetch the summary in the SAME run as the training op: the
                    # previous version re-ran the whole forward pass a second
                    # time every 50th step just to produce the summary.
                    train_loss, summary, _ = session.run(
                        [train_loss_op, merged_summary, opt_op],
                        feed_dict={train_x_input_op: image_batch,
                                   train_y_input_op: mask_image_batch})
                    step = global_step.eval()
                    end = time.time()
                    print("time elapsed: ", end - start)
                    print("step: {}, train loss: {}, learning rate: {}".format(
                        step, float(train_loss), learning_rate.eval()))
                    if step % 50 == 0:
                        train_summary_writer.add_summary(summary, step)
            except tf.errors.OutOfRangeError:
                # Raised by the input queues once num_epochs is exhausted.
                print("all images have been used")
            finally:
                coord.request_stop()
            coord.join(threads)
            saver.save(session, "./model/vehicle_L2Loss.npk", global_step)
            # Flush pending events so the last summaries are not lost.
            train_summary_writer.close()


# Script entry point: run training when executed directly.
if __name__ == "__main__":
    main()
