# -*- coding: utf-8 -*-

from read_data import generate_image_batch
from vgg16 import vgg16_network
import tensorflow as tf
import numpy as np
import time


def inference_loss(predict, ground_truth):
    """Per-image sigmoid cross-entropy between logits and binary masks.

    Each 4-D tensor is flattened to (batch, H*W*C); element-wise sigmoid
    cross-entropy is summed over all pixels of an image, then averaged
    over the batch.

    Args:
        predict: 4-D float tensor of raw (pre-sigmoid) logits.
        ground_truth: float tensor of target mask values in [0, 1],
            same total size per image as ``predict``.

    Returns:
        A scalar tensor: mean over the batch of the per-image summed loss.
    """
    batch_size = predict.get_shape().as_list()[0]
    # -1 lets TF infer H*W*C, so this also works when the spatial
    # dimensions are only known at run time (the original code required
    # fully static shapes).
    flat_predict = tf.reshape(predict, [batch_size, -1])
    flat_truth = tf.reshape(ground_truth, [batch_size, -1])
    # Keyword arguments: the positional (logits, targets) form used by the
    # original code was removed in TF 1.0; naming them is unambiguous and
    # forward-compatible.
    sigmoid_loss = tf.nn.sigmoid_cross_entropy_with_logits(
        logits=flat_predict, labels=flat_truth)
    return tf.reduce_mean(tf.reduce_sum(sigmoid_loss, reduction_indices=1))


def main():
    """Fine-tune a VGG16-based segmentation network on the vehicle dataset.

    Builds the graph (input pipeline, network, loss, momentum optimizer),
    loads pretrained VGG16 weights from a .npy file, then runs the training
    loop until the input queue is exhausted and saves a final checkpoint.
    """
    # Machine-specific paths — presumably adjusted per deployment.
    folder = "/home/lijun/Dataset/vehicle_segment/train"
    model_path = "/home/lijun/data/models/VGG16.npy"
    # The .npy file holds a pickled dict, presumably
    # {layer_name: {"weights": ndarray, "biases": ndarray}} — the loop below
    # reads exactly those two keys. encoding="latin1" is required to unpickle
    # arrays that were saved under Python 2.
    data_dict = np.load(model_path, encoding="latin1").item()
    assert isinstance(data_dict, dict)

    with tf.Graph().as_default():
        # Input pipeline: queue-based batches of images and their masks.
        # num_epochs=10 makes the queue raise OutOfRangeError when done,
        # which is what terminates the training loop below.
        image_op, mask_image_op = generate_image_batch(folder, num_epochs=10, batch_size=16)
        predict_op = vgg16_network(image_op)
        predict_loss_op = inference_loss(predict_op, mask_image_op)
        # Weight-decay term: sum of whatever the network added to the
        # standard regularization-losses collection.
        regular_op = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
        train_loss_op = predict_loss_op + regular_op

        global_step = tf.Variable(0, trainable=False)
        # Step decay: lr drops by 10x every 1000 steps (staircase=True).
        # NOTE(review): 1.0e-8 is an unusually small base rate — confirm
        # it is intentional and not a typo for e.g. 1.0e-3.
        learning_rate = tf.train.exponential_decay(learning_rate=1.0e-8, global_step=global_step,
                                                   decay_steps=1000, decay_rate=0.1, staircase=True)
        momentum_opt = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.95)
        opt_op = momentum_opt.minimize(train_loss_op, global_step=global_step)

        saver = tf.train.Saver()
        coord = tf.train.Coordinator()
        with tf.Session() as session:
            # Legacy (pre-TF-1.0) initializers; local variables must also be
            # initialized because num_epochs creates a local counter variable.
            tf.initialize_all_variables().run()
            tf.initialize_local_variables().run()
            # Copy pretrained VGG16 parameters into same-named variable
            # scopes. reuse=True fetches the existing variables; a layer in
            # the .npy file with no matching scope/shape raises ValueError
            # and is deliberately skipped (e.g. heads replaced for
            # segmentation keep their fresh initialization).
            for op_name in data_dict:
                with tf.variable_scope(op_name, reuse=True):
                    try:
                        session.run(tf.get_variable("weights").assign(data_dict[op_name]["weights"]))
                        session.run(tf.get_variable("biases").assign(data_dict[op_name]["biases"]))
                    except ValueError:
                        pass
            # Must start after initialization, or the queue threads would
            # touch uninitialized variables.
            threads = tf.train.start_queue_runners(sess=session, coord=coord)
            try:
                while not coord.should_stop():
                    start = time.time()
                    # One combined run so loss values come from the same
                    # step as the weight update.
                    predict_loss, regular_loss, _ = session.run([predict_loss_op, regular_op, opt_op])
                    end = time.time()
                    # NOTE(review): .eval() issues two extra session.run
                    # calls per step just for logging.
                    print("step: {}, predict loss: {}, weight decay: {}, time: {}, learning rate: {}".format(
                        global_step.eval(), predict_loss, regular_loss, end - start, learning_rate.eval()))
            except tf.errors.OutOfRangeError:
                # Raised by the input queue once all 10 epochs are consumed.
                print("all images have been used")
            finally:
                coord.request_stop()
            coord.join(threads)
            # Checkpoint is written only once, after training completes —
            # a crash mid-run loses all progress.
            saver.save(session, "./model/vehicle_L2Loss.npk", global_step)


# Run training only when executed as a script, not on import.
if __name__ == "__main__":
    main()
