# -*- coding: utf-8 -*-

import random
import os
import tensorflow as tf
import numpy as np
import h5py
from metro.read_data import get_images_path, process_image


def vgg16_convolution(in_put, in_channel, out_channel, layer_name):
    """Build one VGG-style 3x3 convolution layer: conv + bias + ReLU.

    The kernel is Xavier-initialized and L2-regularized (weight decay 1e-5);
    the convolution uses stride 1 and "SAME" padding, so spatial size is kept.

    Args:
        in_put: 4-D input tensor in NHWC layout.
        in_channel: number of channels of `in_put`.
        out_channel: number of filters / output channels.
        layer_name: variable-scope name for this layer's parameters.

    Returns:
        The ReLU-activated output tensor of the layer.
    """
    with tf.variable_scope(layer_name) as scope:
        kernel = tf.get_variable(
            name="weights",
            shape=[3, 3, in_channel, out_channel],
            initializer=tf.contrib.layers.xavier_initializer_conv2d(),
            regularizer=tf.contrib.layers.l2_regularizer(0.00001))
        bias = tf.get_variable(
            name="biases",
            shape=[out_channel],
            initializer=tf.constant_initializer())
        conv = tf.nn.conv2d(input=in_put, filter=kernel,
                            strides=[1, 1, 1, 1], padding="SAME")
        activated = tf.nn.relu(tf.nn.bias_add(conv, bias), name=scope.name)
    return activated


def vgg16_max_pool(in_put):
    """Apply 2x2 max pooling with stride 2 ("SAME" padding).

    Halves the spatial dimensions of the NHWC input tensor.
    """
    pooled = tf.nn.max_pool(value=in_put,
                            ksize=[1, 2, 2, 1],
                            strides=[1, 2, 2, 1],
                            padding="SAME")
    return pooled


def vgg16_network(in_put):
    """Build a truncated VGG-16 that regresses a single-channel map.

    Runs the first three VGG-16 convolution blocks (two max-pools, so the
    output is at 1/4 of the input resolution), then a 1x1 convolution
    ("conv3_4") projecting 256 channels down to 1, with no bias and no
    activation.

    Args:
        in_put: 4-D input tensor in NHWC layout with 3 channels.

    Returns:
        The "conv3_4" output tensor, shape [batch, H/4, W/4, 1].
    """
    # (out_channels, layer names, pool after block?) — scope names must stay
    # exactly as below so existing checkpoints keep loading.
    stack = [
        (64, ["conv1_1", "conv1_2"], True),
        (128, ["conv2_1", "conv2_2"], True),
        (256, ["conv3_1", "conv3_2", "conv3_3"], False),
    ]
    net = in_put
    channels = 3
    for width, names, pool_after in stack:
        for name in names:
            net = vgg16_convolution(in_put=net, in_channel=channels,
                                    out_channel=width, layer_name=name)
            channels = width
        if pool_after:
            net = vgg16_max_pool(in_put=net)
    # Final 1x1 projection: weights only (no bias), linear output.
    with tf.variable_scope("conv3_4"):
        weights = tf.get_variable(name="weights",
                                  shape=[1, 1, 256, 1],
                                  dtype=tf.float32,
                                  initializer=tf.contrib.layers.xavier_initializer_conv2d(),
                                  regularizer=tf.contrib.layers.l2_regularizer(0.00001))
        projection = tf.nn.conv2d(input=net,
                                  filter=weights,
                                  strides=[1, 1, 1, 1],
                                  padding="SAME",
                                  name="convolution")
    return projection


def generate_shuffle_train_data(batch_size=16, folder="/home/lijun/Dataset/metro/train"):
    """Yield globally shuffled (image, ground-truth) training batches.

    Scans every immediate sub-directory of ``folder`` for image /
    ground-truth path pairs, shuffles all samples together, and yields
    full batches.  A trailing partial batch (fewer than ``batch_size``
    samples) is dropped, matching the original behavior.

    Args:
        batch_size: number of samples per yielded batch.
        folder: dataset root directory; previously hard-coded, now a
            parameter (default keeps the old value for compatibility).

    Yields:
        Tuple (image_batch, ground_truth_batch) of float32 arrays with
        shapes [batch_size, 260, 480, 3] and [batch_size, 65, 120, 1].
    """
    sub_folders = [os.path.join(folder, name) for name in os.listdir(folder)
                   if os.path.isdir(os.path.join(folder, name))]
    full_images_path, full_ground_truths_path = [], []
    for sub_folder in sub_folders:
        images_path, ground_truths_path = get_images_path(sub_folder)
        full_images_path.extend(images_path)
        full_ground_truths_path.extend(ground_truths_path)
    # Every image must have exactly one matching ground truth.
    assert len(full_images_path) == len(full_ground_truths_path)
    indices = list(range(len(full_images_path)))
    random.shuffle(indices)
    for i in range(len(indices) // batch_size):
        image_batch = np.zeros(shape=[batch_size, 260, 480, 3], dtype=np.float32)
        ground_truth_batch = np.zeros(shape=[batch_size, 65, 120, 1], dtype=np.float32)
        for j in range(batch_size):
            index = indices[i * batch_size + j]
            image, ground_truth = process_image(full_images_path[index],
                                                full_ground_truths_path[index])
            image_batch[j] = image
            # process_image appears to return a 2-D map; add the channel
            # axis so it broadcasts into the [65, 120, 1] slot.
            ground_truth_batch[j] = np.expand_dims(ground_truth, axis=-1)
        yield image_batch, ground_truth_batch


def main():
    """Train the truncated-VGG density network on the metro dataset.

    Builds the graph, restores weights from an earlier checkpoint
    ("../model/metro/vgg-11200"), runs 20 epochs of Adam updates over
    shuffled batches, and saves a single checkpoint when training ends.
    """
    # layers_name = ["conv1_1", "conv1_2",
    #                "conv2_1", "conv2_2",
    #                "conv3_1", "conv3_2", "conv3_3", "conv3_4"]

    with tf.Graph().as_default():
        # Fixed input resolution 260x480 RGB; targets are 65x120x1, i.e.
        # 1/4 resolution — matches the two max-pools in vgg16_network.
        input_x = tf.placeholder(dtype=tf.float32, shape=[None, 260, 480, 3])
        input_y = tf.placeholder(dtype=tf.float32, shape=[None, 65, 120, 1])
        inference_op = vgg16_network(input_x)
        # NOTE(review): tf.sub is the pre-TF-1.0 name for tf.subtract —
        # this file targets an old TensorFlow release; keep APIs consistent
        # if upgrading.  The 16*1000 divisor presumably normalizes by
        # batch size (16) and a fixed count scale (1000) — TODO confirm.
        inference_loss_op = tf.nn.l2_loss(tf.sub(input_y, inference_op)) / (16*1000)
        # Sum of all L2 weight-decay terms registered by the conv layers.
        reg_loss_op = sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
        train_loss_op = tf.add(inference_loss_op, reg_loss_op)

        global_step = tf.Variable(initial_value=0, trainable=False)
        # learning_rate = tf.train.exponential_decay(learning_rate=1.0e-8, global_step=global_step,
        #                                            decay_steps=5000, decay_rate=0.1, staircase=True)
        # momentum_opt = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.95)
        # opt_op = momentum_opt.minimize(train_loss_op, global_step=global_step)
        opt_op = tf.train.AdamOptimizer().minimize(train_loss_op, global_step=global_step)

        saver = tf.train.Saver()
        with tf.Session() as session:
            # Initialize first so optimizer slot variables exist, then
            # overwrite model weights from the checkpoint below.
            # (initialize_all_variables/initialize_local_variables are the
            # deprecated pre-1.0 names for the global/local initializers.)
            tf.initialize_all_variables().run()
            tf.initialize_local_variables().run()

            # load pre-trained weight
            # h5_file = h5py.File("vgg-21000.h5", "r")
            # for layer_name in layers_name:
            #     with tf.variable_scope(layer_name, reuse=True):
            #         session.run(tf.get_variable("weights").assign(np.array(h5_file[layer_name+"/"+"weights"])))
            #         if layer_name != "conv3_4":
            #             session.run(tf.get_variable("biases").assign(np.array(h5_file[layer_name+"/"+"biases"])))
            # h5_file.close()
            # print("assign weight succeed.")

            saver.restore(session, "../model/metro/vgg-11200")
            # for op_name in data_dict:
            #     with tf.variable_scope(op_name, reuse=True):
            #         try:
            #             session.run(tf.get_variable("weights").assign(data_dict[op_name]["weights"]))
            #             session.run(tf.get_variable("biases").assign(data_dict[op_name]["biases"]))
            #         except ValueError:
            #             pass
            for epoch in range(20):
                for image, ground_truth in generate_shuffle_train_data(batch_size=16):
                    inference_loss, reg_loss, step, inference, _ = session.run(
                        [inference_loss_op, reg_loss_op, global_step, inference_op, opt_op],
                        feed_dict={input_x: image, input_y: ground_truth})
                    if step % 100 == 0:
                        # Density-map sums / 1000 give per-batch counts —
                        # presumably the 1000 is the density scale used when
                        # the ground truth was generated; verify upstream.
                        print(np.min(inference[0]), np.max(inference[0]),
                              np.min(ground_truth[0]), np.max(ground_truth[0]))
                        print("epoch: {}, step: {}, inference_loss: {}, reg_loss: {}".format(
                            epoch+1, step, inference_loss, reg_loss))
                        print("predict: {}, ground truth: {}, difference: {}".format(
                            np.sum(inference)/1000, np.sum(ground_truth)/1000,
                            np.abs(np.sum(inference) - np.sum(ground_truth))/1000))
            # NOTE(review): the checkpoint is written only once, after all
            # 20 epochs — a crash mid-training loses everything; consider
            # periodic saves.
            saver.save(session, "../model/metro/vgg", global_step=global_step)


if __name__ == "__main__":
    main()
