# -*- coding: utf-8 -*-

import time
import h5py
import numpy as np
import tensorflow as tf
from data_reader import DataReader
from segnet import seg_net_network
from utility import seg_net_parameters


# Input image geometry fed to the network (height x width x RGB channels).
IMAGE_HEIGHT = 256
IMAGE_WIDTH = 480
IMAGE_DEPTH = 3
# Ground-truth label maps carry one channel (a class index per pixel).
GROUND_TRUTH_DEPTH = 1
# Number of GPU towers built by _compute_loss_and_gradient.
GPU_NUMBER = 1


def _loss(data_reader, name_scope):
    """Build the forward pass and loss ops for a single tower.

    Args:
        data_reader: object whose ``read_data()`` yields an
            (images, labels) tensor pair for this tower.
        name_scope: scope string used to select this tower's entries from
            the "weight_loss" collection.

    Returns:
        Tuple of (inference_loss, weight_loss, train_loss) scalar tensors,
        where train_loss is the sum of the other two.
    """
    batch_images, batch_labels = data_reader.read_data()
    logits = seg_net_network(in_put=batch_images, is_training=True)

    # Channel 1 of the 2-way softmax is visualized as the predicted mask;
    # presumably it is the "road" class — confirm against segnet's output order.
    probabilities = tf.nn.softmax(logits)
    _, road_probability = tf.split(value=probabilities, num_or_size_splits=2, axis=3)
    tf.summary.image(name="origin_image", tensor=batch_images)
    tf.summary.image(name="predict_image", tensor=road_probability)

    per_pixel_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=batch_labels)
    inference_loss = tf.reduce_sum(per_pixel_loss)
    # Weight-decay terms registered by the network under this tower's scope.
    weight_loss = tf.add_n(tf.get_collection(key="weight_loss", scope=name_scope))
    return inference_loss, weight_loss, inference_loss + weight_loss


def _average_gradients(tower_grads):
    """Average gradients across all GPU towers.

    Args:
        tower_grads: list (one entry per tower) of lists of
            (gradient, variable) pairs as returned by
            ``Optimizer.compute_gradients``; every tower must list the same
            variables in the same order.

    Returns:
        A single list of (averaged_gradient, variable) pairs suitable for
        ``Optimizer.apply_gradients``.
    """
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        # Stack this variable's gradient from every tower along a new axis 0,
        # then reduce to the per-tower mean.
        grads = [tf.expand_dims(g, 0) for g, _ in grad_and_vars]
        # BUG FIX: tf.concat_v2 was removed in TensorFlow 1.0; tf.concat is
        # the supported API (this file already uses TF 1.x names such as
        # tf.summary.* and tf.global_variables_initializer).
        grad = tf.reduce_mean(tf.concat(grads, 0), 0)
        # Variables are shared across towers, so take the one from tower 0.
        average_grads.append((grad, grad_and_vars[0][1]))
    return average_grads


def _compute_loss_and_gradient(optimizer, data_reader):
    """Build one loss/gradient tower per GPU and average the gradients.

    Args:
        optimizer: a ``tf.train.Optimizer`` used to compute per-tower
            gradients.
        data_reader: shared DataReader providing input batches to each tower.

    Returns:
        (inference_loss, weight_loss, train_loss, grads) where the three
        losses come from tower 0 (matching the original behavior) and
        ``grads`` are the tower-averaged (gradient, variable) pairs.
    """
    tower_grads = []
    inference_losses, weight_losses, train_losses = [], [], []
    for i in range(GPU_NUMBER):
        with tf.device("/gpu:%d" % i), tf.name_scope("tower_%d" % i) as scope:
            # Towers after the first reuse the variables created by tower 0.
            # reuse=None (tower 0) inherits the outer scope's setting, which
            # reproduces the previously duplicated if/else branches exactly.
            with tf.variable_scope(tf.get_variable_scope(),
                                   reuse=True if i > 0 else None):
                inference_loss, weight_loss, train_loss = _loss(data_reader, scope)
                inference_losses.append(inference_loss)
                weight_losses.append(weight_loss)
                train_losses.append(train_loss)
                tower_grads.append(optimizer.compute_gradients(train_loss))
    grads = _average_gradients(tower_grads)
    # NOTE: only tower 0's losses are reported, as in the original code.
    return inference_losses[0], weight_losses[0], train_losses[0], grads


# def _validate(data_reader):
#     images, ground_truths = data_reader.read_data()
#     predict = seg_net_network(in_put=images, is_training=False)
#     return tf.reduce_sum(predict)/1000, tf.reduce_sum(ground_truths)/1000


def _load_parameters_value(session, parameter_file_path, parameter_dict):
    """Copy pretrained weights from an HDF5 file into existing graph variables.

    Args:
        session: active ``tf.Session`` used to run the assign ops.
        parameter_file_path: path to the ``.h5`` checkpoint file.
        parameter_dict: maps a variable-scope name to the list of variable
            names stored under that scope; the HDF5 file mirrors this layout
            as ``<scope>/<name>`` datasets.
    """
    # Explicit "r" mode (h5py's implicit default is deprecated) and a context
    # manager so the file is closed even if a variable or dataset lookup fails
    # (the original leaked the handle on any exception before h5_file.close()).
    with h5py.File(parameter_file_path, "r") as h5_file:
        for scope_name, parameter_names in parameter_dict.items():
            # reuse=True: the variables were already created by the network.
            with tf.variable_scope(scope_name, reuse=True):
                for name in parameter_names:
                    variable = tf.get_variable(name=name)
                    variable_value = np.array(h5_file[scope_name + "/" + name])
                    session.run(variable.assign(variable_value))


def main():
    """Train the SegNet road-segmentation model.

    Builds the (multi-)GPU training graph, restores a prior checkpoint,
    then loops: logging losses every 10 steps, writing summaries, saving a
    checkpoint every 1000 steps, and stopping once the global step passes
    60000.
    """
    # Path for the h5 pretrained weights; only used by the commented-out
    # _load_parameters_value call below.
    parameter_file_path = "./model/road_seg_net-5000.h5"
    with tf.Graph().as_default():
        # Single-example placeholders; presumably DataReader queues/batches
        # them internally — confirm against data_reader.py.
        input_x = tf.placeholder(dtype=tf.float32,
                                 shape=[IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_DEPTH])
        input_y = tf.placeholder(dtype=tf.int32,
                                 shape=[IMAGE_HEIGHT, IMAGE_WIDTH])
        data_reader = DataReader(input_x=input_x, input_y=input_y)
        optimizer = tf.train.AdamOptimizer()
        inference_loss, weight_loss, train_loss, grads = _compute_loss_and_gradient(optimizer, data_reader)
        global_step = tf.Variable(initial_value=0, trainable=False)
        apply_gradient_op = optimizer.apply_gradients(grads, global_step)
        # Keep an exponential moving average of all trainable variables;
        # grouping it with the gradient step makes one train_op do both.
        variables_average = tf.train.ExponentialMovingAverage(0.9999, global_step)
        variables_average_op = variables_average.apply(tf.trainable_variables())
        train_op = tf.group(apply_gradient_op, variables_average_op)
        # with tf.variable_scope(tf.get_variable_scope(), reuse=True):
        #     validate_op = _validate(data_reader)
        saver = tf.train.Saver()
        tf.summary.scalar("inference loss", inference_loss)
        tf.summary.scalar("weight loss", weight_loss)
        summary_op = tf.summary.merge(tf.get_collection(tf.GraphKeys.SUMMARIES))
        # allow_soft_placement lets ops without a GPU kernel fall back to CPU.
        with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                              log_device_placement=False)) as session:
            session.run(tf.global_variables_initializer())
            session.run(tf.local_variables_initializer())
            # _load_parameters_value(session, parameter_file_path, seg_net_parameters)
            # print("load parameters succeed.")
            # Restore overwrites the fresh init above, resuming training
            # (including global_step) from the step-10000 checkpoint.
            saver.restore(session, "./model/480_256/seg_net-10000")

            summary_writer = tf.summary.FileWriter("./log", session.graph)
            # Presumably starts the input feeding threads/queues — confirm
            # against DataReader.start.
            data_reader.start(session)
            time_duration = 0
            while True:
                start = time.time()
                _, inference_l, weight_l, step = session.run([train_op, inference_loss, weight_loss, global_step])
                time_duration += (time.time() - start)
                if step % 10 == 0:
                    # time_duration accumulates 10 steps, so /10 is per-step time.
                    print("step: {}, one step time: {:>.2f}, inference loss: {:>.5f}, weight loss: {:>.5f}".format(
                        step, time_duration / 10, inference_l, weight_l))
                    time_duration = 0
                    summary_writer.add_summary(session.run(summary_op), step)
                # if step % 100 == 0:
                #     predict, ground_truth = session.run(validate_op)
                #     print("predict: {:>.5f}, ground truth: {:>.5f}".format(predict, ground_truth))
                if step % 1000 == 0:
                    saver.save(session, "./model/480_256/seg_net", global_step=global_step)
                if step > 60000:
                    data_reader.stop(session)
                    break


if __name__ == "__main__":
    main()
