# -*- coding: utf-8 -*-


import tensorflow as tf
import numpy as np
import os


def vgg16_convolution(in_put, in_channel, out_channel, layer_name):
    """3x3 SAME-padding convolution + bias + ReLU.

    Variables "weights" and "biases" are created under the scope `layer_name`
    (the scope names must match the keys of the pretrained VGG16.npy file).
    L2 regularization (5e-5) is attached to the kernel.
    """
    with tf.variable_scope(layer_name) as scope:
        kernel = tf.get_variable(
            name="weights",
            shape=[3, 3, in_channel, out_channel],
            regularizer=tf.contrib.layers.l2_regularizer(0.00005))
        bias = tf.get_variable(name="biases", shape=[out_channel])
        conv = tf.nn.conv2d(input=in_put, filter=kernel,
                            strides=[1, 1, 1, 1], padding="SAME")
        return tf.nn.relu(tf.nn.bias_add(conv, bias), name=scope.name)


def vgg16_atrous_convolution(in_put, in_channel, out_channel, layer_name, rate=2):
    """3x3 atrous (dilated) SAME-padding convolution + bias + ReLU.

    Same variable layout as `vgg16_convolution` ("weights"/"biases" under
    `layer_name`, L2-regularized kernel) but dilated by `rate`, which keeps
    spatial resolution while enlarging the receptive field.
    """
    with tf.variable_scope(layer_name) as scope:
        kernel = tf.get_variable(
            name="weights",
            shape=[3, 3, in_channel, out_channel],
            regularizer=tf.contrib.layers.l2_regularizer(0.00005))
        bias = tf.get_variable(name="biases", shape=[out_channel])
        conv = tf.nn.atrous_conv2d(value=in_put, filters=kernel,
                                   rate=rate, padding="SAME")
        return tf.nn.relu(tf.nn.bias_add(conv, bias), name=scope.name)


def vgg16_fc_convolution(in_put, out_channel, layer_name, use_relu=True):
    """Fully-connected layer expressed as a VALID convolution over the whole map.

    The flat [H*W*C, out_channel] weight matrix (the layout pretrained FC
    weights are stored in) is reshaped into an [H, W, C, out_channel] kernel,
    so the convolution consumes the entire spatial extent in one step.
    Requires a rank-4 input with statically known H/W/C.
    """
    with tf.variable_scope(layer_name):
        shape = in_put.get_shape()
        assert len(shape) == 4
        h, w, c = shape[1:]
        flat_weights = tf.get_variable(name="weights",
                                       shape=[h * w * c, out_channel])
        bias = tf.get_variable(name="biases", shape=[out_channel])
        # Dimension objects must be converted to int32 tensors for reshape.
        kernel = tf.reshape(flat_weights,
                            shape=[tf.to_int32(h), tf.to_int32(w),
                                   tf.to_int32(c), out_channel])
        conv = tf.nn.conv2d(input=in_put, filter=kernel,
                            strides=[1, 1, 1, 1], padding="VALID")
        logits = tf.nn.bias_add(conv, bias)
        return tf.nn.relu(logits) if use_relu else logits


def vgg16_max_pool(in_put):
    """2x2 max-pooling with stride 2 (SAME padding) — halves spatial size."""
    return tf.nn.max_pool(value=in_put,
                          ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1],
                          padding="SAME")


def vgg16_fc(in_put, num_out, layer_name, use_relu=True):
    """Dense (fully-connected) layer with variables under `layer_name`.

    A rank-4 input is flattened to [batch, H*W*C] first; otherwise the last
    static dimension is taken as the feature size. Applies ReLU when
    `use_relu` is true, a plain x*W + b otherwise.
    """
    with tf.variable_scope(layer_name) as scope:
        shape = in_put.get_shape()
        if shape.ndims == 4:
            dim = int(np.prod(shape[1:].as_list()))
            flat = tf.reshape(in_put, [-1, dim])
        else:
            dim = shape[-1].value
            flat = in_put
        weights = tf.get_variable(name="weights", shape=[dim, num_out])
        biases = tf.get_variable(name="biases", shape=[num_out])
        if use_relu:
            return tf.nn.relu_layer(flat, weights, biases, name=scope.name)
        return tf.nn.xw_plus_b(flat, weights, biases, name=scope.name)


def vgg16_network(in_put):
    """VGG16 front end with the last two pooling stages removed.

    Blocks 1-3 are standard convolutions (pooling after blocks 1 and 2 only);
    blocks 4 and 5 use atrous convolutions (rate 2 and 4) in place of the
    dropped pool3/pool4 strides, preserving spatial resolution. A final 1x1
    convolution ("conv5_4", no bias, no activation) maps 512 channels to a
    single-channel output map. All layer scope names match the VGG16.npy keys
    so pretrained weights can be loaded by name.
    """
    net = in_put
    channels = 3
    # Plain VGG blocks: (number of conv layers, output width, pool afterwards?)
    for block, (n_layers, width, pool) in enumerate(
            [(2, 64, True), (2, 128, True), (3, 256, False)], start=1):
        for layer in range(1, n_layers + 1):
            net = vgg16_convolution(net, channels, width,
                                    "conv%d_%d" % (block, layer))
            channels = width
        if pool:
            net = vgg16_max_pool(net)
    # Atrous blocks replacing the pooled stages: block 4 at rate 2, block 5 at rate 4.
    for block, rate in [(4, 2), (5, 4)]:
        for layer in range(1, 4):
            net = vgg16_atrous_convolution(net, channels, 512,
                                           "conv%d_%d" % (block, layer),
                                           rate=rate)
            channels = 512
    with tf.variable_scope("conv5_4"):
        weights = tf.get_variable(name="weights", shape=[1, 1, 512, 1],
                                  dtype=tf.float32,
                                  initializer=tf.contrib.layers.xavier_initializer_conv2d(),
                                  regularizer=tf.contrib.layers.l2_regularizer(0.00005))
        net = tf.nn.conv2d(input=net, filter=weights,
                           strides=[1, 1, 1, 1], padding="SAME")
    return net


def image_process(image_path):
    """Decode a JPEG and apply Caffe-style VGG preprocessing.

    Steps: RGB -> BGR channel reversal, center crop/pad to 224x224, then
    subtraction of the ImageNet BGR channel means.

    Args:
        image_path: scalar string tensor holding a path to a JPEG file.

    Returns:
        A float32 tensor of shape [224, 224, 3].
    """
    image_content = tf.read_file(image_path)
    # channels=3 forces a 3-channel decode. The previous channels=0 kept a
    # grayscale JPEG single-channel, which breaks the BGR reversal and the
    # 3-element mean subtraction below.
    img = tf.image.decode_jpeg(image_content, channels=3)
    # Reverse the channel axis: RGB -> BGR (Caffe weight ordering).
    # NOTE(review): the boolean `dims` form of tf.reverse is the pre-TF-1.0
    # API; on TF >= 1.0 this should be tf.reverse(img, axis=[2]) — confirm
    # which TF version this project targets.
    img = tf.reverse(img, [False, False, True])
    img = tf.image.resize_image_with_crop_or_pad(img, 224, 224)
    return tf.to_float(img) - [103.939, 116.779, 123.68]


def read_validate_image():
    """Build a single-pass input pipeline over the ILSVRC2012 validation set.

    Reads the caffe-style val.txt listing ("<file> <label>" per line), feeds
    the (path, label) pairs through a non-shuffled, one-epoch
    slice_input_producer, preprocesses each image, and prepends a batch
    dimension of size 1 to both outputs.

    Returns:
        (image, label) tensor ops; image is [1, 224, 224, 3] float32,
        label is [1] int32.
    """
    val_folder = "/home/lijun/data/DataSet/ImageNet/ILSVRC2012/Data/CLS-LOC/val"
    val_file_path = "/home/lijun/data/DataSet/ImageNet/ILSVRC2012/caffe_ilsvrc12/val.txt"
    images_path = []
    labels = []
    with open(val_file_path) as val_file:
        for line in val_file:
            # first field is the image file name, second is the integer label
            fields = line.strip().split(" ")
            images_path.append(os.path.join(val_folder, fields[0]))
            labels.append(int(fields[1]))
    path_tensor = tf.convert_to_tensor(images_path, dtype=tf.string)
    label_tensor = tf.convert_to_tensor(labels, dtype=tf.int32)
    image_path, label = tf.train.slice_input_producer(
        [path_tensor, label_tensor], num_epochs=1, shuffle=False)
    image = image_process(image_path)
    # reshape with a leading -1 prepends a batch dimension of 1
    image = tf.reshape(image, shape=[-1] + image.get_shape().as_list())
    label = tf.reshape(label, shape=[-1] + label.get_shape().as_list())
    return image, label

"""
def main():
    with tf.Graph().as_default():
        in_put = tf.placeholder(dtype=tf.float32, shape=[None, 180, 320, 3])
        output = vgg16_network(in_put)
        print(output.get_shape())
"""


def main():
    """Load pretrained VGG16 weights from a .npy dump, run the dilated
    network over the ImageNet validation pipeline, and print the shape of
    the first prediction (evaluation loop is currently stubbed out)."""
    model_path = "/home/lijun/data/models/VGG16.npy"
    # encoding="latin1" is needed to unpickle Python-2-era .npy dicts;
    # .item() unwraps the 0-d object array into the underlying dict.
    data_dict = np.load(model_path, encoding="latin1").item()
    assert isinstance(data_dict, dict)

    with tf.Graph().as_default():
        image_op, label_op = read_validate_image()
        predict_op = vgg16_network(image_op)
        # top_k_op = tf.nn.in_top_k(predict_op, label_op, 5)
        coord = tf.train.Coordinator()
        with tf.Session() as session:
            # local variables must be initialized too: slice_input_producer's
            # num_epochs counter is a local variable.
            tf.initialize_all_variables().run()
            tf.initialize_local_variables().run()
            # Copy pretrained weights into the matching variable scopes.
            # reuse=True looks up the variables created by vgg16_network.
            for op_name in data_dict:
                with tf.variable_scope(op_name, reuse=True):
                    try:
                        session.run(tf.get_variable("weights").assign(data_dict[op_name]["weights"]))
                        session.run(tf.get_variable("biases").assign(data_dict[op_name]["biases"]))
                    except ValueError:
                        # Best-effort: skip checkpoint entries with no matching
                        # variable in this graph (e.g. original fc layers), and
                        # layers without biases (e.g. conv5_4).
                        pass
            # Queue runners must start only after weight assignment so no
            # examples are consumed by an uninitialized network.
            threads = tf.train.start_queue_runners(sess=session, coord=coord)
            count, i = 0, 0
            try:
                while not coord.should_stop():
                    predict = session.run(predict_op)
                    print(predict.shape)
                    break  # debug: stop after the first image
                    # count += predict[0]
                    # if i % 100 == 0 and i > 0:
                    #     print(i, count / i)
                    # i += 1
                    # if i > 10000:
                    #     break
            except tf.errors.OutOfRangeError:
                # raised by the input queue once the single epoch is exhausted
                print("all images have been used")
            finally:
                coord.request_stop()
            coord.join(threads)


if __name__ == "__main__":
    main()
