# encoding=utf-8
import os
import numpy as np
import tensorflow as tf
import cv2
import time

# Length (in bits) of the binary hash code produced by the network.
HASHING_BITS = 12
# Project root: the parent directory of the current working directory.
CURRENT_DIR=os.path.abspath(os.path.join(os.getcwd(), ".."))
# Sample image; filename convention appears to be "<label>_<index>.jpg"
# (see test(), which takes the label from the part before "_").
IMAGE_NAME="0_10.jpg"
IMAGE_PATH = os.path.join(CURRENT_DIR,"data", "img",IMAGE_NAME)
# Directory holding the exported TF1 checkpoint files.
MODEL_PATH = os.path.join(CURRENT_DIR,"data", "export")

def weight(name, shape, stddev=0.02, trainable=True):
    """Create (or reuse) a float32 weight variable in the current variable scope.

    The variable is initialized from a zero-mean normal distribution
    with the given standard deviation.
    """
    initializer = tf.random_normal_initializer(stddev=stddev, dtype=tf.float32)
    return tf.get_variable(name, shape, tf.float32,
                           trainable=trainable, initializer=initializer)

def bias(name, shape, bias_start=0.0, trainable=True):
    """Create (or reuse) a float32 bias variable, constant-initialized to bias_start."""
    initializer = tf.constant_initializer(bias_start, dtype=tf.float32)
    return tf.get_variable(name, shape, tf.float32,
                           trainable=trainable, initializer=initializer)

def conv2d(x, output_dim, k_h=5, k_w=5, strides=[1, 1, 1, 1], name='conv2d'):
    """SAME-padded 2-D convolution with bias, under variable scope `name`.

    Args:
        x: input NHWC tensor.
        output_dim: number of output channels (filters).
        k_h, k_w: kernel height and width.
        strides: NHWC strides for tf.nn.conv2d.
    """
    with tf.variable_scope(name):
        input_channels = x.get_shape()[-1]
        kernel = weight('weights', [k_h, k_w, input_channels, output_dim])
        out = tf.nn.conv2d(x, kernel, strides=strides, padding='SAME')
        offsets = bias('biases', [output_dim])
        # bias_add loses static shape info; reshape restores it.
        return tf.reshape(tf.nn.bias_add(out, offsets), out.get_shape())

def relu(value, name='relu'):
    """ReLU activation wrapped in its own variable scope (for graph readability)."""
    with tf.variable_scope(name):
        activated = tf.nn.relu(value)
    return activated

def pool_max(x, k_size=[1, 3, 3, 1], strides=[1, 2, 2, 1], name='pool1'):
    """3x3 max pooling with stride 2 and VALID padding (defaults)."""
    with tf.variable_scope(name):
        pooled = tf.nn.max_pool(x, ksize=k_size, strides=strides, padding='VALID')
    return pooled

def pool_avg(x, k_size=[1, 3, 3, 1], strides=[1, 2, 2, 1], name='pool1'):
    """3x3 average pooling with stride 2 and VALID padding (defaults)."""
    with tf.variable_scope(name):
        pooled = tf.nn.avg_pool(x, ksize=k_size, strides=strides, padding='VALID')
    return pooled

def fully_connected(x, output_shape, name='fully_connected', with_w=False):
    """Flatten `x` and apply an affine layer producing `output_shape` units.

    NOTE: the batch size is hard-coded to 1 (the input is flattened to
    shape [1, -1]), matching the single-image placeholder used elsewhere.

    Returns the output tensor, or (output, weights, biases) if with_w is True.
    """
    flat = tf.reshape(x, [1, -1])
    input_dim = flat.get_shape().as_list()[1]

    with tf.variable_scope(name):
        w = weight('weights', [input_dim, output_shape], 0.02)
        b = bias('biases', [output_shape], 0.0)

    logits = tf.matmul(flat, w) + b
    if with_w:
        return logits, w, b
    return logits

# Local response normalization (LRN).
# LRN mimics lateral inhibition observed in biology: strongly activated
# neurons suppress the responses of their neighbours across channels.
def lrn(x, depth_radius=1, alpha=5e-05, beta=0.75, name='lrn1'):
    """Apply tf.nn.lrn with bias=1.0 and the given radius/alpha/beta."""
    with tf.variable_scope(name):
        normalized = tf.nn.lrn(x, depth_radius=depth_radius, bias=1.0,
                               alpha=alpha, beta=beta)
    return normalized

def discriminator(x, hashing_bits, reuse=False, name='discriminator'):
    """CNN mapping an image tensor to `hashing_bits` raw hash logits.

    Architecture: three conv blocks (32, 32, 64 filters) interleaved with
    pooling and LRN, followed by a 500-unit fully connected layer and a
    final fully connected layer of size `hashing_bits`.

    Args:
        x: input image tensor (the caller feeds shape [1, 32, 32, 3]).
        hashing_bits: number of output logits (hash length).
        reuse: if True, reuse existing variables in the current scope.
    """
    with tf.name_scope(name):
        if reuse:
            tf.get_variable_scope().reuse_variables()
        # Block 1: conv (32 filters) -> max pool -> ReLU.
        h = conv2d(x, output_dim=32, name='d_conv1')
        h = relu(pool_max(h, name='d_pool1'), name='d_relu1')
        # Block 2: LRN -> conv (32 filters) -> avg pool -> ReLU.
        h = conv2d(lrn(h, name='d_lrn1'), output_dim=32, name='d_conv2')
        h = relu(pool_avg(h, name='d_pool2'), name='d_relu2')
        # Block 3: LRN -> conv (64 filters) -> ReLU -> avg pool.
        h = conv2d(lrn(h, name='d_lrn2'), output_dim=64, name='d_conv3')
        h = pool_avg(relu(h, name='d_relu3'), name='d_pool3')
        # Head: 500-unit fully connected layer, then the hash logits.
        h = relu(fully_connected(h, output_shape=500, name='d_ip1'), name='d_relu4')
        return fully_connected(h, output_shape=hashing_bits, name='d_ip2')

def toBinaryString(prediction):
    """Binarize raw network outputs into hash-code strings.

    Args:
        prediction: 2-D array-like of shape (num_images, bit_length)
            containing the raw logits for each image.

    Returns:
        A list of `num_images` strings; bit j of string i is '1' when
        prediction[i][j] > 0 and '0' otherwise.
    """
    # Fixes: the original shadowed the builtin `str` and built each string
    # with quadratic `+=` over an index loop; a join over a comprehension
    # is idiomatic and also accepts plain nested lists, not only ndarrays.
    return [''.join('1' if value > 0 else '0' for value in row)
            for row in prediction]

def model_init(model_path):
    """Build the hashing network graph and restore weights from a checkpoint.

    Args:
        model_path: directory containing the exported checkpoint files.

    Returns:
        Tuple (sess, saver, fc2, x): live session, the Saver, the output
        logits tensor, and the input placeholder.

    Raises:
        FileNotFoundError: if the expected checkpoint is not present.
    """
    os.environ['CUDA_VISIBLE_DEVICES'] = str(0)
    config = tf.ConfigProto()
    # Grow GPU memory on demand instead of grabbing it all up front.
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    # Single-image input; matches the hard-coded batch size in fully_connected.
    x = tf.placeholder(tf.float32, [1, 32, 32, 3], name='x')
    fc2 = discriminator(x, HASHING_BITS)
    # tf.all_variables() is deprecated/removed in TF 1.x; global_variables()
    # is the direct replacement.
    saver = tf.train.Saver(tf.global_variables())
    ckpt_path = os.path.join(model_path, "my_DSH_model.ckpt-1000")
    # Fail loudly instead of implicitly returning None (which made the
    # caller crash later on tuple unpacking with no useful message).
    if not os.path.exists(ckpt_path + ".meta"):
        raise FileNotFoundError("checkpoint not found: %s" % ckpt_path)
    saver.restore(sess, ckpt_path)
    return sess, saver, fc2, x

def hashing(img, sess, model, fc2, x):
    """Compute the binary hash code for a single image.

    Args:
        img: path to an image file, or an already-decoded BGR ndarray.
        sess: live tf.Session with restored weights.
        model: the Saver returned by model_init (unused here; kept for
            interface compatibility with existing callers).
        fc2: output logits tensor of the network.
        x: input placeholder of shape [1, 32, 32, 3].

    Returns:
        List containing one binary hash string, or None if the image
        could not be read.
    """
    def _img_ndarray(source):
        # Accept either a file path (loaded via OpenCV) or a raw ndarray.
        if isinstance(source, str):
            source = cv2.imread(source, cv2.IMREAD_COLOR)
        if source is None:
            return None
        source = cv2.resize(source, (32, 32), interpolation=cv2.INTER_LINEAR)
        return np.reshape(source, [1, 32, 32, 3])

    # BUG FIX: the original called _img_ndarray(IMAGE_PATH), silently
    # ignoring the `img` argument and always hashing the hard-coded image.
    img = _img_ndarray(img)
    if img is None:
        return None

    prediction = sess.run(fc2, feed_dict={x: img})
    return toBinaryString(prediction)

def test():
    """Smoke test: restore the model, hash one sample image, print timings."""
    t_init = time.time()
    sess, saver, fc2, x = model_init(MODEL_PATH)
    print("model init use time: %fs" % (time.time() - t_init))
    t_pred = time.time()
    hash_code = hashing(img=IMAGE_PATH, sess=sess, model=saver, fc2=fc2, x=x)
    print("predict image use time: %fs" % (time.time() - t_pred))
    # Filename convention "<label>_<index>.jpg": label is the leading field.
    label = IMAGE_NAME.split("_")[0]
    print("hash code: ", hash_code)
    print("label: ", label)

# Script entry point: run the single-image hashing smoke test.
if __name__ == '__main__':
    test()