# encoding: utf-8
import os
import numpy as np
import tensorflow as tf
from tensorflow.python.ops.image_ops_impl import ResizeMethod
import pickle
import cv2
import time

BATCH_SIZE = 1000
LR = 0.001  # Learning rate
EPOCH = 1
LOAD_MODEL = False  # Whether or not continue train from saved model
TRAIN = False       # train True,test False
IMAGE_TEST=True     # image test work when TRAIN = False &IMAGE_TEST=True
display_step = 10
HASHING_BITS = 12
CURRENT_DIR = os.getcwd()
DATA_PATH=os.path.join(CURRENT_DIR,"data","cifar-10-python")
MODEL_PATH=os.path.join(CURRENT_DIR,"data","model")
LOGS_PATH=os.path.join(CURRENT_DIR,"data","logs")

def init_prepare():
    """Create the data/logs/model directory layout and clear stale files.

    Ensures DATA_PATH exists, and that LOGS_PATH/train, LOGS_PATH/test and
    MODEL_PATH exist and are empty so a new training run starts from a
    clean slate.  Fixes the original behaviour, which crashed when only
    one of the two log subdirectories already existed.
    """
    def _fresh_dir(path):
        # Ensure *path* exists and contains no files.
        if os.path.exists(path):
            for entry in os.listdir(path):
                os.remove(os.path.join(path, entry))
        else:
            os.makedirs(path)  # also creates missing parents

    # data: only make sure the dataset directory exists (never cleared)
    if not os.path.exists(DATA_PATH):
        os.makedirs(DATA_PATH)

    # logs: one empty subdirectory per summary writer
    _fresh_dir(os.path.join(LOGS_PATH, "train"))
    _fresh_dir(os.path.join(LOGS_PATH, "test"))

    # model: start each run with an empty checkpoint directory
    _fresh_dir(MODEL_PATH)

def weight(name, shape, stddev=0.02, trainable=True):
    """Create (or fetch under variable reuse) a float32 weight tensor
    initialised from a zero-mean normal distribution."""
    initializer = tf.random_normal_initializer(stddev=stddev, dtype=tf.float32)
    return tf.get_variable(name, shape, tf.float32,
                           trainable=trainable, initializer=initializer)

def bias(name, shape, bias_start=0.0, trainable=True):
    """Create (or fetch under variable reuse) a float32 bias tensor,
    constant-initialised to `bias_start`."""
    initializer = tf.constant_initializer(bias_start, dtype=tf.float32)
    return tf.get_variable(name, shape, tf.float32,
                           trainable=trainable, initializer=initializer)

def conv2d(x, output_dim, k_h=5, k_w=5,strides=[1, 1, 1, 1], name='conv2d'):
    """SAME-padded 2-D convolution (default 5x5 kernel) with a bias add.

    Variables live under the scope `name` as 'weights' and 'biases'.
    """
    with tf.variable_scope(name):
        in_channels = x.get_shape()[-1]
        kernel = weight('weights', [k_h, k_w, in_channels, output_dim])
        out = tf.nn.conv2d(x, kernel, strides=strides, padding='SAME')
        out = tf.nn.bias_add(out, bias('biases', [output_dim]))
        # reshape keeps the statically-known shape attached to the result
        return tf.reshape(out, out.get_shape())

def relu(value, name='relu'):
    """ReLU activation, wrapped in its own variable scope for graph naming."""
    with tf.variable_scope(name):
        activated = tf.nn.relu(value)
        return activated

def pool_max(x, k_size=[1,3,3,1],strides=[1, 2, 2, 1], name='pool1'):
    """VALID-padded max pooling (default 3x3 window, stride 2)."""
    with tf.variable_scope(name):
        pooled = tf.nn.max_pool(x, ksize=k_size, strides=strides,
                                padding='VALID')
        return pooled

def pool_avg(x, k_size=[1,3,3,1],strides=[1, 2, 2, 1], name='pool1'):
    """VALID-padded average pooling (default 3x3 window, stride 2)."""
    with tf.variable_scope(name):
        pooled = tf.nn.avg_pool(x, ksize=k_size, strides=strides,
                                padding='VALID')
        return pooled

def fully_connected(x, output_shape, name='fully_connected', with_w=False):
    """Dense layer: flatten `x` to (batch, -1) and compute xW + b.

    The batch size is fixed at graph-build time: BATCH_SIZE in training
    mode, a single example otherwise.  When `with_w` is true the weight
    and bias tensors are returned alongside the output.
    """
    batch = BATCH_SIZE if TRAIN else 1
    x = tf.reshape(x, [batch, -1])
    in_dim = x.get_shape().as_list()[1]

    with tf.variable_scope(name):
        w = weight('weights', [in_dim, output_shape], 0.02)
        b = bias('biases', [output_shape], 0.0)

    out = tf.matmul(x, w) + b
    if with_w:
        return out, w, b
    return out

# Local response normalisation: models the lateral-inhibition effect seen
# in biology, where strongly active neurons suppress their neighbours.
def lrn(x, depth_radius=1,alpha=5e-05,beta=0.75, name='lrn1'):
    """LRN layer with the bias term fixed at 1.0."""
    with tf.variable_scope(name):
        return tf.nn.lrn(x, depth_radius=depth_radius, bias=1.0,
                         alpha=alpha, beta=beta)

def discriminator(x, hashing_bits,reuse=False, name='discriminator'):
    """DSH CNN: three conv/pool stages followed by two dense layers.

    Args:
        x: image batch tensor, NHWC (32x32x3 as built by the callers).
        hashing_bits: width of the final dense layer; its sign pattern is
            later used as the binary hash code.
        reuse: when True, reuse already-created variables in the current
            variable scope instead of creating new ones.
        name: name scope for the ops (variables are scoped per-layer).

    Returns:
        The raw `hashing_bits`-dimensional output (no activation applied).
    """
    with tf.name_scope(name):
        if reuse:
            tf.get_variable_scope().reuse_variables()
        # conv stage 1: 32 kernels
        # conv -> max-pool -> ReLU -> LRN
        conv1 = conv2d(x, output_dim=32, name='d_conv1')
        relu1 = relu(pool_max(conv1, name='d_pool1'), name='d_relu1')
        # conv stage 2: 32 kernels (avg-pool then ReLU)
        conv2 = conv2d(lrn(relu1,name='d_lrn1'), output_dim=32, name='d_conv2')
        relu2 = relu(pool_avg(conv2, name='d_pool2'), name='d_relu2')
        # conv stage 3: 64 kernels (ReLU then avg-pool)
        conv3 = conv2d(lrn(relu2, name='d_lrn2'), output_dim=64, name='d_conv3')
        pool3 = pool_avg(relu(conv3, name='d_relu3'), name='d_pool3')
        # dense layer 1: flatten then project to 500 units, ReLU
        fc1=fully_connected(pool3,output_shape=500, name='d_ip1')
        relu4 = relu(fc1, name='d_relu4')
        # dense layer 2: project to the hash-code width (linear output)
        fc2 = fully_connected(relu4,output_shape=hashing_bits, name='d_ip2')
        return fc2

def read_cifar10_data():
    """Load the CIFAR-10 python-pickle batches from DATA_PATH.

    Returns:
        (train_X, train_y_vec, test_X, test_y_vec): images as float arrays
        of shape (N, 32, 32, 3) scaled into [0, 1], labels as one-hot
        float vectors of shape (N, 10).
    """
    train_X = None
    train_Y = None

    # training batches data_batch_1 .. data_batch_5 (10000 images each)
    for i in range(1, 6):
        file_path = os.path.join(DATA_PATH, 'data_batch_' + str(i))
        with open(file_path, 'rb') as fo:
            # keys are bytes because the pickles were written by Python 2
            batch = pickle.load(fo, encoding="bytes")
        if train_X is None:
            train_X = batch[b'data']
            train_Y = batch[b'labels']
        else:
            train_X = np.concatenate((train_X, batch[b'data']), axis=0)
            train_Y = np.concatenate((train_Y, batch[b'labels']), axis=0)

    # single test batch (10000 images)
    file_path = os.path.join(DATA_PATH, 'test_batch')
    with open(file_path, 'rb') as fo:
        batch = pickle.load(fo, encoding="bytes")
    test_X = batch[b'data']
    test_Y = batch[b'labels']

    # CIFAR stores rows as flat CHW; convert to NHWC float64.
    # (plain `float` replaces np.float, which modern numpy removed)
    train_X = train_X.reshape((50000, 3, 32, 32)).transpose(0, 2, 3, 1).astype(float)
    test_X = test_X.reshape((10000, 3, 32, 32)).transpose(0, 2, 3, 1).astype(float)

    # one-hot encode the integer labels
    train_y_vec = np.zeros((len(train_Y), 10), dtype=float)
    test_y_vec = np.zeros((len(test_Y), 10), dtype=float)
    for i, label in enumerate(train_Y):
        train_y_vec[i, int(label)] = 1.
    for i, label in enumerate(test_Y):
        test_y_vec[i, int(label)] = 1.

    return train_X / 255., train_y_vec, test_X / 255., test_y_vec

def hashing_loss(image,label,alpha,m):
    """Pairwise deep-supervised-hashing loss over one batch.

    Same-class pairs are pulled together in squared-L2 space; different-
    class pairs are pushed at least margin `m` apart via a hinge.  An L1
    regulariser weighted by `alpha` drives each output component towards
    +/-1 so that taking the sign loses little information.

    Args:
        image: (BATCH_SIZE, 32, 32, 3) float input batch.
        label: (BATCH_SIZE, 10) one-hot labels.
        alpha: scalar trade-off weight for the binarisation regulariser.
        m: scalar margin for dissimilar pairs.
    """
    fc2 = discriminator(image,HASHING_BITS)
    # w_label[i, j] == 1 iff samples i and j share a class (one-hot dot product)
    w_label = tf.matmul(label,label,False,True)

    # pairwise squared euclidean distances via ||a||^2 - 2a.b + ||b||^2
    r = tf.reshape(tf.reduce_sum(fc2*fc2,1),[-1,1])
    p2_distance = r - 2*tf.matmul(fc2,fc2,False,True)+tf.transpose(r)
    # similar pairs pay their distance; dissimilar pairs pay max(m - d, 0)
    temp = w_label*p2_distance + (1-w_label)*tf.maximum(m-p2_distance,0)

    # push every component of fc2 towards {-1, +1}
    regularizer = tf.reduce_sum(tf.abs(tf.abs(fc2) - 1))
    # pair term averaged over ordered pairs (diagonal contributes 0)
    hash_loss = tf.reduce_sum(temp)/(BATCH_SIZE*(BATCH_SIZE-1)) + alpha * regularizer/BATCH_SIZE

    return hash_loss

def train():
    """Train the DSH network on CIFAR-10, logging summaries and saving
    checkpoints every `display_step` epochs."""
    # prepare directories and graph inputs
    init_prepare()
    global_step = tf.Variable(0, name='global_step', trainable=False)
    x = tf.placeholder(tf.float32, [BATCH_SIZE, 32,32,3], name='image')
    y = tf.placeholder(tf.float32, [BATCH_SIZE,10], name='label')

    # regulariser trade-off weight
    alpha = tf.constant(0.01,dtype=tf.float32,name='tradeoff')
    # set m = 2*HASHING_BITS
    m = tf.constant(HASHING_BITS*2,dtype=tf.float32,name='bi_margin')

    # loss
    hash_loss = hashing_loss(x,y,alpha,m)
    tf.summary.scalar("hash loss", hash_loss)

    # optimizer: only the 'd_'-prefixed discriminator variables are trained
    t_vars = tf.trainable_variables()
    d_vars = [var for var in t_vars if 'd_' in var.name]
    optimizer = tf.train.AdamOptimizer(LR, beta1=0.5).minimize(hash_loss, var_list=d_vars, global_step=global_step)

    merged=tf.summary.merge_all()

    # configure GPU, start the session, run the variable initialisers
    os.environ['CUDA_VISIBLE_DEVICES'] = str(0)                     # select GPU device #0
    config = tf.ConfigProto()                                       # session configuration
    # config.gpu_options.per_process_gpu_memory_fraction = 0.9      # cap GPU memory usage
    config.gpu_options.allow_growth=True                            # grow GPU memory allocation on demand
    sess = tf.Session(config=config)                                # create session with this config
    sess.run(tf.global_variables_initializer())                     # initialise all variables

    # finetuning: optionally resume from the latest checkpoint
    saver = tf.train.Saver()
    if LOAD_MODEL:
        print(" [*] Reading checkpoints...")
        ckpt = tf.train.get_checkpoint_state(MODEL_PATH)

        if ckpt and ckpt.model_checkpoint_path:
            ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
            saver.restore(sess, os.path.join(MODEL_PATH, ckpt_name))
            # step is recovered from the checkpoint filename suffix
            global_step = ckpt.model_checkpoint_path.split('/')[-1] \
                .split('-')[-1]
            print('Loading success, global_step is %s' % global_step)

        # NOTE(review): if no checkpoint matched, global_step is still a
        # tf.Variable here and int() raises; `start` is also never used
        # below -- confirm LOAD_MODEL is only set when a checkpoint exists.
        start = int(global_step)

    # train
    train_x, train_y, test_x, test_y = read_cifar10_data()

    train_writer = tf.summary.FileWriter(os.path.join(LOGS_PATH, "train"), sess.graph)
    test_writer = tf.summary.FileWriter(os.path.join(LOGS_PATH, "test"), sess.graph)
    total_train_batch = 50000 // BATCH_SIZE
    total_test_batch=10000//BATCH_SIZE

    for epoch in range(EPOCH):
        for i in range(total_train_batch):
            batch_xs= train_x[i*BATCH_SIZE:(i+1)*BATCH_SIZE]
            batch_ys = train_y[i*BATCH_SIZE:(i+1)*BATCH_SIZE]

            _,train_loss,summary=sess.run([optimizer,hash_loss,merged],feed_dict={x: batch_xs, y:batch_ys})
            train_writer.add_summary(summary,epoch)

            # print the loss once per epoch (on the last batch)
            if (i+1) % total_train_batch == 0:
                print("[%3d/%3d][%4d/%4d] train hash_loss: %.8f" % (epoch+1,EPOCH,i+1, total_train_batch,train_loss))

        if (epoch+1) % display_step == 0:
            # evaluate one rotating test batch and log its summary
            test_xs = test_x[(epoch%total_test_batch) * BATCH_SIZE:((epoch%total_test_batch) + 1) * BATCH_SIZE]
            test_ys = test_y[(epoch%total_test_batch) * BATCH_SIZE:((epoch%total_test_batch) + 1) * BATCH_SIZE]

            summary,test_loss = sess.run([merged,hash_loss], feed_dict={x: test_xs, y: test_ys})
            test_writer.add_summary(summary, epoch)
            print("[%3d/%3d] test hash_loss: %.8f" % (epoch + 1, EPOCH, test_loss))
            # save model checkpoint tagged with the epoch number
            checkpoint_path = os.path.join(MODEL_PATH,'my_DSH_model.ckpt')
            saver.save(sess, checkpoint_path, global_step=epoch + 1)
    sess.close()
    print("Finished!")

def toBinaryString(prediction):
    """Threshold a (num_images, bit_length) score array into hash strings.

    Each row becomes a string of '0'/'1' characters: '1' where the score
    is strictly positive, '0' otherwise (ties at 0 map to '0').

    Args:
        prediction: 2-D array-like of real-valued hash-layer outputs.

    Returns:
        List of `num_images` binary strings of length `bit_length`.
    """
    # (renamed the accumulator: the original shadowed the builtin `str`)
    return [''.join('1' if value > 0 else '0' for value in row)
            for row in prediction]

def evaluate():
    """Hash all CIFAR-10 images with the trained model and dump the codes.

    Writes one line per image to result.txt in the form
    "<binary hash code>\t<integer label>": the 10000 test images first,
    then the 50000 training images.
    """
    image = tf.placeholder(tf.float32, [BATCH_SIZE, 32, 32, 3], name='image')
    D = discriminator(image,HASHING_BITS)
    res = tf.sign(D)                     

    # run session
    print("Reading checkpoints...")
    ckpt = tf.train.get_checkpoint_state(MODEL_PATH)
    saver = tf.train.Saver(tf.all_variables())                          
    os.environ['CUDA_VISIBLE_DEVICES'] = str(0)
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.8
    sess = tf.InteractiveSession(config=config)

    # load data
    train_x, train_y, test_x, test_y = read_cifar10_data()
    file_write = open('result.txt','w')
    # sys.stdout = file_res
    if ckpt and ckpt.model_checkpoint_path:
        # restore model
        ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
        global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
        saver.restore(sess, os.path.join(MODEL_PATH, ckpt_name))
        print('Loading success, global_step is %s' % global_step)
        print("Processing...")
        # prediction
        # NOTE(review): the local names are swapped -- `train_pred` below
        # holds predictions for *test* images and `test_pred` for *train*
        # images; only the names are misleading, the data flow is correct.
        for i in range(int(10000/BATCH_SIZE)):
            train_pred = sess.run(D, feed_dict={image: test_x[i*BATCH_SIZE:(i+1)*BATCH_SIZE]})
            # print(i,train_pred)
            pred_string = toBinaryString(train_pred)
            y = np.argmax(test_y[i*BATCH_SIZE:(i+1)*BATCH_SIZE],axis=1)
            for j in range(BATCH_SIZE):
                file_write.write(pred_string[j]+'\t'+str(y[j])+'\n')
        for i in range(int(50000/BATCH_SIZE)):
            test_pred = sess.run(D, feed_dict={image: train_x[i*BATCH_SIZE:(i+1)*BATCH_SIZE]})
           # print(eval_sess)
            pred_string = toBinaryString(test_pred)
            y = np.argmax(train_y[i*BATCH_SIZE:(i+1)*BATCH_SIZE],axis=1)
            for j in range(BATCH_SIZE):
                file_write.write(pred_string[j]+'\t'+str(y[j])+'\n')

       # eval_sess = sess.run(res, feed_dict={image: test_x[:BATCH_SIZE]})
       # eval_sess = sess.run(res, feed_dict={image: test_x[:BATCH_SIZE]})
       # print(eval_sess)
    file_write.close()
    sess.close()
    print("Finished!")

def model_init():
    """Create a session and restore the exported model into it.

    Returns:
        (session, saver) when a checkpoint is found; implicitly returns
        None otherwise -- callers must handle that case.
    """
    ckpt = tf.train.get_checkpoint_state(os.path.join(MODEL_PATH, "..", "export"))
    os.environ['CUDA_VISIBLE_DEVICES'] = str(0)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sesss = tf.Session(config=config)
    saverq = tf.train.Saver(tf.all_variables())

    if ckpt and ckpt.model_checkpoint_path:
        # restore model
        ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
        # NOTE(review): the checkpoint state was read from the "export"
        # directory but the weights are restored from MODEL_PATH -- confirm
        # both directories actually contain the same checkpoint files.
        saverq.restore(sesss, os.path.join(MODEL_PATH, ckpt_name))
        return sesss, saverq

# t1, t1 = model_init()

def prediction():
    """Compute and print the hash code for one image read from disk.

    Loads the image `6_4877.jpg` from data/img/test, restores the trained
    discriminator, and prints its binary hash code together with the
    label parsed from the filename prefix ("<label>_<id>.jpg").
    """
    def _img_read(filename):
        # TensorFlow-graph based loader (currently unused; kept for reference).
        if not tf.gfile.Exists(filename):
            tf.logging.fatal('File does not exists %s', filename)
        img=tf.image.decode_jpeg(tf.read_file(filename),channels=3)
        # img_data = tf.image.convert_image_dtype(img_jpeg,channels=3,dtype=tf.float32)
        img = tf.image.resize_images(img, (32, 32), method=ResizeMethod.BILINEAR)
        img = tf.expand_dims(img, -1)
        img = tf.reshape(img, (1, 32, 32, 3))
        return img

    def _img_ndarray(filename):
        # OpenCV loader: BGR uint8, bilinear-resized to 32x32, batch dim added.
        img=cv2.imread(filename,cv2.IMREAD_COLOR)
        img=cv2.resize(img,(32,32),interpolation=cv2.INTER_LINEAR)
        img=np.reshape(img,[1,32,32,3])
        return img

    # load the input image
    img_name="6_4877.jpg"
    img_path=os.path.join(CURRENT_DIR,"data","img","test",img_name)
    # img=_img_read(img_path)
    img = _img_ndarray(img_path)
    # NOTE(review): training data was scaled to [0, 1] but this feeds raw
    # 0-255 pixel values to the network -- confirm this is intended.

    # build the single-image inference graph
    x = tf.placeholder(tf.float32, [1, 32, 32, 3], name='x')
    fc2 = discriminator(x, HASHING_BITS)
    # res = tf.sign(fc2)

    # session setup
    ckpt = tf.train.get_checkpoint_state(os.path.join(MODEL_PATH,"..","export"))
    os.environ['CUDA_VISIBLE_DEVICES'] = str(0)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth=True
    sess = tf.Session(config=config)
    # (the original created this saver twice; once is enough)
    saver = tf.train.Saver(tf.all_variables())

    if ckpt and ckpt.model_checkpoint_path:
        # restore model
        ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
        # NOTE(review): checkpoint state comes from the "export" directory
        # but weights are restored from MODEL_PATH -- confirm both
        # locations hold the same checkpoint files.
        saver.restore(sess, os.path.join(MODEL_PATH, ckpt_name))
        # run inference and print the binarised code
        train_pred = sess.run(fc2, feed_dict={x:img})
        hash_code = toBinaryString(train_pred)
        y = img_name.split("_")[0]
        print("hash code: ",hash_code)
        print("label: ",y)
    else:
        print("No model file...")

    sess.close()

if __name__ == '__main__':
    # TRAIN selects training; otherwise IMAGE_TEST picks single-image
    # inference (timed) and the fallback is full-dataset evaluation.
    if TRAIN:
        train()
    elif IMAGE_TEST:
        start_t = time.time()
        prediction()
        elapsed = time.time() - start_t
        print("predict image use time: %fs" % elapsed)
    else:
        evaluate()
