# -*- coding: utf-8 -*-
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.framework import dtypes


def read_label_file(file):
    """Parse a CSV file of "source,contrast" image path pairs.

    Args:
        file: path to a text file with one comma-separated pair per line.

    Returns:
        (sourcepaths, contrastpaths): two parallel lists of path strings.
    """
    sourcepaths = []
    contrastpaths = []
    with open(file, 'r') as f:
        for line in f:
            # BUG FIX: without strip() every contrast path kept its trailing
            # newline, corrupting the full paths built by the caller.
            line = line.strip()
            if not line:
                # skip blank lines (e.g. a trailing newline at EOF)
                continue
            spath, cpath = line.split(",")
            sourcepaths.append(spath)
            contrastpaths.append(cpath)
    return sourcepaths, contrastpaths

def _decode_and_resize(path_tensor, height, width, channels):
    """Read one JPEG path tensor into a float32 image of a fixed size."""
    file_content = tf.read_file(path_tensor)
    image = tf.image.decode_jpeg(file_content, channels=channels)
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    image = tf.image.resize_images(image, [height, width])
    image.set_shape([height, width, channels])
    return image


def _conv_relu(inputs, in_channels, out_channels):
    """3x3 'SAME' convolution with stride 1 followed by ReLU."""
    # weights: truncated normal init, 3x3 sampling window
    weights = tf.Variable(
        tf.truncated_normal([3, 3, in_channels, out_channels], stddev=0.1))
    # BUG FIX: tf.constant(0.1, [n]) passed the shape where the dtype
    # positional argument goes; the shape must use the `shape=` keyword.
    biases = tf.Variable(tf.constant(0.1, shape=[out_channels]))
    conv = tf.nn.conv2d(inputs, weights, strides=[1, 1, 1, 1], padding='SAME')
    return tf.nn.relu(conv + biases)


def inputdata(dataset_path = "/data/ztl/uploadData/ILSVRC2012/",data_path_file = "train_contrast.csv",IMAGE_HEIGHT = 224,IMAGE_WIDTH = 224,NUM_CHANNELS = 3,BATCH_SIZE = 150):
    """Build a queue-based input pipeline and a 4-layer conv net, then run
    20 training steps on (source, contrast) image pairs.

    Args:
        dataset_path: directory prefix prepended to every relative path
            listed in the CSV file.
        data_path_file: CSV with one "source,contrast" path pair per line.
        IMAGE_HEIGHT, IMAGE_WIDTH, NUM_CHANNELS: target image shape.
        BATCH_SIZE: number of image pairs per training batch.
    """
    # read relative paths and expand them to full paths
    train_filepaths, contrast_filepaths = read_label_file(data_path_file)
    train_filepaths = [dataset_path + fp for fp in train_filepaths]
    contrast_filepaths = [dataset_path + fp for fp in contrast_filepaths]

    print(contrast_filepaths[0])

    # convert path strings into tensors and build an input queue
    train_images = ops.convert_to_tensor(train_filepaths, dtype=dtypes.string)
    contrast_images = ops.convert_to_tensor(contrast_filepaths, dtype=dtypes.string)
    train_input_queue = tf.train.slice_input_producer(
        [train_images, contrast_images],
        shuffle=False)

    # decode each queued path pair into fixed-size float32 images
    train_image = _decode_and_resize(
        train_input_queue[0], IMAGE_HEIGHT, IMAGE_WIDTH, NUM_CHANNELS)
    contrast_image = _decode_and_resize(
        train_input_queue[1], IMAGE_HEIGHT, IMAGE_WIDTH, NUM_CHANNELS)

    # collect batches of images before processing
    train_image_batch, contrast_image_batch = tf.train.batch(
        [train_image, contrast_image], batch_size=BATCH_SIZE)

    print("input pipeline ready")

    # BUG FIX: the placeholders previously hard-coded [BATCH_SIZE, 224, 224, 3],
    # silently ignoring the IMAGE_HEIGHT/IMAGE_WIDTH/NUM_CHANNELS parameters.
    x = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, NUM_CHANNELS])
    y = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, NUM_CHANNELS])

    # four stacked 3x3 conv+ReLU layers: channels -> 32 -> 64 -> 32 -> channels
    h_conv1 = _conv_relu(x, NUM_CHANNELS, 32)
    h_conv2 = _conv_relu(h_conv1, 32, 64)
    h_conv3 = _conv_relu(h_conv2, 64, 32)
    h_conv4 = _conv_relu(h_conv3, 32, NUM_CHANNELS)

    # NOTE(review): softmax cross-entropy treats the channel axis as a class
    # distribution; for image-to-image regression an L2 loss would be more
    # usual — kept as-is to preserve the original objective.
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=h_conv4))

    # Train
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

    init = tf.global_variables_initializer()

    print("convolution model ready")

    with tf.Session() as sess:
        # initialize the variables
        sess.run(init)

        # initialize the queue threads to start to shovel data
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        try:
            print("from the train set:")
            for i in range(20):
                # BUG FIX: feed_dict values must be concrete arrays, not
                # symbolic tensors — materialize one batch and feed it to
                # both the train op and the loss.  (The original also
                # dequeued three separate batches per iteration.)
                source_batch, target_batch = sess.run(
                    [train_image_batch, contrast_image_batch])
                feed = {x: source_batch, y: target_batch}
                sess.run(train_step, feed_dict=feed)
                print(source_batch.shape)
                print(sess.run(cross_entropy, feed_dict=feed))
        finally:
            # stop our queue threads; the with-block closes the session
            # itself (the explicit sess.close() was redundant).
            coord.request_stop()
            coord.join(threads)



if __name__ == '__main__':
    # Script entry point: build the pipeline/model and run the training demo
    # with the default dataset paths and hyperparameters.
    inputdata()