import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.framework import dtypes

def encode_label(label):
    """Parse one CSV label field into an integer class id.

    int() tolerates surrounding whitespace, so the trailing newline left by
    line splitting in read_label_file is handled here for free.
    """
    class_id = int(label)
    return class_id

def read_label_file(file):
    """Read a two-column CSV of "filepath,label" lines.

    Args:
        file: path to a text file where each non-empty line is
              ``relative/path.jpg,<integer label>``.

    Returns:
        (filepaths, labels): a list of path strings and a parallel list of
        integer labels (decoded via encode_label).

    Blank lines (including a trailing newline at end of file) are skipped;
    previously they raised ValueError from the 2-way unpack.
    """
    filepaths = []
    labels = []
    with open(file, 'r') as f:
        for line in f:
            line = line.strip()
            if not line:
                # tolerate blank/trailing lines instead of crashing
                continue
            filepath, label = line.split(",")
            filepaths.append(filepath)
            labels.append(encode_label(label))
    return filepaths, labels

def inputdata(dataset_path="/data/ztl/uploadData/ILSVRC2012/",
              train_labels_file="train.csv",
              IMAGE_HEIGHT=224, IMAGE_WIDTH=224,
              NUM_CHANNELS=3, BATCH_SIZE=150,
              NUM_CLASSES=1000):
    """Build a queue-based input pipeline and a small all-conv classifier,
    then run 20 training steps.

    Args:
        dataset_path: directory prefix prepended to every relative image path.
        train_labels_file: CSV file of "relative_path,label" lines.
        IMAGE_HEIGHT, IMAGE_WIDTH, NUM_CHANNELS: target image shape.
        BATCH_SIZE: images per training batch.
        NUM_CLASSES: number of label classes (new parameter; default keeps the
            previous hard-coded 1000, so existing callers are unaffected).
    """
    # ---- input pipeline ----
    train_filepaths, train_labels = read_label_file(train_labels_file)
    # turn relative paths into absolute ones
    train_filepaths = [dataset_path + fp for fp in train_filepaths]
    train_images = ops.convert_to_tensor(train_filepaths, dtype=dtypes.string)
    train_labels = ops.convert_to_tensor(train_labels, dtype=dtypes.int32)
    train_input_queue = tf.train.slice_input_producer(
        [train_images, train_labels],
        shuffle=False)
    file_content = tf.read_file(train_input_queue[0])
    train_image = tf.image.decode_jpeg(file_content, channels=NUM_CHANNELS)
    train_label = train_input_queue[1]

    train_image = tf.image.resize_images(train_image, [IMAGE_HEIGHT, IMAGE_WIDTH])
    train_image.set_shape([IMAGE_HEIGHT, IMAGE_WIDTH, NUM_CHANNELS])

    train_image_batch, train_label_batch = tf.train.batch(
        [train_image, train_label], batch_size=BATCH_SIZE)
    # BUG FIX: build the one-hot op ONCE in the graph. The old code called
    # tf.one_hot inside the session loop AND rebound train_label_batch to the
    # result, adding new graph nodes (and re-one-hotting) every iteration.
    train_onehot_batch = tf.one_hot(train_label_batch, NUM_CLASSES)

    print("input pipeline ready")

    # ---- model ----
    x = tf.placeholder(tf.float32,
                       [None, IMAGE_HEIGHT, IMAGE_WIDTH, NUM_CHANNELS], name='x')
    y = tf.placeholder(tf.int32, [None, NUM_CLASSES], name='y')

    # convolution layer 1
    # BUG FIX: the filter must consume NUM_CHANNELS input channels (was 1,
    # which mismatched x's 3 channels and failed conv2d shape checking).
    # BUG FIX: tf.constant's 2nd positional argument is dtype, not shape;
    # the shape must be passed as shape=[...] (was tf.constant(0.1, [3])).
    w_conv1 = tf.Variable(tf.truncated_normal([3, 3, NUM_CHANNELS, 3], stddev=0.1),
                          name='w_conv1')  # truncated-normal init, 3x3 window
    b_conv1 = tf.Variable(tf.constant(0.1, shape=[3]), name='b_conv1')  # one bias per kernel
    h_conv1 = tf.nn.relu(
        tf.nn.conv2d(x, w_conv1, strides=[1, 1, 1, 1], padding='SAME') + b_conv1)

    # convolution layer 2
    w_conv2 = tf.Variable(tf.truncated_normal([3, 3, 3, 32], stddev=0.1), name='w_conv2')
    b_conv2 = tf.Variable(tf.constant(0.1, shape=[32]), name='b_conv2')
    h_conv2 = tf.nn.relu(
        tf.nn.conv2d(h_conv1, w_conv2, strides=[1, 1, 1, 1], padding='SAME') + b_conv2)

    # convolution layer 3: 3x3 window, 64 kernels over 32 input planes
    w_conv3 = tf.Variable(tf.truncated_normal([3, 3, 32, 64], stddev=0.1), name='w_conv3')
    b_conv3 = tf.Variable(tf.constant(0.1, shape=[64]), name='b_conv3')
    h_conv3 = tf.nn.relu(
        tf.nn.conv2d(h_conv2, w_conv3, strides=[1, 1, 1, 1], padding='SAME') + b_conv3)

    # convolution layer 4 (class scores per spatial location)
    w_conv4 = tf.Variable(tf.truncated_normal([3, 3, 64, NUM_CLASSES], stddev=0.1),
                          name='w_conv4')
    b_conv4 = tf.Variable(tf.constant(0.1, shape=[NUM_CLASSES]), name='b_conv4')
    h_conv4 = tf.nn.relu(
        tf.nn.conv2d(h_conv3, w_conv4, strides=[1, 1, 1, 1], padding='SAME') + b_conv4)

    # BUG FIX: h_conv4 is [batch, H, W, NUM_CLASSES] but y is
    # [batch, NUM_CLASSES]; global-average-pool the spatial dims so the
    # logits shape matches the labels (softmax_cross_entropy would otherwise
    # fail on the rank mismatch).
    logits = tf.reduce_mean(h_conv4, axis=[1, 2])

    # loss
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
    tf.summary.scalar('loss', cross_entropy)

    # train op
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

    init = tf.global_variables_initializer()

    print("convolution model ready")

    with tf.Session() as sess:
        sess.run(init)

        # start the queue-runner threads that feed the pipeline
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        print("from the train set:")
        for _ in range(20):
            # BUG FIX: feed_dict requires numpy arrays, not graph tensors.
            # The old code fed train_image_batch (a tf.Tensor) directly, which
            # raises TypeError. Materialize one batch, then feed it.
            image_batch, label_batch = sess.run(
                [train_image_batch, train_onehot_batch])
            print(label_batch.shape)
            sess.run(train_step, feed_dict={x: image_batch, y: label_batch})

        # stop the queue threads; the `with` block closes the session itself
        coord.request_stop()
        coord.join(threads)



# Script entry point: run the training pipeline when executed directly.
if __name__ == "__main__":
    inputdata()