# Example on how to use the tensorflow input pipelines. The explanation can be found here ischlag.github.io.
import tensorflow as tf
import random
from tensorflow.python.framework import ops
from tensorflow.python.framework import dtypes

# Dataset locations on the shared data volume.  The trailing slash on the
# directory paths is required: relative filenames from the label files are
# appended below by plain string concatenation.
traindata_path = "/data/ztl/uploadData/ILSVRC2012/img_train/train_jpg/"
valdata_path="/data/ztl/uploadData/ILSVRC2012/img_val/"
# Label files: one "<relative_filepath> <integer_label>" pair per line.
val_labels_file = "val.txt"
train_labels_file = "train.txt"

# NOTE(review): test_set_size is not referenced anywhere in this script.
test_set_size = 5

# Expected image geometry.  set_shape() further down only asserts this
# static shape -- images are assumed to already be 250x250 RGB JPEGs.
IMAGE_HEIGHT = 250
IMAGE_WIDTH = 250
NUM_CHANNELS = 3
BATCH_SIZE = 15


def encode_label(label):
    """Parse a raw label token from the label file (e.g. "42\\n") into an int."""
    parsed = int(label)
    return parsed


def read_label_file(file):
    """Parse a label file into parallel lists of file paths and int labels.

    Each line of *file* must be "<relative_path> <label>" with exactly one
    space separator (anything else raises ValueError from the unpacking).
    The label token may carry a trailing newline; encode_label's int()
    tolerates surrounding whitespace.

    Returns:
        (filepaths, labels): parallel lists of str and int.
    """
    filepaths = []
    labels = []
    # "with" guarantees the file handle is closed even when a malformed
    # line raises -- the original opened the file and never closed it.
    with open(file, "r") as f:
        for line in f:
            filepath, label = line.split(" ")
            filepaths.append(filepath)
            labels.append(encode_label(label))
    return filepaths, labels


# Read label file into parallel lists of relative paths and int labels.
train_filepaths, train_labels = read_label_file(train_labels_file)
# val_filepaths, val_labels = read_label_file(val_labels_file)

# Transform relative paths into absolute paths under the train directory.
train_images = [traindata_path + fp for fp in train_filepaths]
# val_images = [valdata_path + fp for fp in val_filepaths]

# Sanity check: show the second resolved path (raises IndexError if the
# label file has fewer than two entries).  Parenthesized print is identical
# in Python 2 and also valid Python 3 syntax.
print(train_images[1])

# Convert the Python lists into constant string/int32 tensors.
train_images = ops.convert_to_tensor(train_images, dtype=dtypes.string)
train_labels = ops.convert_to_tensor(train_labels, dtype=dtypes.int32)

# Create an input queue that yields one (path, label) pair at a time;
# shuffle=False keeps the file order deterministic.
train_input_queue = tf.train.slice_input_producer(
    [train_images, train_labels],
    shuffle=False)

# val_images = ops.convert_to_tensor(val_images, dtype=dtypes.string)
# val_labels = ops.convert_to_tensor(val_labels, dtype=dtypes.int32)
#
# # create input queues
# val_input_queue = tf.train.slice_input_producer(
#     [val_images, val_labels],
#     shuffle=False)


# Turn the queued path string into a decoded image tensor plus its label.
file_content = tf.read_file(train_input_queue[0])
train_image = tf.image.decode_jpeg(file_content, channels=NUM_CHANNELS)
train_label = train_input_queue[1]


# TODO: adapt the following to a VGG-style net with two classes
# [have rain, no rain] (original author's note).
# NOTE(review): set_shape() only ASSERTS the static shape -- it does not
# resize.  Runtime will fail for any JPEG that is not exactly 250x250;
# confirm the dataset is pre-resized or add tf.image.resize_images here.
train_image.set_shape([IMAGE_HEIGHT, IMAGE_WIDTH, NUM_CHANNELS])

# Collect BATCH_SIZE (image, label) pairs per dequeue before processing.
train_image_batch, train_label_batch = tf.train.batch(
    [train_image, train_label],
    batch_size=BATCH_SIZE
)

# Parenthesized print works identically in Python 2 and is valid Python 3.
print("input pipeline ready")


with tf.Session() as sess:
    # No trainable variables exist in this graph yet, but keep the init
    # for when a model is attached to the pipeline.
    sess.run(tf.global_variables_initializer())

    # Start the queue-runner threads that shovel data into the pipeline.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)

    # try/finally ensures the runner threads are stopped and joined even
    # if sess.run raises (e.g. tf.errors.OutOfRangeError) -- the original
    # would have leaked running threads on any exception.
    try:
        print("from the train set:")
        for i in range(20):
            label_onehot_pre = sess.run(train_label_batch)
            # onehot = tf.one_hot(label_onehot_pre, 1000)
            print(label_onehot_pre.shape)
    finally:
        # Stop our queue threads; the with-block already closes the
        # session, so the original's explicit sess.close() was redundant.
        coord.request_stop()
        coord.join(threads)