import tensorflow as tf


def read_and_decode(filename):
    """Read a single (image, label) example from a TFRecord file.

    NOTE(review): this definition is shadowed by the second
    `read_and_decode` below (same name, different signature), so it is
    dead code as the file stands — rename one of the two.

    Args:
        filename: path to a .tfrecords file whose examples contain an
            int64 'label' feature and an 'img_raw' bytes feature.

    Returns:
        img: float32 tensor of shape [227, 227, 3], scaled to [-0.5, 0.5].
        label: scalar int32 tensor.
    """
    # TF1-style input queue over the single file.
    filename_queue = tf.train.string_input_producer([filename])

    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(serialized_example,
                                       features={
                                           'label': tf.FixedLenFeature([], tf.int64),
                                           'img_raw': tf.FixedLenFeature([], tf.string),
                                       })

    img = tf.decode_raw(features['img_raw'], tf.uint8)
    # assumes each record stores exactly 227*227*3 raw bytes — TODO confirm
    img = tf.reshape(img, [227, 227, 3])
    # Scale raw byte values from [0, 255] to [-0.5, 0.5].
    # (Removed a stray debug `print(img)` — it printed the symbolic
    # tensor's repr at graph-construction time, not pixel values.)
    img = tf.cast(img, tf.float32) * (1. / 255) - 0.5
    label = tf.cast(features['label'], tf.int32)

    return img, label


def read_and_decode(tfrecords_file, batch_size):
    '''Read and decode a TFRecord file, generating (image, label) batches.

    NOTE(review): this redefines `read_and_decode` from earlier in the
    file with a different signature, shadowing the first definition.

    Args:
        tfrecords_file: path to the tfrecord file
        batch_size: number of images in each batch
    Returns:
        image: 4D tensor - [batch_size, width, height, channel]
        label: 1D tensor - [batch_size]
    '''
    # Build a TF1-style input queue over the single TFRecord file.
    filename_queue = tf.train.string_input_producer([tfrecords_file])

    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    # Parse one serialized Example protocol buffer into a dict of tensors
    # using tf.parse_single_example.
    img_features = tf.parse_single_example(
        serialized_example,
        features={
            'label': tf.FixedLenFeature([], tf.int64),
            'img_raw': tf.FixedLenFeature([], tf.string),
        })
    image = tf.decode_raw(img_features['img_raw'], tf.uint8)  # raw bytes -> 1-D uint8 tensor

    ##########################################################
    # you can put data augmentation here, I didn't use it
    ##########################################################
    # Records are assumed to hold exactly 227*227*3 raw bytes; change
    # this shape if you use a dataset with a different image size.

    image = tf.reshape(image, [227, 227, 3])
    # NOTE(review): labels are cast to float32 here but to int32 in the
    # shadowed function above — confirm which dtype downstream expects.
    label = tf.cast(img_features['label'], tf.float32)
    image = tf.image.per_image_standardization(image)  # per-image zero mean, unit variance
    image_batch, label_batch = tf.train.batch([image, label],
                                              batch_size=batch_size,
                                              num_threads=64,
                                              capacity=256)
    return image_batch, tf.reshape(label_batch, [batch_size])


# create_record()

if __name__ == '__main__':
    # Build the batched (image, label) ops from the TFRecord file.
    img, label = read_and_decode("F:\\001-python\\catvsdogtrain227.tfrecords", 10)

    init = tf.global_variables_initializer()
    # `with` guarantees the session is closed even if a step raises
    # (the original leaked the session).
    with tf.Session() as sess:
        sess.run(init)
        # Start the queue-runner threads under a Coordinator so they can
        # be stopped and joined cleanly; without it the runner threads
        # were never shut down and the process could hang at exit.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            for i in range(50):
                val, l = sess.run([img, label])
                print(val.shape, l)
        finally:
            # Signal the threads to stop and wait for them to finish.
            coord.request_stop()
            coord.join(threads)
