
import tensorflow as tf
import hashlib
import io
import logging
import os

from lxml import etree
import PIL.Image

from object_detection.utils import dataset_util
from object_detection.utils import label_map_util

# Path to the TFRecord produced by the PASCAL VOC -> TFRecord converter.
# NOTE(review): hard-coded absolute Windows path — consider taking this from
# a command-line flag instead.
output_path = 'C:\\Users\\谷雪松\\Documents\\gxs\\models\\models_try\\research\\object_detection\\data\\pascal_trainval.record'

# Queue of input file names; the single record file is cycled num_epochs times.
filename_queue = tf.train.string_input_producer([output_path], num_epochs=6)
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)

# Parse one serialized tf.Example.
#
# Per-IMAGE fields (height, width, filename, encoded bytes, ...) hold exactly
# one value per example, so FixedLenFeature([]) is correct for them.
#
# Per-OBJECT fields (bounding boxes, class text/labels, difficult/truncated
# flags, views) hold one entry per annotated object, and an image may contain
# any number of objects. They must be parsed with VarLenFeature — with
# FixedLenFeature([]) the parse op raises an error for every image that does
# not contain exactly one object.
features = tf.parse_single_example(
    serialized_example,
    features={
        'image/height': tf.FixedLenFeature([], tf.int64),
        'image/width': tf.FixedLenFeature([], tf.int64),
        'image/filename': tf.FixedLenFeature([], tf.string),
        'image/source_id': tf.FixedLenFeature([], tf.string),
        'image/key/sha256': tf.FixedLenFeature([], tf.string),
        'image/encoded': tf.FixedLenFeature([], tf.string),
        'image/format': tf.FixedLenFeature([], tf.string),
        'image/object/bbox/xmin': tf.VarLenFeature(tf.float32),
        'image/object/bbox/xmax': tf.VarLenFeature(tf.float32),
        'image/object/bbox/ymin': tf.VarLenFeature(tf.float32),
        'image/object/bbox/ymax': tf.VarLenFeature(tf.float32),
        'image/object/class/text': tf.VarLenFeature(tf.string),
        'image/object/class/label': tf.VarLenFeature(tf.int64),
        'image/object/difficult': tf.VarLenFeature(tf.int64),
        'image/object/truncated': tf.VarLenFeature(tf.int64),
        'image/object/view': tf.VarLenFeature(tf.string),
    })

# Fields to inspect: human-readable class names and numeric labels of the
# objects in each image. (The previous name `image` for the class-text field
# was misleading — it never held image data.) Both are SparseTensors with one
# entry per annotated object.
class_text = features['image/object/class/text']
class_label = features['image/object/class/label']

# Shuffle-batch (class names, labels) pairs; shuffle_batch supports sparse
# inputs directly.
batched_texts, batched_labels = tf.train.shuffle_batch(
    [class_text, class_label],
    batch_size=2,
    capacity=8000,
    num_threads=4,
    min_after_dequeue=2000)

# Densify so each batch row can be indexed and printed like a plain array.
# Rows are padded with '' / 0 up to the largest object count in the batch.
batched_texts = tf.sparse_tensor_to_dense(batched_texts, default_value='')
batched_labels = tf.sparse_tensor_to_dense(batched_labels, default_value=0)

# string_input_producer's epoch counter is a LOCAL variable, so the local
# initializer is required in addition to the global one.
init_op = tf.group(tf.global_variables_initializer(),
                   tf.local_variables_initializer())

with tf.Session() as sess:
    sess.run(init_op)
    # Start the queue-runner threads that feed filename_queue and the
    # shuffle_batch queue.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    try:
        texts, labels = sess.run([batched_texts, batched_labels])
        for i in range(2):
            print("classes: %s, labels: %s" % (texts[i], labels[i]))
    finally:
        # Always stop and join the reader threads, even if sess.run raised
        # (e.g. OutOfRangeError at end of epochs); request_stop must precede
        # join.
        coord.request_stop()
        coord.join(threads)