# coding:utf-8
# __user__ = hiicy redldw
# __time__ = 2019/7/22
# __file__ = dataset_utils
# __desc__ =
import random
import sys

import tensorflow as tf
from tensorflow.contrib import slim
import os
from xml.etree import ElementTree as ET


# Pascal VOC class name -> (integer label id, coarse category).
# Index 0 is reserved for the background class; ids 1-20 are the
# standard 20 VOC object categories.
VOC_LABELS = {
	'none': (0, 'Background'),
	'aeroplane': (1, 'Vehicle'),
	'bicycle': (2, 'Vehicle'),
	'bird': (3, 'Animal'),
	'boat': (4, 'Vehicle'),
	'bottle': (5, 'Indoor'),
	'bus': (6, 'Vehicle'),
	'car': (7, 'Vehicle'),
	'cat': (8, 'Animal'),
	'chair': (9, 'Indoor'),
	'cow': (10, 'Animal'),
	'diningtable': (11, 'Indoor'),
	'dog': (12, 'Animal'),
	'horse': (13, 'Animal'),
	'motorbike': (14, 'Vehicle'),
	'person': (15, 'Person'),
	'pottedplant': (16, 'Indoor'),
	'sheep': (17, 'Animal'),
	'sofa': (18, 'Indoor'),
	'train': (19, 'Vehicle'),
	'tvmonitor': (20, 'Indoor'),
}
# Image file extensions accepted by the converter.
allow_suffix = ['jpg','png','jpeg']
def int64_feature(value):
	"""Wrapper for inserting int64 features into an Example proto.

	Accepts a single int or a list of ints and returns a
	``tf.train.Feature`` holding an ``Int64List``.
	"""
	if not isinstance(value, list):
		value = [value]
	# Fix: the original called tf.train.Int64Llist (double 'L' typo),
	# which raises AttributeError at runtime.
	return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def float_feature(value):
	"""Wrapper for inserting float features into an Example proto.

	A scalar is promoted to a one-element list before wrapping.
	"""
	values = value if isinstance(value, list) else [value]
	return tf.train.Feature(float_list=tf.train.FloatList(value=values))
def bytes_feature(value):
	"""Wrapper for inserting bytes features into an Example proto.

	A single bytes object is promoted to a one-element list.
	"""
	values = value if isinstance(value, list) else [value]
	return tf.train.Feature(bytes_list=tf.train.BytesList(value=values))

# Standard Pascal VOC sub-directories, relative to the dataset root.
DIRECTORY_ANNOTATIONS = 'Annotations/'
DIRECTORY_IMAGES = 'JPEGImages/'
# Fixed seed so a shuffled conversion order is reproducible across runs.
RANDOM_SEED = 4242
# Maximum number of Examples written into one .tfrecord shard.
SAMPLES_PER_FILES = 200


def _process_image(directory, name):
	"""Load one sample's image bytes and parse its VOC XML annotation.

	Args:
		directory: dataset root containing JPEGImages/ and Annotations/.
		name: sample basename without extension (e.g. '000001').

	Returns:
		Tuple (image_data, shape, bboxes, labels, labels_text) where
		``shape`` is [height, width, depth] from the XML <size> node and
		``bboxes`` holds raw pixel coordinates (xmin, ymin, xmax, ymax)
		per object.

	Raises:
		KeyError: if an object name is not present in VOC_LABELS.
	"""
	# Read the raw JPEG bytes.
	filename = os.path.join(directory, DIRECTORY_IMAGES, name + '.jpg')
	# Fix: image data must be read in binary mode ('rb'); mode 'r' fails
	# under Python 3 (decode error) and corrupts bytes on Windows.
	image_data = tf.gfile.FastGFile(filename, 'rb').read()

	# Read the XML annotation file.
	filename = os.path.join(directory, DIRECTORY_ANNOTATIONS, name + '.xml')
	tree = ET.parse(filename)
	root = tree.getroot()

	# Image shape as stored in the annotation.
	size = root.find('size')
	shape = [int(size.find('height').text),
			 int(size.find('width').text),
			 int(size.find('depth').text)]

	# Collect every annotated object.
	bboxes = []
	labels = []
	labels_text = []
	for obj in root.findall('object'):
		label = obj.find('name').text
		labels.append(int(VOC_LABELS[label][0]))
		labels_text.append(label.encode('ascii'))

		bbox = obj.find('bndbox')
		# TODO: divide by shape[1]/shape[0] if normalized coords are needed.
		bboxes.append((float(bbox.find('xmin').text),
					   float(bbox.find('ymin').text),
					   float(bbox.find('xmax').text),
					   float(bbox.find('ymax').text)
					   ))
	return image_data, shape, bboxes, labels, labels_text
def _convert_to_example(image_data, labels, labels_text, bboxes, shape):
	"""Build a tf.train.Example proto for one annotated image.

	Args:
		image_data: raw encoded image bytes.
		labels: list of integer class ids, one per object.
		labels_text: list of class names as bytes, one per object.
		bboxes: list of (xmin, ymin, xmax, ymax) tuples, one per object.
		shape: [height, width, channels] of the image.

	Returns:
		A populated ``tf.train.Example``.
	"""
	xmin = []
	ymin = []
	xmax = []
	ymax = []
	# Fix: the original abused a list comprehension purely for its
	# side effects; a plain unpacking loop is the idiomatic form.
	for b in bboxes:
		assert len(b) == 4
		x0, y0, x1, y1 = b
		xmin.append(x0)
		ymin.append(y0)
		xmax.append(x1)
		ymax.append(y1)
	example = tf.train.Example(features=tf.train.Features(feature={
		"image/height": int64_feature(shape[0]),
		'image/width': int64_feature(shape[1]),
		'image/channels': int64_feature(shape[2]),
		'image/shape': int64_feature(shape),
		'image/object/bbox/xmin': float_feature(xmin),  # xmin of every box in the image
		'image/object/bbox/ymin': float_feature(ymin),
		'image/object/bbox/xmax': float_feature(xmax),
		'image/object/bbox/ymax': float_feature(ymax),
		'image/object/bbox/label': int64_feature(labels),
		'image/object/bbox/label_text': bytes_feature(labels_text),
		'image/encoded': bytes_feature(image_data)}))
	return example

def _add_to_tfrecord(dataset_dir, name, tfrecord_writer):
	"""Convert one VOC sample and append it to the open TFRecord writer."""
	image_data, shape, bboxes, labels, labels_text = _process_image(dataset_dir, name)
	example = _convert_to_example(image_data, labels, labels_text, bboxes, shape)
	tfrecord_writer.write(example.SerializeToString())
def _get_output_filename(output_dir, name, idx):
	return '%s/%s_%03d.tfrecord' % (output_dir, name, idx)
def run(dataset_dir, output_dir, name='voc_train', shuffling=False):
	"""Convert a Pascal VOC dataset directory into TFRecord shards.

	Args:
		dataset_dir: VOC root containing Annotations/ and JPEGImages/.
		output_dir: directory that receives '<name>_###.tfrecord' shards.
		name: shard filename prefix.
		shuffling: if True, shuffle sample order with a fixed seed.
	"""
	# Fix: it is the OUTPUT directory that must exist before the
	# TFRecordWriter opens files in it; the input dir is only read
	# (creating an empty dataset_dir would just fail on listdir below).
	if not tf.gfile.Exists(output_dir):
		tf.gfile.MakeDirs(output_dir)
	# Dataset filenames, and shuffling.
	path = os.path.join(dataset_dir, DIRECTORY_ANNOTATIONS)
	filenames = sorted(os.listdir(path))
	if shuffling:
		random.seed(RANDOM_SEED)
		random.shuffle(filenames)
	# Process dataset files, SAMPLES_PER_FILES examples per shard.
	i = 0
	fidx = 0
	while i < len(filenames):
		# Open a new TFRecord shard.
		tf_filename = _get_output_filename(output_dir, name, fidx)
		with tf.python_io.TFRecordWriter(tf_filename) as tfrecord_writer:
			j = 0
			while i < len(filenames) and j < SAMPLES_PER_FILES:
				sys.stdout.write('\r>> Converting image %d/%d' % (i + 1, len(filenames)))
				sys.stdout.flush()

				filename = filenames[i]
				# Fix: entries listed here come from Annotations/ and are
				# 'xxx.xml' files, so the old image-suffix check raised
				# TypeError for every sample. Strip the extension to
				# recover the sample basename instead.
				img_name = os.path.splitext(filename)[0]
				_add_to_tfrecord(dataset_dir, img_name, tfrecord_writer)
				i += 1
				j += 1
			fidx += 1
	print('\nFinished converting the Pascal VOC dataset!')

def get_dataSet(tfrecord_path, num_class=21, resize=300, num_samples=None,
				items_to_descriptions=None, labels_to_names=None):
	"""Build a slim Dataset that decodes the TFRecords written by run().

	Args:
		tfrecord_path: path/pattern of the .tfrecord source file(s).
		num_class: number of classes including background (default 21).
		resize: kept for interface compatibility; not used here.
		num_samples: total number of examples in the source files. The
			original referenced the undefined ``split_to_sizes[split_name]``
			(a NameError); callers should now pass the count explicitly.
		items_to_descriptions: optional {item: description} dict; a
			default description is supplied when omitted.
		labels_to_names: optional {id: name} dict; derived from
			VOC_LABELS when omitted.

	Returns:
		A ``slim.dataset.Dataset`` ready for a DatasetDataProvider.
	"""
	reader = tf.TFRecordReader
	keys_to_features = {
		'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
		'image/height': tf.FixedLenFeature([1], tf.int64),
		'image/width': tf.FixedLenFeature([1], tf.int64),
		'image/channels': tf.FixedLenFeature([1], tf.int64),
		'image/shape': tf.FixedLenFeature([3], tf.int64),
		'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
		'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),
		'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),
		'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32),
		'image/object/bbox/label': tf.VarLenFeature(dtype=tf.int64),
	}
	item_to_handlers = {
		'image': slim.tfexample_decoder.Image('image/encoded'),
		'shape': slim.tfexample_decoder.Tensor("image/shape"),
		'object/bbox': slim.tfexample_decoder.BoundingBox(
			['xmin', 'ymin', 'xmax', 'ymax'], 'image/object/bbox/'),
		'object/label': slim.tfexample_decoder.Tensor('image/object/bbox/label'),
	}
	decoder = slim.tfexample_decoder.TFExampleDecoder(
		keys_to_features, item_to_handlers
	)
	# Fix: the original referenced four undefined names
	# (split_to_sizes, split_name, items_to_descriptions,
	# labels_to_names) and hard-coded 21 instead of num_class.
	if items_to_descriptions is None:
		items_to_descriptions = {
			'image': 'A color image of varying height and width.',
			'shape': 'Shape of the image',
			'object/bbox': 'A list of bounding boxes, one per object.',
			'object/label': 'A list of labels, one per object.',
		}
	if labels_to_names is None:
		labels_to_names = {idx: cls for cls, (idx, _) in VOC_LABELS.items()}
	# a. data_sources: tfrecord file path(s)
	# b. reader: usually tf.TFRecordReader
	# c. decoder: the decoder built above
	# d. num_samples: number of examples
	# e. items_to_descriptions: description of samples and labels
	# f. num_classes: number of classes
	return slim.dataset.Dataset(
		data_sources=tfrecord_path,
		reader=reader,
		decoder=decoder,
		num_samples=num_samples,
		items_to_descriptions=items_to_descriptions,
		num_classes=num_class,
		labels_to_names=labels_to_names)









