# coding:utf-8
# Author : hiicy redldw
# Date : 2019/04/16
import random
from pathlib import Path
from xml.etree import ElementTree as ET
import tensorflow as tf
import numpy as np

# import tensorflow_.python as tf
# A Dataset is an ordered sequence of elements that all share one structure;
# an element may be a vector, a string, an image, even a tuple or a dict.
features = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
labels = [2, 0, 0, 1, 0]
dataset = tf.data.Dataset.from_tensor_slices((features, labels))
iterator = dataset.make_one_shot_iterator()
one_element = iterator.get_next()
# A one-shot iterator needs no explicit initialization; just pull elements.
with tf.Session() as sess:
    for _ in range(5):
        print(sess.run(one_element))

print('----------')
sess = tf.Session()
# A placeholder-parameterized dataset lets the same graph be re-initialized
# with different sizes at run time.
max_value = tf.placeholder(tf.int64, shape=[])
dataset = tf.data.Dataset.range(max_value)
iterator = dataset.make_initializable_iterator()
next_element = iterator.get_next()

# Initialize the same iterator twice — once over 10 elements, once over 100 —
# and check it yields 0..n-1 each time.
for n in (10, 100):
    sess.run(iterator.initializer, feed_dict={max_value: n})
    for expected in range(n):
        assert sess.run(next_element) == expected
print('*' * 40)

# Training and validation datasets that share the same structure.
training_dataset = tf.data.Dataset.range(100).map(
    lambda x: x + tf.random_uniform([], -10, 10, tf.int64))
validation_dataset = tf.data.Dataset.range(50)

# A reinitializable iterator is defined only by its structure; either
# dataset's `output_types`/`output_shapes` would do since they match.
iterator = tf.data.Iterator.from_structure(training_dataset.output_types,
                                           training_dataset.output_shapes)
next_element = iterator.get_next()

training_init_op = iterator.make_initializer(training_dataset)
validation_init_op = iterator.make_initializer(validation_dataset)

# 20 epochs: traverse the full training set, then the full validation set.
for _ in range(20):
    for init_op, steps in ((training_init_op, 100), (validation_init_op, 50)):
        sess.run(init_op)
        for _ in range(steps):
            sess.run(next_element)


###########################################
# dataset API
###########################################
def Loadset(input_images, input_labels, _parse_img, FLAGS):
    """Build a one-shot tf.data input pipeline over (image, label) pairs.

    Args:
        input_images: array/list of image data (sliced along axis 0).
        input_labels: array/list of labels, aligned with input_images.
        _parse_img: map function applied to each (image, label) element.
        FLAGS: object with `batch` (batch size) and `iter` (repeat count).

    Returns:
        The `get_next` op of a one-shot iterator over the shuffled, batched,
        repeated dataset (the original built this op and discarded it).
    """
    dataset = tf.data.Dataset.from_tensor_slices((input_images, input_labels))
    dataset = dataset.map(_parse_img)
    # Dataset.shuffle requires buffer_size — the original `.shuffle()` call
    # omitted it and raised TypeError. Shuffle before batching so elements
    # are mixed within batches, not just between them.
    # NOTE(review): assumes len(input_images) is defined (list/ndarray) —
    # confirm against callers.
    dataset = (dataset.shuffle(buffer_size=len(input_images))
                      .batch(batch_size=FLAGS.batch)
                      .repeat(FLAGS.iter))
    return dataset.make_one_shot_iterator().get_next()

###########################################
# slice_input_producer example
###########################################
class DataPipeLine:
    """Queue-based input pipeline (tf.train.slice_input_producer) over a
    Pascal-VOC style directory layout: <datadir>/JPEGImages and
    <datadir>/Annotations.
    """

    def __init__(self, resize_shape, datadir, class_labels, batch_size=1, test_size_ratio=0.2):
        """
        Args:
            resize_shape: (height, width, channels) every decoded image is
                expected to have. NOTE(review): set_shape only asserts the
                shape, it does not resize — confirm images are pre-resized.
            datadir: VOC-style root directory.
            class_labels: ordered list of class names; list index = class id.
            batch_size: examples per batch.
            test_size_ratio: fraction of samples routed to the test split.
        """
        self.data_dir = datadir
        self.test_size_ratio = test_size_ratio
        self.class_labels = class_labels
        self.im_shape = resize_shape
        self.batch = batch_size

    def build_pipelines(self):
        """Build the train/test queue pipelines.

        Returns:
            ((train_image_batch, train_anno_batch),
             (test_image_batch, test_anno_batch))
        """
        imglist, annolist = self._generate_path_list(self.data_dir)
        all_imgs = tf.convert_to_tensor(imglist, dtype=tf.string)
        all_annos = tf.convert_to_tensor(annolist, dtype=tf.string)
        # Train/test split. Sizes come from the Python list: the original
        # used len(all_imgs), but len() of a graph-mode tensor is undefined.
        num_samples = len(imglist)
        test_size = int(num_samples * self.test_size_ratio)
        partitions = [0] * num_samples
        partitions[:test_size] = [1] * test_size
        random.shuffle(partitions)
        train_images, test_images = tf.dynamic_partition(all_imgs, partitions, 2)
        train_annos, test_annos = tf.dynamic_partition(all_annos, partitions, 2)
        # slice_input_producer slices the tensors into single examples and
        # enqueues them using background threads.
        train_input_queue = tf.train.slice_input_producer(
            [train_images, train_annos],
            shuffle=False)
        test_input_queue = tf.train.slice_input_producer(
            [test_images, test_annos],
            shuffle=False)
        train_image, train_anno = self._parse_queue(train_input_queue)
        test_image, test_anno = self._parse_queue(test_input_queue)
        train_image.set_shape([self.im_shape[0], self.im_shape[1], self.im_shape[2]])
        test_image.set_shape([self.im_shape[0], self.im_shape[1], self.im_shape[2]])
        # Group examples into batches; the underlying QueueRunners are added
        # to the graph's "QUEUE_RUNNER" collection (start them with
        # tf.train.start_queue_runners before fetching batches).
        train_image_batch, train_anno_batch = tf.train.batch([train_image, train_anno], self.batch, 2)
        test_image_batch, test_anno_batch = tf.train.batch([test_image, test_anno], self.batch, 2)
        return (train_image_batch, train_anno_batch), (test_image_batch, test_anno_batch)

    def _generate_path_list(self, data_dir):
        """Return aligned lists of image and annotation path strings.

        The original signature took no argument but was called with
        self.data_dir (TypeError); the parameter is now explicit.
        NOTE(review): pairing relies on ListDirectory returning both
        directories in matching order — verify filenames correspond.
        """
        imgList = []
        annoList = []
        imgdir = Path(data_dir) / "JPEGImages"
        annodir = Path(data_dir) / "Annotations"
        for img, anno in zip(tf.gfile.ListDirectory(str(imgdir)),
                             tf.gfile.ListDirectory(str(annodir))):
            # Store plain strings: tf.convert_to_tensor(dtype=tf.string)
            # does not accept pathlib.Path objects.
            imgList.append(str(imgdir / img))
            annoList.append(str(annodir / anno))
        return imgList, annoList

    def _convert_annotation(self, anno_file):
        """Parse one Pascal-VOC XML annotation file.

        Returns:
            (boxes, labels): boxes is a list of [xmin, ymin, xmax, ymax]
            floats (shifted by -1 to 0-based coordinates); labels is the
            matching list of class ids from self.class_labels.
        Raises:
            ValueError: if the file cannot be read or parsed.
        """
        try:
            tree = ET.parse(anno_file)
            root = tree.getroot()
            boxes = []
            labels = []
            for obj in root.iter('object'):
                cls = obj.find('name').text
                # Skip objects whose class is not in the configured label set.
                if cls not in self.class_labels:
                    continue
                cls_id = self.class_labels.index(cls)
                bndbox = obj.find('bndbox')
                b = [
                    float(bndbox.find(t).text) - 1
                    for t in ['xmin', 'ymin', 'xmax', 'ymax']
                ]
                boxes.append(b)
                labels.append(cls_id)
        except (ET.ParseError, AttributeError, ValueError, OSError) as e:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed; chain the original cause for debugging.
            print(anno_file)
            raise ValueError("读取annotation文件出错") from e
        else:
            return boxes, labels

    # todo: image preprocessing
    def _parse_queue(self, input_queue):
        """Turn one (image_path, anno_path) queue element into tensors.

        NOTE(review): input_queue[1] is a tf.string tensor, but
        _convert_annotation hands it to ET.parse, which needs a real path at
        Python level — as written this can only work via tf.py_func or eager
        execution. Left unchanged; restructuring the graph is out of scope.
        """
        file_content = tf.read_file(input_queue[0])
        train_image = tf.image.decode_image(file_content, channels=3)
        train_anno = self._convert_annotation(input_queue[1])  # (boxes, labels)
        return train_image, train_anno

# Powerful combination: Dataset API + TFRecord