# -*- coding: utf-8 -*-

import random
import os
import numpy as np
import cv2
import threading
import tensorflow as tf

# Root of the pre-resized (480x256) road-segmentation dataset.
# Expected layout (see DataReader._read_full_image_and_ground_truth_path):
#   <root>/images/*.jpg  and  <root>/ground_truth/<same name>.jpg
# NOTE(review): machine-specific absolute path — consider an env var / CLI arg.
g_data_folder = "/home/lijun/DataBase/road_segment/480_256"
# Maximum number of single examples buffered in the feeding FIFO queue.
QUEUE_CAPACITY = 128
# Examples per batch returned by DataReader.read_data().
BATCH_SIZE = 2


class DataReader(object):
    """Feeds (image, ground-truth) pairs into a TF FIFO queue from a
    background Python thread and exposes batched tensors.

    ``input_x`` / ``input_y`` are placeholders describing ONE example;
    the queue stores elements of exactly their dtypes and static shapes.
    Typical use: construct, ``read_data()`` to build the batch tensors,
    ``start(session)`` once the session exists, ``stop(session)`` when done.
    """

    def __init__(self, input_x, input_y, data_folder=g_data_folder):
        self._coordinate = tf.train.Coordinator()
        self._input_x = input_x
        self._input_y = input_y
        self._data_folder = data_folder
        self._q = tf.FIFOQueue(capacity=QUEUE_CAPACITY,
                               dtypes=[input_x.dtype, input_y.dtype],
                               shapes=[input_x.get_shape(), input_y.get_shape()])
        self._q_enq_op = self._q.enqueue([input_x, input_y])
        # cancel_pending_enqueues lets an enqueue blocked on a full queue
        # abort during shutdown instead of deadlocking (see stop()).
        self._q_close_op = self._q.close(cancel_pending_enqueues=True)
        self._threads = []

    def _read_full_image_and_ground_truth_path(self):
        """Return parallel lists of image paths and ground-truth paths.

        Fix: honours the ``data_folder`` passed to ``__init__`` — the
        original was a staticmethod that always read the module-level
        ``g_data_folder``, silently ignoring the constructor argument.
        Ground-truth files are assumed to mirror the image file names
        under a sibling ``ground_truth`` directory.
        """
        image_folder = os.path.join(self._data_folder, "images")
        # endswith(".jpg") instead of substring test: avoids picking up
        # names that merely contain "jpg" (e.g. backups like x.jpg.bak).
        full_images_path = [os.path.join(image_folder, name)
                            for name in os.listdir(image_folder)
                            if name.endswith(".jpg")]
        # NOTE(review): str.replace swaps EVERY "images" substring in the
        # path; safe as long as the data root itself does not contain it.
        full_ground_truth_files_path = [path.replace("images", "ground_truth")
                                        for path in full_images_path]
        return full_images_path, full_ground_truth_files_path

    def _read_data_thread(self, session):
        """Worker loop: load a random example from disk and enqueue it.

        Runs until the coordinator requests a stop; once stop() closes
        the queue, a pending enqueue raises CancelledError, which
        ``stop_on_exception`` records and swallows.
        """
        images_path, ground_truth_path = \
            self._read_full_image_and_ground_truth_path()
        # ImageNet channel means in BGR order (cv2.imread returns BGR):
        # B=103.939, G=116.779, R=123.68.  The original subtracted them
        # in a scrambled order ([G, R, B]-ish), biasing every channel,
        # and the Python-float list also promoted the array to float64.
        bgr_mean = np.array([103.939, 116.779, 123.68], dtype=np.float32)
        with self._coordinate.stop_on_exception():
            while not self._coordinate.should_stop():
                index = random.randrange(len(images_path))
                image = cv2.imread(images_path[index])
                ground_truth = cv2.imread(ground_truth_path[index],
                                          cv2.IMREAD_GRAYSCALE)
                if image is None or ground_truth is None:
                    # cv2.imread returns None for unreadable/corrupt
                    # files; skip rather than crash the feeding thread
                    # (the original asserted after already dereferencing,
                    # and asserts vanish under -O anyway).
                    continue
                image = image.astype(np.float32) - bgr_mean
                # Masks are stored as {0, 255}; map to class ids {0, 1}.
                # Floor division keeps int32 — true division ("/ 255")
                # would silently yield float64 in Python 3.
                ground_truth = ground_truth.astype(np.int32) // 255
                session.run(self._q_enq_op,
                            feed_dict={self._input_x: image,
                                       self._input_y: ground_truth})

    def read_data(self):
        """Build and return (images, ground_truths) batch tensors.

        Dequeues single examples and groups them with tf.train.batch,
        whose internal queue runners are launched by start().
        """
        image, ground_truth = self._q.dequeue()
        images, ground_truths = tf.train.batch(tensors=[image, ground_truth],
                                               batch_size=BATCH_SIZE)
        return images, ground_truths

    def start(self, session):
        """Start the Python feeding thread and the tf.train.batch runners."""
        t = threading.Thread(target=self._read_data_thread, args=(session,))
        t.daemon = True  # don't keep the process alive on abnormal exit
        t.start()
        self._threads.append(t)
        runners = tf.train.start_queue_runners(sess=session,
                                               coord=self._coordinate)
        self._threads.extend(runners)

    def stop(self, session):
        """Request a stop, unblock pending enqueues, and join all threads."""
        self._coordinate.request_stop()
        # Closing with cancel_pending_enqueues=True wakes a feeding
        # thread blocked on a full queue, so join() cannot deadlock.
        session.run(self._q_close_op)
        self._coordinate.join(self._threads)
        print("stopped")


def main():
    """Smoke test: pull 10 batches from a DataReader and print
    per-batch shapes plus simple label statistics."""
    import time

    with tf.Graph().as_default():
        x = tf.placeholder(dtype=tf.float32, shape=[256, 480, 3])
        y = tf.placeholder(dtype=tf.int32, shape=[256, 480])
        reader = DataReader(input_x=x, input_y=y)
        batch_images, batch_labels = reader.read_data()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())
            reader.start(sess)
            for _ in range(10):
                img, gt = sess.run([batch_images, batch_labels])
                # Sum of labels == number of positive (value-1) pixels.
                print(img.shape, gt.shape, np.sum(gt))
                positives = np.where(gt == 1)
                negatives = np.where(gt == 0)
                # Sanity check: 0s and 1s should account for every pixel.
                print(len(positives[0]) + len(negatives[0]))
                time.sleep(1)
            reader.stop(sess)


# Run the pipeline smoke test only when executed as a script,
# not when this module is imported.
if __name__ == "__main__":
    main()
