import tensorflow as tf
import numpy as np
import os
import scipy.io as spio
from matplotlib import pyplot as plt
from imageio import imread


def get_files_list(base_dataset_dir, images_folder_name, annotations_folder_name, filename):
    """Read the list of image base names from a split file.

    Args:
        base_dataset_dir: root directory of the dataset (currently unused here;
            kept for interface compatibility with existing callers).
        images_folder_name: images subfolder name (unused; kept for interface).
        annotations_folder_name: annotations subfolder name (unused; kept for interface).
        filename: path of the text file listing one image base name per line.

    Returns:
        List of image base names with surrounding whitespace/newlines stripped.
        (Callers previously had to .strip() each entry; stripped names remain
        compatible with that.)
    """
    # `with` guarantees the handle is closed even if reading raises;
    # the original leaked the open file object.
    with open(filename, 'r') as file:
        images_filename_list = [line.strip() for line in file]
    return images_filename_list


def _bytes_feature(value):
    """Wrap a raw byte string in a tf.train.Feature for TFRecord serialization."""
    bytes_list = tf.train.BytesList(value=[value])
    return tf.train.Feature(bytes_list=bytes_list)


def _int64_feature(value):
    """Wrap a scalar integer in a tf.train.Feature for TFRecord serialization."""
    int64_list = tf.train.Int64List(value=[value])
    return tf.train.Feature(int64_list=int64_list)


def read_annotation_from_mat_file(annotations_dir, image_name):
    """Load a segmentation mask from a Berkeley SBD-style .mat annotation.

    The mask lives inside the 'GTcls' MATLAB struct under the 'Segmentation'
    field; loadmat exposes struct fields as (1, 1) object arrays, hence the
    double indexing.
    """
    mat_path = os.path.join(annotations_dir, image_name.strip() + ".mat")
    contents = spio.loadmat(mat_path)
    return contents['GTcls']['Segmentation'][0][0]


def create_tfrecord_dataset(filename_list, writer, img_dir, anno_dir):
    """Serialize (image, annotation) pairs from disk into a TFRecord file.

    For each base name, the image is read as <name>.jpg from img_dir and the
    annotation as <name>.mat (SBD format) from anno_dir. When either file is
    missing, the module-level Pascal VOC directories (images_dir_voc /
    annotations_dir_voc) are tried as a fallback; if that also fails, the pair
    is logged and skipped.

    Args:
        filename_list: iterable of image base names (trailing whitespace OK).
        writer: an open TFRecordWriter; it is closed before returning.
        img_dir: primary directory holding <name>.jpg images.
        anno_dir: primary directory holding <name>.mat annotations.
    """
    read_imgs_counter = 0
    for image_name in filename_list:
        name = image_name.strip()

        try:
            image_np = imread(os.path.join(img_dir, name + ".jpg"))
        except FileNotFoundError:
            try:
                # Fall back to the Pascal VOC image directory (module global).
                image_np = imread(os.path.join(images_dir_voc, name + ".jpg"))
            except FileNotFoundError:
                print("File:", os.path.join(images_dir_voc, name + ".jpg"), "not found.")
                continue

        try:
            annotation_np = read_annotation_from_mat_file(anno_dir, image_name)
        except FileNotFoundError:
            try:
                # Fall back to the Pascal VOC PNG annotations (module global).
                annotation_np = imread(os.path.join(annotations_dir_voc, name + ".png"))
            except FileNotFoundError:
                print("File:", os.path.join(annotations_dir_voc, name + ".png"), "not found.")
                continue

        read_imgs_counter += 1
        image_h = image_np.shape[0]
        image_w = image_np.shape[1]

        # ndarray.tostring() is deprecated and removed in NumPy 2.0;
        # tobytes() produces byte-identical output.
        img_raw = image_np.tobytes()
        annotation_raw = annotation_np.tobytes()

        example = tf.train.Example(features=tf.train.Features(feature={
            'height': _int64_feature(image_h),
            'width': _int64_feature(image_w),
            'image_raw': _bytes_feature(img_raw),
            'annotation_raw': _bytes_feature(annotation_raw)}))

        writer.write(example.SerializeToString())

    print("End of TfRecord. Total of image written:", read_imgs_counter)
    writer.close()


# --- Configuration: Pascal VOC layout ---------------------------------------
base_dataset_dir_voc = 'D:/PycharmProjects/VOC2012'
images_folder_name_voc = "JPEGImages"
annotations_folder_name_voc = "SegmentationClass_1D"
images_dir_voc = os.path.join(base_dataset_dir_voc, images_folder_name_voc)
annotations_dir_voc = os.path.join(base_dataset_dir_voc, annotations_folder_name_voc)

# Pass folder *names* as the signature expects; the original passed the joined
# directory paths, which only went unnoticed because get_files_list never uses
# those arguments.
images_filename_list = get_files_list(base_dataset_dir_voc, images_folder_name_voc,
                                      annotations_folder_name_voc, "custom_train.txt")
print("Total number of training images:", len(images_filename_list))

# Shuffle once, then carve off the first 10% as the validation split.
np.random.shuffle(images_filename_list)
split_index = int(0.10 * len(images_filename_list))
val_images_filename_list = images_filename_list[:split_index]
train_images_filename_list = images_filename_list[split_index:]

print("train set size:", len(train_images_filename_list))
print("val set size:", len(val_images_filename_list))

TRAIN_DATASET_DIR = "./tfrecords/"
# makedirs(exist_ok=True) is atomic w.r.t. the existence check, unlike the
# original exists()+mkdir() pair.
os.makedirs(TRAIN_DATASET_DIR, exist_ok=True)

TRAIN_FILE = 'train.tfrecords'
VALIDATION_FILE = 'validation.tfrecords'
train_writer = tf.python_io.TFRecordWriter(os.path.join(TRAIN_DATASET_DIR, TRAIN_FILE))
val_writer = tf.python_io.TFRecordWriter(os.path.join(TRAIN_DATASET_DIR, VALIDATION_FILE))

create_tfrecord_dataset(train_images_filename_list, train_writer, images_dir_voc, annotations_dir_voc)

create_tfrecord_dataset(val_images_filename_list, val_writer, images_dir_voc, annotations_dir_voc)
