# TFRecord reading and augmentation utilities for the segmentation dataset.

import os
import sys 
import tensorflow as tf
import numpy as np
import glob
import math
from PIL import Image
import random

def base_augment(images, masks,
            resize=None, # (width, height) tuple or None
            horizontal_flip=False,
            vertical_flip=False,
            rotate=0, # Maximum rotation angle in degrees
            crop_probability=0, # How often we do crops
            crop_min_percent=0.6, # Minimum linear dimension of a crop
            crop_max_percent=1.,  # Maximum linear dimension of a crop
            color_probobility=0.5,):
    """Apply mask-consistent augmentation to an (image, mask) pair.

    The same geometric flip is applied to both tensors so segmentation
    labels stay aligned with the image; color distortion touches the
    image only.

    Args:
        images: image tensor; non-float32 dtypes are converted via
            tf.image.convert_image_dtype.
        masks: segmentation mask tensor with the same spatial layout.
        resize, rotate, crop_probability, crop_min_percent,
        crop_max_percent: accepted for signature compatibility with
            `augment` but currently ignored by this function.
        horizontal_flip: enable a random left/right flip (p=0.5).
        vertical_flip: enable a random up/down flip (p=0.5).
        color_probobility: probability of color distortion (>0 enables).

    Returns:
        (images, masks) tuple of augmented tensors.

    NOTE(review): the flip coins use np.random.rand(), so the decision is
    made once at graph-construction time, not per example at run time —
    confirm this is intended for this pipeline.
    """
    if images.dtype != tf.float32:
        images = tf.image.convert_image_dtype(images, dtype=tf.float32)
    if horizontal_flip and np.random.rand() > 0.5:
        images = tf.image.flip_left_right(images)
        masks = tf.image.flip_left_right(masks)
    if vertical_flip and np.random.rand() > 0.5:
        # Bug fix: this branch previously reused flip_left_right, so the
        # "vertical" flip was actually a second horizontal flip.
        images = tf.image.flip_up_down(images)
        masks = tf.image.flip_up_down(masks)
    if color_probobility > 0:
        images = distort_imagecolor(images, prob=color_probobility)
    return images, masks
  

def distort_imagecolor(image, prob=0.5, max_delta=32./255., lower=0.5, upper=1.5, max_deltah=0.2):
    """Randomly distort the color of an image.

    With probability `prob`, applies brightness, hue, saturation and
    contrast jitter — all four, in a random order; otherwise returns the
    image unchanged.

    Args:
        image: float image tensor.
        prob: probability of applying the distortion at all.
        max_delta: max brightness delta.
        lower, upper: range for random saturation/contrast factors.
        max_deltah: max hue delta.

    Returns:
        The (possibly) distorted image tensor.
    """
    def brightness():
        return tf.image.random_brightness(image, max_delta=max_delta)
    def hue():
        # Bug fix: this branch previously duplicated random_brightness;
        # max_deltah was declared for random_hue but never used.
        return tf.image.random_hue(image, max_delta=max_deltah)
    def saturation():
        return tf.image.random_saturation(image, lower=lower, upper=upper)
    def contrast():
        return tf.image.random_contrast(image, lower=lower, upper=upper)
    color_ordering = np.random.rand()
    if color_ordering < prob:
        function_list = [brightness, hue, saturation, contrast]
        random.shuffle(function_list)
        for func in function_list:
            image = func()
    else:
        return image
    return image

def augment(images, labels,
            resize=None, # (width, height) tuple or None
            horizontal_flip=False,
            vertical_flip=False,
            rotate=0, # Maximum rotation angle in degrees
            crop_probability=0, # How often we do crops
            crop_min_percent=0.6, # Minimum linear dimension of a crop
            crop_max_percent=1.,  # Maximum linear dimension of a crop
            color_probobility=0.3,
            mixup=0):  # Mixup coefficient, see https://arxiv.org/abs/1710.09412.pdf
    """Batch-level augmentation: geometric transforms, mixup, color jitter.

    All enabled geometric transforms (flips, rotation, random crop) are
    expressed as per-image projective transforms, composed, and applied
    in a single tf.contrib.image.transform pass. Unlike base_augment, the
    random coins here are drawn inside the graph, so they differ per
    image per step.

    Args:
        images: 4-D batch tensor (N, H, W, C).
        labels: per-image labels; cast to float (required by mixup).
        resize: optional size for tf.image.resize_bilinear, or None.
        horizontal_flip / vertical_flip: enable 50%-probability flips.
        rotate: max absolute rotation in degrees (uniform in +/- rotate).
        crop_probability: how often a random crop is applied.
        crop_min_percent / crop_max_percent: linear crop size range.
        color_probobility: probability of color distortion.
        mixup: Beta(mixup, mixup) mixup coefficient; 0 disables mixup.

    Returns:
        (images, labels) augmented tensors.
    """
    if resize is not None:
        images = tf.image.resize_bilinear(images, resize)
    # My experiments showed that casting on GPU improves training performance
    if images.dtype != tf.float32:
        images = tf.image.convert_image_dtype(images, dtype=tf.float32)
        # Rescale [0, 1] -> [-1, 1].
        images = tf.subtract(images, 0.5)
        images = tf.multiply(images, 2.0)
    labels = tf.to_float(labels)
    with tf.name_scope('augmentation'):
        shp = tf.shape(images)
        batch_size, height, width = shp[0], shp[1], shp[2]
        width = tf.cast(width, tf.float32)
        height = tf.cast(height, tf.float32)
    # The list of affine transformations that our image will go under.
    # Every element is an Nx8 tensor, where N is the batch size.
    transforms = []
    identity = tf.constant([1, 0, 0, 0, 1, 0, 0, 0], dtype=tf.float32)
    if horizontal_flip:
        coin = tf.less(tf.random_uniform([batch_size], 0, 1.0), 0.5)
        flip_transform = tf.convert_to_tensor(
            [-1., 0., width, 0., 1., 0., 0., 0.], dtype=tf.float32)
        transforms.append(
            tf.where(coin,
                tf.tile(tf.expand_dims(flip_transform, 0), [batch_size, 1]),
                tf.tile(tf.expand_dims(identity, 0), [batch_size, 1])))

    if vertical_flip:
        coin = tf.less(tf.random_uniform([batch_size], 0, 1.0), 0.5)
        flip_transform = tf.convert_to_tensor(
            [1, 0, 0, 0, -1, height, 0, 0], dtype=tf.float32)
        transforms.append(
              tf.where(coin,
                   tf.tile(tf.expand_dims(flip_transform, 0), [batch_size, 1]),
                   tf.tile(tf.expand_dims(identity, 0), [batch_size, 1])))

    if rotate > 0:
        angle_rad = rotate / 180 * math.pi
        angles = tf.random_uniform([batch_size], -angle_rad, angle_rad)
        transforms.append(
            tf.contrib.image.angles_to_projective_transforms(
                angles, height, width))

    if crop_probability > 0:
        crop_pct = tf.random_uniform([batch_size], crop_min_percent,
                                   crop_max_percent)
        left = tf.random_uniform([batch_size], 0, width * (1 - crop_pct))
        top = tf.random_uniform([batch_size], 0, height * (1 - crop_pct))
        # NOTE(review): in the [a0, a1, tx, b0, b1, ty, c0, c1] projective
        # layout, `top` sits in the x-translation slot and `left` in the
        # y-translation slot — looks swapped; confirm against the
        # tf.contrib.image.transform docs before relying on crop offsets.
        crop_transform = tf.stack([
            crop_pct,
            tf.zeros([batch_size]), top,
            tf.zeros([batch_size]), crop_pct, left,
            tf.zeros([batch_size]),
            tf.zeros([batch_size])
            ], 1)
        coin = tf.less(
            tf.random_uniform([batch_size], 0, 1.0), crop_probability)
        transforms.append(
            tf.where(coin, crop_transform,
                tf.tile(tf.expand_dims(identity, 0), [batch_size, 1])))

    # Bug fix: removed leftover debug prints; the unconditional
    # compose_transforms(*transforms) print failed whenever `transforms`
    # was empty (all geometric options disabled), while the real call
    # below is correctly guarded.
    if transforms:
        images = tf.contrib.image.transform(
            images,
            tf.contrib.image.compose_transforms(*transforms),
            interpolation='NEAREST')

    def cshift(values): # Circular shift in batch dimension
        return tf.concat([values[-1:, ...], values[:-1, ...]], 0)

    if mixup > 0:
        mixup = 1.0 * mixup # Convert to float, as tf.distributions.Beta requires floats.
        beta = tf.distributions.Beta(mixup, mixup)
        lam = beta.sample(batch_size)
        ll = tf.expand_dims(tf.expand_dims(tf.expand_dims(lam, -1), -1), -1)
        # Blend each image/label with its circularly-shifted neighbor.
        images = ll * images + (1 - ll) * cshift(images)
        labels = lam * labels + (1 - lam) * cshift(labels)

    if color_probobility > 0:
        images = distort_imagecolor(images, prob=color_probobility)
    return images, labels

def trainset_read(filename, is_training):
    """Read one (image, mask) example from a GZIP TFRecord filename queue.

    Args:
        filename: a filename queue (e.g. from tf.train.string_input_producer).
        is_training: when True, apply random flips / color jitter.

    Returns:
        (image, masks): float32 tensors; image is a (512, 512, 3) RGB
        tensor scaled to [0, 1], masks is (512, 512, 3).
    """
    read_opts = tf.python_io.TFRecordOptions(
        tf.python_io.TFRecordCompressionType.GZIP)
    _, record = tf.TFRecordReader(options=read_opts).read(filename)
    parsed = tf.parse_single_example(
        record,
        features={
            'image_raw': tf.FixedLenFeature([], tf.string),
            'label_masks': tf.FixedLenFeature([], tf.string),  # (N, height, width)
        })
    # Decode the grayscale image, expand to RGB, and move to float32.
    image = tf.decode_raw(parsed['image_raw'], tf.uint8)
    image = tf.reshape(image, (512, 512, 1))
    image = tf.cast(tf.image.grayscale_to_rgb(image), tf.float32)
    # Decode the 3-channel label masks.
    masks = tf.decode_raw(parsed['label_masks'], tf.uint8)
    masks = tf.cast(tf.reshape(masks, (512, 512, 3)), tf.float32)
    if is_training:
        image, masks = base_augment(
            image, masks,
            horizontal_flip=True, vertical_flip=True, crop_probability=0.6)
    image = tf.cast(tf.divide(image, 255.0), tf.float32)
    masks = tf.cast(masks, tf.float32)
    return image, masks



def dataset_segment_read(serialized_example, is_training):
    """Parse one serialized TFRecord example into (image, masks) tensors.

    Intended as a tf.data map function (see get_dataset).

    Args:
        serialized_example: a scalar string tensor with one serialized Example.
        is_training: when True, apply random flips / color jitter.

    Returns:
        (image, masks): float32 tensors of shape (512, 512, 3); image is
        RGB scaled to [0, 1].
    """
    parsed = tf.parse_single_example(
        serialized_example,
        features={
            'image_raw': tf.FixedLenFeature([], tf.string),
            'label_masks': tf.FixedLenFeature([], tf.string),  # (N, height, width)
        })
    # Decode the grayscale image, expand to RGB, and move to float32.
    image = tf.decode_raw(parsed['image_raw'], tf.uint8)
    image = tf.reshape(image, (512, 512, 1))
    image = tf.cast(tf.image.grayscale_to_rgb(image), tf.float32)
    # Decode the 3-channel label masks.
    masks = tf.decode_raw(parsed['label_masks'], tf.uint8)
    masks = tf.cast(tf.reshape(masks, (512, 512, 3)), tf.float32)
    if is_training:
        image, masks = base_augment(
            image, masks,
            horizontal_flip=True, vertical_flip=True, crop_probability=0.6)
    image = tf.cast(tf.divide(image, 255.0), tf.float32)
    image = tf.reshape(image, (512, 512, 3))
    masks = tf.cast(masks, tf.float32)
    masks = tf.reshape(masks, (512, 512, 3))
    return image, masks

def get_dataset(tfrecords, epoch=10000, shuffle=1000,
                batch_size=1, is_training=False, reused=True):
    """Build a batched tf.data pipeline over a GZIP-compressed TFRecord file.

    Args:
        tfrecords: path to the TFRecord file.
        epoch: number of repeats when training (1 otherwise).
        shuffle: shuffle buffer size; 0/None disables shuffling.
        batch_size: examples per batch.
        is_training: enables repeat/shuffle and augmentation in the parser.
        reused: when True, return an initializable iterator (the caller
            must run iterator.initializer); otherwise a one-shot iterator.

    Returns:
        (next_element, iterator): the get_next() op and the iterator itself.

    NOTE(review): shuffling happens after repeat here, so examples from
    adjacent epochs can mix within the buffer — confirm this is acceptable.
    """
    dataset = tf.data.TFRecordDataset([tfrecords], compression_type="GZIP")
    dataset = dataset.map(lambda record: dataset_segment_read(record, is_training))
    repeat_count = epoch if (epoch and is_training) else 1
    dataset = dataset.repeat(repeat_count)
    if shuffle and is_training:
        dataset = dataset.shuffle(shuffle)
    dataset = dataset.batch(batch_size)
    if reused:
        iterator = dataset.make_initializable_iterator()
    else:
        iterator = dataset.make_one_shot_iterator()
    return iterator.get_next(), iterator

def imagearray_show(image_a):
    """Display a NumPy array as an image with PIL (prints the Image object too)."""
    pil_image = Image.fromarray(image_a)
    print(pil_image)
    pil_image.show()

def traindata_api_test(tfdata_store_path):
    """Smoke-test the queue-based input pipeline: read and display 10 batches."""
    # If the dataset is very large it can be split into several files and the
    # whole filename list passed in here.
    filename_queue = tf.train.string_input_producer(
                [tfdata_store_path], num_epochs=1)
    # Bug fix: trainset_read requires is_training; it was previously called
    # with a single argument, which raised a TypeError.
    ims, masks = trainset_read(filename_queue, is_training=True)
    image_i, mask_i = tf.train.shuffle_batch(
                [ims, masks], batch_size=1, num_threads=1,
                capacity=1000+3*1,
                min_after_dequeue=1000)
    with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        sess.run(tf.global_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        for step in range(10):
            test_image, test_masks = sess.run([image_i, mask_i])
            print("image shape:", np.shape(test_image))
            print("mask shape:", np.shape(test_masks))
            imshow = True
            maskshow = True
            if imshow:
                test_image = np.reshape(test_image, (512, 512, 3)).astype(np.uint8)
                imagearray_show(test_image)
            if maskshow:
                test_masks = np.reshape(test_masks, (512, 512, 3)) * 255
                # Bug fix: the channel loop previously reused `i`, shadowing
                # the outer step counter.
                for channel in range(3):
                    imagearray_show(test_masks[:, :, channel])
        coord.request_stop()
        # Terminate as usual. It is innocuous to request stop twice.
        coord.join(threads)
        sess.close()
    print("finished all test")


def data_api_test(tfdata_store_path):
    """Smoke-test the tf.data input pipeline: fetch batches and optionally show them."""
    # Bug fix: get_dataset returns (next_element, iterator); binding the pair
    # to a single name made sess.run() receive the iterator object and fail.
    next_element, iterator = get_dataset(tfdata_store_path, batch_size=128,
                                         is_training=True)
    with tf.Session() as sess:
        # Bug fix: with the default reused=True, get_dataset builds an
        # initializable iterator which was never initialized, so the first
        # sess.run would have raised.
        sess.run(iterator.initializer)
        for step in range(100):
            test_image, test_masks = sess.run(next_element)
            print("image shape:", np.shape(test_image))
            print("mask shape:", np.shape(test_masks))
            imshow = False
            maskshow = False
            if imshow:
                print(test_image.dtype)
                test_image = np.reshape(test_image*255, (512, 512, 3)).astype(np.uint8)
                imagearray_show(test_image)
            if maskshow:
                test_masks = test_masks.astype(np.uint8)
                test_masks = np.reshape(test_masks, (512, 512, 3)) * 255
                # Bug fix: the channel loop previously reused `i`, shadowing
                # the outer step counter.
                for channel in range(3):
                    imagearray_show(test_masks[:, :, channel])
        sess.close()
    print("finished all test")


def getSize(filename):
    """Return the size of *filename* in bytes."""
    return os.stat(filename).st_size

def get_tfrecord_numbers(filename):
    """Estimate how many examples a TFRecord file contains.

    Parses the first record to get a per-example byte size, then divides
    the total file size by (example size + 16 bytes of TFRecord framing).
    This is only an estimate and assumes all examples are roughly the
    same size.

    Args:
        filename: path to an uncompressed TFRecord file.

    Returns:
        The estimated example count (a float).
    """
    example_size = 0
    example = tf.train.Example()
    # Only the first record is needed for the size estimate.
    for record in tf.python_io.tf_record_iterator(filename):
        example.ParseFromString(record)
        example_size = example.ByteSize()
        break
    file_size = getSize(filename)
    n = file_size / (example_size + 16)
    print("file size in bytes:{}".format(file_size))
    print("example size in bytes:{}".format(example_size))
    print("N:{}".format(n))
    # Bug fix: the estimate was computed and printed but never returned,
    # so callers always received None.
    return n
    
if __name__ == '__main__':
    # Path to the GZIP-compressed training TFRecord ("桌面" is "Desktop").
    tfdata_store_path1 = "/home/ubuntu/桌面/segmentation/train_data.tfrecords"
    #filenames = tf.placeholder(tf.string,shape=[None])
    # Run the tf.data pipeline smoke test; the alternatives below are
    # kept for manual debugging (record counting, queue-based reader).
    data_api_test(tfdata_store_path1)
    #get_tfrecord_numbers(tfdata_store_path1)
    #opts = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.GZIP)
    #print(sum(1 for _ in tf.python_io.tf_record_iterator(tfdata_store_path1,options=opts)))