#coding:utf-8
import tensorflow as tf
import time
import glob
import os.path
import image
from tensorflow.python.platform import gfile
import numpy as np

INPUT_DATA =  'E:/flower_photos'
# Module-level accumulators for each dataset split; filled and then
# cleared once per class directory inside create_image_lists().
training_images = []
testing_images = []
validation_images = []

# NOTE(review): this name shadows the builtin `dict`. It maps the shard
# index (one per class directory) to the flower-class name used in the
# output file names by writer(). Renaming would be safer, but is left
# as-is here because writer() references it by this name.
dict = {0:"daisy",1:"dandelion",2:"roses",3:"sunflowers",4:"tulips"}

# Percentage of images routed to the validation and test sets
# (the remainder, ~80%, goes to training).
VALIDATION_PERCENTAGE = 10
TEST_PERCENTAGE = 10


def _int64_feature(value):
    """Wrap a scalar integer in a tf.train.Feature holding an Int64List."""
    int64_list = tf.train.Int64List(value=[value])
    return tf.train.Feature(int64_list=int64_list)

def _bytes_feature(value):
    """Wrap a byte string in a tf.train.Feature holding a BytesList."""
    bytes_list = tf.train.BytesList(value=[value])
    return tf.train.Feature(bytes_list=bytes_list)


# Read images under INPUT_DATA, randomly split them ~8:1:1 into
# training / validation / test sets, and write one TFRecord shard per
# flower-class directory.
def _preprocess(sess, image_value, for_training):
    """Preprocess one decoded image and return its raw uint8 bytes.

    Resizes/augments to 299x299 via image.preprocess_for_train, scales
    the result back to [0, 255], casts to uint8, evaluates in `sess`,
    and serializes the array with tostring().

    NOTE(review): new graph ops are created for every single image, so
    the default graph grows with the dataset and each sess.run gets
    slower — a placeholder-based pipeline would avoid this. Left
    unchanged here to preserve the original behavior.
    """
    if for_training:
        # The training branch passes an extra positional flag — presumably
        # enabling augmentation; confirm against image.preprocess_for_train.
        distort_image = image.preprocess_for_train(image_value, 299, 299, None, True)
    else:
        distort_image = image.preprocess_for_train(image_value, 299, 299, None)
    distort_image = tf.multiply(distort_image, 255.0)
    return sess.run(tf.cast(distort_image, tf.uint8)).tostring()


def create_image_lists(sess, testing_percentage, validatation_percentage):
    """Split the flower photos into datasets and write them as TFRecords.

    Args:
        sess: active tf.Session used to run the preprocessing ops.
        testing_percentage: percent of images assigned to the test set.
        validatation_percentage: percent assigned to the validation set.
            (Parameter name kept misspelled for caller compatibility.)

    Side effects: appends to and clears the module-level lists
    training_images / testing_images / validation_images, and writes
    TFRecord files through writer().
    """
    current_label = 0
    num_shards = 0
    # os.walk yields (dirpath, dirnames, filenames); keep the dirpaths.
    sub_dirs = [x[0] for x in os.walk(INPUT_DATA)]
    is_root_dir = True
    print(sub_dirs)
    # One sub-directory per flower class.
    for sub_dir in sub_dirs:
        # The first entry from os.walk is INPUT_DATA itself — skip it.
        if is_root_dir:
            is_root_dir = False
            continue

        # Gather all image files in this sub-directory. Windows file
        # names are case-insensitive, so two lower-case patterns suffice.
        extensions = ['jpg', 'jpeg']
        file_list = []
        # basename returns the final path component, i.e. the class name.
        dir_name = os.path.basename(sub_dir)
        for extension in extensions:
            file_glob = os.path.join(INPUT_DATA, dir_name, '*.' + extension)
            # glob.glob expands the wildcard into the matching file list.
            file_list.extend(glob.glob(file_glob))
        if not file_list:
            continue
        print("processing:", dir_name, "-", len(file_list))
        print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))

        # Preprocess every image and route it to a random split (~8:1:1).
        for file_name in file_list:
            image_raw_data = gfile.FastGFile(file_name, 'rb').read()
            image_value = tf.image.decode_jpeg(image_raw_data)
            chance = np.random.randint(100)
            if chance < validatation_percentage:
                validation_images.append(_preprocess(sess, image_value, False))
            elif chance < (testing_percentage + validatation_percentage):
                testing_images.append(_preprocess(sess, image_value, False))
            else:
                training_images.append(_preprocess(sess, image_value, True))

        print("开始写入文件", time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
        print("num_shard--", num_shards, "label--", current_label)
        # Flush this class's images to TFRecord files, then reset buffers.
        writer(num_shards, testing_images, current_label, 1)
        writer(num_shards, validation_images, current_label, 2)
        writer(num_shards, training_images, current_label, 3)
        testing_images.clear()
        validation_images.clear()
        training_images.clear()
        num_shards = num_shards + 1  # shard index, one per class directory
        current_label += 1
        print("写入文件完毕", time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))

        # (A TFRecordWriter is created inside writer() below.)

# Writer function: writes one dataset split and its labels to a TFRecord file.
def writer(num_shards, img, label, class_of_list):
    """Serialize `img` byte strings plus `label` into one TFRecord file.

    Args:
        num_shards: shard index, looked up in the module-level `dict`
            to get the flower-class name embedded in the file name.
        img: list of raw uint8 image byte strings.
        label: integer class label stored with every example.
        class_of_list: which split this is — 1: test, 2: validation,
            3: train.

    Raises:
        ValueError: if class_of_list is not 1, 2 or 3.  (The original
        code printed a message and called exit(); raising is recoverable
        and does not kill the whole process from library code.)
    """
    # Dispatch table replaces the original if/elif path chain.
    out_dirs = {
        1: "E:/picture/test",
        2: "E:/picture/validation",
        3: "E:/picture/train",
    }
    if class_of_list not in out_dirs:
        raise ValueError("unknown class_of_list %r (expected 1, 2 or 3)" % (class_of_list,))
    file = "%s/data.tfrecords-%s-of-%.5d" % (out_dirs[class_of_list], dict[num_shards], len(img))
    # Local renamed from `writer`, which shadowed this function's own name.
    record_writer = tf.python_io.TFRecordWriter(file)
    for data in img:
        example = tf.train.Example(features=tf.train.Features(feature={
            'label': _int64_feature(label),
            'image_raw': _bytes_feature(data)}))
        # Write one serialized Example to the TFRecord file.
        record_writer.write(example.SerializeToString())
    record_writer.close()
# Entry point: convert the raw flower images into TFRecord datasets.
def main():
    """Open a TensorFlow session and run the dataset conversion."""
    with tf.Session() as session:
        create_image_lists(session, TEST_PERCENTAGE, VALIDATION_PERCENTAGE)


if __name__ == '__main__':
    main()
