import os
import re
import math
import sys
import tensorflow.compat.v1 as tf

def int64_feature(values):
  """Wrap an int (or a tuple/list of ints) in a tf.train.Feature holding an Int64List."""
  wrapped = values if isinstance(values, (tuple, list)) else [values]
  return tf.train.Feature(int64_list=tf.train.Int64List(value=wrapped))

def bytes_feature(values):
  """Wrap a bytes value (or a tuple/list of bytes values) in a tf.train.Feature.

  Generalized to mirror int64_feature: a single value is wrapped in a
  one-element list exactly as before (backward compatible), while a
  tuple/list is now passed through instead of raising inside BytesList.
  """
  if not isinstance(values, (tuple, list)):
    values = [values]
  return tf.train.Feature(bytes_list=tf.train.BytesList(value=values))

#将图片信息转换为tfrecords可以保存的序列化信息
#serialize one image's information into a tf.train.Example for TFRecord storage
def image_to_tfexample(split_name, image_data, image_format, height, width, class_id = 1, class_name = '', filename = ''):
    '''
    :param split_name: "train", "val" or "test"
    :param image_data: raw (encoded) image bytes
    :param image_format: image format, e.g. b'jpg'
    :param height: image height in pixels
    :param width: image width in pixels
    :param class_id: numeric class label (omitted for the "test" split)
    :param class_name: class name; for the "test" split it is stored under
        'image/filename'
    :filename: image label or image name; when split_name is "test" it is
        the image name, otherwise the image label
    :return: a tf.train.Example
    '''
    # Features shared by every split.
    feature = {
        'image/height': int64_feature(height),
        'image/width': int64_feature(width),
        'image/encoded': bytes_feature(image_data),
        'image/format': bytes_feature(image_format),
        'image/source_id': bytes_feature(str(filename).encode('utf8')),
    }
    if split_name == "test":
        # NOTE(review): for the test split the *class name* is written to
        # 'image/filename' — looks intentional, but verify against readers.
        feature['image/filename'] = bytes_feature(str(class_name).encode('utf8'))
    else:
        feature['image/label'] = int64_feature(class_id)
        feature['image/filename'] = bytes_feature(str(filename).encode('utf8'))
    return tf.train.Example(features=tf.train.Features(feature=feature))

def _convert_tfrecord_dataset(split_name, filenames, label_name_to_id, dataset_dir, tfrecord_filename, _NUM_SHARDS):
    '''
    Convert a list of image files into sharded TFRecord files.

    :param split_name: "train", "eval" or "test"
    :param filenames: list of image file paths
    :param label_name_to_id: mapping from label name to numeric label
    :param dataset_dir: directory the TFRecord files are written into
    :param tfrecord_filename: filename prefix of the output files
    :param _NUM_SHARDS: number of shard files the dataset is split into
    :return: None
    '''
    assert split_name in ['train', 'eval','test']
    num_images = len(filenames)
    # Average number of images per shard file (the last shard may be smaller).
    num_per_shard = int(math.ceil(num_images / float(_NUM_SHARDS)))
    # Compile the class-detection patterns once instead of per image.
    positive_re = re.compile(r'(.*)positive(.*?)', re.M | re.I)
    negative_re = re.compile(r'(.*)negative(.*?)', re.M | re.I)
    with tf.Graph().as_default():
        image_reader = ImageReader()
        with tf.Session('') as sess:
            for shard_id in range(_NUM_SHARDS):
                # Name of this shard's TFRecord file.
                output_filename = _get_dataset_filename(dataset_dir, split_name, shard_id, tfrecord_filename = tfrecord_filename, _NUM_SHARDS = _NUM_SHARDS)
                # Write the shard.
                with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
                    start_ndx = shard_id * num_per_shard
                    end_ndx = min((shard_id+1) * num_per_shard, num_images)
                    for i in range(start_ndx, end_ndx):
                        # Update the console progress indicator.
                        sys.stdout.write('\r>> Converting image %d/%d shard %d' % (
                            i+1, num_images, shard_id))
                        sys.stdout.flush()
                        # Read the raw image bytes; use a context manager so the
                        # file handle is closed (the original leaked it).
                        with tf.gfile.FastGFile(filenames[i], 'rb') as f:
                            image_data = f.read()
                        # Decode once to obtain the image dimensions.
                        height, width = image_reader.read_image_dims(sess, image_data)
                        # Image name from its path.
                        img_name = os.path.basename(filenames[i])
                        # Derive the class name: default to the first dot-separated
                        # token, overridden by a positive/negative path match.
                        class_name = img_name.split(".")[0]
                        if positive_re.match(filenames[i]):
                          class_name = 'positive'
                        elif negative_re.match(filenames[i]):
                          class_name = 'negative'
                        class_id = label_name_to_id[class_name]
                        example = image_to_tfexample(
                            split_name, image_data, b'jpg', height, width, class_id, class_name, img_name)
                        tfrecord_writer.write(example.SerializeToString())
                sys.stdout.write('\n')
                sys.stdout.flush()

class ImageReader(object):
  """Helper class that provides TensorFlow image coding utilities."""

  def __init__(self):
    # Placeholder fed with raw JPEG bytes, decoded into an RGB tensor.
    self._jpeg_bytes = tf.placeholder(dtype=tf.string)
    self._decoded = tf.image.decode_jpeg(self._jpeg_bytes, channels=3)

  def read_image_dims(self, sess, image_data):
    """Return (height, width) of the encoded JPEG bytes `image_data`."""
    height, width, _ = self.decode_jpeg(sess, image_data).shape
    return height, width

  def decode_jpeg(self, sess, image_data):
    """Decode raw JPEG bytes into an HxWx3 array via the prepared graph."""
    image = sess.run(self._decoded,
                     feed_dict={self._jpeg_bytes: image_data})
    # Sanity-check: decode_jpeg(channels=3) must yield a 3-channel image.
    assert len(image.shape) == 3
    assert image.shape[2] == 3
    return image

def _get_dataset_filename(dataset_dir, split_name, shard_id = 1, tfrecord_filename = None, _NUM_SHARDS = 1):
  name = ''
  if tfrecord_filename:
    name = tfrecord_filename + '_' + name
  if split_name:
    name = name + '_' + split_name
  if _NUM_SHARDS > 1:
    name = name + '_' + str(shard_id) + '_of_' + str(_NUM_SHARDS)
  return dataset_dir + '/' + name + '.tfrecord'

def _get_dateset_imgPaths(dataset_dir, split_name):
  #获取文件所在路径
  dataset_dir = os.path.join(dataset_dir, split_name)
  cat_img_paths = []
  dog_img_paths = []
  #遍历目录下的所有图片
  for filename in os.listdir(dataset_dir):
      #获取文件的路径
      file_path = os.path.join(dataset_dir, filename)
      if file_path.endswith("jpg") and os.path.exists(file_path):
          #获取类别的名称
          label_name = filename.split(".")[0]
          if label_name == "cat":
              cat_img_paths.append(file_path)
          elif label_name == "dog":
              dog_img_paths.append(file_path)
      return cat_img_paths,dog_img_paths

def _get_pn_imgPaths(dataset_dir, split_name):
  #获取文件所在路径
  split_dir = os.path.join(dataset_dir, split_name)
  paths = []
  for sub_dir in ['negative', 'positive']:
    data_dir = os.path.join(split_dir, sub_dir)
    #遍历目录下的所有图片
    for filename in os.listdir(data_dir):
        #获取文件的路径
        file_path = os.path.join(data_dir, filename)
        if file_path.endswith("jpg") and os.path.exists(file_path):
            paths.append(file_path)
  return paths

# Root directory of the dataset on disk.
dataset_dir_path = "E:/workspace/web/www/zzd/daelui-tensorflow/crack_detection/dataset/Concrete Crack Images for Classification"
# Mapping from class-label name to numeric label.
#label_name_to_num = {"cat":0,"dog":1}
label_name_to_num = {"positive": 1, "negative": 2}
label_num_to_name = {value:key for key,value in label_name_to_num.items()}
# Fraction of the whole dataset used as the validation set.
val_size = 0.2
batch_size = 1

#generate the TFRecord files
def generate_tfreocrd():
    """Build the train and eval TFRecord files for the positive/negative
    crack-detection dataset.

    Gathers image paths from the "train" and "eval" sub-directories of
    `dataset_dir_path` and writes one TFRecord file per split.

    The obsolete commented-out cat-vs-dog splitting code was removed; the
    "catVSdog" file prefix is kept so existing output file names do not
    change.
    """
    train_img_paths = _get_pn_imgPaths(dataset_dir_path, "train")
    eval_img_paths = _get_pn_imgPaths(dataset_dir_path, "eval")
    # Write the training split as a single-shard TFRecord file.
    _convert_tfrecord_dataset("train", train_img_paths, label_name_to_num, dataset_dir_path, "catVSdog", 1)
    # Write the validation split as a single-shard TFRecord file.
    _convert_tfrecord_dataset("eval", eval_img_paths, label_name_to_num, dataset_dir_path, "catVSdog", 1)

# Script entry point: regenerate the TFRecord files.
if __name__ == '__main__':
  generate_tfreocrd()