"""Code to load the flower_photos dataset in the GPU build of TensorFlow.

The official TensorFlow CNN demo relies on tf-nightly APIs that the stable
GPU release does not ship, so this module re-implements them. Callers should
simply use `load_img_dataset_from_directory`.

To inspect a loaded image, use `showImgForPath`: it reads the image at the
given path, decodes it into a tensor, and displays it with pyplot.
"""

import tensorflow as tf
import pathlib
import matplotlib.pyplot as plt


def check_import():
    """Smoke-test helper: confirms this module can be imported and called."""
    return True


def read_path_and_decode_img(path, img_size, format="jpeg"):
    """Read the image file at *path* and decode it into a float tensor.

    Args:
        path (str): image file path.
        img_size (int64[2]): target (height, width) of the returned image.
        format (str): image format; "jpeg" and "png" are supported.

    Returns:
        A float tensor of shape (img_size[0], img_size[1], 3) with values
        scaled to [0, 1].

    Raises:
        ValueError: if `format` is not a supported image format.
    """
    raw_img = tf.io.read_file(path)
    if format == "jpeg":
        return decode_jpeg_to_tensor(raw_img, img_size)
    if format == "png":
        # Mirror the jpeg path: 3 channels, resize, scale to [0, 1].
        # (The docstring always advertised png support, but the original
        # implementation silently returned None for it.)
        img = tf.image.decode_png(raw_img, channels=3)
        img = tf.image.resize(img, img_size)
        img /= 255.0
        return img
    # Fail loudly instead of silently returning None on unknown formats.
    raise ValueError('`format` must be either "jpeg" or "png", '
                     'received: %s' % (format, ))


def decode_jpeg_to_tensor(raw_img, img_size):
    """Decode a raw JPEG byte string into a normalized float tensor.

    Args:
        raw_img: raw JPEG file contents (string/bytes tensor).
        img_size: target (height, width) of the decoded image.

    Returns:
        A float tensor resized to `img_size`, values scaled to [0, 1].
    """
    decoded = tf.image.decode_jpeg(raw_img, channels=3)
    resized = tf.image.resize(decoded, img_size)
    # Normalize pixel values from [0, 255] into [0, 1].
    return resized / 255.0


def showImgForPath(path, label, img_size):
    """Display the image stored at *path* with pyplot, titled with *label*.

    Args:
        path (str): image file path.
        label (str): label shown as the plot title.
        img_size (int64[2]): size the displayed image is resized to.
    """
    tensor = read_path_and_decode_img(path, img_size)
    showImgForTensor(tensor, label)


def showImgForTensor(img_tensor, label):
    """Display an image tensor with pyplot, using *label* as the title.

    Args:
        img_tensor: image tensor to display.
        label (str): label shown as the plot title.
    """
    plt.imshow(img_tensor)
    plt.grid(False)
    plt.xlabel('img')
    plt.title(label)
    # BUG FIX: `plt.show` was referenced without call parentheses, so the
    # figure was never actually rendered.
    plt.show()


def generatePathStrList(data_dir):
    """Collect every file path (as str) one level below *data_dir*'s subdirectories.

    Args:
        data_dir (pathlib.Path): dataset root; images are expected inside
            one-level-deep class subdirectories.

    Returns:
        list(str): paths of all entries matching `*/*`, in glob order.
    """
    # Expand the `<class>/<file>` glob into a list of Path objects.
    all_img_paths = list(data_dir.glob('*/*'))
    # BUG FIX: the debug prints below index element 0, which raised
    # IndexError when the directory was empty.
    if not all_img_paths:
        return []
    print("list type:", type(all_img_paths[0]))
    # Convert each Path object into its string form.
    all_img_paths = [str(path) for path in all_img_paths]
    # random.shuffle(all_img_paths)
    print("array type:", type(all_img_paths[0]))
    return all_img_paths


def generateLabelNames(data_dir):
    """Build a label-name -> index mapping from *data_dir*'s subdirectory names.

    Labels are the immediate subdirectory names, sorted alphabetically and
    indexed 0..n-1 in that order.

    Args:
        data_dir (pathlib.Path): dataset root with one subdirectory per class.

    Returns:
        tuple ({str: int}, list(str)): the name->index dict and the sorted
        label names.  (The old docstring only advertised the dict, but the
        function has always returned both.)
    """
    # `iterdir()` + `is_dir()` replaces the trailing-slash glob pattern,
    # whose behavior varies across Python versions; the existing is_dir
    # filter makes the two equivalent.
    label_names = sorted(entry.name for entry in data_dir.iterdir()
                         if entry.is_dir())
    label_to_index = {name: index for index, name in enumerate(label_names)}
    return label_to_index, label_names


def get_training_or_validation_split(samples, labels, validation_split,
                                     subset):
    """Split samples/labels by a validation fraction and return one part.

    Args:
        samples (list): the data items.
        labels (list): label of each item, parallel to `samples`.
        validation_split (Float): fraction reserved for the validation set;
            falsy (None / 0) means no split at all.
        subset (str): which part to return, "training" or "validation".

    Returns:
        tuple (samples, labels): the requested slice of both lists.

    Raises:
        ValueError: if `subset` is neither "training" nor "validation".
    """
    if not validation_split:
        return samples, labels

    num_val_samples = int(validation_split * len(samples))
    # BUG FIX: slice with a positive split index. The original used
    # `samples[:-num_val_samples]`, which on tiny datasets where
    # num_val_samples rounds down to 0 becomes `[:-0]` and wrongly
    # produces an EMPTY training set.
    split_at = len(samples) - num_val_samples
    if subset == 'training':
        print('Using %d files for training.' % (split_at, ))
        samples = samples[:split_at]
        labels = labels[:split_at]
    elif subset == 'validation':
        print('Using %d files for validation.' % (num_val_samples, ))
        samples = samples[split_at:]
        labels = labels[split_at:]
    else:
        raise ValueError('`subset` must be either "training" '
                         'or "validation", received: %s' % (subset, ))
    return samples, labels


def load_StrPaths_and_LabelIndexs(data_dir):
    """Load every image path under *data_dir* along with its numeric label.

    Args:
        data_dir (pathlib.Path): dataset root (one subdirectory per class).

    Returns:
        tuple (list(str), list(int), list(str)): image paths, the label
        index of each path, and the sorted class names.  (The old docstring
        incorrectly advertised a 2-tuple.)
    """
    all_img_paths = generatePathStrList(data_dir)
    # Map each class-directory name to an integer index.
    label_to_index, class_names = generateLabelNames(data_dir)
    # Label every image with the index of its parent (class) directory.
    all_img_labels = [
        label_to_index[pathlib.Path(path).parent.name]
        for path in all_img_paths
    ]
    return all_img_paths, all_img_labels, class_names


def paths_and_labels_to_dataset(paths, labels, img_size):
    """Turn parallel path/label lists into a zipped (image, label) dataset.

    Args:
        paths (str[]): image file paths.
        labels (int[]): integer label per path.
        img_size (int[2]): size each decoded image is resized to.

    Returns:
        A tf.data.Dataset yielding (image_tensor, label) pairs.
    """
    autotune = tf.data.experimental.AUTOTUNE
    # Decode images in parallel while the pipeline is being consumed.
    img_ds = (tf.data.Dataset.from_tensor_slices(paths)
              .map(lambda p: read_path_and_decode_img(p, img_size),
                   num_parallel_calls=autotune)
              .prefetch(autotune))
    label_ds = (tf.data.Dataset
                .from_tensor_slices(tf.cast(labels, tf.int64))
                .prefetch(autotune))
    return tf.data.Dataset.zip((img_ds, label_ds))


def load_img_dataset_from_directory(data_dir,
                                    validation_split,
                                    subset,
                                    seed,
                                    img_size=(256, 256),
                                    batch_size=32):
    """Load the images under *data_dir* into a batched tf.data.Dataset.

    The file list is first split by `validation_split`, then the part
    selected by `subset` is returned: with validation_split=0.2,
    subset="training" yields the first 80% of the data and
    subset="validation" the last 20%.

    Args:
        data_dir (pathlib.Path): dataset root directory.
        validation_split (Float): fraction held out for validation.
        subset (str): which part to return, "training" or "validation".
        seed (int64): seed used to shuffle the resulting dataset.
        img_size (int64[2]): size of the images in the result.
            (Default changed from a mutable list to an equivalent tuple —
            mutable default arguments are shared across calls.)
        batch_size (int64): batch size.

    Returns:
        A shuffled, batched, cached, prefetched tf.data.Dataset of
        (image, label) pairs, with `class_names` attached as an attribute.
    """
    # Load paths and their labels.
    paths, labels, class_names = load_StrPaths_and_LabelIndexs(data_dir)
    # Keep only the training or validation portion.
    paths, labels = get_training_or_validation_split(paths, labels,
                                                     validation_split, subset)

    # Decode the paths into an (image, label) dataset.
    img_and_label_ds = paths_and_labels_to_dataset(paths, labels, img_size)
    # Shuffle.
    ds = img_and_label_ds.shuffle(buffer_size=batch_size * 8, seed=seed)
    # Repeat the data.
    # ds = ds.repeat()
    # Batch.
    ds = ds.batch(batch_size)
    # NOTE(review): cache() comes *after* shuffle, so the first epoch's
    # shuffled order is frozen into the cache file and later epochs replay
    # the identical order — confirm this is intended before reordering.
    ds = ds.cache(filename='./cache.tf-data')
    AUTOTUNE = tf.data.experimental.AUTOTUNE
    ds = ds.prefetch(buffer_size=AUTOTUNE)
    ds.class_names = class_names
    return ds


# data_root_origin = tf.keras.utils.get_file(
#     origin='https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz', fname='flower_photos', untar=True)
# data_root = pathlib.Path(data_root_origin)