# Experiment: building a private (custom) image dataset for training.
'''
方法1
import tensorflow as tf
import pathlib
import os

# 设置路径和参数
base_dir = 'C:\Temp\TF2.4 training\splited data'  # 数据根目录
train_dir = os.path.join(base_dir, 'train')
valid_dir = os.path.join(base_dir, 'val')
test_dir = os.path.join(base_dir, 'test')

BATCH_SIZE = 32
# IMG_SIZE = (2048, 1536)  # 统一调整图像尺寸
IMG_SIZE = (1024, 768)

# 加载数据集
train_dataset = tf.keras.preprocessing.image_dataset_from_directory(
    train_dir,
    batch_size=BATCH_SIZE,
    image_size=IMG_SIZE,
    shuffle=True,  # 训练集建议打乱
    seed=123
)

valid_dataset = tf.keras.preprocessing.image_dataset_from_directory(
    valid_dir,
    batch_size=BATCH_SIZE,
    image_size=IMG_SIZE,
    shuffle=True,  # 验证集可打乱
    seed=123
)

test_dataset = tf.keras.preprocessing.image_dataset_from_directory(
    test_dir,
    batch_size=BATCH_SIZE,
    image_size=IMG_SIZE,
    shuffle=False  # 测试集不建议打乱
)

# 获取类别名称（自动从文件夹名推断）
class_names = train_dataset.class_names
print("Class names:", class_names)

# # 性能优化：预取数据
# train_dataset = train_dataset.prefetch(buffer_size=BATCH_SIZE)
# valid_dataset = valid_dataset.prefetch(buffer_size=BATCH_SIZE)
# test_dataset = test_dataset.prefetch(buffer_size=BATCH_SIZE)
'''

# import tensorflow as tf
# import pathlib
#
# # 加载图片路径和标签
# data_root = pathlib.Path('C:\Temp\TF2.4 training\dataset_root')
# all_image_paths = list(data_root.glob('*/*'))
# all_image_paths = [str(path) for path in all_image_paths]  # 转为字符串路径
#
# # 创建标签映射
# label_names = sorted(item.name for item in data_root.glob('*/') if item.is_dir())
# print(label_names)
#
# label_to_index = dict((name, index) for index, name in enumerate(label_names))
# all_image_labels = [label_to_index[pathlib.Path(path).parent.name] for path in all_image_paths]
# print(all_image_labels)

# 创建Dataset
# ds = tf.data.Dataset.from_tensor_slices((all_image_paths, all_image_labels))

# # 定义预处理函数
# def load_and_preprocess_image(path, label):
#     image = tf.io.read_file(path)
#     image = tf.image.decode_jpeg(image, channels=3)
#     image = tf.image.resize(image, [128, 128])  # 调整大小
#     image = image / 255.0  # 归一化
#     return image, label
#
# # 应用预处理
# dataset = ds.map(load_and_preprocess_image).batch(32)  # 分批次


import tensorflow as tf

BATCH_SIZE = 32  # images per batch
# IMG_SIZE = (2048, 1536)  # original full resolution
IMG_SIZE = (1024, 768)  # (height, width) every image is resized to

# Raw string: the original literal relied on '\T' and '\s' passing through
# unchanged, which emits invalid-escape warnings (and will become a
# SyntaxError in future Python). The raw string has the identical value.
base_dir = r'C:\Temp\TF2.4 training\splited dataset\train'

# 2. Data augmentation
## Option 1: Sequential augmentation layers
# augmentation = tf.keras.Sequential([
#     tf.keras.layers.RandomFlip("horizontal"),
#     tf.keras.layers.RandomRotation(0.2),
# ])

## Option 2: ImageDataGenerator
# from tensorflow.keras.preprocessing.image import ImageDataGenerator
#
# datagen = ImageDataGenerator(
#     rotation_range=20,          # 随机旋转 ±20度
#     horizontal_flip=True,       # 水平翻转
#     zoom_range=0.2,             # 随机缩放 [0.8, 1.2]
#     rescale=1./255              # 归一化
# )
# train_generator = datagen.flow_from_directory(
#     'train_dir/',              # 从目录加载
#     target_size=(150, 150),
#     batch_size=32
# )
# model.fit(train_generator, steps_per_epoch=100)

# Data pipeline: load a class-labelled image directory, split, cache, prefetch.
def build_pipeline(data_dir, batch_size=BATCH_SIZE, is_train=False):
    """Build a batched tf.data pipeline from a directory of labelled images.

    An 80/20 split is derived from the same directory: calls with
    is_train=True receive the "training" subset, all other calls the
    "validation" subset. The split is only disjoint and reproducible
    because every call uses the same seed and shuffle settings, as Keras
    requires for validation_split.

    Args:
        data_dir: Root directory with one sub-directory per class.
        batch_size: Number of images per batch.
        is_train: Select the training subset when True, else validation.

    Returns:
        A batched tf.data.Dataset, cached in memory and prefetched.
    """
    ds = tf.keras.preprocessing.image_dataset_from_directory(
        data_dir,
        validation_split=0.2,
        subset="training" if is_train else "validation",
        seed=42,  # must match across calls so the two subsets are disjoint
        image_size=IMG_SIZE,
        shuffle=True,
        batch_size=batch_size)
    # (Removed leftover debug print(ds) that ran on every call.)

    # if is_train:
    #     # Apply augmentation directly to the dataset (it could also be
    #     # embedded as layers inside the model).
    #     ds = ds.map(
    #         lambda x, y: (augmentation(x, training=True), y),
    #         num_parallel_calls=tf.data.AUTOTUNE)

    # NOTE(review): cache() with no filename keeps decoded batches in RAM;
    # at 1024x768 RGB this can be very large — consider cache(filename)
    # if memory becomes tight.
    return ds.cache().prefetch(tf.data.AUTOTUNE)

# Build both subsets from the same root directory; build_pipeline's
# validation_split (with a fixed seed) keeps the two subsets disjoint.
train_ds = build_pipeline(base_dir, is_train=True)  # "training" subset (80%)
val_ds = build_pipeline(base_dir)  # "validation" subset (20%)

# Sanity check: print the element spec of each dataset.
print(train_ds)
print(val_ds)