import tensorflow as tf
import numpy as np

BATCH_SIZE = 1
# IMG_SIZE = (2048, 1536)  # original uniform resize target
IMG_SIZE = (1024, 768)  # (height, width) every image is resized to

# Raw string: in a plain literal the Windows backslashes form invalid escape
# sequences ("\E", "\p", ...) — a SyntaxWarning on Python 3.12+ and slated to
# become an error. The resulting value is byte-identical to the original.
base_dir = r'D:\Engineer_workshop\SW station\work\JDEC\project data\initial data'

def build_pipeline(data_dir, batch_size=BATCH_SIZE, is_train=False):
    """Build a batched image dataset from a directory of class subfolders.

    Args:
        data_dir: Root directory; each immediate subdirectory is one class.
        batch_size: Number of images per batch.
        is_train: If True, return the 80% "training" subset; otherwise the
            20% "validation" subset. The fixed seed keeps the two subsets
            disjoint across calls.

    Returns:
        A ``tf.data.Dataset`` yielding ``(images, integer_labels)`` batches,
        with images resized to ``IMG_SIZE``.
    """
    subset = "training" if is_train else "validation"
    return tf.keras.preprocessing.image_dataset_from_directory(
        data_dir,
        labels='inferred',   # label = index of the containing subdirectory
        label_mode="int",    # integer class ids (alternatives: categorical/binary/None)
        # NOTE(review): an earlier comment claimed grayscale here, but the
        # actual mode is 3-channel RGB — confirm which one is intended.
        color_mode="rgb",
        batch_size=batch_size,
        image_size=IMG_SIZE,
        shuffle=True,
        seed=42,             # same seed for both subsets -> consistent split
        validation_split=0.2,
        subset=subset,
    )

# Load the training (80%) and validation (20%) splits from the same root.
train_ds = build_pipeline(base_dir, is_train=True)  # 53 batches
val_ds = build_pipeline(base_dir)

# Split the validation set in half: first half -> test set, rest -> validation.
# Dataset.cardinality() supersedes the deprecated
# tf.data.experimental.cardinality(); both return the same scalar tensor.
# NOTE(review): take/skip assumes val_ds yields batches in a fixed order every
# iteration; if the validation subset reshuffles between epochs the two halves
# could overlap — confirm the shuffle behaviour of the "validation" subset.
val_batches = val_ds.cardinality()
test_ds = val_ds.take(val_batches // 2)  # 50% of the batches as test set  # 7
val_ds = val_ds.skip(val_batches // 2)   # remaining batches as validation # 7
print(val_ds)

# Peek at a single training batch as a sanity check.
x_train, y_train = next(iter(train_ds))
print(x_train)
print(y_train)

# def generateds(datasets):
#     x, y_ = [], []
#     for i in datasets:

        # value = content.split()
        # img_path = path + value[0]          # build the image file path
        # img = Image.open(img_path)          # load the image as a PIL object
        # image = img.resize((32, 24))
        # img = np.array(img.convert('L'))    # weighted-average RGB channels into single-channel grayscale
        # img = img / 255.                    # normalize pixel values to [0, 1]
        # x.append(img)
        # y_.append(value[1])
        # print('loading :' + content)

    # x = np.array(x)
    # y_ = np.array(y_)
    # y_ = y_.astype(np.int64)
    # print(x)
    # print(y_)
    # return x, y_