import tensorflow as tf
import numpy as np
# from matplotlib import pyplot as plt
# from tensorflow.keras import layers

# Parameters / hyper-parameters
num_classes = 2         # number of target classes
base_lr = 1e-3          # base learning rate
base_epochs = 60        # number of training epochs
Initial_Image_Size = (2048, 1536)   # size images are resized to on load (height, width)

Input_Image_Size = (512, 384)       # size fed to the network after cropping (height, width)
BATCH_SIZE = 32
# Input_Image_Size = (1024, 768)
# BATCH_SIZE = 16

# Raw strings for Windows paths: a plain literal would silently turn a
# directory starting with t/n/r etc. into a control character ('\t', '\n', ...).
# base_dir = r'D:\Engineer_workshop\SW station\work\JDEC\project data\initial data'
base_dir = r'C:\Temp\TF2.4 training\mine pictures'

# Data pipeline
def build_pipeline(data_dir, batch_size=BATCH_SIZE, is_train=False):
    """Load an image dataset from a directory of per-class sub-folders.

    A fixed seed and an 80/20 validation split keep the training and
    validation subsets consistent across separate calls.
    """
    subset = "training" if is_train else "validation"
    return tf.keras.preprocessing.image_dataset_from_directory(
        data_dir,
        labels='inferred',          # one class per sub-directory
        label_mode="int",           # integer labels (alternatives: categorical, binary, None)
        # class_names=None,
        color_mode="rgb",           # alternatives: grayscale, rgba
        batch_size=batch_size,
        image_size=Initial_Image_Size,
        shuffle=True,
        seed=42,                    # fixed seed keeps the split reproducible
        validation_split=0.2,
        subset=subset,
        # interpolation='bilinear',  # resize interpolation, default bilinear
        # follow_links=False
    )

# Load the training and validation sets
train_ds = build_pipeline(base_dir, is_train=True)
val_ds = build_pipeline(base_dir)

# Split the validation set into validation and test halves.
# Dataset.cardinality() replaces the deprecated
# tf.data.experimental.cardinality() (available since TF 2.3).
val_batches = val_ds.cardinality()
test_ds = val_ds.take(val_batches // 2)  # first 50% of the batches become the test set
val_ds = val_ds.skip(val_batches // 2)   # the remainder stays as the validation set
# element spec: ((None, 2048, 1536, 3), (None,)), dtypes (tf.float32, tf.int32)
print("initial train ds:" ,train_ds)
# print("initial train ds len: ", len(train_ds))

def print_shape(image):
    # Debug helper: prints the image tensor of a mapped dataset element.
    # NOTE(review): inside Dataset.map this runs in graph mode, so these
    # Python print() calls fire only once, when the function is traced —
    # not per batch. The function also returns None, so the mapped element
    # would become (None, y).
    print("initial train image: ", image)
    # print("initial train image shape: ", tf.shape(image))
    # print("initial train image shape: ", image.numpy())
    # NOTE(review): the dataset is batched, so tf.shape(image)[:2] is
    # (batch, height), not (height, width) — presumably unintended; confirm.
    original_shape = tf.shape(image)[:2]
    print("initial train image - length: ", original_shape[0])
    print("initial train image - width: ", original_shape[1])

# NOTE(review): the result of map() is discarded, so this line does NOT
# modify train_ds; it only triggers a one-off trace of print_shape.
train_ds.map(lambda x, y: (print_shape(x), y))

# Data augmentation
# Instantiate the pixel normalization applied to all splits (train/val/test).
# normalization_layer = tf.keras.layers.experimental.preprocessing.Rescaling(1./255)

from tensorflow.keras.layers import (
    RandomFlip,         # 随机翻转
    RandomRotation,     # 随机旋转
    RandomZoom,         # 随机缩放
    RandomContrast,     # 随机对比度调整
    # RandomBrightness,   # 随机亮度调整
    # RandomTranslation,  # 随机平移
    Rescaling,          # 归一化（非增强，但常配合使用）
    # RandomCrop,         # 随机裁剪
)

# Rescale pixel values to [0, 1]; applied to all splits (not an augmentation).
normalization_layer = Rescaling(1./255)
# Random augmentation pipeline; currently only random zoom is enabled.
data_augmentation = tf.keras.Sequential([
    # RandomFlip("horizontal_and_vertical"),
    # RandomRotation(factor=0.2),
    RandomZoom(height_factor=0.1,        # zoom up to ±10% vertically
               width_factor=0.1,         # and up to ±10% horizontally
               fill_mode='constant',     # pad uncovered areas with fill_value
               interpolation='bilinear',
               seed=None,
               fill_value=0.0),
    # RandomContrast(0.1),
])

def center_crop(image):
    """Crop the central Input_Image_Size region out of `image`.

    Accepts a single image (H, W, C) or a batch (N, H, W, C).

    Bug fix: the offsets were computed as
    (Input_Image_Size[i] - Input_Image_Size[i]) // 2, which is always 0,
    so the function cropped the TOP-LEFT corner instead of the center.
    The offsets are now derived from the actual image dimensions.
    """
    shape = tf.shape(image)
    height = shape[-3]  # height axis for both 3-D and 4-D inputs
    width = shape[-2]
    offset_height = (height - Input_Image_Size[0]) // 2  # vertical offset
    offset_width = (width - Input_Image_Size[1]) // 2    # horizontal offset

    # Perform the crop
    cropped_image = tf.image.crop_to_bounding_box(
        image,
        offset_height=offset_height,
        offset_width=offset_width,
        target_height=Input_Image_Size[0],
        target_width=Input_Image_Size[1]
    )
    return cropped_image

def ds_enhancement(datasets, is_train=False):
    """Normalize, optionally augment, crop, and performance-tune a dataset.

    Fixes:
    - cache() now runs BEFORE the random augmentation. Caching after it
      would freeze one set of random transforms and replay the identical
      "augmented" images every epoch, defeating the augmentation.
    - data_augmentation is called with training=True: Keras preprocessing
      layers are identity at inference time, and inside Dataset.map they
      are not in a training context by default.
    """
    # Normalize: scale pixel values to [0, 1].
    ds = datasets.map(lambda x, y: (normalization_layer(x), y))

    # Cache only the deterministic part of the pipeline.
    # NOTE(review): this caches full-size normalized images in memory;
    # use .cache(filename) instead if RAM becomes an issue.
    ds = ds.cache()

    # Apply random transforms to the training set only, re-drawn every epoch.
    if is_train:
        ds = ds.map(lambda x, y: (data_augmentation(x, training=True), y))

    # Crop every image to the network input size.
    ds = ds.map(lambda x, y: (center_crop(x), y))

    # Prefetch to overlap preprocessing with training.
    ds = ds.prefetch(buffer_size=tf.data.AUTOTUNE)
    return ds

train_ds = ds_enhancement(train_ds, is_train=True)
val_ds = ds_enhancement(val_ds)
test_ds = ds_enhancement(test_ds)

# Bug fix: the val/test prints were both mislabelled "enhanced train ds".
print("enhanced train ds: ", train_ds)
print("enhanced val ds: ", val_ds)
print("enhanced test ds: ", test_ds)
