import tensorflow as tf
from matplotlib import pyplot as plt
from tensorflow.keras import layers

from custom_datasets import CustomDatasets
from custom_settings import ModelConfigs

# Toggle for the second (fine-tuning) training phase below.
Fine_Tuning_En = True

model_configs = ModelConfigs()
custom_datasets = CustomDatasets()

# Build the train / validation / test splits.
train_ds = custom_datasets.build_train_ds()
val_ds, test_ds = custom_datasets.build_val_test_ds()
# shapes: ((None, 1024, 768, 3), (None,)), types: (tf.float32, tf.int32)
for tag, ds in (("train_batches", train_ds),
                ("val_batches", val_ds),
                ("test_batches", test_ds)):
    print(f"{tag}:", ds)

# Dataset preprocessing (normalization) — presumably applied in place by
# CustomDatasets; TODO confirm it covers val/test as well.
custom_datasets.ds_preprocessing()

# 加载基础模型
# Load the backbone for transfer learning.
# Alternatives tried: MobileNetV2, VGG16, InceptionV3.
#
# BUG FIX: the original passed weights=None, i.e. a *randomly initialized*
# backbone, and then froze it below. A frozen random feature extractor
# defeats the whole transfer-learning setup (phase 1 would train the head
# on meaningless features). Load the pretrained ImageNet weights instead;
# with include_top=False this works for any input_shape.
# NOTE(review): ResNet50V2 expects inputs preprocessed to [-1, 1] — confirm
# that custom_datasets.ds_preprocessing() matches this convention.
base_model = tf.keras.applications.ResNet50V2(
    include_top=False,           # drop the ImageNet classification head
    weights='imagenet',          # was: weights=None (see note above)
    input_shape=custom_datasets.input_image_size + (3,),
    pooling=None,                # pooling is done by the custom head below
)
# Freeze the backbone for the first training phase (feature extraction).
base_model.trainable = False

# 添加自定义层
model = tf.keras.Sequential([  # CBAPD
    layers.Input(shape=custom_datasets.input_image_size + (3,)),
    base_model,
    layers.GlobalAveragePooling2D(), # 全局池化操作
    # layers.GlobalMaxPooling2D(),
    # layers.MaxPooling2D()
    layers.Dense(256, activation='relu'),
    layers.Dropout(0.5), # 正则化技术之一，随机丢弃神经元
    layers.Dense(model_configs.num_classes, activation='softmax')
])

# Compile for the feature-extraction phase.
# Labels are integer class ids, hence the sparse_* loss.
head_optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3, clipnorm=1.0)
model.compile(
    optimizer=head_optimizer,
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy'],
)

# Callbacks: keep only the checkpoint of the best model (by val loss).
callbacks = [
    # tf.keras.callbacks.EarlyStopping(patience=5, restore_best_weights=True),
    tf.keras.callbacks.ModelCheckpoint('best_model.h5', save_best_only=True),
]

# Phase 1: train the classification head on augmented training batches.
augmented_train_ds = custom_datasets.ds_enhancement(train_ds, is_train=True)
history = model.fit(
    augmented_train_ds,
    epochs=50,
    validation_data=val_ds,
    callbacks=callbacks,
)

if Fine_Tuning_En:
    # Phase 2: fine-tune the last 20 backbone layers.
    #
    # BUG FIX: the original only flipped `layer.trainable = True` on the
    # last 20 sublayers while `base_model.trainable` was still False.
    # Keras returns an empty trainable_weights list for a non-trainable
    # container regardless of sublayer flags, so nothing was actually
    # unfrozen and this phase retrained only the head. The correct
    # pattern is: unfreeze the whole backbone first, then re-freeze the
    # layers that must stay fixed.
    base_model.trainable = True
    for layer in base_model.layers[:-20]:
        layer.trainable = False
    for layer in base_model.layers[-20:]:
        # Keep BatchNormalization frozen so its moving statistics are not
        # destroyed by the small fine-tuning batches.
        if isinstance(layer, layers.BatchNormalization):
            layer.trainable = False

    # Recompile with a much smaller learning rate (recompiling is required
    # for the new trainable flags to take effect).
    model.compile(
        optimizer=tf.keras.optimizers.SGD(1e-5, momentum=0.9),
        # optimizer=tf.keras.optimizers.Adam(learning_rate=1e-5, clipnorm=1.0),
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy']
        )

    # Continue training (fine-tuning phase).
    history = model.fit(
        custom_datasets.ds_enhancement(train_ds, is_train=True),
        epochs=50,
        validation_data=val_ds,
        callbacks=callbacks
    )

# Evaluate on the held-out test set.
test_loss, test_acc = model.evaluate(test_ds)
print(f'Test accuracy: {test_acc}')

# Optionally persist the final model.
# model.save('my_multiclass_model.h5')

# Plot training vs. validation loss.
# NOTE(review): `history` holds only the most recent fit() call, so when
# fine-tuning ran, phase-1 curves are not included here.
loss_curves = history.history

plt.figure(figsize=(10, 5))
for key, label in (('loss', 'Training Loss'), ('val_loss', 'Validation Loss')):
    plt.plot(loss_curves[key], label=label)
plt.title('Training and Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.grid(True)
plt.show()


