import tensorflow as tf
from tensorflow import keras
from sklearn.model_selection import train_test_split
import matplotlib
matplotlib.use('Agg')  # 添加在导入 pyplot 之前
import matplotlib.pyplot as plt# Helper libraries
from pandas import read_csv
import numpy as np
import pandas as pd
import cv2

print(tf.__version__)

array_of_img = []  # accumulator for decoded image arrays (reused for the test set later)
image_size = 100   # every image is resized to image_size x image_size

# Load the training images (files are named 1.jpg .. 18000.jpg).
directory_name = "train/train/"
for i in range(1, 18001):
    img = cv2.imread(directory_name + str(i) + ".jpg")
    if img is None:
        # cv2.imread signals failure by returning None, not by raising;
        # fail fast here instead of crashing later with a cryptic TypeError.
        raise FileNotFoundError(f"could not read {directory_name}{i}.jpg")
    # Resize on uint8 first, then normalize to [0, 1] as float32.
    # The original normalized first, which promoted the whole pipeline to
    # float64 and doubled the size of the final array (~4.3 GB) for no benefit.
    img = cv2.resize(img, (image_size, image_size))
    array_of_img.append(img.astype(np.float32) / 255.0)
train_images = np.array(array_of_img)
array_of_img = []  # drop per-image references to save memory

# Load the training labels from the second column of train.csv.
dataframe = read_csv('train.csv')
train_labels = dataframe.values[:, 1].astype('int8')
del dataframe  # release the frame once the labels are extracted

# Class index -> name (index 0 = 'male', index 1 = 'female').
class_names = ['male', 'female']

# Hold out 10% of the data for validation; fixed seed for reproducibility.
X_train, X_val, Y_train, Y_val = train_test_split(
    train_images, train_labels, test_size=0.1, random_state=3
)

# Data-augmentation pipeline. The InputLayer pins the input shape so the
# full model below can be built before it sees any data.
data_augmentation = keras.Sequential()
data_augmentation.add(keras.layers.InputLayer(input_shape=(image_size, image_size, 3)))
data_augmentation.add(keras.layers.GaussianNoise(0.1))
data_augmentation.add(keras.layers.RandomFlip("horizontal"))
data_augmentation.add(keras.layers.RandomTranslation(0.1, 0.1))
data_augmentation.add(keras.layers.RandomRotation(0.1))
data_augmentation.add(keras.layers.RandomZoom(0.1))

# CNN: three convolutional stages (two Conv2D layers, batch norm, max-pool,
# dropout each), followed by a dense head ending in a 2-way softmax.
model = keras.Sequential()
model.add(data_augmentation)

conv_args = dict(kernel_size=(3, 3), padding='same', activation='relu')
# Filter widths per stage: (25, 50) -> (50, 100) -> (100, 200).
for filters_a, filters_b in ((25, 50), (50, 100), (100, 200)):
    model.add(keras.layers.Conv2D(filters_a, **conv_args))
    model.add(keras.layers.Conv2D(filters_b, **conv_args))
    model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.MaxPooling2D())
    model.add(keras.layers.Dropout(0.2))

# Classification head.
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(50, activation='relu'))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Dropout(0.2))
model.add(keras.layers.Dense(100, activation='relu'))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Dense(200, activation='relu'))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Dropout(0.2))
model.add(keras.layers.Dense(2, activation='softmax'))

model.summary()

# Compile: integer labels (0/1) against the 2-unit softmax output.
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(),
              metrics=['accuracy'])

# Weight-file paths (Keras requires the ".weights.h5" suffix for save_weights).
save_weights = 'save_weights.weights.h5'  # best weights from a previous run
last_weights = 'last_weights.weights.h5'  # weights at the end of this run
best_weights = 'best_weights.weights.h5'  # best validation weights of this run

# Callbacks (renamed to snake_case per convention).
checkpoint = keras.callbacks.ModelCheckpoint(
    best_weights, monitor='val_accuracy', save_best_only=True, mode='max', verbose=1)
reduce_lr = keras.callbacks.ReduceLROnPlateau(
    monitor='val_loss', factor=0.1, patience=3, verbose=0, mode='auto',
    min_delta=0.0001, cooldown=0, min_lr=0)
early_stopping = keras.callbacks.EarlyStopping(
    monitor='val_loss', patience=50, verbose=1, mode='auto')
# NOTE(review): reduce_lr and early_stopping are configured but NOT passed to
# fit() — only the checkpoint is active. Add them to this list if LR decay /
# early stopping is actually wanted.
callbacks = [checkpoint]

# Train; hist.history feeds the plots below.
hist = model.fit(X_train, Y_train, epochs=50, validation_data=(X_val, Y_val), callbacks=callbacks)

# Save the last-epoch weights; the best-validation weights were already
# written to best_weights by the ModelCheckpoint callback.
model.save_weights(last_weights)

# Plot training curves. The Agg backend selected at the top of the file is
# non-interactive, so plt.show() would silently display nothing and the plot
# would be lost — write the figure to disk instead.
plt.figure()
acc = hist.history['accuracy']
val_acc = hist.history['val_accuracy']
loss = hist.history['loss']
val_loss = hist.history['val_loss']
epochs = range(len(acc))

plt.plot(epochs, acc, 'r', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.plot(epochs, loss, 'g', label='Training loss')
plt.plot(epochs, val_loss, 'y', label='Validation loss')
plt.title('Training and validation accuracy & loss')
plt.legend()
plt.savefig('training_history.png')  # plt.show() is a no-op under Agg
plt.close()

# Load previously saved weights for inference (pick whichever set is wanted).
# model.load_weights(best_weights)  # alternative: best weights of this run
# NOTE(review): 'save_weights.weights.h5' must already exist from an earlier
# run — load_weights raises if the file is missing. TODO confirm it is present.
model.load_weights(save_weights)  # previously saved weights that reproduce the best result
# Free the training arrays before loading the test set.
del train_images
del train_labels

# Load the test images (files are named 18001.jpg .. 23708.jpg).
directory_name = "test/test/"
for i in range(18001, 23709):
    img = cv2.imread(directory_name + str(i) + ".jpg")
    if img is None:
        # cv2.imread returns None on a missing/corrupt file instead of raising.
        raise FileNotFoundError(f"could not read {directory_name}{i}.jpg")
    # Resize on uint8 first, then normalize to float32 in [0, 1];
    # normalizing first forced a float64 pipeline and doubled memory use.
    img = cv2.resize(img, (image_size, image_size))
    array_of_img.append(img.astype(np.float32) / 255.0)
test_images = np.array(array_of_img)
del array_of_img  # release the per-image references

# Predict class probabilities, take the arg-max class per row, and write the
# result into the 'label' column of the submission template.
# NOTE(review): assumes test.csv rows are in the same order as images
# 18001..23708 — verify against the template file.
class_probs = model.predict(test_images)
predicted = class_probs.argmax(axis=1)

submission_frame = pd.read_csv('test.csv')
submission_frame['label'] = predicted
submission_frame.to_csv('submission.csv', index=False)