# pip install tensorflow -i https://mirrors.aliyun.com/pypi/simple/
import os

# Reduce TensorFlow's C++ log output; set BEFORE tensorflow is imported
# so the setting takes effect.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
from skimage import io, transform  # image reading and resizing
import glob  # file-path pattern matching
import tensorflow as tf  # tensorflow/keras for building the network
from tensorflow.keras import layers
from tensorflow.keras import regularizers  # L2 weight regularization
import numpy as np  # numerical array handling

# Dataset root: one sub-folder per flower class, each holding .jpg images
# (see read_img below).
path = "D:/项目/python/拍图识花/flower_photos/"
# Where the trained model is written (native Keras format, .keras extension)
model_path = "D:/项目/python/拍图识花/flower_model/flower_model.keras"

# Every image is resized to w x h with c channels (3 = RGB)
w = 100
h = 100
c = 3


def read_img(path, width=w, height=h):
    """Load all .jpg images from the class sub-folders of *path*.

    Each immediate sub-directory of *path* is one class. Sub-directories
    are enumerated in sorted order so the folder -> label-index mapping
    is deterministic across runs and operating systems (os.listdir order
    is otherwise arbitrary, which would silently scramble class indices).

    Parameters
    ----------
    path : str
        Dataset root directory.
    width, height : int
        Target size each image is resized to. Default to the module-level
        w / h constants, so the original one-argument call keeps working.

    Returns
    -------
    (images, labels)
        images: float32 ndarray of shape (N, width, height, channels);
        labels: int32 ndarray of shape (N,).
    """
    # Sorted for a deterministic, reproducible class-index assignment.
    cate = sorted(
        os.path.join(path, x)
        for x in os.listdir(path)
        if os.path.isdir(os.path.join(path, x))
    )
    imgs = []
    labels = []
    for idx, folder in enumerate(cate):
        # Every .jpg directly inside this class folder.
        for im in glob.glob(os.path.join(folder, "*.jpg")):
            print("reading the images:%s" % (im))
            img = io.imread(im)
            # skimage resize also converts uint8 pixels to floats in [0, 1].
            # NOTE(review): assumes every .jpg decodes to the same channel
            # count (RGB) — a grayscale image would break np.asarray below;
            # verify the dataset.
            img = transform.resize(img, (width, height))
            imgs.append(img)
            labels.append(idx)
    return np.asarray(imgs, np.float32), np.asarray(labels, np.int32)


# Load the whole dataset, then shuffle samples and labels in unison
# with a single random permutation of the indices.
data, label = read_img(path)
num_example = data.shape[0]
perm = np.random.permutation(num_example)
data, label = data[perm], label[perm]

# Hold out the last 20% of the shuffled data for validation.
ratio = 0.8
s = int(num_example * ratio)
x_train, y_train = data[:s], label[:s]
x_val, y_val = data[s:], label[s:]


def build_model(num_classes=5):
    """Build the flower-classification CNN (Keras functional API).

    Architecture: four Conv2D + MaxPooling2D stages, a Flatten, two
    Dropout-regularized Dense layers, then a linear output layer. The
    output is raw logits — pair it with a from_logits=True loss, as the
    compile step in this script does.

    Parameters
    ----------
    num_classes : int
        Number of output classes. Defaults to 5, matching the original
        hard-coded head, so existing callers are unaffected.

    Returns
    -------
    tf.keras.Model
        Uncompiled model taking (w, h, c) images.
    """
    inputs = tf.keras.Input(shape=(w, h, c), name="input")

    # Convolutional feature extractor: (filters, kernel_size) per stage,
    # each followed by 2x2 max-pooling. All convs share the same L2
    # weight penalty and ReLU activation.
    x = inputs
    for filters, kernel in ((32, (5, 5)), (64, (5, 5)), (128, (3, 3)), (256, (3, 3))):
        x = layers.Conv2D(
            filters,
            kernel,
            padding="same",
            activation="relu",
            kernel_regularizer=regularizers.l2(0.0001),
        )(x)
        x = layers.MaxPooling2D((2, 2))(x)

    # Classifier head: flatten, then two dense layers with 50% dropout.
    x = layers.Flatten()(x)
    for units in (1024, 512):
        x = layers.Dense(
            units, activation="relu", kernel_regularizer=regularizers.l2(0.0001)
        )(x)
        x = layers.Dropout(0.5)(x)

    # No softmax here: the loss applies it internally (numerically stabler).
    outputs = layers.Dense(num_classes)(x)

    return tf.keras.Model(inputs=inputs, outputs=outputs)


# Build the network (defaults to a 5-class output head).
model = build_model()
model.summary()

# Compile: Adam optimizer; from_logits=True because the model's final
# Dense layer has no softmax activation.
model.compile(
    optimizer=tf.keras.optimizers.Adam(0.001),
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=["accuracy"],
)

# Train on the 80% split, validating on the 20% hold-out each epoch.
history = model.fit(
    x_train, y_train, batch_size=32, epochs=10, validation_data=(x_val, y_val)
)

# Persist the trained model (native Keras format, .keras extension).
model.save(model_path)  # save in Keras format

# Final evaluation. NOTE(review): this re-evaluates the same validation
# split already used during training — there is no separate test set, so
# "Test accuracy" is really validation accuracy.
test_loss, test_acc = model.evaluate(x_val, y_val, verbose=2)
print(f"\nTest accuracy: {test_acc}")
