import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import os,pathlib,PIL
from tensorflow import keras
# from tensorflow.keras import layers,models,Sequential,Input,Model
# from tensorflow.keras.layers import Conv2D,MaxPooling2D,Flatten,Dense

# Configure matplotlib so Chinese (CJK) text in titles/labels renders correctly.
plt.rcParams.update({
    'font.sans-serif': ['SimHei'],  # a font that contains CJK glyphs
    'axes.unicode_minus': False,    # render minus signs correctly with that font
})


# Dataset root directory (contains one sub-directory per class: cats/, dogs/).
data_dir = "D:/tmp/.keras/datasets/cats_and_dogs"
data_dir = pathlib.Path(data_dir)
img_count = len(list(data_dir.glob('*/*.jpg')))
print(img_count)  # 3400 images in total

# Keep the string form of each top-level entry for reference.
all_images_paths = [str(path) for path in data_dir.glob('*')]
# Class names are simply the sub-directory names. Use Path.name instead of
# the original str.split("\\")[5], which only worked on Windows and only at
# this exact directory depth. Sort them so indices match the alphabetical
# class ordering that flow_from_directory uses.
all_label_names = sorted(p.name for p in data_dir.glob('*') if p.is_dir())
# ['cats', 'dogs']


# Input image size and training hyper-parameters (used by the generators,
# the model definition and the fit/plot blocks below).
height = 224  # target image height in pixels
width = 224  # target image width in pixels
epochs = 40  # number of training epochs
batch_size = 128  # images per gradient step


# Training generator: rescaling plus random augmentation.
train_data_gen = tf.keras.preprocessing.image.ImageDataGenerator(
    rescale=1./255,
    rotation_range=45,
    shear_range=0.2,
    zoom_range=0.2,
    validation_split=0.2,
    horizontal_flip=True
)
# Validation generator: rescaling ONLY. The original reused the augmenting
# generator for the validation subset, so validation images were randomly
# rotated/sheared/zoomed/flipped, making validation metrics unrepresentative.
# Using the same validation_split keeps the train/validation partition
# consistent between the two generators.
test_data_gen = tf.keras.preprocessing.image.ImageDataGenerator(
    rescale=1./255,
    validation_split=0.2
)
train_ds = train_data_gen.flow_from_directory(
    directory=data_dir,
    target_size=(height, width),
    batch_size=batch_size,
    shuffle=True,
    class_mode='categorical',  # one-hot labels
    subset='training'
)
test_ds = test_data_gen.flow_from_directory(
    directory=data_dir,
    target_size=(height, width),
    batch_size=batch_size,
    shuffle=True,
    class_mode='categorical',
    subset='validation'
)

# Preview the first 8 training images together with their class labels.
plt.figure(figsize=(15, 10))  # figure is 15 wide by 10 tall

sample_images, sample_labels = next(train_ds)  # pull a single batch
for idx in range(8):
    plt.subplot(5, 8, idx + 1)
    plt.imshow(sample_images[idx])
    # labels are one-hot, so argmax gives the class index
    plt.title(all_label_names[np.argmax(sample_labels[idx])])
    plt.axis("off")
plt.show()


# Simple CNN: three conv/pool stages, dropout, then a dense classifier head.
# The final Dense(2) has no activation, so the model outputs raw logits —
# the loss used at compile time must be constructed with from_logits=True.
model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(16,3,padding="same",activation="relu",input_shape=(height,width,3)),
    tf.keras.layers.MaxPool2D(),
    tf.keras.layers.Conv2D(32,3,padding="same",activation="relu"),
    tf.keras.layers.MaxPool2D(),
    tf.keras.layers.Conv2D(64,3,padding="same",activation="relu"),
    tf.keras.layers.MaxPool2D(),
    tf.keras.layers.Dropout(0.5),  # regularization before the dense head
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(512,activation="relu"),
    tf.keras.layers.Dense(2)  # raw logits, one per class
])

# Optimizer setup: Adam driven by an exponentially decaying learning rate.
init_learning_rate = 1e-4  # starting learning rate

# Multiply the rate by 0.96 every 50 optimizer steps; staircase=True makes
# the decay happen in discrete jumps rather than continuously.
lr_sch = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate=init_learning_rate,
    decay_rate=0.96,
    decay_steps=50,
    staircase=True,
)

gen_optimizer = tf.keras.optimizers.Adam(learning_rate=lr_sch)


# Compile the model. The head is Dense(2) emitting unnormalised logits and
# the generators produce one-hot labels (class_mode='categorical'), so the
# matching loss is CategoricalCrossentropy(from_logits=True). The original
# BinaryCrossentropy treated each of the two logits as an independent binary
# problem, which mis-scales the loss for one-hot multi-class targets.
model.compile(
    optimizer=gen_optimizer,
    loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
    metrics=['accuracy']
)

# Train the model; `history.history` keeps per-epoch loss/accuracy for both
# the training and validation generators, used by the plotting block below.
history = model.fit(
    train_ds,
    epochs=epochs,
    validation_data=test_ds
)


# Visualize the training results: accuracy and loss curves side by side.
hist = history.history
epochs_range = range(epochs)

plt.figure(figsize=(10, 5))

plt.subplot(1, 2, 1)
plt.plot(epochs_range, hist["accuracy"], label="Training Acc")
plt.plot(epochs_range, hist["val_accuracy"], label="Test Acc")
plt.legend()
plt.title("Training and Test Acc")

plt.subplot(1, 2, 2)
plt.plot(epochs_range, hist["loss"], label="Training loss")
plt.plot(epochs_range, hist["val_loss"], label="Test loss")
plt.legend()
plt.title("Training and Test loss")
plt.show()


# Persist the trained model to HDF5, then reload it to demonstrate the
# save/load round trip (the reloaded model replaces the in-memory one).
# NOTE(review): newer Keras versions prefer the native ".keras" format over
# HDF5 — confirm against the installed TF version before changing.
model.save("D:/tmp/.keras/datasets/model.h5")

model = tf.keras.models.load_model("D:/tmp/.keras/datasets/model.h5")


# Show the model's predictions on 16 validation images.
plt.figure(figsize=(18, 18))
plt.suptitle("预测结果展示")

images, labels = next(test_ds)  # pull one validation batch
# Predict all 16 images in a single call. The original called model.predict
# once per image inside the loop, paying the per-call dispatch overhead
# 16 times for identical results.
predictions = model.predict(images[:16])

for i in range(16):
    plt.subplot(4, 4, i + 1)
    plt.imshow(images[i])
    # argmax over the class scores gives the predicted class index
    plt.title(all_label_names[np.argmax(predictions[i])])
    plt.axis("off")
plt.show()


# Transfer learning: VGG19 convolutional base pretrained on ImageNet,
# without its classifier head.
conv_base = tf.keras.applications.VGG19(weights='imagenet', include_top=False)
# Freeze the pretrained weights so only the new head would be trained.
conv_base.trainable = False

# New classifier head on top of the frozen base.
model = tf.keras.Sequential()
model.add(conv_base)
model.add(tf.keras.layers.GlobalAveragePooling2D())
model.add(tf.keras.layers.Dense(512, activation='relu'))
# softmax rather than the original sigmoid: the labels are one-hot 2-class
# vectors (class_mode='categorical'), so the two outputs should form a
# probability distribution. argmax-based predictions are unaffected.
model.add(tf.keras.layers.Dense(2, activation='softmax'))


from sklearn.metrics import confusion_matrix
import seaborn as sns
import pandas as pd

#绘制混淆矩阵
def plot_cm(labels, pre):
    """Plot a confusion-matrix heat map of true vs. predicted class labels."""
    # Build the confusion matrix from the true and predicted label lists,
    # then wrap it in a DataFrame so the heat map axes show class names.
    cm = confusion_matrix(labels, pre)
    cm_frame = pd.DataFrame(cm, index=all_label_names, columns=all_label_names)

    plt.figure(figsize=(8, 7))
    sns.heatmap(cm_frame, annot=True, fmt="d", cmap="BuPu")
    plt.title('混淆矩阵', fontsize=15)
    plt.ylabel('真实值', fontsize=14)
    plt.xlabel('预测值', fontsize=14)
    plt.show()
# Collect predictions and ground truth for one validation batch and plot
# the confusion matrix. Only a single batch (128 images) is used to keep
# the hardware cost low, as in the original script.
test_pre = []
test_label = []
images, labels = next(test_ds)
# Predict the entire batch in one call. The original called model.predict
# once per image, which is far slower for identical results.
pres = model.predict(images)
for pre, label in zip(pres, labels):
    test_pre.append(all_label_names[np.argmax(pre)])   # predicted class name
    test_label.append(all_label_names[np.argmax(label)])  # true class name
plot_cm(test_label, test_pre)










