import pathlib
import random
import tensorflow as tf
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report, confusion_matrix

AUTOTUNE = tf.data.experimental.AUTOTUNE  # let tf.data pick parallelism/prefetch sizes automatically
BATCH_SIZE = 32  # samples per batch for the train/val/test pipelines built in construct_batch
IMG_SIZE = (200, 200)  # target (height, width) every image is resized to


def preprocess_image(image, size=(200, 200)):
    """Decode a raw JPEG byte string into a resized RGB image tensor.

    :param image: scalar string tensor holding the encoded JPEG bytes
    :param size: target (height, width); defaults to (200, 200), matching
        the module-level IMG_SIZE, so existing callers are unaffected
    :return: float tensor of shape (size[0], size[1], 3)
    """
    image = tf.image.decode_jpeg(image, channels=3)
    # tf.image.resize converts to float; values are NOT normalized here —
    # the model applies resnet.preprocess_input itself.
    image = tf.image.resize(image, list(size))
    return image


def load_and_preprocess_image(path_):
    """Read the image file at *path_* from disk, then decode and resize it."""
    return preprocess_image(tf.io.read_file(path_))


def load_model():
    """Build and compile the fine-tuned ResNet152 3-class classifier.

    Loads ImageNet weights, freezes the first 450 layers, and stacks a
    small dense head (32 -> 16 -> 3 softmax) on global-average-pooled
    features.

    :return: a compiled tf.keras.Model expecting (200, 200, 3) inputs
    """
    preprocess_input = tf.keras.applications.resnet.preprocess_input
    img_shape = IMG_SIZE + (3,)
    base_model = tf.keras.applications.ResNet152(input_shape=img_shape,
                                                 include_top=False,
                                                 weights='imagenet')
    # Fine-tune only layers from index 450 onward; earlier layers keep
    # their ImageNet weights frozen.
    base_model.trainable = True
    fine_tune_at = 450
    for layer in base_model.layers[:fine_tune_at]:
        layer.trainable = False

    # The original code passed inputs through an empty tf.keras.Sequential(),
    # which is an identity op; it has been removed.
    inputs = tf.keras.Input(shape=img_shape)
    x = preprocess_input(inputs)
    # training=False keeps BatchNorm layers in inference mode even while
    # the unfrozen weights are being fine-tuned.
    x = base_model(x, training=False)
    x = tf.keras.layers.GlobalAveragePooling2D()(x)
    x = tf.keras.layers.Dense(32, activation='relu')(x)
    x = tf.keras.layers.Dropout(0.5)(x)
    x = tf.keras.layers.Dense(16, activation='relu')(x)
    x = tf.keras.layers.Dropout(0.2)(x)
    outputs = tf.keras.layers.Dense(3, activation='softmax')(x)

    model = tf.keras.Model(inputs, outputs)
    model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(),
                  optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001),
                  metrics=['accuracy'])
    return model


def construct_batch(image_paths, image_labels):
    """Pair image paths with labels and build a batched, prefetched dataset.

    :param image_paths: list of image file paths
    :param image_labels: integer class labels, same order as *image_paths*
    :return: tf.data.Dataset yielding (image_batch, label_batch) tuples
    """
    images = tf.data.Dataset.from_tensor_slices(image_paths).map(
        load_and_preprocess_image, num_parallel_calls=AUTOTUNE)
    labels = tf.data.Dataset.from_tensor_slices(tf.cast(image_labels, tf.int32))
    paired = tf.data.Dataset.zip((images, labels))
    # Prefetch overlaps preprocessing of the next batch with model execution.
    return paired.batch(BATCH_SIZE).prefetch(buffer_size=AUTOTUNE)


def load_image_label(paths):
    """Collect all image paths under *paths* and derive integer labels.

    Expects a layout of ``paths/<class_name>/<image_file>``; labels are the
    indices of the sorted class-directory names.

    :param paths: root directory containing one subdirectory per class
    :return: (shuffled list of path strings, matching list of int labels)
    """
    root = pathlib.Path(paths)
    image_files = [str(p) for p in root.glob('*/*')]
    # Shuffle once here so downstream batches mix the classes.
    random.shuffle(image_files)
    class_names = sorted(d.name for d in root.glob('*/') if d.is_dir())
    name_to_index = {name: idx for idx, name in enumerate(class_names)}
    labels = [name_to_index[pathlib.Path(p).parent.name] for p in image_files]
    return image_files, labels


def _evaluate_and_log(trained_model, dataset, true_labels, score_file):
    """Predict over *dataset*, print accuracy metrics against *true_labels*,
    and append scores / true labels / predicted labels to *score_file*.

    Replaces the two near-identical copy-pasted evaluation sections that
    previously handled the test and validation sets.
    """
    score = trained_model.predict(dataset)
    score_pri_t = [i.tolist() for i in score]  # per-sample class probabilities
    # argmax over class probabilities; cast to plain int so printed/written
    # output contains ints, not numpy scalars.
    label_pri_t = [int(np.argmax(i)) for i in score]
    print("label_pri :", label_pri_t)  # predicted labels
    print("label_true:", true_labels)  # ground-truth labels
    sums = sum(1 for pred, true in zip(label_pri_t, true_labels)
               if pred == int(true))
    print("预测正确的个数：", sums)
    print("总预测样本数：", len(true_labels))
    print("预测准确率：", sums / float(len(true_labels)))
    print("各项指标")
    print(classification_report(true_labels, label_pri_t))
    print(confusion_matrix(true_labels, label_pri_t))
    with open(score_file, 'a+') as f:
        f.write(str(score_pri_t) + '\n')
        f.write(str(true_labels) + '\n')
        f.write(str(label_pri_t) + '\n')


if __name__ == '__main__':
    # Training set (paths are shuffled inside load_image_label).
    path_train = r"G:\image2\train"
    all_image_paths_train, all_image_labels_train = load_image_label(path_train)
    # Test set.
    path_test = r"G:\image2\test"
    all_image_paths_test, all_image_labels_test = load_image_label(path_test)
    # Validation set — monitored during fit, not used for weight updates.
    path_val = r'G:\image2\val'
    all_image_paths_val, all_image_labels_val = load_image_label(path_val)

    train_dataset = construct_batch(all_image_paths_train, all_image_labels_train)
    test_dataset = construct_batch(all_image_paths_test, all_image_labels_test)
    val_dataset = construct_batch(all_image_paths_val, all_image_labels_val)
    model = load_model()
    # Warm-start from the best checkpoint of the ten-fold cross-validation run.
    path = 'resnet_9_5_ten_verification/resnet3_16_5/cp.ckpt'
    model.load_weights(path)
    checkpoint_path = "model_last/cp.ckpt"
    callback_list = [
        tf.keras.callbacks.EarlyStopping(
            # NOTE(review): this monitors *training* accuracy; 'val_accuracy'
            # is usually the better early-stopping signal — confirm intent.
            monitor='accuracy',
            patience=8  # stop after 8 epochs without improvement
        ),
        tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
                                           save_best_only=True,
                                           save_weights_only=True,
                                           verbose=1,
                                           monitor='loss'
                                           )
    ]

    initial_epochs = 5  # number of training epochs
    history = model.fit(train_dataset,
                        validation_data=val_dataset,
                        epochs=initial_epochs,
                        callbacks=callback_list)
    # NOTE(review): filename says "VGG" but the model is ResNet152; path kept
    # unchanged so downstream consumers of this file are not broken.
    save_path = 'save_model_h5/VGG_conv.h5'
    model.save(save_path)

    # Plot training curves.
    acc = history.history['accuracy']
    val_acc = history.history['val_accuracy']
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    print("acc:", acc)
    print("val_acc:", val_acc)
    plt.figure(figsize=(8, 8))
    plt.subplot(2, 1, 1)
    plt.plot(acc, label='Training Accuracy')
    plt.plot(val_acc, label='Validation Accuracy')
    plt.legend(loc='lower right')
    plt.ylabel('Accuracy')
    plt.ylim([min(plt.ylim()), 1])
    plt.title('Training and Validation Accuracy')

    plt.subplot(2, 1, 2)
    plt.plot(loss, label='Training Loss')
    plt.plot(val_loss, label='Validation Loss')
    plt.legend(loc='upper right')
    plt.ylabel('Cross Entropy')
    plt.ylim([0, 1.0])
    plt.title('Training and Validation Loss')
    plt.xlabel('epoch')
    plt.show()
    time.sleep(5)

    # Reload the saved H5 model and evaluate it on the held-out sets.
    new_model = tf.keras.models.load_model(save_path)
    _evaluate_and_log(new_model, test_dataset, all_image_labels_test, 'score1.txt')
    _evaluate_and_log(new_model, val_dataset, all_image_labels_val, 'score2.txt')
