import pathlib
import random
import tensorflow as tf
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import KFold

# tf.data tuning / batching constants shared by every dataset builder below.
AUTOTUNE = tf.data.experimental.AUTOTUNE  # let tf.data choose parallelism/prefetch depth
BATCH_SIZE = 32  # samples per batch for train/val/test pipelines
IMG_SIZE = (200, 200)  # (height, width) every image is resized to


def preprocess_image(image, size=(200, 200)):
    """Decode a JPEG byte string and resize it.

    :param image: scalar string tensor containing raw JPEG bytes.
    :param size: (height, width) target size; defaults to the 200x200
        used throughout this script (matches the module IMG_SIZE).
    :return: float32 tensor of shape (size[0], size[1], 3).
    """
    image = tf.image.decode_jpeg(image, channels=3)
    # tf.image.resize returns float32 in the raw 0-255 range; the
    # ResNet preprocess_input applied in load_model expects that.
    image = tf.image.resize(image, list(size))
    return image


def load_and_preprocess_image(path_):
    """Read the file at `path_` and decode/resize it via preprocess_image."""
    raw_bytes = tf.io.read_file(path_)
    image = preprocess_image(raw_bytes)
    return image


def load_model():
    """Build and compile a 3-class classifier on a partly-frozen ResNet152.

    The ImageNet-pretrained backbone is fine-tuned from layer 450 up;
    everything below stays frozen. A small Dense head with dropout sits
    on top of global average pooling.

    :return: compiled tf.keras.Model taking IMG_SIZE RGB images and
        producing softmax probabilities over 3 classes.
    """
    preprocess_input = tf.keras.applications.resnet.preprocess_input
    IMG_SHAPE = IMG_SIZE + (3,)
    base_model = tf.keras.applications.ResNet152(input_shape=IMG_SHAPE,
                                                 include_top=False,
                                                 weights='imagenet')
    # Unfreeze the backbone, then re-freeze everything below
    # `fine_tune_at` so only the top of ResNet152 is updated.
    base_model.trainable = True
    fine_tune_at = 450
    for layer in base_model.layers[:fine_tune_at]:
        layer.trainable = False

    # Use IMG_SHAPE here too (the original hard-coded (200, 200, 3));
    # the dead empty Sequential() that wrapped the inputs is removed.
    inputs = tf.keras.Input(shape=IMG_SHAPE)
    x = preprocess_input(inputs)
    # training=False keeps BatchNorm in inference mode so the
    # pretrained statistics survive fine-tuning.
    x = base_model(x, training=False)
    x = tf.keras.layers.GlobalAveragePooling2D()(x)
    x = tf.keras.layers.Dense(32, activation='relu')(x)
    x = tf.keras.layers.Dropout(0.5)(x)
    x = tf.keras.layers.Dense(16, activation='relu')(x)
    x = tf.keras.layers.Dropout(0.2)(x)
    outputs = tf.keras.layers.Dense(3, activation='softmax')(x)
    models = tf.keras.Model(inputs, outputs)
    models.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(),
                   optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001),
                   metrics=['accuracy'])
    return models


def construct_batch(image_paths, image_labels):
    """Build the batched, prefetched evaluation dataset.

    :param image_paths: list of image file paths.
    :param image_labels: list of integer labels, parallel to image_paths.
    :return: tf.data.Dataset yielding (image_batch, label_batch) pairs.
    """
    images = tf.data.Dataset.from_tensor_slices(image_paths).map(
        load_and_preprocess_image, num_parallel_calls=AUTOTUNE)
    labels = tf.data.Dataset.from_tensor_slices(tf.cast(image_labels, tf.int32))
    dataset = (tf.data.Dataset.zip((images, labels))
               .batch(BATCH_SIZE)
               .prefetch(buffer_size=AUTOTUNE))
    return dataset


def construct_batch_spilt(image_paths, image_labels):
    """Split one fold's data into train/validation batches (roughly 4:1).

    The first fifth of the batches becomes the validation set, the rest
    the training set; the paths were already shuffled upstream in
    load_image_label, so this split is effectively random.

    :param image_paths: list of image file paths.
    :param image_labels: list of integer labels, parallel to image_paths.
    :return: (train, validation) tuple of batched, prefetched datasets.
    """
    images = tf.data.Dataset.from_tensor_slices(image_paths).map(
        load_and_preprocess_image, num_parallel_calls=AUTOTUNE)
    labels = tf.data.Dataset.from_tensor_slices(tf.cast(image_labels, tf.int32))
    batched = tf.data.Dataset.zip((images, labels)).batch(BATCH_SIZE)
    n_val = tf.data.experimental.cardinality(batched) // 5
    validation = batched.take(n_val).prefetch(buffer_size=AUTOTUNE)
    train = batched.skip(n_val).prefetch(buffer_size=AUTOTUNE)
    return train, validation


def load_image_label(paths):
    """Collect image paths and integer labels from a class-per-folder tree.

    Expects `paths` to contain one sub-directory per class, each holding
    image files. Class indices are assigned by sorted directory name.

    :param paths: root directory of one dataset split.
    :return: (all_image_paths, all_image_labels) — paths shuffled once,
        labels derived afterwards so the two lists stay in unison.
    """
    data_root = pathlib.Path(paths)
    all_image_paths_ = [str(path_) for path_ in data_root.glob('*/*')]
    random.shuffle(all_image_paths_)  # shuffle paths; labels computed after, so they match
    # '*' + is_dir() instead of the original '*/': the trailing-slash
    # glob pattern is not handled consistently across pathlib versions.
    label_names = sorted(item.name for item in data_root.glob('*')
                         if item.is_dir())
    label_to_index = {name: index_ for index_, name in enumerate(label_names)}
    all_image_labels_ = [label_to_index[pathlib.Path(path_).parent.name]
                         for path_ in all_image_paths_]
    return all_image_paths_, all_image_labels_


if __name__ == '__main__':
    # NOTE(review): hard-coded Windows paths — adjust for your machine.
    path_train = r"G:\image2\train"
    all_image_paths_train, all_image_labels_train = load_image_label(path_train)  # already shuffled by load_image_label
    # Build the training image set
    # Test set
    path_test = r"G:\image2\test"
    all_image_paths_test, all_image_labels_test = load_image_label(path_test)
    # Hold-out validation set, not used for training
    path_val = r'G:\image2\val'
    all_image_paths_val, all_image_labels_val = load_image_label(path_val)

    # Merge all three splits: 10-fold CV re-partitions the whole corpus itself.
    all_image_labels = all_image_labels_train + all_image_labels_test + all_image_labels_val
    all_image_paths = all_image_paths_train + all_image_paths_test + all_image_paths_val
    print("数据集总数量：",len(all_image_labels))
    kf = KFold(n_splits=10)

    num = 0  # fold counter, used to name each fold's checkpoint directory
    print("十折交叉验证精度开始！")
    for train_index, test_index in kf.split(all_image_paths):
        # Fresh session and freshly-initialized model per fold so
        # weights never leak across folds.
        tf.keras.backend.clear_session()
        model = load_model()
        index_train = list(train_index)  # ndarray -> list for the indexing below
        test_index = list(test_index)
        train_image_path = [all_image_paths[i] for i in index_train]
        train_image_label = [all_image_labels[i] for i in index_train]
        test_image_path = [all_image_paths[i] for i in test_index]
        test_image_label = [all_image_labels[i] for i in test_index]
        # ~80/20 train/validation split inside this fold's training portion
        train_dataset, validation_dataset = construct_batch_spilt(train_image_path, train_image_label)
        test_data = construct_batch(test_image_path, test_image_label)
        num = num + 1
        checkpoint_path = "resnet_9_5_ten_verification/resnet9_5_" + str(num) + "/" + "cp.ckpt"
        callback_list = [
            tf.keras.callbacks.EarlyStopping(
                monitor='accuracy',  # watches training accuracy
                patience=5  # stop after 5 epochs without improvement
            ),
            # NOTE(review): the checkpoint tracks val_accuracy while
            # early stopping tracks training accuracy — confirm this
            # mismatch is intentional.
            tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
                                               save_best_only=True,
                                               save_weights_only=True,
                                               verbose=1,
                                               monitor='val_accuracy'
                                               )
        ]
        initial_epochs = 50  # max epochs per fold (early stopping may end sooner)
        history = model.fit(train_dataset,
                            validation_data=validation_dataset,
                            epochs=initial_epochs,
                            callbacks=callback_list)

        # Plot this fold's learning curves (plt.show() blocks until closed).
        acc = history.history['accuracy']
        val_acc = history.history['val_accuracy']
        loss = history.history['loss']
        val_loss = history.history['val_loss']
        print("acc:", acc)
        print("val_acc:", val_acc)
        plt.figure(figsize=(8, 8))
        plt.subplot(2, 1, 1)
        plt.plot(acc, label='Training Accuracy')
        plt.plot(val_acc, label='Validation Accuracy')
        plt.legend(loc='lower right')
        plt.ylabel('Accuracy')
        plt.ylim([min(plt.ylim()), 1])
        plt.title('Training and Validation Accuracy')

        plt.subplot(2, 1, 2)
        plt.plot(loss, label='Training Loss')
        plt.plot(val_loss, label='Validation Loss')
        plt.legend(loc='upper right')
        plt.ylabel('Cross Entropy')
        plt.ylim([0, 1.0])
        plt.title('Training and Validation Loss')
        plt.xlabel('epoch')
        plt.show()
        time.sleep(5)

        model.load_weights(checkpoint_path)  # restore the best (val_accuracy) weights saved by the checkpoint
        scores = model.predict(test_data)
        scores_pri_t = [i.tolist() for i in scores]  # per-sample class-probability rows
        label_pri_t = []
        # Arg-max of each probability row is the predicted class label.
        for i in scores:
            """ 得到预测值对应的标签"""
            label_pri_t.append(np.argmax(i))
        print("label_pri :", label_pri_t)  # predicted labels
        print("label_true:", test_image_label)  # ground-truth labels
        sums = 0
        # Count predictions matching the ground truth.
        for index, i in enumerate(label_pri_t):
            """ 判断预测准确的个数"""
            if int(i) == int(test_image_label[index]):
                sums += 1
        print("预测正确的个数：", sums)
        print("总预测样本数：", len(test_image_label))
        print("预测准确率：", sums / float(len(test_image_label)))
        print(classification_report(test_image_label, label_pri_t))
        print(confusion_matrix(test_image_label, label_pri_t))
        # Append this fold's raw scores, true labels and predictions to
        # score.txt for later offline analysis.
        with open('score.txt', 'a+') as f:
            f.write(str(scores_pri_t) + '\n')
            f.write(str(test_image_label) + '\n')
            f.write(str(label_pri_t) + '\n')
    print("十折交叉验证结束！")
