import pandas as pd
import numpy as np
import tensorflow as tf
import PIL
import os
import cv2
import matplotlib.pyplot as plt
from tensorflow.keras import layers
from tensorflow.keras import Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.applications import Xception
from tensorflow.keras.applications.xception import preprocess_input
from sklearn.metrics import classification_report, roc_curve, auc

# Dataset locations on the local disk.
train_path = r"D:\kaggle\final\data1\train"
valid_path = r"D:\kaggle\final\data1\valid"
test_path = r"D:\kaggle\final\data1\test"

# Quick sanity check: print the entries (class sub-directories) of the
# training folder so the run log shows the data was found.
path = train_path
for entry in os.listdir(path):
    print(os.path.join(path, entry))

# Helper-function for joining a directory and list of filenames.
def path_join(dirname, filenames):
    """Prefix every name in *filenames* with *dirname* and return the list."""
    joined = []
    for name in filenames:
        joined.append(os.path.join(dirname, name))
    return joined

# Helper-function for plotting images
def plot_images(images, cls_true, cls_pred=None, smooth=True):
    """Show up to 9 images in a 3x3 grid, labelled with their true class
    and, when *cls_pred* is given, the predicted class as well."""
    assert len(images) == len(cls_true)

    fig, axes = plt.subplots(3, 3, figsize=(15, 15))

    # A second (predicted) label line needs extra vertical spacing.
    fig.subplots_adjust(hspace=0.3 if cls_pred is None else 0.6, wspace=0.3)

    # Smooth interpolation looks nicer; 'nearest' shows raw pixels.
    interpolation = 'spline16' if smooth else 'nearest'

    for idx, ax in enumerate(axes.flat):
        # There may be fewer than 9 images; leave the extra axes blank.
        if idx < len(images):
            ax.imshow(images[idx], interpolation=interpolation)

            true_name = class_names[cls_true[idx]]
            if cls_pred is None:
                xlabel = "True: {0}".format(true_name)
            else:
                pred_name = class_names[cls_pred[idx]]
                xlabel = "True: {0}\nPred: {1}".format(true_name, pred_name)

            # The class label goes on the x-axis.
            ax.set_xlabel(xlabel)

        # No tick marks on image plots.
        ax.set_xticks([])
        ax.set_yticks([])

    # Ensure the plot is shown correctly with multiple plots
    # in a single Notebook cell.
    plt.show()

# Helper-function for printing confusion matrix
from sklearn.metrics import confusion_matrix

def print_confusion_matrix(cls_pred):
    """Print the confusion matrix of *cls_pred* against the global test labels.

    cls_pred: array of predicted class-numbers for every test-set image.
    """
    # Rows are true classes, columns are predicted classes (sklearn layout).
    cm = confusion_matrix(y_true=cls_test, y_pred=cls_pred)

    print("Confusion matrix:")
    print(cm)

    # Legend mapping each matrix index to its class name.
    for idx, name in enumerate(class_names):
        print("({0}) {1}".format(idx, name))

# Helper-function for plotting example errors
def plot_example_errors(cls_pred):
    """Display up to 9 test images that the model mis-classified.

    cls_pred: array of predicted class-numbers for every test-set image.
    """
    # Mask selecting the mis-classified test images.
    incorrect = (cls_pred != cls_test)

    # Paths, predictions and ground truth restricted to the errors.
    error_paths = np.array(image_paths_test)[incorrect]
    error_pred = cls_pred[incorrect]
    error_true = cls_test[incorrect]

    # Only the first 9 errors are loaded from disk and displayed.
    plot_images(images=load_images(image_paths=error_paths[0:9]),
                cls_true=error_true[0:9],
                cls_pred=error_pred[0:9])

# Function for calculating the predicted classes of the entire test-set and calling
# the above function to plot a few examples of mis-classified images.
def example_errors():
    """Run the model over the whole test-set and report the results:
    classification report, ROC curve, example errors, confusion matrix."""
    # The Keras data-generator for the test-set must be reset before
    # processing, because the generator loops infinitely with an internal
    # index; without a reset it could start mid-dataset and predictions
    # could not be matched to their input images.
    test_generator.reset()

    # Class-probabilities for every test image (one row per image).
    y_pred = model.predict(test_generator, steps=STEPS_TEST)

    # Hard class decisions for the report and confusion matrix.
    cls_pred = np.argmax(y_pred, axis=1)

    print(classification_report(cls_test, cls_pred, target_names=class_names, digits=4))

    # BUG FIX: roc_curve needs a continuous score, not the argmax'ed class
    # labels (those yield a degenerate 3-point "curve"). Use the predicted
    # probability of the positive class (column 1).
    fpr, tpr, _ = roc_curve(cls_test, y_pred[:, 1])
    roc_auc = auc(fpr, tpr)
    plt.figure()
    plt.plot(fpr, tpr, label='ROC curve (area = %0.4f)' % roc_auc)
    plt.plot([0, 1], [0, 1], 'k--')  # chance-level diagonal
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver Operating Characteristic')
    plt.legend(loc="lower right")
    plt.show()

    # Plot examples of mis-classified images.
    plot_example_errors(cls_pred)

    # Print the confusion matrix.
    print_confusion_matrix(cls_pred)

# Helper-function for loading images
def load_images(image_paths):
    """Read every image in *image_paths* from disk and stack them into
    a single numpy array."""
    loaded = list(map(plt.imread, image_paths))
    return np.asarray(loaded)

# Set some important constants here
IMAGE_SIZE = 224  # every image is resized to IMAGE_SIZE x IMAGE_SIZE by the generators below
N_CLASSES = 2     # binary problem (two output units on the final softmax layer)
BATCH_SIZE = 5    # images per generator batch

# ImageDataGenerator is used because the dataset does not contain much data.
# Data augmentation can generate many augmented images from a single image.
# NOTE(review): the generators below only set a preprocessing function — no
# augmentation transforms (rotation, flips, zoom, ...) are actually configured.

# All three splits share the same pipeline: cast to float32 and apply the
# Xception-specific input scaling (preprocess_input).
train_datagen = ImageDataGenerator(dtype='float32', preprocessing_function=preprocess_input)
train_generator = train_datagen.flow_from_directory(train_path,
                                                    batch_size=BATCH_SIZE,
                                                    target_size=(IMAGE_SIZE, IMAGE_SIZE),
                                                    class_mode='categorical')

valid_datagen = ImageDataGenerator(dtype='float32', preprocessing_function=preprocess_input)
valid_generator = valid_datagen.flow_from_directory(valid_path,
                                                    batch_size=BATCH_SIZE,
                                                    target_size=(IMAGE_SIZE, IMAGE_SIZE),
                                                    class_mode='categorical')

test_datagen = ImageDataGenerator(dtype='float32', preprocessing_function=preprocess_input)
# BUG FIX: flow_from_directory shuffles by default, so test-set predictions
# could not be matched against cls_test / image_paths_test (which follow the
# generator's directory order). Disable shuffling for the test split.
test_generator = test_datagen.flow_from_directory(test_path,
                                                  batch_size=BATCH_SIZE,
                                                  target_size=(IMAGE_SIZE, IMAGE_SIZE),
                                                  class_mode='categorical',
                                                  shuffle=False)

# save some values to be used later

# Integer class labels, one per image, for each split.
cls_train = train_generator.classes
cls_valid = valid_generator.classes
cls_test = test_generator.classes
# Class names keyed by their integer label index.
class_names = list(train_generator.class_indices.keys())
print(class_names)
num_classes = train_generator.num_classes
print("num classes:", num_classes)

# Full file paths for every image, in the generators' filename order.
image_paths_train = path_join(train_path, train_generator.filenames)
image_paths_valid = path_join(valid_path, valid_generator.filenames)
image_paths_test = path_join(test_path, test_generator.filenames)

STEPS_TEST = test_generator.n / BATCH_SIZE

# Xception backbone pre-trained on ImageNet, without its classifier head.
xcep_model = Xception(include_top=False, weights='imagenet',
                      input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3))

# Freeze every layer except those of the last block ('block14'), which are
# left trainable (their default) for fine-tuning.
for layer in xcep_model.layers:
    layer.trainable = 'block14' in layer.name

# Log the trainable flag of every backbone layer for verification.
for i, layer in enumerate(xcep_model.layers):
    print(i, layer.name, "-", layer.trainable)

# Classification head stacked on top of the (mostly frozen) Xception backbone.
model = Sequential([
    xcep_model,
    layers.GlobalAveragePooling2D(),
    layers.Dropout(0.5),
    layers.Dense(1024, activation='relu'),
    layers.BatchNormalization(),
    layers.Dropout(0.5),
    layers.Dense(512, activation='relu'),
    layers.BatchNormalization(),
    layers.Dense(N_CLASSES, activation='softmax'),  # two output classes
])

model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
model.summary()

# Keep only the best weights (lowest validation loss) on disk, and stop
# early once validation performance stalls for 3 epochs.
checkpoint = ModelCheckpoint(filepath='../chest_CT_SCAN-Xception.h5',
                             monitor='val_loss',
                             mode='auto',
                             save_best_only=True)
early_stopping = EarlyStopping(verbose=1, patience=3)

import time

start_time = time.time()

# len(generator) is the number of batches in one full pass over the split.
steps_per_epoch = len(train_generator)
validation_steps = len(valid_generator)

history = model.fit(
    train_generator,
    epochs=20,
    steps_per_epoch=steps_per_epoch,
    validation_data=valid_generator,
    validation_steps=validation_steps,
    verbose=1,
    callbacks=[checkpoint, early_stopping],
)

import matplotlib.pyplot as plt

# Per-epoch training curves recorded by model.fit().
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(len(acc))

# Accuracy curves.
plt.plot(epochs, acc, 'r', label='Training accuracy')
plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.legend(loc=0)

# BUG FIX: loss / val_loss were extracted above but never plotted, and the
# trailing plt.figure() opened an empty window — use it for the loss curves.
plt.figure()
plt.plot(epochs, loss, 'r', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend(loc=0)

plt.show()

# Final loss/accuracy on the held-out test-set.
result = model.evaluate(test_generator, steps=STEPS_TEST)

# Classification report, ROC curve, example errors and confusion matrix.
example_errors()

end_time = time.time()

print("Total execution time: {:.4f} seconds".format(end_time - start_time))

# Recorded output of a previous run, kept for reference.
'''
              precision    recall  f1-score   support

      cancer     0.6761    0.6761    0.6761        71
      normal     0.2333    0.2333    0.2333        30

    accuracy                         0.5446       101
   macro avg     0.4547    0.4547    0.4547       101
weighted avg     0.5446    0.5446    0.5446       101
'''