import pandas as pd
import numpy as np
import tensorflow as tf
import PIL
import os
import cv2
import matplotlib.pyplot as plt
from tensorflow.keras import layers
from tensorflow.keras import Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.applications import InceptionV3
from tensorflow.keras.applications.inception_v3 import preprocess_input
from sklearn.metrics import classification_report, roc_curve, auc
import time
# Dataset split directories: one sub-directory per class inside each split.
# NOTE(review): Windows-specific absolute paths -- adjust for other machines.
train_path = r"D:\kaggle\final\data1\train"
valid_path = r"D:\kaggle\final\data1\valid"
test_path = r"D:\kaggle\final\data1\test"

# Helper-function for joining a directory and list of filenames.
def path_join(dirname, filenames):
    """Return each name in *filenames* joined onto directory *dirname*."""
    return [os.path.join(dirname, name) for name in filenames]

# Helper-function for plotting images
def plot_images(images, cls_true, cls_pred=None, smooth=True):
    """Show up to nine images in a 3x3 grid, labelling each with its true
    (and optionally predicted) class name on the x-axis.

    NOTE: relies on the module-level ``class_names`` list.
    """
    assert len(images) == len(cls_true)

    fig, axes = plt.subplots(3, 3, figsize=(15, 15))

    # Two-line "True/Pred" captions need extra vertical spacing.
    fig.subplots_adjust(hspace=0.3 if cls_pred is None else 0.6, wspace=0.3)

    # Smooth interpolation for nicer rendering, 'nearest' for raw pixels.
    interpolation = 'spline16' if smooth else 'nearest'

    for idx, ax in enumerate(axes.flat):
        # There may be fewer than 9 images; leave the surplus axes blank.
        if idx < len(images):
            ax.imshow(images[idx], interpolation=interpolation)

            true_name = class_names[cls_true[idx]]
            if cls_pred is None:
                xlabel = "True: {0}".format(true_name)
            else:
                pred_name = class_names[cls_pred[idx]]
                xlabel = "True: {0}\nPred: {1}".format(true_name, pred_name)

            # Class label(s) go on the x-axis.
            ax.set_xlabel(xlabel)

        # Strip tick marks from every axis, used or not.
        ax.set_xticks([])
        ax.set_yticks([])

    # Ensure the plot is shown correctly when several plots share a
    # single Notebook cell.
    plt.show()

# Helper-function for printing confusion matrix
from sklearn.metrics import confusion_matrix

def print_confusion_matrix(cls_pred):
    """Print the test-set confusion matrix plus an index-to-name legend.

    NOTE: relies on module-level ``cls_test`` and ``class_names``.
    """
    matrix = confusion_matrix(y_true=cls_test, y_pred=cls_pred)
    print("Confusion matrix:")
    print(matrix)
    # Legend mapping row/column indices to class names.
    for idx, name in enumerate(class_names):
        print("({0}) {1}".format(idx, name))

# Helper-function for plotting example errors
def plot_example_errors(cls_pred):
    """Display up to nine misclassified test images with true/predicted labels.

    NOTE: relies on module-level ``cls_test`` and ``image_paths_test``.
    """
    # Boolean mask of test samples where the prediction was wrong.
    wrong = (cls_pred != cls_test)
    wrong_paths = np.array(image_paths_test)[wrong]
    wrong_images = load_images(image_paths=wrong_paths[0:9])
    plot_images(images=wrong_images,
                cls_true=cls_test[wrong][0:9],
                cls_pred=cls_pred[wrong][0:9])

def example_errors():
    """Evaluate the trained model on the test set.

    Prints a classification report, plots the ROC curve, shows example
    misclassifications and prints the confusion matrix.

    NOTE: relies on module-level ``model``, ``test_generator``,
    ``STEPS_TEST``, ``cls_test`` and ``class_names``.
    """
    # Reset so predictions start from the first batch and line up with
    # test_generator.classes.
    test_generator.reset()
    y_pred = model.predict(test_generator, steps=STEPS_TEST)
    cls_pred = np.argmax(y_pred, axis=1)
    print(classification_report(cls_test, cls_pred, target_names=class_names, digits=4))

    # BUG FIX: roc_curve needs the continuous score of the positive class
    # (column 1 of the softmax output), not the hard argmax labels --
    # feeding labels collapses the curve to a single operating point.
    fpr, tpr, _ = roc_curve(cls_test, y_pred[:, 1])
    roc_auc = auc(fpr, tpr)
    plt.figure()
    plt.plot(fpr, tpr, label='ROC curve (area = %0.4f)' % roc_auc)
    plt.plot([0, 1], [0, 1], 'k--')  # chance diagonal
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver Operating Characteristic')
    plt.legend(loc="lower right")
    plt.show()
    plot_example_errors(cls_pred)
    print_confusion_matrix(cls_pred)

def load_images(image_paths):
    """Read every image in *image_paths* and stack them into a numpy array."""
    return np.asarray([plt.imread(path) for path in image_paths])

# Set some important constants here
IMAGE_SIZE = 224   # InceptionV3 input side length used throughout
N_CLASSES = 2      # binary problem: cancer vs. normal
BATCH_SIZE = 5

# NOTE(review): despite the original "data augmentation" label, no
# augmentation is configured -- these generators only apply the
# InceptionV3 preprocessing function.
train_datagen = ImageDataGenerator(dtype='float32', preprocessing_function=preprocess_input)
valid_datagen = ImageDataGenerator(dtype='float32', preprocessing_function=preprocess_input)
test_datagen = ImageDataGenerator(dtype='float32', preprocessing_function=preprocess_input)

train_generator = train_datagen.flow_from_directory(train_path, batch_size=BATCH_SIZE, target_size=(IMAGE_SIZE, IMAGE_SIZE), class_mode='categorical')
valid_generator = valid_datagen.flow_from_directory(valid_path, batch_size=BATCH_SIZE, target_size=(IMAGE_SIZE, IMAGE_SIZE), class_mode='categorical')
# BUG FIX: flow_from_directory shuffles by default. The test generator must
# NOT shuffle, otherwise model.predict() returns predictions in an order
# that no longer matches test_generator.classes / filenames, silently
# corrupting the classification report, ROC curve and confusion matrix.
test_generator = test_datagen.flow_from_directory(test_path, batch_size=BATCH_SIZE, target_size=(IMAGE_SIZE, IMAGE_SIZE), class_mode='categorical', shuffle=False)

# Ground-truth integer labels per split, plus class-name lookup.
cls_train = train_generator.classes
cls_valid = valid_generator.classes
cls_test = test_generator.classes
class_names = list(train_generator.class_indices.keys())
num_classes = train_generator.num_classes

# Absolute file paths per split (used when displaying error examples).
image_paths_train = path_join(train_path, train_generator.filenames)
image_paths_valid = path_join(valid_path, valid_generator.filenames)
image_paths_test = path_join(test_path, test_generator.filenames)

# BUG FIX: predict() expects an integer step count; the plain division
# produced a float and would truncate a final partial batch. Round up so
# every test sample is consumed exactly once.
STEPS_TEST = int(np.ceil(test_generator.n / BATCH_SIZE))

# ImageNet-pretrained InceptionV3 convolutional base, classifier head removed.
inc_model = InceptionV3(include_top=False, weights='imagenet', input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3))
inc_model.trainable = False

# Unfreeze the last 30 layers for fine-tuning, but keep BatchNormalization
# layers frozen so their moving statistics are not disturbed.
for layer in inc_model.layers[-30:]:
    layer.trainable = not isinstance(layer, layers.BatchNormalization)

# Classification head stacked on the (partially fine-tuned) InceptionV3 base.
model = Sequential([
    inc_model,
    layers.GlobalAveragePooling2D(),
    layers.Dense(1024, activation='relu'),
    layers.BatchNormalization(),
    layers.Dropout(0.5),
    layers.Dense(512, activation='relu'),
    layers.BatchNormalization(),
    layers.Dropout(0.5),
    layers.Dense(64, activation='relu'),
    layers.BatchNormalization(),
    layers.Dense(N_CLASSES, activation='softmax'),
])

model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])

# Keep only the best-validation-loss weights on disk; stop after the
# validation loss fails to improve for 3 consecutive epochs.
checkpoint = ModelCheckpoint(filepath='../chest_CT_SCAN-InceptionV3.h5', monitor='val_loss', mode='auto', save_best_only=True)
early_stopping = EarlyStopping(monitor='val_loss', patience=3, verbose=1)

start_time = time.time()

# One full pass over each generator per epoch.
steps_per_epoch = len(train_generator)
validation_steps = len(valid_generator)
history = model.fit(
    train_generator,
    steps_per_epoch=steps_per_epoch,
    epochs=200,
    verbose=1,
    validation_data=valid_generator,
    validation_steps=validation_steps,
    callbacks=[checkpoint, early_stopping],
)

# Training-history curves.
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(len(acc))

plt.plot(epochs, acc, 'r', label='Training accuracy')
plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.legend(loc=0)
plt.show()

# BUG FIX: the original called plt.figure() right before plt.show(),
# opening an empty blank figure, and the extracted loss/val_loss series
# were never plotted at all. Plot the loss curves in their own figure.
plt.figure()
plt.plot(epochs, loss, 'r', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend(loc=0)
plt.show()

example_errors()
end_time = time.time()

print(f"Total execution time: {end_time - start_time:.4f} seconds")

# Results recorded from a previous run of this script, kept as a reference
# baseline (string literal has no runtime effect).
'''
              precision    recall  f1-score   support

      cancer     0.7286    0.7183    0.7234        71
      normal     0.3548    0.3667    0.3607        30

    accuracy                         0.6139       101
   macro avg     0.5417    0.5425    0.5420       101
weighted avg     0.6176    0.6139    0.6157       101

Confusion matrix:
[[51 20]
 [19 11]]
(0) cancer
(1) normal
Total execution time: 497.6627 seconds
'''