import pandas as pd
import numpy as np
import tensorflow as tf
import PIL
import os
import cv2
import matplotlib.pyplot as plt
from tensorflow.keras import layers
from tensorflow.keras import Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.applications import Xception
from tensorflow.keras.applications.xception import preprocess_input
from sklearn.metrics import classification_report, roc_curve, auc

path = r"D:\kaggle\final\data1\train"
# Quick sanity check: list every entry (class sub-directory) under the
# training folder so the dataset layout is visible in the console.
for entry in os.listdir(path):
    print(os.path.join(path, entry))

train_path = r"D:\kaggle\final\data1\train"
test_path = r"D:\kaggle\final\data1\test"

# Helper-function for joining a directory and list of filenames.
# Helper-function for joining a directory and list of filenames.
def path_join(dirname, filenames):
    """Return a list with *dirname* prefixed onto every entry of *filenames*."""
    joined = []
    for name in filenames:
        joined.append(os.path.join(dirname, name))
    return joined


# Helper-function for plotting images
# Helper-function for plotting images
def plot_images(images, cls_true, cls_pred=None, smooth=True):
    """Show up to 9 images in a 3x3 grid, labelled with their true class.

    If *cls_pred* is given, the predicted class is shown on a second line.
    Class numbers are translated to names via the module-level
    ``class_names`` list.
    """
    assert len(images) == len(cls_true)

    # Build the 3x3 grid of sub-plots.
    fig, axes = plt.subplots(3, 3, figsize=(15, 15))

    # Two label lines (true + predicted) need more vertical room than one.
    hspace = 0.6 if cls_pred is not None else 0.3
    fig.subplots_adjust(hspace=hspace, wspace=0.3)

    # Smooth spline interpolation, or raw nearest-neighbour pixels.
    interpolation = 'spline16' if smooth else 'nearest'

    for i, ax in enumerate(axes.flat):
        # Fewer than 9 images may have been supplied; leave spare axes blank.
        if i < len(images):
            ax.imshow(images[i], interpolation=interpolation)

            # Readable name of the true class.
            cls_true_name = class_names[cls_true[i]]

            if cls_pred is None:
                xlabel = "True: {0}".format(cls_true_name)
            else:
                # Readable name of the predicted class.
                cls_pred_name = class_names[cls_pred[i]]
                xlabel = "True: {0}\nPred: {1}".format(cls_true_name, cls_pred_name)

            # The class label(s) go on the x-axis of each sub-plot.
            ax.set_xlabel(xlabel)

        # No tick marks on any sub-plot.
        ax.set_xticks([])
        ax.set_yticks([])

    # Ensure the plot is shown correctly with multiple plots
    # in a single Notebook cell.
    plt.show()


# Helper-function for printing confusion matrix

# Import a function from sklearn to calculate the confusion-matrix.
from sklearn.metrics import confusion_matrix


def print_confusion_matrix(cls_pred):
    """Print the confusion matrix of *cls_pred* against the global ``cls_test``.

    cls_pred is an array of the predicted class-number for every image in
    the test-set.
    """
    # sklearn builds the matrix: rows = true class, columns = predicted.
    cm = confusion_matrix(y_true=cls_test, y_pred=cls_pred)

    print("Confusion matrix:")
    print(cm)

    # Legend mapping each row/column index to its class name.
    for i, class_name in enumerate(class_names):
        print("({0}) {1}".format(i, class_name))


# Helper-function for plotting example errors
# Helper-function for plotting example errors
def plot_example_errors(cls_pred):
    """Plot up to 9 test images that the model classified incorrectly.

    cls_pred is an array of the predicted class-number for every image in
    the test-set; globals ``cls_test`` and ``image_paths_test`` supply the
    ground truth and file locations.
    """
    # Mask of the test images whose prediction disagrees with the truth.
    incorrect = cls_pred != cls_test

    # File paths of the mis-classified images; only the first 9 are loaded.
    wrong_paths = np.array(image_paths_test)[incorrect]
    images = load_images(image_paths=wrong_paths[0:9])

    # Restrict predictions and ground truth to the mis-classified subset.
    cls_pred = cls_pred[incorrect]
    cls_true = cls_test[incorrect]

    # Only 9 images were loaded, so slice the label arrays to match.
    plot_images(images=images,
                cls_true=cls_true[0:9],
                cls_pred=cls_pred[0:9])


# Function for calculating the predicted classes of the entire test-set and calling
# the above function to plot a few examples of mis-classified images.
# Function for calculating the predicted classes of the entire test-set and calling
# the above function to plot a few examples of mis-classified images.
def example_errors():
    """Predict on the whole test-set, print metrics, plot the ROC curve and
    some mis-classified examples.

    Relies on module-level globals: ``model``, ``test_generator``,
    ``STEPS_TEST``, ``cls_test`` and ``class_names``.
    """
    # The Keras data-generator for the test-set must be reset before
    # processing: the generator loops infinitely and keeps an internal
    # index, so without a reset it could start mid-dataset and the
    # predictions could not be matched to the input images.
    test_generator.reset()

    # Class probabilities for every test image, shape (n_samples, n_classes).
    y_pred = model.predict(test_generator, steps=STEPS_TEST)

    # Hard class labels: index of the most probable class per image.
    cls_pred = np.argmax(y_pred, axis=1)

    print(classification_report(cls_test, cls_pred, target_names=class_names))

    # BUG FIX: the ROC curve must be computed from the predicted
    # *probability* of the positive class (index 1), not from the hard
    # argmax labels -- hard labels collapse the curve to a single
    # operating point and make the AUC meaningless.
    fpr, tpr, _ = roc_curve(cls_test, y_pred[:, 1], pos_label=1)
    roc_auc = auc(fpr, tpr)
    plt.figure()
    plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)
    plt.plot([0, 1], [0, 1], 'k--')  # chance-level diagonal
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver Operating Characteristic')
    plt.legend(loc="lower right")
    plt.show()

    # Plot examples of mis-classified images.
    plot_example_errors(cls_pred)

    # Print the confusion matrix.
    print_confusion_matrix(cls_pred)

# Helper-function for loading images
# Helper-function for loading images
def load_images(image_paths):
    """Read every file in *image_paths* from disk and return one numpy array."""
    loaded = []
    for image_path in image_paths:
        loaded.append(plt.imread(image_path))

    # Stack the individual images into a single array.
    return np.asarray(loaded)

# Set some important constants here
IMAGE_SIZE = 224   # input side length fed to Xception
N_CLASSES = 2      # cancer vs. normal
BATCH_SIZE = 5

# ImageDataGenerator is used because the dataset is small; it also applies
# the Xception-specific preprocessing to every image.

train_datagen = ImageDataGenerator(dtype='float32', preprocessing_function=preprocess_input)
train_generator = train_datagen.flow_from_directory(train_path,
                                                    batch_size=BATCH_SIZE,
                                                    target_size=(IMAGE_SIZE, IMAGE_SIZE),
                                                    class_mode='categorical')

test_datagen = ImageDataGenerator(dtype='float32', preprocessing_function=preprocess_input)
# BUG FIX: shuffle must be False for the test generator. flow_from_directory
# defaults to shuffle=True, which randomises the batch order, so predictions
# would no longer line up with test_generator.classes (cls_test) and the
# classification report / confusion matrix would be computed on misaligned
# labels.
test_generator = test_datagen.flow_from_directory(test_path,
                                                  batch_size=BATCH_SIZE,
                                                  target_size=(IMAGE_SIZE, IMAGE_SIZE),
                                                  class_mode='categorical',
                                                  shuffle=False)

# save some values to be used later

cls_train = train_generator.classes            # true labels, filename order
cls_test = test_generator.classes              # true labels, filename order
class_names = list(train_generator.class_indices.keys())
print(class_names)
num_classes = train_generator.num_classes
print("num classes:", num_classes)

image_paths_train = path_join(train_path, train_generator.filenames)
image_paths_test = path_join(test_path, test_generator.filenames)

# BUG FIX: predict/evaluate expect an integer number of steps; use ceiling
# division so the final partial batch is still consumed.
STEPS_TEST = int(np.ceil(test_generator.n / BATCH_SIZE))

# ImageNet-pretrained Xception backbone without its classification head.
xcep_model = Xception(include_top=False, weights='imagenet',
                      input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3))

# Freeze every layer except the final exit-flow block ('block14'), which
# stays trainable for fine-tuning.
for layer in xcep_model.layers:
    layer.trainable = 'block14' in layer.name

# Print each base-model layer's trainable flag for verification.
for i, layer in enumerate(xcep_model.layers):
    print(i, layer.name, "-", layer.trainable)

# Classification head on top of the (mostly frozen) backbone.
model = Sequential([
    xcep_model,
    layers.GlobalAveragePooling2D(),
    layers.Dropout(0.5),
    layers.Dense(1024, activation='relu'),
    layers.BatchNormalization(),
    layers.Dropout(0.5),
    layers.Dense(512, activation='relu'),
    layers.BatchNormalization(),
    layers.Dense(N_CLASSES, activation='softmax'),  # two output classes
])

model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
model.summary()

# Keep only the weights with the lowest validation loss seen so far.
checkpoint = ModelCheckpoint(filepath='../chest_CT_SCAN-Xception.h5',
                             monitor='val_loss',
                             mode='auto',
                             save_best_only=True)
# Stop training after 3 consecutive epochs without improvement.
early_stopping = EarlyStopping(verbose=1, patience=3)

import time

start_time = time.time()

# len(generator) is the exact number of batches per epoch (including a
# possible partial final batch), so no manual step arithmetic is needed.
steps_per_epoch = len(train_generator)
validation_steps = len(test_generator)

history = model.fit(train_generator,
                    steps_per_epoch=steps_per_epoch,
                    epochs=20,
                    verbose=1,
                    validation_data=test_generator,
                    validation_steps=validation_steps,
                    callbacks=[checkpoint, early_stopping])

import matplotlib.pyplot as plt

# Per-epoch training-history curves.
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(len(acc))

# Accuracy curves.
plt.plot(epochs, acc, 'r', label='Training accuracy')
plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.legend(loc=0)

# BUG FIX: loss/val_loss were extracted but never plotted, and the trailing
# bare plt.figure() only opened an empty window -- plot the losses in the
# second figure instead.
plt.figure()
plt.plot(epochs, loss, 'r', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend(loc=0)

plt.show()


result = model.evaluate(test_generator, steps=STEPS_TEST)

example_errors()

end_time = time.time()

print("Total execution time: {} seconds".format(end_time - start_time))


'''
D:\kaggle\final\data\train\cancer
D:\kaggle\final\data\train\normal
Found 363 images belonging to 2 classes.
Found 121 images belonging to 2 classes.
['cancer', 'normal']
num classes: 2
2024-03-27 16:04:14.692181: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations:  AVX2
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2024-03-27 16:04:14.693106: I tensorflow/core/common_runtime/process_util.cc:146] Creating new thread pool with default inter op setting: 2. Tune using inter_op_parallelism_threads for best performance.
0 input_1 - False
1 block1_conv1 - False
2 block1_conv1_bn - False
3 block1_conv1_act - False
4 block1_conv2 - False
5 block1_conv2_bn - False
6 block1_conv2_act - False
7 block2_sepconv1 - False
8 block2_sepconv1_bn - False
9 block2_sepconv2_act - False
10 block2_sepconv2 - False
11 block2_sepconv2_bn - False
12 conv2d - False
13 block2_pool - False
14 batch_normalization - False
15 add - False
16 block3_sepconv1_act - False
17 block3_sepconv1 - False
18 block3_sepconv1_bn - False
19 block3_sepconv2_act - False
20 block3_sepconv2 - False
21 block3_sepconv2_bn - False
22 conv2d_1 - False
23 block3_pool - False
24 batch_normalization_1 - False
25 add_1 - False
26 block4_sepconv1_act - False
27 block4_sepconv1 - False
28 block4_sepconv1_bn - False
29 block4_sepconv2_act - False
30 block4_sepconv2 - False
31 block4_sepconv2_bn - False
32 conv2d_2 - False
33 block4_pool - False
34 batch_normalization_2 - False
35 add_2 - False
36 block5_sepconv1_act - False
37 block5_sepconv1 - False
38 block5_sepconv1_bn - False
39 block5_sepconv2_act - False
40 block5_sepconv2 - False
41 block5_sepconv2_bn - False
42 block5_sepconv3_act - False
43 block5_sepconv3 - False
44 block5_sepconv3_bn - False
45 add_3 - False
46 block6_sepconv1_act - False
47 block6_sepconv1 - False
48 block6_sepconv1_bn - False
49 block6_sepconv2_act - False
50 block6_sepconv2 - False
51 block6_sepconv2_bn - False
52 block6_sepconv3_act - False
53 block6_sepconv3 - False
54 block6_sepconv3_bn - False
55 add_4 - False
56 block7_sepconv1_act - False
57 block7_sepconv1 - False
58 block7_sepconv1_bn - False
59 block7_sepconv2_act - False
60 block7_sepconv2 - False
61 block7_sepconv2_bn - False
62 block7_sepconv3_act - False
63 block7_sepconv3 - False
64 block7_sepconv3_bn - False
65 add_5 - False
66 block8_sepconv1_act - False
67 block8_sepconv1 - False
68 block8_sepconv1_bn - False
69 block8_sepconv2_act - False
70 block8_sepconv2 - False
71 block8_sepconv2_bn - False
72 block8_sepconv3_act - False
73 block8_sepconv3 - False
74 block8_sepconv3_bn - False
75 add_6 - False
76 block9_sepconv1_act - False
77 block9_sepconv1 - False
78 block9_sepconv1_bn - False
79 block9_sepconv2_act - False
80 block9_sepconv2 - False
81 block9_sepconv2_bn - False
82 block9_sepconv3_act - False
83 block9_sepconv3 - False
84 block9_sepconv3_bn - False
85 add_7 - False
86 block10_sepconv1_act - False
87 block10_sepconv1 - False
88 block10_sepconv1_bn - False
89 block10_sepconv2_act - False
90 block10_sepconv2 - False
91 block10_sepconv2_bn - False
92 block10_sepconv3_act - False
93 block10_sepconv3 - False
94 block10_sepconv3_bn - False
95 add_8 - False
96 block11_sepconv1_act - False
97 block11_sepconv1 - False
98 block11_sepconv1_bn - False
99 block11_sepconv2_act - False
100 block11_sepconv2 - False
101 block11_sepconv2_bn - False
102 block11_sepconv3_act - False
103 block11_sepconv3 - False
104 block11_sepconv3_bn - False
105 add_9 - False
106 block12_sepconv1_act - False
107 block12_sepconv1 - False
108 block12_sepconv1_bn - False
109 block12_sepconv2_act - False
110 block12_sepconv2 - False
111 block12_sepconv2_bn - False
112 block12_sepconv3_act - False
113 block12_sepconv3 - False
114 block12_sepconv3_bn - False
115 add_10 - False
116 block13_sepconv1_act - False
117 block13_sepconv1 - False
118 block13_sepconv1_bn - False
119 block13_sepconv2_act - False
120 block13_sepconv2 - False
121 block13_sepconv2_bn - False
122 conv2d_3 - False
123 block13_pool - False
124 batch_normalization_3 - False
125 add_11 - False
126 block14_sepconv1 - True
127 block14_sepconv1_bn - True
128 block14_sepconv1_act - True
129 block14_sepconv2 - True
130 block14_sepconv2_bn - True
131 block14_sepconv2_act - True
Model: "sequential"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 xception (Functional)       (None, 7, 7, 2048)        20861480  
                                                                 
 global_average_pooling2d (G  (None, 2048)             0         
 lobalAveragePooling2D)                                          
                                                                 
 dropout (Dropout)           (None, 2048)              0         
                                                                 
 dense (Dense)               (None, 1024)              2098176   
                                                                 
 batch_normalization_4 (Batc  (None, 1024)             4096      
 hNormalization)                                                 
                                                                 
 dropout_1 (Dropout)         (None, 1024)              0         
                                                                 
 dense_1 (Dense)             (None, 512)               524800    
                                                                 
 batch_normalization_5 (Batc  (None, 512)              2048      
 hNormalization)                                                 
                                                                 
 dense_2 (Dense)             (None, 2)                 1026      
                                                                 
=================================================================
Total params: 23,491,626
Trainable params: 7,375,874
Non-trainable params: 16,115,752
_________________________________________________________________
Epoch 1/20
73/73 [==============================] - 44s 553ms/step - loss: 0.8098 - acc: 0.7879 - val_loss: 0.2276 - val_acc: 0.9008
Epoch 2/20
73/73 [==============================] - 41s 561ms/step - loss: 0.4213 - acc: 0.8678 - val_loss: 0.0402 - val_acc: 1.0000
Epoch 3/20
73/73 [==============================] - 39s 538ms/step - loss: 0.1504 - acc: 0.9477 - val_loss: 0.1046 - val_acc: 0.8843
Epoch 4/20
73/73 [==============================] - 40s 551ms/step - loss: 0.1332 - acc: 0.9477 - val_loss: 0.0653 - val_acc: 0.9835
Epoch 5/20
73/73 [==============================] - 41s 558ms/step - loss: 0.0944 - acc: 0.9697 - val_loss: 0.0105 - val_acc: 0.9917
Epoch 6/20
73/73 [==============================] - 41s 561ms/step - loss: 0.1070 - acc: 0.9614 - val_loss: 0.4525 - val_acc: 0.9339
Epoch 7/20
73/73 [==============================] - 39s 542ms/step - loss: 0.0540 - acc: 0.9807 - val_loss: 0.0440 - val_acc: 0.9504
Epoch 8/20
73/73 [==============================] - 40s 548ms/step - loss: 0.0318 - acc: 0.9862 - val_loss: 0.0700 - val_acc: 0.9504
Epoch 8: early stopping
Can't find filter element
Can't find filter element
24/24 [==============================] - 10s 395ms/step - loss: 0.0700 - acc: 0.9504
24/24 [==============================] - 10s 387ms/step
              precision    recall  f1-score   support

      cancer       0.83      0.88      0.85       100
      normal       0.20      0.14      0.17        21

    accuracy                           0.75       121
   macro avg       0.52      0.51      0.51       121
weighted avg       0.72      0.75      0.74       121

Confusion matrix:
[[88 12]
 [18  3]]
(0) cancer
(1) normal
Total execution time: 356.13553643226624 seconds'''


'''
2024-04-08 21:44:15.487872: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations:  AVX2
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2024-04-08 21:44:15.493082: I tensorflow/core/common_runtime/process_util.cc:146] Creating new thread pool with default inter op setting: 2. Tune using inter_op_parallelism_threads for best performance.
Epoch 1/10
13/13 [==============================] - 342s 26s/step - loss: 388.0331 - accuracy: 0.5039 - val_loss: 0.6820 - val_accuracy: 0.6298
Epoch 2/10
13/13 [==============================] - 260s 20s/step - loss: 1.5785 - accuracy: 0.5426 - val_loss: 2.3772 - val_accuracy: 0.6298
Epoch 3/10
13/13 [==============================] - 316s 25s/step - loss: 1.0675 - accuracy: 0.5633 - val_loss: 0.6640 - val_accuracy: 0.6298
Epoch 4/10
13/13 [==============================] - 348s 27s/step - loss: 0.7282 - accuracy: 0.6305 - val_loss: 0.5785 - val_accuracy: 0.6298
Epoch 5/10
13/13 [==============================] - 274s 21s/step - loss: 0.7751 - accuracy: 0.5762 - val_loss: 0.6256 - val_accuracy: 0.6298
Epoch 6/10
13/13 [==============================] - 248s 19s/step - loss: 0.8326 - accuracy: 0.5995 - val_loss: 0.6337 - val_accuracy: 0.6298
Epoch 7/10
13/13 [==============================] - 250s 19s/step - loss: 0.8039 - accuracy: 0.5866 - val_loss: 0.6164 - val_accuracy: 0.6298
Epoch 8/10
13/13 [==============================] - 261s 20s/step - loss: 0.5697 - accuracy: 0.6899 - val_loss: 0.7360 - val_accuracy: 0.4340
Epoch 9/10
13/13 [==============================] - 255s 20s/step - loss: 0.4648 - accuracy: 0.7571 - val_loss: 0.7893 - val_accuracy: 0.6298
Epoch 10/10
13/13 [==============================] - 253s 20s/step - loss: 0.3746 - accuracy: 0.7674 - val_loss: 18.1949 - val_accuracy: 0.6298
8/8 [==============================] - 45s 6s/step
D:\anaconda1\envs\DL\lib\site-packages\sklearn\metrics\_classification.py:1334: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
  _warn_prf(average, modifier, msg_start, len(result))
D:\anaconda1\envs\DL\lib\site-packages\sklearn\metrics\_classification.py:1334: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
  _warn_prf(average, modifier, msg_start, len(result))
D:\anaconda1\envs\DL\lib\site-packages\sklearn\metrics\_classification.py:1334: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
  _warn_prf(average, modifier, msg_start, len(result))
              precision    recall  f1-score   support

           0       0.00      0.00      0.00        87
           1       0.63      1.00      0.77       148

    accuracy                           0.63       235
   macro avg       0.31      0.50      0.39       235
weighted avg       0.40      0.63      0.49       235

Program running time: 2808.0324528217316
'''
