import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import seaborn as sns
import skimage.io
import tensorflow
import glob
import tqdm
from keras.models import load_model

from tqdm import tqdm

from skimage.io import imread, imshow
from skimage.transform import resize

from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.layers import InputLayer, Dense, BatchNormalization, Dropout, Flatten, Activation
from tensorflow.keras.models import Sequential
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.preprocessing.image import load_img, img_to_array

# %matplotlib inline  (Jupyter notebook magic; has no effect when run as a plain script)


# Tally how many training X-rays exist for each class and report the totals.
train_normal = glob.glob('data/train/NORMAL/*.jpeg')
train_pneumonia = glob.glob('data/train/PNEUMONIA/*.jpeg')

a, b = len(train_normal), len(train_pneumonia)
print(a)
print(b)

print("Total nos. of training images are: {}".format(a + b))


# All generators scale pixels to [0, 1]. The training generator additionally
# applies random zoom augmentation; train/valid share the same 20% holdout
# via validation_split, while test images get rescaling only.
train_datagen = ImageDataGenerator(
    rescale=1.0 / 255.0,
    zoom_range=0.4,
    validation_split=0.2,
)

valid_datagen = ImageDataGenerator(
    rescale=1.0 / 255.0,
    validation_split=0.2,
)

test_datagen = ImageDataGenerator(rescale=1.0 / 255.0)


# Build the training and validation iterators from the same directory tree,
# distinguished only by `subset`; class_mode='binary' yields 0/1 labels
# (NORMAL vs PNEUMONIA, per the class_indices printed below).
train_dataset = train_datagen.flow_from_directory(
    directory='data/train',
    target_size=(224, 224),
    class_mode='binary',
    subset='training',
    batch_size=64,
)

valid_dataset = valid_datagen.flow_from_directory(
    directory='data/train',
    target_size=(224, 224),
    class_mode='binary',
    subset='validation',
    batch_size=64,
)

print(train_dataset.class_indices)
print(len(train_dataset))



# Preview five random training images with their class labels.
fig, ax = plt.subplots(nrows=1, ncols=5, figsize=(20, 20))

for i in tqdm(range(0, 5)):
    # Fetch the batch ONCE: indexing the generator re-runs the random zoom
    # augmentation, so the original's separate image/label lookups generated
    # the batch twice and paired a label with a differently augmented image.
    batch_images, batch_labels = train_dataset[np.random.randint(len(train_dataset))]
    # Sample within the actual batch length: the final batch can be shorter
    # than batch_size, so a fixed randint(64) could raise IndexError.
    rand2 = np.random.randint(len(batch_images))
    ax[i].imshow(batch_images[rand2])
    ax[i].axis('off')
    if batch_labels[rand2] == 1:
        ax[i].set_title('PNEUMONIA')
    else:
        ax[i].set_title('NORMAL')


# Transfer learning: a frozen ImageNet-pretrained VGG16 convolutional base,
# topped with a fully-connected head (two 1024-unit blocks with batch norm,
# ReLU and dropout) ending in a single sigmoid unit for binary output.
base_model = VGG16(input_shape=(224, 224, 3),
                   include_top=False,
                   weights="imagenet")

# Freeze every convolutional layer so only the new head is trained.
for vgg_layer in base_model.layers:
    vgg_layer.trainable = False
base_model.summary()

model = Sequential([
    base_model,
    Dropout(0.2),
    Flatten(),
    BatchNormalization(),
    Dense(1024, kernel_initializer='he_uniform'),
    BatchNormalization(),
    Activation('relu'),
    Dropout(0.2),
    Dense(1024, kernel_initializer='he_uniform'),
    BatchNormalization(),
    Activation('relu'),
    Dropout(0.2),
    Dense(1, activation='sigmoid'),
])
model.summary()

# BUG FIX: Adam's argument is `learning_rate`; the old `lr` alias is
# deprecated and removed in recent TF/Keras releases, where it raises.
OPT = tensorflow.keras.optimizers.Adam(learning_rate=0.001)

# AUC is tracked (named 'auc' so callbacks can monitor 'val_auc' below);
# binary cross-entropy matches the single sigmoid output.
model.compile(loss='binary_crossentropy',
              metrics=[tensorflow.keras.metrics.AUC(name='auc')],
              optimizer=OPT)

# Both callbacks watch validation AUC (maximize): training halts after
# three stagnant epochs, and only the best-scoring weights reach disk.
filepath = 'model/best_weights.hdf5'

earlystopping = EarlyStopping(monitor='val_auc',
                              mode='max',
                              patience=3,
                              verbose=1)

checkpoint = ModelCheckpoint(filepath,
                             monitor='val_auc',
                             mode='max',
                             save_best_only=True,
                             verbose=1)

callback_list = [earlystopping, checkpoint]

# Train the head; `model_history` is the Keras History object whose
# .history dict feeds the loss/AUC plots below.
model_history = model.fit(train_dataset,
                          validation_data=valid_dataset,
                          epochs=10,
                          callbacks=callback_list,
                          verbose=1)

model.save('static/model/best_weights.hdf5')
# BUG FIX: the original rebound `model_history` to the loaded Model, which
# destroyed the History object and broke every `model_history.history[...]`
# access below. Reload into `model` and keep the training history intact.
model = load_model('static/model/best_weights.hdf5')


# Plot the training curves recorded in the fit history: loss, then AUC,
# each with its train and validation series.
for metric, title, ylabel in (('loss', 'Model Loss', 'Loss'),
                              ('auc', 'Model AUC', 'AUC')):
    plt.plot(model_history.history[metric])
    plt.plot(model_history.history['val_' + metric])
    plt.title(title)
    plt.ylabel(ylabel)
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Validation'], loc='upper left', bbox_to_anchor=(1, 1))
    plt.show()



# Build the held-out test iterator (rescale only, no augmentation) and
# report the model's loss/AUC on it.
test_dataset = test_datagen.flow_from_directory(
    directory='data/test',
    target_size=(224, 224),
    class_mode='binary',
    batch_size=64,
)

model.evaluate(test_dataset)




# Invert class_indices so an index maps back to its class name,
# e.g. {0: 'NORMAL', 1: 'PNEUMONIA'}.
dic = test_dataset.class_indices
idc = {v: k for k, v in dic.items()}

# Classify one image: preprocess exactly as the generators did
# (resize to 224x224, scale to [0, 1]) and add a batch dimension.
img = load_img('data/valid/mvj123987hxnuyqw.jpeg', target_size=(224, 224))
img = img_to_array(img)
img = img / 255
imshow(img)
plt.axis('off')
img = np.expand_dims(img, axis=0)

# BUG FIX: tf.keras models have no `predict_proba`, and `model_history`
# is not a model -- use the trained model's `predict`. The sigmoid output
# is already a probability in [0, 1].
answer = model.predict(img)
print(answer)

# Extract the scalar once instead of comparing/rounding the raw ndarray.
risk = float(answer[0][0])
if risk > 0.5:
    print("PNEUMONIA!")
    if risk > 0.9999:
        print('The Risk of Getting PNEUMONIA is over 99.99%!')
    else:
        print('The Risk of Getting PNEUMONIA is nearly', round(risk * 100, 2), '%.')
else:
    print("NORMAL.")
    if risk < 0.0001:
        print('The Risk of Getting PNEUMONIA is less than 0.01%!')
    else:
        print('The Risk of Getting PNEUMONIA is nearly', round(risk * 100, 2), ' %.')



