Upload 6 files
- .gitattributes +1 -0
- app.py +100 -0
- augment_images.py +64 -0
- data_check.py +55 -0
- evaluate_model.py +21 -0
- model.py +20 -0
- xray_image_classifier_model.keras +3 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+xray_image_classifier_model.keras filter=lfs diff=lfs merge=lfs -text
app.py
ADDED
@@ -0,0 +1,100 @@
import gradio as gr
from tensorflow.keras.models import load_model
import numpy as np
from PIL import Image

model = load_model('xray_image_classifier_model.keras')

def predict(image):
    # Ensure 3 channels and the 150x150 size the model expects
    img = image.convert('RGB').resize((150, 150))
    img_array = np.array(img) / 255.0
    img_array = np.expand_dims(img_array, axis=0)
    prediction = model.predict(img_array)
    predicted_class = 'Pneumonia' if prediction[0][0] > 0.5 else 'Normal'
    return predicted_class

css = """
.gradio-container {
    background-color: #f5f5f5;
    font-family: Arial, sans-serif;
}

.gr-button {
    background-color: #007bff;
    color: blue;
    border: none; /* Remove default border */
    border-radius: 5px; /* Rounded corners */
    font-size: 16px; /* Adjust font size */
    padding: 10px 20px; /* Add padding for a larger clickable area */
    cursor: pointer; /* Change cursor to pointer to indicate it's clickable */
    transition: background-color 0.3s ease; /* Smooth hover transition */
}

.gr-button:hover {
    background-color: #0056b3; /* Darker blue on hover */
}

.gr-textbox, .gr-image {
    border: 2px dashed #007bff;
    padding: 20px;
    border-radius: 10px;
    background-color: #ffffff;
}

.gr-box-text {
    color: #007bff;
    font-size: 22px;
    font-weight: bold;
    text-align: center;
}

h1 {
    font-size: 36px;
    color: #007bff;
    text-align: center;
}

p {
    font-size: 20px;
    color: #333;
    text-align: center;
}
"""

description = """
**Automated Pneumonia Detection via Chest X-ray Classification**

This model classifies chest X-ray images as either 'Pneumonia' or 'Normal'. It uses the InceptionV3 architecture for transfer learning, combined with data preprocessing and augmentation, to deliver strong performance in medical image analysis and to help automate pneumonia diagnosis.

**Technologies Employed:**
- TensorFlow & Keras for model development
- InceptionV3 for transfer learning
- NumPy, Pandas, and Matplotlib for data handling and visualization
- Flask and Gradio for deployment and user interaction
"""

examples = [
    ["samples/normal_xray1.png"],
    ["samples/pneumonia_xray1.png"],
]

with gr.Blocks(css=css) as interface:
    gr.Markdown("<h1>Automated Pneumonia Detection via Chest X-ray Classification</h1>")
    gr.Markdown("<p>Submit a chest X-ray image below.</p>")

    with gr.Row():
        image_input = gr.Image(label="Drop Image Here", type="pil", elem_classes=["gr-image", "gr-box-text"])
        output = gr.Textbox(label="Model Analysis Output", elem_classes=["gr-textbox", "gr-box-text"])

    submit_btn = gr.Button("Initiate Diagnostic Analysis", elem_classes=["gr-button"])
    submit_btn.click(fn=predict, inputs=image_input, outputs=output)

    gr.Markdown(
        '<div style="background-color: yellow; padding: 10px; border-radius: 5px; font-weight: bold;">'
        'Sample Images: To test the model, select one of the sample images provided below. '
        'Click on an image and then press the "Initiate Diagnostic Analysis" button to receive the results.'
        '</div>'
    )

    gr.Examples(examples=examples, inputs=image_input)

    gr.Markdown(description)

interface.launch()
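A quick sanity check, not part of the upload: predict() can be exercised directly outside the Gradio UI, assuming the sample image referenced in the examples list exists in the repository.

from PIL import Image

sample = Image.open("samples/pneumonia_xray1.png")  # path assumed from the examples list above
print(predict(sample))  # prints 'Pneumonia' or 'Normal'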
augment_images.py
ADDED
@@ -0,0 +1,64 @@
import os
from PIL import Image
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Set paths
base_dir = 'data/chest_xray'
val_dir = os.path.join(base_dir, 'val')
normal_class_dir = os.path.join(val_dir, 'NORMAL')
pneumonia_class_dir = os.path.join(val_dir, 'PNEUMONIA')


def augment_images(class_directory, num_augmented_images):
    datagen = ImageDataGenerator(
        rescale=1. / 255,
        rotation_range=20,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
        fill_mode='nearest'
    )

    generator = datagen.flow_from_directory(
        directory=os.path.dirname(class_directory),  # Parent directory
        target_size=(150, 150),
        batch_size=1,
        class_mode=None,
        shuffle=False,
        classes=[os.path.basename(class_directory)]  # Restrict to this class's subdirectory
    )

    print(f"Found {generator.samples} images in {class_directory}")

    if generator.samples == 0:
        print("No images found in the directory.")
        return

    count = 0

    while count < num_augmented_images:
        try:
            img_batch = next(generator)  # Get the next augmented batch
            img = (img_batch[0] * 255).astype('uint8')  # Extract the first image in the batch
            img_pil = Image.fromarray(img)
            img_path = os.path.join(class_directory, f"augmented_{count}.png")
            img_pil.save(img_path)
            count += 1
        except StopIteration:
            print("No more images to generate.")
            break

    print(f"Total augmented images created: {count}")


# Number of augmented images to generate
num_augmented_images_normal = 2944 - 3875  # Negative: NORMAL already exceeds the target, so no augmentation is needed
num_augmented_images_pneumonia = 2944 - 1171  # Bring PNEUMONIA up to the same target count

# Generate augmented images for the NORMAL class
augment_images(normal_class_dir, max(num_augmented_images_normal, 0))

# Generate augmented images for the PNEUMONIA class
augment_images(pneumonia_class_dir, num_augmented_images_pneumonia)
data_check.py
ADDED
@@ -0,0 +1,55 @@
import os
from PIL import Image

# Define the data directories
base_dir = 'data/chest_xray'
train_dir = os.path.join(base_dir, 'train')
val_dir = os.path.join(base_dir, 'val')


# Function to count images in a specific category (e.g., NORMAL, PNEUMONIA)
def count_images(directory, category):
    category_dir = os.path.join(directory, category)
    count = 0
    for root, dirs, files in os.walk(category_dir):
        count += len([f for f in files if f.endswith(('.jpg', '.jpeg', '.png'))])
    return count


# Function to check for corrupted images in a specific category
def check_corrupted_images(directory, category):
    category_dir = os.path.join(directory, category)
    corrupted_files = []
    for root, dirs, files in os.walk(category_dir):
        for file in files:
            if file.endswith(('.jpg', '.jpeg', '.png')):
                try:
                    img = Image.open(os.path.join(root, file))
                    img.verify()  # Check if the image can be opened and is not corrupted
                except (IOError, SyntaxError):
                    corrupted_files.append(os.path.join(root, file))
    return corrupted_files


# Count images in the train and validation sets
train_normal_count = count_images(train_dir, 'NORMAL')
train_pneumonia_count = count_images(train_dir, 'PNEUMONIA')
val_normal_count = count_images(val_dir, 'NORMAL')
val_pneumonia_count = count_images(val_dir, 'PNEUMONIA')

# Check for corrupted images in the train and validation sets
train_normal_corrupted = check_corrupted_images(train_dir, 'NORMAL')
train_pneumonia_corrupted = check_corrupted_images(train_dir, 'PNEUMONIA')
val_normal_corrupted = check_corrupted_images(val_dir, 'NORMAL')
val_pneumonia_corrupted = check_corrupted_images(val_dir, 'PNEUMONIA')

# Print the results
print(f"Training NORMAL images: {train_normal_count}")
print(f"Training PNEUMONIA images: {train_pneumonia_count}")
print(f"Validation NORMAL images: {val_normal_count}")
print(f"Validation PNEUMONIA images: {val_pneumonia_count}")

print(f"Corrupted images in training NORMAL: {train_normal_corrupted}")
print(f"Corrupted images in training PNEUMONIA: {train_pneumonia_corrupted}")
print(f"Corrupted images in validation NORMAL: {val_normal_corrupted}")
print(f"Corrupted images in validation PNEUMONIA: {val_pneumonia_corrupted}")
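If any corrupted files do turn up, a natural follow-up (not part of this upload) is to delete them before the training generators read them; a minimal sketch using the lists collected above:

for path in (train_normal_corrupted + train_pneumonia_corrupted
             + val_normal_corrupted + val_pneumonia_corrupted):
    os.remove(path)  # drop the unreadable file so flow_from_directory never sees it
    print(f"Removed corrupted file: {path}")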
evaluate_model.py
ADDED
@@ -0,0 +1,21 @@
import os
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import load_model

base_dir = 'data/chest_xray'
val_dir = os.path.join(base_dir, 'val')

val_datagen = ImageDataGenerator(rescale=1./255)
val_generator = val_datagen.flow_from_directory(
    val_dir,
    target_size=(150, 150),
    batch_size=32,
    class_mode='binary'
)

model = load_model('xray_image_classifier_model.keras')

loss, accuracy = model.evaluate(val_generator)
print(f'Validation Loss: {loss:.4f}')
print(f'Validation Accuracy: {accuracy:.4f}')
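Accuracy alone can hide class imbalance, so a per-class report is often worth adding. This optional sketch is not part of the upload and assumes scikit-learn is installed; a second generator with shuffle=False keeps predictions aligned with the ground-truth labels.

from sklearn.metrics import classification_report

ordered_generator = val_datagen.flow_from_directory(
    val_dir,
    target_size=(150, 150),
    batch_size=32,
    class_mode='binary',
    shuffle=False  # preserve file order so labels and predictions line up
)
probs = model.predict(ordered_generator)
preds = (probs > 0.5).astype(int).ravel()
print(classification_report(ordered_generator.classes, preds,
                            target_names=list(ordered_generator.class_indices)))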
model.py
ADDED
@@ -0,0 +1,20 @@
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.applications import InceptionV3


def create_model():
    base_model = InceptionV3(weights='imagenet', include_top=False, input_shape=(150, 150, 3))
    base_model.trainable = False  # Freeze the base model layers

    model = models.Sequential([
        base_model,
        layers.GlobalAveragePooling2D(),
        layers.Dense(512, activation='relu'),
        layers.Dropout(0.5),
        layers.Dense(1, activation='sigmoid')
    ])

    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model
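The training script itself is not included in this upload; the sketch below only illustrates how create_model() could be wired to the same generator setup used in evaluate_model.py. Paths, epoch count, and batch size are assumptions.

import os
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from model import create_model

base_dir = 'data/chest_xray'
train_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
    os.path.join(base_dir, 'train'),
    target_size=(150, 150),  # matches the model's (150, 150, 3) input
    batch_size=32,
    class_mode='binary'
)

model = create_model()
model.fit(train_generator, epochs=10)  # epoch count is illustrative
model.save('xray_image_classifier_model.keras')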
xray_image_classifier_model.keras
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ad1c91968bb830cc6f96327676f487e94763aaec1c83be18e6270cb47bf273fa
size 100786904