|
import logging
import os

import gradio as gr
import numpy as np
import pandas as pd
from tensorflow import keras

# Local helper modules shipped with this app: `clustering` detects damage-site
# centroids, `utils` extracts image crops around them and draws labelled boxes.
import clustering
import utils

logging.basicConfig(level=logging.INFO)
|
|
# Output files written by the classification pipeline.
IMAGE_PATH = "classified_damage_sites.png"
CSV_PATH = "classified_damage_sites.csv"
DEFAULT_IMAGE_PATH = "X4-Aligned_cropped_upperleft_small.png"

# Network 1 decides whether a damage site is an inclusion; it sees 250 x 250 pixel crops.
model1_windowsize = [250, 250]
model1 = keras.models.load_model('rwthmaterials_dp800_network1_inclusion.h5')
model1.compile()

# Network 2 classifies the remaining sites on 100 x 100 pixel crops; the keys map
# its output indices to the damage mechanisms.
damage_classes = {0: "Notch", 1: "Shadowing", 2: "Interface", 3: "Martensite"}

model2_windowsize = [100, 100]
model2 = keras.models.load_model('rwthmaterials_dp800_network2_damage.h5')
model2.compile()
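
# Assumption (inferred from how the predictions are indexed below, not from the
# model files themselves): network 1 returns one score per crop with column 0
# interpreted as the inclusion probability, and network 2 returns a four-column
# score vector whose column order follows `damage_classes`.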
|
|
def damage_classification(SEM_image, image_threshold, model1_threshold, model2_threshold):
    """Locate damage sites in an SEM image and classify them with both networks.

    Returns the annotated image, the path of the saved image and the path of the
    CSV file listing every site with its coordinates and damage type.
    """
    damage_sites = {}

    logging.debug('---------------: clustering :=====================')
    all_centroids = clustering.get_centroids(SEM_image, image_threshold=image_threshold,
                                             fill_holes=True, filter_close_centroids=True)

    # Every detected centroid starts out unclassified.
    for centroid in all_centroids:
        key = (centroid[0], centroid[1])
        damage_sites[key] = 'Not Classified'

    logging.debug('---------------: prepare model 1 :=====================')
    images_model1 = utils.prepare_classifier_input(SEM_image, all_centroids, window_size=model1_windowsize)

    logging.debug('---------------: run model 1 :=====================')
    y1_pred = model1.predict(np.asarray(images_model1, float))

    logging.debug('---------------: model 1 threshold :=====================')
    # Column 0 holds the inclusion score; keep the indices of all crops above the threshold.
    inclusions = y1_pred[:, 0].reshape(len(y1_pred), 1)
    inclusions = np.where(inclusions > model1_threshold)

    logging.debug('---------------: model 1 update dict :=====================')
    for centroid_id in inclusions[0]:
        coordinates = all_centroids[centroid_id]
        key = (coordinates[0], coordinates[1])
        damage_sites[key] = 'Inclusion'
    logging.debug('Damage sites after model 1')
    logging.debug(damage_sites)

    logging.debug('---------------: prepare model 2 :=====================')
    # Only the sites that model 1 did not mark as inclusions go into model 2.
    centroids_model2 = []
    for key, value in damage_sites.items():
        if value == 'Not Classified':
            centroids_model2.append([key[0], key[1]])
    logging.debug('Centroids model 2')
    logging.debug(centroids_model2)

    images_model2 = utils.prepare_classifier_input(SEM_image, centroids_model2, window_size=model2_windowsize)
    logging.debug('Images model 2')
    logging.debug(images_model2)

    logging.debug('---------------: run model 2 :=====================')
    y2_pred = model2.predict(np.asarray(images_model2, float))

    # Row index = centroid, column index = damage class whose score exceeds the threshold.
    damage_index = np.asarray(y2_pred > model2_threshold).nonzero()

    for index, identified_class in zip(damage_index[0], damage_index[1]):
        label = damage_classes[identified_class]
        coordinates = centroids_model2[index]
        key = (coordinates[0], coordinates[1])
        damage_sites[key] = label

    logging.debug('-----------------: final damage sites :=================')
    logging.debug(damage_sites)

    # Draw labelled boxes around all classified sites and save the result.
    image = utils.show_boxes(SEM_image, damage_sites,
                             save_image=True,
                             image_path=IMAGE_PATH)

    # Export one row per site: x, y and the assigned damage type.
    data = [[key[0], key[1], value] for key, value in damage_sites.items()]
    df = pd.DataFrame(columns=['x', 'y', 'damage_type'], data=data)
    df.to_csv(CSV_PATH, index=False)

    return image, IMAGE_PATH, CSV_PATH
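

# Optional sketch for running the pipeline without the UI, e.g. for quick local
# testing. The RUN_CLI_DEMO flag is a hypothetical convention introduced here
# (not part of the original app); the threshold values mirror the UI defaults.
# Pillow is assumed to be available (it is a dependency of Gradio).
if os.environ.get("RUN_CLI_DEMO") == "1" and os.path.exists(DEFAULT_IMAGE_PATH):
    from PIL import Image

    _, demo_image_path, demo_csv_path = damage_classification(
        Image.open(DEFAULT_IMAGE_PATH),
        image_threshold=20, model1_threshold=0.7, model2_threshold=0.5)
    logging.info("Demo run finished: %s, %s", demo_image_path, demo_csv_path)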
|
|
with gr.Blocks() as app:
    gr.Markdown('# Damage Classification in Dual Phase Steels')
    gr.Markdown('This app classifies damage types in dual-phase steels. Two models are used: the first identifies inclusions in the steel, and the second classifies the remaining damage types: martensite cracking, interface decohesion, notch effect and shadowing.')

    gr.Markdown('The models used in this app are based on the following papers:')
    gr.Markdown('Kusche, C., Reclik, T., Freund, M., Al-Samman, T., Kerzel, U., & Korte-Kerzel, S. (2019). Large-area, high-resolution characterisation and classification of damage mechanisms in dual-phase steel using deep learning. PloS one, 14(5), e0216493. [Link](https://doi.org/10.1371/journal.pone.0216493)')
    gr.Markdown('Medghalchi, S., Karimi, E., Lee, S.-H., Berkels, B., Kerzel, U., & Korte-Kerzel, S. (2023). Three-dimensional characterisation of deformation-induced damage in dual-phase steel using deep learning. Materials & Design, 232, 112108. [Link](https://doi.org/10.1016/j.matdes.2023.112108)')
    gr.Markdown('Original data and code, including the network weights, can be found on Zenodo: [Link](https://zenodo.org/records/8065752)')
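
    gr.Markdown('Upload an SEM image, then adjust the binarization threshold used to detect candidate damage sites and the minimum certainty (between 0 and 1) each model needs before it assigns a class to a site.')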
|
    with gr.Row():
        with gr.Column(scale=1):
            image_input = gr.Image(type="pil", label="Upload SEM Image",
                                   value=DEFAULT_IMAGE_PATH if os.path.exists(DEFAULT_IMAGE_PATH) else None)
            cluster_threshold_input = gr.Number(value=20, label="Image Binarization Threshold")
            model1_threshold_input = gr.Number(value=0.7, label="Inclusion Model Certainty (0-1)")
            model2_threshold_input = gr.Number(value=0.5, label="Damage Model Certainty (0-1)")
            classify_btn = gr.Button("Run Classification", variant="primary")
        with gr.Column(scale=2):
            output_image = gr.Image(label="Classified Image")

    download_image_btn = gr.DownloadButton(label="Download Image", value=None, visible=False)
    download_csv_btn = gr.DownloadButton(label="Download CSV", value=None, visible=False)
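
    # Wrapper around damage_classification(): on success it shows the download
    # buttons and points them at the files written by the pipeline; on failure
    # it logs the error, raises a Gradio warning and hides the buttons again.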
|
    def run_classification_and_update_ui(sem_image, cluster_thresh, m1_thresh, m2_thresh):
        """
        Calls the core logic and then returns updates for the Gradio UI components.
        """
        try:
            classified_img, img_path, csv_path = damage_classification(sem_image, cluster_thresh, m1_thresh, m2_thresh)
            return (
                classified_img,
                gr.update(value=img_path, visible=True),
                gr.update(value=csv_path, visible=True)
            )
        except Exception as e:
            logging.error(f"Error during classification: {e}")
            gr.Warning(f"An error occurred: {e}")
            return (
                None,
                gr.update(visible=False),
                gr.update(visible=False)
            )
|
    classify_btn.click(
        fn=run_classification_and_update_ui,
        inputs=[
            image_input,
            cluster_threshold_input,
            model1_threshold_input,
            model2_threshold_input,
        ],
        outputs=[
            output_image,
            download_image_btn,
            download_csv_btn,
        ],
    )


if __name__ == "__main__":
    app.launch()
|
|