import os
import subprocess

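
# Download the wkhtmltopdf .deb, unpack it without root access, and expose the
# binaries on PATH so pdfkit can shell out to them when building the report.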
def install_wkhtmltopdf():
    try:
        subprocess.run(
            ["wget", "https://github.com/wkhtmltopdf/packaging/releases/download/0.12.6.1-2/wkhtmltox_0.12.6.1-2.bullseye_amd64.deb"],
            check=True
        )

        subprocess.run(["ar", "x", "wkhtmltox_0.12.6.1-2.bullseye_amd64.deb"], check=True)
        subprocess.run(["tar", "-xvf", "data.tar.xz"], check=True)

        os.makedirs("/home/user/bin", exist_ok=True)
        subprocess.run(["cp", "./usr/local/bin/wkhtmltopdf", "/home/user/bin/"], check=True)
        subprocess.run(["cp", "./usr/local/bin/wkhtmltoimage", "/home/user/bin/"], check=True)

        os.environ["PATH"] += os.pathsep + "/home/user/bin"
        print("wkhtmltopdf installed successfully.")

    except subprocess.CalledProcessError as e:
        print(f"Error during wkhtmltopdf installation: {e}")
        raise


if not os.path.exists("/home/user/bin/wkhtmltopdf"):
    install_wkhtmltopdf()

import pdfkit

# Point pdfkit at whichever wkhtmltopdf binary is now on PATH.
try:
    path_wkhtmltopdf = subprocess.check_output(['which', 'wkhtmltopdf']).decode('utf-8').strip()
    config = pdfkit.configuration(wkhtmltopdf=path_wkhtmltopdf)
except subprocess.CalledProcessError:
    raise FileNotFoundError("wkhtmltopdf not found. Ensure it is installed in your environment.")

import numpy as np
from PIL import Image
import cv2
import gradio as gr
import matplotlib.pyplot as plt

from transformers import pipeline

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers import Dense, Flatten, GlobalAveragePooling2D, BatchNormalization, Dropout, AveragePooling2D
from tensorflow.keras.applications import DenseNet201
from keras.models import Model
from keras.models import Sequential
from keras.regularizers import *

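
# predict_demo routes a mammogram to one of two classifiers: a local Keras
# model (model.h5 with modeldense1.h5 weights) for breast-density grading
# ("how dense is"), or a Hugging Face image-classification pipeline
# ("what kind is").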
def predict_demo(image, model_name):
    if model_name == "how dense is":
        image = np.asarray(image)

        def load_model():
            model = tf.keras.models.load_model("model.h5", compile=False)
            model.compile(optimizer=tf.keras.optimizers.legacy.Adam(learning_rate=0.00001, decay=0.0001),
                          metrics=["accuracy"], loss=tf.keras.losses.CategoricalCrossentropy(label_smoothing=0.1))
            model.load_weights("modeldense1.h5")
            return model

        model = load_model()

        def preprocess(image):
            image = cv2.resize(image, (224, 224))
            # Sharpen the resized image with a 3x3 kernel before classification.
            kernel = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])
            im = cv2.filter2D(image, -1, kernel)
            if im.ndim == 3:
                # Colour input: just add the batch axis.
                im = np.expand_dims(im, axis=0)
            elif im.ndim == 2:
                # Grayscale input: add a channel axis, repeat it to 3 channels,
                # then add the batch axis.
                im = np.expand_dims(im, axis=-1)
                im = np.repeat(im, 3, axis=-1)
                im = np.expand_dims(im, axis=0)
            return im

        class_name = ['Benign with Density=1', 'Malignant with Density=1', 'Benign with Density=2',
                      'Malignant with Density=2', 'Benign with Density=3', 'Malignant with Density=3',
                      'Benign with Density=4', 'Malignant with Density=4']

        def predict_img(img):
            img = preprocess(img)
            img = img / 255.0
            pred = model.predict(img)[0]
            return {class_name[i]: float(pred[i]) for i in range(8)}

        predict_mamo = predict_img(image)
        return predict_mamo

    elif model_name == "what kind is":
        image = cv2.cvtColor(np.array(image), cv2.COLOR_BGR2RGB)
        im_pil = Image.fromarray(image)
        pipe = pipeline("image-classification", model="DHEIVER/finetuned-BreastCancer-Classification", device=0)

        def predict(image):
            result = pipe(image)
            return {result[i]['label']: float(result[i]['score']) for i in range(2)}

        return predict(im_pil)

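
# Render one figure per mammogram: the image itself plus a horizontal bar chart
# of class probabilities for each selected model. Returns the saved PNG path.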
def generate_fixed_size_chart(predictions, image_file, chart_width=6, chart_height=5):
    mammo_image = plt.imread(image_file)

    num_models = len(predictions)

    # One panel for the mammogram plus one panel per selected model.
    fig, axes = plt.subplots(1, num_models + 1, figsize=(chart_width * (num_models + 1), chart_height), constrained_layout=True)

    axes[0].imshow(mammo_image, cmap='gray')
    axes[0].axis('off')
    axes[0].set_title("Mammogram")

    for i, (model_name, prediction) in enumerate(predictions.items(), start=1):
        labels, values = zip(*prediction.items())
        axes[i].barh(labels, values, color='skyblue')
        axes[i].set_xlabel('Probability')
        axes[i].set_title(f'{model_name}')

    chart_path = f"{os.getcwd()}/{os.path.basename(image_file)}_combined_chart.png"
    plt.savefig(chart_path, bbox_inches='tight')
    plt.close(fig)

    return chart_path

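
# Build an HTML report (patient photo, demographics, prediction charts) and
# convert it to a PDF with wkhtmltopdf via pdfkit.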
def generate_pdf(patient_info, predictions):
    all_charts = []
    for image_file, prediction in predictions:
        chart = generate_fixed_size_chart(prediction, image_file)
        all_charts.append(chart)

    html_content = f"""
    <html>
    <head>
    <style>
        body {{ font-family: Arial, sans-serif; }}
        h1 {{ color: #2F4F4F; text-align: center; margin-bottom: 30px; }}
        .info-container {{
            display: flex;
            flex-wrap: wrap;
            justify-content: space-between;
            margin-bottom: 20px;
        }}
        .info-item {{
            width: 45%;
            margin-bottom: 10px;
        }}
        .image-container {{
            text-align: center;
            margin-bottom: 50px;
        }}
    </style>
    </head>
    <body>
        <h1>Patient Report</h1>
        <div class="image-container">
            <h3>Patient Image:</h3>
            <img src="{patient_info.get('ImagePath', '')}" alt="Patient Image" width="300">
        </div>
        <div class="image-container">
            <h3>Patient Information:</h3>
            <div class="info-container">
                {"".join(f"<div class='info-item'><strong>{key}:</strong> {value if value else '-'}</div>" for key, value in patient_info.items() if key != "ImagePath")}
            </div>
        </div>
        <h3>Predictions:</h3>
        {"".join(f"<div><img src='{chart}' width='80%'></div>" for chart in all_charts)}
    </body>
    </html>
    """

    pdf_path = "patient_report.pdf"
    # Reuse the pdfkit configuration resolved at import time rather than
    # hardcoding /usr/bin/wkhtmltopdf, which may not exist after the
    # user-space install above.
    options = {
        "enable-local-file-access": True,
        "no-stop-slow-scripts": True,
    }
    pdfkit.from_string(html_content, pdf_path, configuration=config, options=options)

    return pdf_path

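
# Generate the PDF plus a lightweight HTML summary for the Results tab.
# Example of driving the report pipeline directly (hypothetical values):
#   info = {"Name": "Jane Doe", "Age": 52, "ImagePath": "patient_image.jpg"}
#   preds = [("mammo.png", {"how dense is": {"Benign with Density=1": 0.9}})]
#   html, pdf = display_report(info, preds)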
def display_report(patient_info, predictions):
    pdf_path = generate_pdf(patient_info, predictions)
    report_content = f"<h2>Patient Report</h2><p>{patient_info}</p><h2>Predictions</h2>{predictions}"
    return report_content, pdf_path

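
# Three-tab Gradio UI: collect patient details, run the selected model(s) on
# the uploaded mammograms, then render and download the PDF report.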
with gr.Blocks() as demo:
    gr.Markdown("## Breast Cancer Detection - Multi-Model Interface")

    with gr.Tab("Patient Info"):
        patient_image = gr.Image(label="Upload Patient Profile Image", type="pil")
        name = gr.Textbox(label="Name")
        height = gr.Number(label="Height (cm)")
        weight = gr.Number(label="Weight (kg)")
        age = gr.Number(label="Age")
        gender = gr.Radio(["Male", "Female", "Other"], label="Gender")
        residence = gr.Textbox(label="Residence")
        birth_place = gr.Textbox(label="Birth Place")
        occupation = gr.Textbox(label="Occupation")
        medical_history = gr.Textbox(label="Medical History")
        patient_info = gr.State()
        patient_info_submit = gr.Button("Next")

    with gr.Tab("Model & Image Selection"):
        model_choice = gr.CheckboxGroup(["how dense is", "what kind is"], label="Select Model(s)", interactive=True)
        mammography_images = gr.File(label="Upload Mammography Image(s)", file_count="multiple", type="filepath")
        predictions = gr.State()
        process_button = gr.Button("Process Images")

    with gr.Tab("Results"):
        report_display = gr.HTML(label="Patient Report")
        # File component that receives the generated PDF (wired up below).
        report_file = gr.File(label="Download PDF Report")
        download_button = gr.Button("Download Report")

    def collect_patient_info(image, name, height, weight, age, gender, residence, birth_place, occupation, medical_history):
        # Save the profile photo under an absolute path so wkhtmltopdf can embed it.
        image_path = os.path.abspath("patient_image.jpg")
        image.save(image_path)
        return {
            "Name": name,
            "Gender": gender,
            "Height": height,
            "Weight": weight,
            "Age": age,
            "Residence": residence,
            "Birth Place": birth_place,
            "Occupation": occupation,
            "Medical History": medical_history,
            "ImagePath": image_path
        }

    patient_info_submit.click(
        collect_patient_info,
        inputs=[patient_image, name, height, weight, age, gender, residence, birth_place, occupation, medical_history],
        outputs=patient_info
    )

    def process_images(patient_info, selected_models, images):
        all_predictions = []
        for image_file in images:
            image = Image.open(image_file)
            image_predictions = {model: predict_demo(image, model) for model in selected_models}
            all_predictions.append((image_file, image_predictions))
        return all_predictions

    process_button.click(
        process_images,
        inputs=[patient_info, model_choice, mammography_images],
        outputs=predictions
    )

    download_button.click(
        display_report,
        inputs=[patient_info, predictions],
        outputs=[report_display, report_file]
    )

demo.launch(debug=True, share=True)