# -*- coding: utf-8 -*-
"""
Created on Wed Nov 13 18:37:31 2024
@author: sabar
"""
import gradio as gr
import cv2
import numpy as np
import os
import json
from openvino.runtime import Core # Assuming you're using OpenVINO
# from tqdm import tqdm
from tf_post_processing import non_max_suppression #,optimized_object_detection
# Load the OpenVINO model
classification_model_xml = "./model/best_openvino_model/best.xml"
core = Core()
config = {
    "INFERENCE_NUM_THREADS": 2,
    "ENABLE_CPU_PINNING": True
}
model = core.read_model(model=classification_model_xml)
compiled_model = core.compile_model(model=model, device_name="CPU", config=config)
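# The 960x960 input size hard-coded in predict_image below is assumed to match
# this model's export settings; if unsure, it can be sanity-checked against the
# compiled model, e.g.:
#   print(compiled_model.input(0).shape)  # expected to be [1, 3, 960, 960]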
label_to_class_text = {
    0: 'range',
    1: 'entry door',
    2: 'kitchen sink',
    3: 'bathroom sink',
    4: 'toilet',
    5: 'double folding door',
    6: 'window',
    7: 'shower',
    8: 'bathtub',
    9: 'single folding door',
    10: 'dishwasher',
    11: 'refrigerator'
}
# Function to perform inference
def predict_image(image):
    # Resize, preprocess, and reshape the input image
    img_size = 960
    resized_image = cv2.resize(image, (img_size, img_size)) / 255.0
    resized_image = resized_image.transpose(2, 0, 1)
    reshaped_image = np.expand_dims(resized_image, axis=0).astype(np.float32)
    im_height, im_width, _ = image.shape
    output_numpy = compiled_model(reshaped_image)[0]
    results = non_max_suppression(output_numpy, conf_thres=0.2, iou_thres=0.6, max_wh=15000)[0]
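    # Assumed layout of each row returned by the custom non_max_suppression
    # (from tf_post_processing): [x1, y1, x2, y2, confidence, class_id], with
    # box coordinates normalized to 0-1, hence the scaling by the original
    # image size below.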
    # Prepare output paths
    output_path = "./output_file_train/"
    output_image_folder = os.path.join(output_path, "images_alienware_openvino/")
    os.makedirs(output_image_folder, exist_ok=True)
    output_json_folder = os.path.join(output_path, "json_output/")
    os.makedirs(output_json_folder, exist_ok=True)
    predictions = []
    # Draw boxes and collect prediction data
    for result in results:
        boxes = result[:4]
        prob = result[4]
        classes = int(result[5])
        x1, y1, x2, y2 = np.uint16([
            boxes[0] * im_width,
            boxes[1] * im_height,
            boxes[2] * im_width,
            boxes[3] * im_height
        ])
        if prob > 0.2:
            cv2.rectangle(image, (x1, y1), (x2, y2), (255, 255, 0), 2)
            label_text = f"{classes} {round(prob, 2)}"
            cv2.putText(image, label_text, (x1, y1), 0, 0.5, (0, 255, 0), 2)
            # Store prediction info in a JSON-compatible format
            predictions.append({
                "class": label_to_class_text[classes],
                "probability": round(float(prob), 2),
                "coordinates": {
                    "xmin": int(x1),
                    "ymin": int(y1),
                    "xmax": int(x2),
                    "ymax": int(y2)
                }
            })
    # Save the processed image and JSON file
    output_image_path = os.path.join(output_image_folder, "result_image.jpg")
    cv2.imwrite(output_image_path, image)
    output_json_path = os.path.join(output_json_folder, "predictions.json")
    with open(output_json_path, 'w') as f:
        json.dump(predictions, f, indent=4)
    return output_image_path, predictions
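# Standalone usage sketch (outside Gradio), assuming a BGR image as read by
# OpenCV; the annotated image and JSON are written under ./output_file_train/:
#   img = cv2.imread("./sample/10_2.jpg")
#   annotated_path, preds = predict_image(img)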
# Set up Gradio interface to read from sample folder
def gradio_interface():
    # sample_folder = "./sample"  # Folder containing sample images
    # Sample images for demonstration (make sure these image paths exist;
    # replace with actual image paths as needed)
    sample_images = [
        "./sample/10_2.jpg",
        "./sample/10_10.jpg",
        "./sample/10_12.jpg"
    ]
    # image_paths = [os.path.join(sample_folder, img) for img in os.listdir(sample_folder) if img.endswith(('.png', '.jpg', '.jpeg'))]
    results = []
    os.makedirs("samples", exist_ok=True)
    for image_path in sample_images:
        image = cv2.imread(image_path)
        output_image_path, predictions = predict_image(image)
        results.append({
            "image_path": output_image_path,
            "predictions": predictions
        })
    return results
# Launch the Gradio app
gr.Interface(
    fn=gradio_interface,
    inputs=None,
    outputs="json",
    title="OpenVINO Model Inference with Gradio",
    description="Reads images from the 'sample' folder to get model predictions with bounding boxes and probabilities."
).launch()
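# A minimal alternative sketch (not part of the original app): exposing
# predict_image directly so a user can upload an image instead of running the
# fixed ./sample files. gr.Image and gr.JSON are standard Gradio components;
# note that Gradio supplies RGB arrays while cv2.imread yields BGR, so a
# cv2.cvtColor(image, cv2.COLOR_RGB2BGR) step may be needed for consistency.
#
# gr.Interface(
#     fn=predict_image,
#     inputs=gr.Image(type="numpy"),
#     outputs=[gr.Image(type="filepath"), gr.JSON()],
#     title="OpenVINO Model Inference (upload variant)"
# ).launch()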