sabaridsnfuji committed
Commit d56cd69
1 Parent(s): 8f4d0eb

updated the app file

Files changed (1)
  1. app.py +135 -101
app.py CHANGED
@@ -1,101 +1,135 @@
- # -*- coding: utf-8 -*-
- """
- Created on Wed Nov 13 18:37:31 2024
-
- @author: sabar
- """
-
- import gradio as gr
- import cv2
- import numpy as np
- import os
- import json
- from openvino.runtime import Core # Assuming you're using OpenVINO
- from tqdm import tqdm
- from tf_post_processing import non_max_suppression #,optimized_object_detection
-
- # Load the OpenVINO model
- classification_model_xml = "./model/best_openvino_model\best.xml"
- core = Core()
- config = {
-     "INFERENCE_NUM_THREADS": 2,
-     "ENABLE_CPU_PINNING": True
- }
- model = core.read_model(model=classification_model_xml)
- compiled_model = core.compile_model(model=model, device_name="CPU", config=config)
-
- # Function to perform inference
- def predict_image(image):
-     # Resize, preprocess, and reshape the input image
-     img_size = 960
-     resized_image = cv2.resize(image, (img_size, img_size)) / 255.0
-     resized_image = resized_image.transpose(2, 0, 1)
-     reshaped_image = np.expand_dims(resized_image, axis=0).astype(np.float32)
-
-     im_height, im_width, _ = image.shape
-     output_numpy = compiled_model(reshaped_image)[0]
-     results = non_max_suppression(output_numpy, conf_thres=0.2, iou_thres=0.6, max_wh=15000)[0]
-
-     # Prepare output paths
-     output_path = "D:/Research/Auto_CAD_detection/output_file_train/"
-     output_image_folder = os.path.join(output_path, "images_alienware_openvino/")
-     os.makedirs(output_image_folder, exist_ok=True)
-
-     output_json_folder = os.path.join(output_path, "json_output/")
-     os.makedirs(output_json_folder, exist_ok=True)
-
-     predictions = []
-
-     # Draw boxes and collect prediction data
-     for result in results:
-         boxes = result[:4]
-         prob = result[4]
-         classes = int(result[5])
-
-         x1, y1, x2, y2 = np.uint16([
-             boxes[0] * im_width,
-             boxes[1] * im_height,
-             boxes[2] * im_width,
-             boxes[3] * im_height
-         ])
-
-         if prob > 0.2:
-             cv2.rectangle(image, (x1, y1), (x2, y2), (255, 255, 0), 2)
-             label_text = f"{classes} {round(prob, 2)}"
-             cv2.putText(image, label_text, (x1, y1), 0, 0.5, (0, 255, 0), 2)
-
-             # Store prediction info in a JSON-compatible format
-             predictions.append({
-                 "class": classes,
-                 "probability": round(float(prob), 2),
-                 "coordinates": {
-                     "xmin": int(x1),
-                     "ymin": int(y1),
-                     "xmax": int(x2),
-                     "ymax": int(y2)
-                 }
-             })
-
-     # Save the processed image and JSON file
-     output_image_path = os.path.join(output_image_folder, "result_image.jpg")
-     cv2.imwrite(output_image_path, image)
-
-     output_json_path = os.path.join(output_json_folder, "predictions.json")
-     with open(output_json_path, 'w') as f:
-         json.dump(predictions, f, indent=4)
-
-     return output_image_path, predictions
-
- # Set up Gradio interface
- def gradio_interface(image):
-     output_image_path, predictions = predict_image(image)
-     return output_image_path, json.dumps(predictions, indent=4)
-
- # Launch the Gradio app
- gr.Interface(
-     fn=gradio_interface,
-     inputs="image",
-     outputs=["image", "json"],
-     title="OpenVINO Model Inference with Gradio",
-     description="Upload an image to get model predictions with bounding boxes and probabilities."
- ).launch()
+ # -*- coding: utf-8 -*-
+ """
+ Created on Wed Nov 13 18:37:31 2024
+
+ @author: sabar
+ """
+
+ import gradio as gr
+ import cv2
+ import numpy as np
+ import os
+ import json
+ from openvino.runtime import Core # Assuming you're using OpenVINO
+ # from tqdm import tqdm
+ from tf_post_processing import non_max_suppression #,optimized_object_detection
+
+ # Load the OpenVINO model
+ classification_model_xml = "./model/best_openvino_model/best.xml"
+ core = Core()
+ config = {
+     "INFERENCE_NUM_THREADS": 2,
+     "ENABLE_CPU_PINNING": True
+ }
+ model = core.read_model(model=classification_model_xml)
+ compiled_model = core.compile_model(model=model, device_name="CPU", config=config)
+
+ label_to_class_text = {
+     0: 'range',
+     1: 'entry door',
+     2: 'kitchen sink',
+     3: 'bathroom sink',
+     4: 'toilet',
+     5: 'double folding door',
+     6: 'window',
+     7: 'shower',
+     8: 'bathtub',
+     9: 'single folding door',
+     10: 'dishwasher',
+     11: 'refrigerator'
+ }
+
+ # Function to perform inference
+ def predict_image(image):
+     # Resize, preprocess, and reshape the input image
+     img_size = 960
+     resized_image = cv2.resize(image, (img_size, img_size)) / 255.0
+     resized_image = resized_image.transpose(2, 0, 1)
+     reshaped_image = np.expand_dims(resized_image, axis=0).astype(np.float32)
+
+     im_height, im_width, _ = image.shape
+     output_numpy = compiled_model(reshaped_image)[0]
+     results = non_max_suppression(output_numpy, conf_thres=0.2, iou_thres=0.6, max_wh=15000)[0]
+
+     # Prepare output paths
+     output_path = "./output_file_train/"
+     output_image_folder = os.path.join(output_path, "images_alienware_openvino/")
+     os.makedirs(output_image_folder, exist_ok=True)
+
+     output_json_folder = os.path.join(output_path, "json_output/")
+     os.makedirs(output_json_folder, exist_ok=True)
+
+     predictions = []
+
+     # Draw boxes and collect prediction data
+     for result in results:
+         boxes = result[:4]
+         prob = result[4]
+         classes = int(result[5])
+
+         x1, y1, x2, y2 = np.uint16([
+             boxes[0] * im_width,
+             boxes[1] * im_height,
+             boxes[2] * im_width,
+             boxes[3] * im_height
+         ])
+
+         if prob > 0.2:
+             cv2.rectangle(image, (x1, y1), (x2, y2), (255, 255, 0), 2)
+             label_text = f"{classes} {round(prob, 2)}"
+             cv2.putText(image, label_text, (x1, y1), 0, 0.5, (0, 255, 0), 2)
+
+             # Store prediction info in a JSON-compatible format
+             predictions.append({
+                 "class": label_to_class_text[classes],
+                 "probability": round(float(prob), 2),
+                 "coordinates": {
+                     "xmin": int(x1),
+                     "ymin": int(y1),
+                     "xmax": int(x2),
+                     "ymax": int(y2)
+                 }
+             })
+
+     # Save the processed image and JSON file
+     output_image_path = os.path.join(output_image_folder, "result_image.jpg")
+     cv2.imwrite(output_image_path, image)
+
+     output_json_path = os.path.join(output_json_folder, "predictions.json")
+     with open(output_json_path, 'w') as f:
+         json.dump(predictions, f, indent=4)
+
+     return output_image_path, predictions
+
+ # Set up Gradio interface to read from sample folder
+ def gradio_interface():
+     # sample_folder = "./sample" # Folder containing sample images
+
+     # Sample images for demonstration (make sure these image paths exist)
+     sample_images = [
+         "./sample/10_2.jpg", # replace with actual image paths
+         "./sample/10_10.jpg", # replace with actual image paths
+         "./sample/10_12.jpg" # replace with actual image paths
+     ]
+     # image_paths = [os.path.join(sample_folder, img) for img in os.listdir(sample_folder) if img.endswith(('.png', '.jpg', '.jpeg'))]
+     results = []
+     os.makedirs("samples", exist_ok=True)
+
+     for image_path in sample_images:
+         image = cv2.imread(image_path)
+         output_image_path, predictions = predict_image(image)
+         results.append({
+             "image_path": output_image_path,
+             "predictions": predictions
+         })
+
+     return results
+
+ # Launch the Gradio app
+ gr.Interface(
+     fn=gradio_interface,
+     inputs=None,
+     outputs="json",
+     title="OpenVINO Model Inference with Gradio",
+     description="Reads images from the 'sample' folder to get model predictions with bounding boxes and probabilities."
+ ).launch()