rtr46 committed
Commit b72b193 · verified · 1 Parent(s): 03d1638

Upload 4 files

inference.v0.1.320x192.py ADDED
@@ -0,0 +1,129 @@
+ import torch  # unused directly; possibly imported so onnxruntime-gpu can find torch's bundled CUDA libraries
+ import cv2
+ import numpy as np
+ import onnxruntime as ort
+ import time
+
+ # --- CONFIGURATION ---
+ INPUT_WIDTH = 320
+ INPUT_HEIGHT = 192
+ MODEL_PATH = f"meiki.text.detect.v0.1.{INPUT_WIDTH}x{INPUT_HEIGHT}.onnx"
+ INPUT_IMAGE_PATH = "input.jpg"
+ OUTPUT_IMAGE_PATH = f"output.{INPUT_WIDTH}x{INPUT_HEIGHT}.jpg"
+
+ # A threshold to filter out weak detections.
+ # Adjust this value as needed (e.g. lower to 0.3 for more boxes,
+ # or raise to 0.5 for fewer, more confident boxes).
+ CONFIDENCE_THRESHOLD = 0.4
+
+ def resize(image: np.ndarray, w: int, h: int):
+     original_height, original_width, _ = image.shape
+
+     # Ratios needed later to map detections back to the original image.
+     ratio_w = w / original_width
+     ratio_h = h / original_height
+
+     # Resize the image to the model's fixed input resolution.
+     resized_image = cv2.resize(image, (w, h), interpolation=cv2.INTER_LINEAR)
+
+     return resized_image, ratio_w, ratio_h
+
+ def main():
+     """Run text detection on a single image and save the annotated result."""
+     # --- 1. Load the Model ---
+     try:
+         # Create an inference session; fall back to CPU if CUDA is unavailable.
+         session = ort.InferenceSession(
+             MODEL_PATH,
+             providers=['CUDAExecutionProvider', 'CPUExecutionProvider'],
+         )
+         print("Session providers:", session.get_providers())
+         print(f"Successfully loaded model: {MODEL_PATH}")
+     except Exception as e:
+         print(f"Error: Failed to load the ONNX model. Make sure '{MODEL_PATH}' exists.")
+         print(f"Details: {e}")
+         return
+
+     # --- 2. Load and Pre-process the Input Image ---
+     try:
+         # Read the input image; OpenCV returns BGR by default.
+         original_image = cv2.imread(INPUT_IMAGE_PATH)
+         if original_image is None:
+             raise FileNotFoundError(f"Image not found at '{INPUT_IMAGE_PATH}'")
+         print(f"Successfully loaded image: {INPUT_IMAGE_PATH}")
+     except Exception as e:
+         print(f"Error: {e}")
+         return
+
+     resized_image, ratio_w, ratio_h = resize(original_image, INPUT_WIDTH, INPUT_HEIGHT)
+
+     # Normalize pixel values to [0, 1].
+     img_normalized = resized_image.astype(np.float32) / 255.0
+
+     # The model expects channels first (C, H, W); OpenCV gives (H, W, C),
+     # so transpose the axes.
+     img_transposed = np.transpose(img_normalized, (2, 0, 1))
+
+     # Add the batch dimension: (1, C, H, W).
+     image_input_tensor = np.expand_dims(img_transposed, axis=0)
+
+     # --- 3. Run Inference ---
+     # The model requires a second input specifying the image size.
+     sizes_input_tensor = np.array([[INPUT_WIDTH, INPUT_HEIGHT]], dtype=np.int64)
+
+     # Get the names of the model's input nodes.
+     input_names = [inp.name for inp in session.get_inputs()]
+
+     # Prepare the dictionary of inputs for the model.
+     inputs = {
+         input_names[0]: image_input_tensor,
+         input_names[1]: sizes_input_tensor,
+     }
+
+     # Run the model several times to gauge runtime; the first run is
+     # typically slower because of provider warm-up.
+     # The model returns three outputs: labels, boxes, and confidence scores.
+     for _ in range(10):
+         start = time.perf_counter()
+         outputs = session.run(None, inputs)
+         print(f"runtime {time.perf_counter() - start:.4f}s")
+     labels, boxes, scores = outputs
+
+     # --- 4. Post-process and Draw Bounding Boxes ---
+     # The outputs carry a batch dimension, so take the first (only) entry.
+     boxes = boxes[0]
+     scores = scores[0]
+
+     print(f"Model returned {len(boxes)} boxes. Filtering with confidence > {CONFIDENCE_THRESHOLD}...")
+
+     # Draw on a copy of the original image.
+     output_image = original_image.copy()
+
+     # Iterate through the boxes and their corresponding scores.
+     confident_boxes_count = 0
+     for box, score in zip(boxes, scores):
+         # Only keep boxes with a confidence score above the threshold.
+         if score > CONFIDENCE_THRESHOLD:
+             confident_boxes_count += 1
+             # Box coordinates are relative to the resized INPUT_WIDTH x
+             # INPUT_HEIGHT image, so scale them back to the original
+             # image's coordinate space.
+             x_min, y_min, x_max, y_max = box
+
+             final_x_min = int(x_min / ratio_w)
+             final_y_min = int(y_min / ratio_h)
+             final_x_max = int(x_max / ratio_w)
+             final_y_max = int(y_max / ratio_h)
+
+             # Draw a green rectangle on the output image.
+             cv2.rectangle(output_image, (final_x_min, final_y_min), (final_x_max, final_y_max), (0, 255, 0), 2)
+
+     print(f"Found {confident_boxes_count} confident boxes.")
+
+     # --- 5. Save the Final Image ---
+     cv2.imwrite(OUTPUT_IMAGE_PATH, output_image)
+     print(f"Successfully saved result to: {OUTPUT_IMAGE_PATH}")
+
+
+ if __name__ == "__main__":
+     main()
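Note: the script above assumes the image tensor is the model's first input and the size tensor its second. If that ordering is ever in doubt, the ONNX I/O metadata can be inspected directly; a minimal sketch using only the onnxruntime API, with the model path as in the script above:

    import onnxruntime as ort

    MODEL_PATH = "meiki.text.detect.v0.1.320x192.onnx"
    session = ort.InferenceSession(MODEL_PATH, providers=["CPUExecutionProvider"])

    # Print the name, shape, and element type of every input and output node.
    for inp in session.get_inputs():
        print("input: ", inp.name, inp.shape, inp.type)
    for out in session.get_outputs():
        print("output:", out.name, out.shape, out.type)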
inference.v0.1.960x544.py ADDED
@@ -0,0 +1,129 @@
+ import torch  # unused directly; possibly imported so onnxruntime-gpu can find torch's bundled CUDA libraries
+ import cv2
+ import numpy as np
+ import onnxruntime as ort
+ import time
+
+ # --- CONFIGURATION ---
+ INPUT_WIDTH = 960
+ INPUT_HEIGHT = 544
+ MODEL_PATH = f"meiki.text.detect.v0.1.{INPUT_WIDTH}x{INPUT_HEIGHT}.onnx"
+ INPUT_IMAGE_PATH = "input.jpg"
+ OUTPUT_IMAGE_PATH = f"output.{INPUT_WIDTH}x{INPUT_HEIGHT}.jpg"
+
+ # A threshold to filter out weak detections.
+ # Adjust this value as needed (e.g. lower to 0.3 for more boxes,
+ # or raise to 0.5 for fewer, more confident boxes).
+ CONFIDENCE_THRESHOLD = 0.4
+
+ def resize(image: np.ndarray, w: int, h: int):
+     original_height, original_width, _ = image.shape
+
+     # Ratios needed later to map detections back to the original image.
+     ratio_w = w / original_width
+     ratio_h = h / original_height
+
+     # Resize the image to the model's fixed input resolution.
+     resized_image = cv2.resize(image, (w, h), interpolation=cv2.INTER_LINEAR)
+
+     return resized_image, ratio_w, ratio_h
+
+ def main():
+     """Run text detection on a single image and save the annotated result."""
+     # --- 1. Load the Model ---
+     try:
+         # Create an inference session; fall back to CPU if CUDA is unavailable.
+         session = ort.InferenceSession(
+             MODEL_PATH,
+             providers=['CUDAExecutionProvider', 'CPUExecutionProvider'],
+         )
+         print("Session providers:", session.get_providers())
+         print(f"Successfully loaded model: {MODEL_PATH}")
+     except Exception as e:
+         print(f"Error: Failed to load the ONNX model. Make sure '{MODEL_PATH}' exists.")
+         print(f"Details: {e}")
+         return
+
+     # --- 2. Load and Pre-process the Input Image ---
+     try:
+         # Read the input image; OpenCV returns BGR by default.
+         original_image = cv2.imread(INPUT_IMAGE_PATH)
+         if original_image is None:
+             raise FileNotFoundError(f"Image not found at '{INPUT_IMAGE_PATH}'")
+         print(f"Successfully loaded image: {INPUT_IMAGE_PATH}")
+     except Exception as e:
+         print(f"Error: {e}")
+         return
+
+     resized_image, ratio_w, ratio_h = resize(original_image, INPUT_WIDTH, INPUT_HEIGHT)
+
+     # Normalize pixel values to [0, 1].
+     img_normalized = resized_image.astype(np.float32) / 255.0
+
+     # The model expects channels first (C, H, W); OpenCV gives (H, W, C),
+     # so transpose the axes.
+     img_transposed = np.transpose(img_normalized, (2, 0, 1))
+
+     # Add the batch dimension: (1, C, H, W).
+     image_input_tensor = np.expand_dims(img_transposed, axis=0)
+
+     # --- 3. Run Inference ---
+     # The model requires a second input specifying the image size.
+     sizes_input_tensor = np.array([[INPUT_WIDTH, INPUT_HEIGHT]], dtype=np.int64)
+
+     # Get the names of the model's input nodes.
+     input_names = [inp.name for inp in session.get_inputs()]
+
+     # Prepare the dictionary of inputs for the model.
+     inputs = {
+         input_names[0]: image_input_tensor,
+         input_names[1]: sizes_input_tensor,
+     }
+
+     # Run the model several times to gauge runtime; the first run is
+     # typically slower because of provider warm-up.
+     # The model returns three outputs: labels, boxes, and confidence scores.
+     for _ in range(10):
+         start = time.perf_counter()
+         outputs = session.run(None, inputs)
+         print(f"runtime {time.perf_counter() - start:.4f}s")
+     labels, boxes, scores = outputs
+
+     # --- 4. Post-process and Draw Bounding Boxes ---
+     # The outputs carry a batch dimension, so take the first (only) entry.
+     boxes = boxes[0]
+     scores = scores[0]
+
+     print(f"Model returned {len(boxes)} boxes. Filtering with confidence > {CONFIDENCE_THRESHOLD}...")
+
+     # Draw on a copy of the original image.
+     output_image = original_image.copy()
+
+     # Iterate through the boxes and their corresponding scores.
+     confident_boxes_count = 0
+     for box, score in zip(boxes, scores):
+         # Only keep boxes with a confidence score above the threshold.
+         if score > CONFIDENCE_THRESHOLD:
+             confident_boxes_count += 1
+             # Box coordinates are relative to the resized INPUT_WIDTH x
+             # INPUT_HEIGHT image, so scale them back to the original
+             # image's coordinate space.
+             x_min, y_min, x_max, y_max = box
+
+             final_x_min = int(x_min / ratio_w)
+             final_y_min = int(y_min / ratio_h)
+             final_x_max = int(x_max / ratio_w)
+             final_y_max = int(y_max / ratio_h)
+
+             # Draw a green rectangle on the output image.
+             cv2.rectangle(output_image, (final_x_min, final_y_min), (final_x_max, final_y_max), (0, 255, 0), 2)
+
+     print(f"Found {confident_boxes_count} confident boxes.")
+
+     # --- 5. Save the Final Image ---
+     cv2.imwrite(OUTPUT_IMAGE_PATH, output_image)
+     print(f"Successfully saved result to: {OUTPUT_IMAGE_PATH}")
+
+
+ if __name__ == "__main__":
+     main()
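The two inference scripts are identical apart from the INPUT_WIDTH/INPUT_HEIGHT constants and the file paths derived from them, so they could be collapsed into a single entry point. A minimal sketch follows; the --size flag and its choices are hypothetical, mirroring the two uploaded models:

    import argparse

    def parse_args():
        # Hypothetical CLI; the resolution choices mirror the two model files.
        p = argparse.ArgumentParser(description="meiki text detection inference")
        p.add_argument("--size", default="320x192", choices=["320x192", "960x544"],
                       help="model input resolution as WIDTHxHEIGHT")
        p.add_argument("--image", default="input.jpg", help="path to the input image")
        return p.parse_args()

    if __name__ == "__main__":
        args = parse_args()
        w, h = (int(v) for v in args.size.split("x"))
        model_path = f"meiki.text.detect.v0.1.{w}x{h}.onnx"
        print(f"Selected {model_path} for {args.image} at {w}x{h}")
        # From here, reuse the pipeline above with INPUT_WIDTH=w, INPUT_HEIGHT=h.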
meiki.text.detect.v0.1.320x192.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8cdc5daa5c13a5f93adb40ee2e53ad1a3d2f7e372584c5c665b038adeb74ae6d
+ size 14084361
meiki.text.detect.v0.1.960x544.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:40b6a016667745cae7d3055929ae3b8b1e7716aac795f5904cd3c2c7c3b8404b
+ size 14503825
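The two .onnx entries above are Git LFS pointer files (version/oid/size stanzas), not the weights themselves, so a plain text checkout will not include the binaries. They can be fetched with git lfs pull after cloning, or via the huggingface_hub client; a minimal sketch, where REPO_ID is a hypothetical placeholder for the actual repository id:

    from huggingface_hub import hf_hub_download

    # REPO_ID is a placeholder; replace with the real repository id.
    REPO_ID = "rtr46/meiki-text-detect"
    model_path = hf_hub_download(repo_id=REPO_ID,
                                 filename="meiki.text.detect.v0.1.320x192.onnx")
    print("Model downloaded to:", model_path)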