Commit a4c6590
Parent(s): 80cb3a4
added app.py and dockerfile
- Dockerfile +7 -0
- app.py +65 -0
Dockerfile ADDED
@@ -0,0 +1,7 @@
+FROM python:3.10-slim-bullseye
+RUN apt-get update && apt-get install ffmpeg libsm6 libxext6 -y
+COPY requirements.txt .
+RUN pip install -r requirements.txt
+COPY app.py .
+COPY model.onnx .
+CMD python app.py
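Note that requirements.txt is copied by the Dockerfile but is not part of this commit, so it would still need to list the packages app.py imports; a plausible sketch (package names are assumptions, not taken from this commit) is onnxruntime, opencv-python, numpy, plus whichever package provides the oloren module. With that file and model.onnx alongside, the image could then be built and started along these lines (the image tag is illustrative):

docker build -t detectron-onnx .
docker run detectron-onnx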
app.py ADDED
@@ -0,0 +1,65 @@
+import onnxruntime
+import cv2
+import numpy as np
+import oloren as olo
+
+
+class ONNX_Detectron:
+    REQUIRED_WIDTH = 800
+    REQUIRED_HEIGHT = 1043
+
+    def __init__(self, onnx_path):
+        self.onnx_path = onnx_path
+        self.model = onnxruntime.InferenceSession(self.onnx_path)
+
+    def preprocess(self, path):
+        img = np.array(cv2.imread(path))
+        initial_h, initial_w, _ = img.shape
+        img = cv2.resize(img, (self.REQUIRED_WIDTH, self.REQUIRED_HEIGHT), interpolation=cv2.INTER_LINEAR)
+        img = img.transpose((2, 0, 1)).astype(np.float32)
+        return {self.model.get_inputs()[0].name: img}, initial_w, initial_h
+
+    def predict(self, image):
+        prepared_input, input_w, input_h = self.preprocess(image)
+        bboxes, labels, confidence_scores, _ = self.model.run(None, prepared_input)
+        regions = self.postprocess(bboxes, labels, confidence_scores, input_w, input_h)
+        return regions
+
+    def postprocess(
+        self,
+        bboxes: np.ndarray,
+        labels: np.ndarray,
+        confidence_scores: np.ndarray,
+        input_w: float,
+        input_h: float,
+    ):
+        """Process output into Unstructured class. Bounding box coordinates are converted to
+        original image resolution."""
+        regions = []
+        width_conversion = input_w / self.REQUIRED_WIDTH
+        height_conversion = input_h / self.REQUIRED_HEIGHT
+        for (x1, y1, x2, y2), label, conf in zip(bboxes, labels, confidence_scores):
+            if conf < 0.8:
+                continue
+            region = {
+                "x1": x1 * width_conversion,
+                "y1": y1 * height_conversion,
+                "x2": x2 * width_conversion,
+                "y2": y2 * height_conversion,
+                "confidence": conf,
+                "label": label,
+            }
+
+            regions.append(region)
+
+        return regions
+
+
+@olo.register()
+def predict(image=olo.File()):
+    model = ONNX_Detectron("./model.onnx")
+    return model.predict(image)
+
+
+if __name__ == "__main__":
+    olo.run("detectron")
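Outside the Oloren runtime, the ONNX_Detectron class can also be smoke-tested directly, since its predict() only needs an image path and a model.onnx exported with the (boxes, labels, scores, ...) output order assumed above. A minimal sketch, assuming the @olo.register() decoration is safe to execute on import and that a test image exists at the hypothetical path below:

# local_test.py -- hypothetical smoke test, not part of this commit
from app import ONNX_Detectron

detector = ONNX_Detectron("./model.onnx")
# Regions come back in the original image's resolution (see postprocess).
for r in detector.predict("sample_page.png"):
    print(r["label"], float(r["confidence"]), (r["x1"], r["y1"], r["x2"], r["y2"]))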