phuochungus committed
Commit aaa1b64
1 Parent(s): 828f82d

change to yolo

app/__init__.py CHANGED
@@ -3,7 +3,6 @@ import json
 import logging
 
 from dotenv import load_dotenv
-from mmdeploy_runtime import Detector
 from supabase import create_client, Client
 from firebase_admin import credentials, initialize_app, firestore
 from neo4j import GraphDatabase
@@ -15,10 +14,6 @@ logger = logging.getLogger(__name__)
 
 load_dotenv()
 
-# LOAD MODEL
-model_path = "./model"
-detector = Detector(model_path=model_path, device_name="cpu", device_id=0)
-
 # LOAD SUPABASE
 url: str = os.environ.get("SUPABASE_URL")
 key: str = os.environ.get("SUPABASE_KEY")
app/constants.py CHANGED
@@ -84,87 +84,17 @@ colors = [
     (253, 245, 230),
 ]
 
-classNames = [
-    "person",
-    "bicycle",
-    "car",
-    "motorcycle",
-    "airplane",
-    "bus",
-    "train",
-    "truck",
-    "boat",
-    "traffic light",
-    "fire hydrant",
-    "stop sign",
-    "parking meter",
-    "bench",
-    "bird",
-    "cat",
-    "dog",
-    "horse",
-    "sheep",
-    "cow",
-    "elephant",
-    "bear",
-    "zebra",
-    "giraffe",
-    "backpack",
-    "umbrella",
-    "handbag",
-    "tie",
-    "suitcase",
-    "frisbee",
-    "skis",
-    "snowboard",
-    "sports ball",
-    "kite",
-    "baseball bat",
-    "baseball glove",
-    "skateboard",
-    "surfboard",
-    "tennis racket",
-    "bottle",
-    "wine glass",
-    "cup",
-    "fork",
-    "knife",
-    "spoon",
-    "bowl",
-    "banana",
-    "apple",
-    "sandwich",
-    "orange",
-    "broccoli",
-    "carrot",
-    "hot dog",
-    "pizza",
-    "donut",
-    "cake",
-    "chair",
-    "couch",
-    "potted plant",
-    "bed",
-    "dining table",
-    "toilet",
-    "tv",
-    "laptop",
-    "mouse",
-    "remote",
-    "keyboard",
-    "cell phone",
-    "microwave",
-    "oven",
-    "toaster",
-    "sink",
-    "refrigerator",
-    "book",
-    "clock",
-    "vase",
-    "scissors",
-    "teddy bear",
-    "hair drier",
-    "toothbrush",
+class_names = [
+    "Bicycle",
+    "Bus",
+    "Car",
+    "Dog",
+    "Electric pole",
+    "Motorcycle",
+    "Person",
+    "Traffic signs",
+    "Tree",
+    "Uncovered manhole",
 ]
 
-deviceId = 'dvg7OhkORV6mYFlUuNKNaE:APA91bHp-SGj4uLyyI9zpEFGrQDrklonQff0bJjtQWhGrJ-tyd_1u5NZSenxEkJzDZ-lyYsZCvmIrZnwNeu9CetsuJNGAU0lkNePNngR-76ytR8ZLvqTOMyf5RHWexsxaPa9Rop05WMi'
+deviceId = "dvg7OhkORV6mYFlUuNKNaE:APA91bHp-SGj4uLyyI9zpEFGrQDrklonQff0bJjtQWhGrJ-tyd_1u5NZSenxEkJzDZ-lyYsZCvmIrZnwNeu9CetsuJNGAU0lkNePNngR-76ytR8ZLvqTOMyf5RHWexsxaPa9Rop05WMi"
app/custom_mmcv/__init__.py DELETED
File without changes
app/custom_mmcv/color.py DELETED
@@ -1,52 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from enum import Enum
-from typing import Union
-
-import numpy as np
-from mmengine.utils import is_str
-
-
-class Color(Enum):
-    """An enum that defines common colors.
-
-    Contains red, green, blue, cyan, yellow, magenta, white and black.
-    """
-
-    red = (0, 0, 255)
-    green = (0, 255, 0)
-    blue = (255, 0, 0)
-    cyan = (255, 255, 0)
-    yellow = (0, 255, 255)
-    magenta = (255, 0, 255)
-    white = (255, 255, 255)
-    black = (0, 0, 0)
-
-
-def color_val(color: Union[Color, str, tuple, int, np.ndarray]) -> tuple:
-    """Convert various input to color tuples.
-
-    Args:
-        color (:obj:`Color`/str/tuple/int/ndarray): Color inputs
-
-    Returns:
-        tuple[int]: A tuple of 3 integers indicating BGR channels.
-    """
-    if is_str(color):
-        return Color[color].value  # type: ignore
-    elif isinstance(color, Color):
-        return color.value
-    elif isinstance(color, tuple):
-        assert len(color) == 3
-        for channel in color:
-            assert 0 <= channel <= 255
-        return color
-    elif isinstance(color, int):
-        assert 0 <= color <= 255
-        return color, color, color
-    elif isinstance(color, np.ndarray):
-        assert color.ndim == 1 and color.size == 3
-        assert np.all((color >= 0) & (color <= 255))
-        color = color.astype(np.uint8)
-        return tuple(color)
-    else:
-        raise TypeError(f"Invalid type for color: {type(color)}")
app/custom_mmcv/main.py DELETED
@@ -1,88 +0,0 @@
-# THE ORIGINAL mmcv.imshow_det_bboxes
-# Copyright (c) OpenMMLab. All rights reserved.
-
-from typing import List, Optional, Union
-
-import cv2
-import numpy as np
-
-from mmcv.image import imread, imwrite
-from .color import Color, color_val
-
-# a type alias declares the optional types of color argument
-ColorType = Union[Color, str, tuple, int, np.ndarray]
-
-
-def imshow_det_bboxes(
-    img: Union[str, np.ndarray],
-    bboxes: np.ndarray,
-    labels: np.ndarray,
-    class_names: List[str] = [],
-    score_thr: float = 0,
-    bbox_color: ColorType = "green",
-    text_color: ColorType = "green",
-    thickness: int = 1,
-    font_scale: float = 1,
-    out_file: Optional[str] = None,
-    colors: np.ndarray = None,
-):
-    """Draw bboxes and class labels (with scores) on an image.
-
-    Args:
-        img (str or ndarray): The image to be displayed.
-        bboxes (ndarray): Bounding boxes (with scores), shaped (n, 4) or
-            (n, 5).
-        labels (ndarray): Labels of bboxes.
-        class_names (list[str]): Names of each classes.
-        score_thr (float): Minimum score of bboxes to be shown.
-        bbox_color (Color or str or tuple or int or ndarray): Color
-            of bbox lines.
-        text_color (Color or str or tuple or int or ndarray): Color
-            of texts.
-        thickness (int): Thickness of lines.
-        font_scale (float): Font scales of texts.
-        out_file (str or None): The filename to write the image.
-        colors (array of tuple RGB int): the color of bbox and label of each class
-
-    Returns:
-        ndarray: The image with bboxes drawn on it.
-    """
-    assert bboxes.ndim == 2
-    assert labels.ndim == 1
-    assert bboxes.shape[0] == labels.shape[0]
-    assert bboxes.shape[1] == 4 or bboxes.shape[1] == 5
-    img = imread(img)
-
-    if score_thr > 0:
-        assert bboxes.shape[1] == 5
-        scores = bboxes[:, -1]
-        inds = scores > score_thr
-        bboxes = bboxes[inds, :]
-        labels = labels[inds]
-
-    bbox_color = color_val(bbox_color)
-    text_color = color_val(text_color)
-
-    for bbox, label in zip(bboxes, labels):
-        bbox_int = bbox.astype(np.int32)
-        left_top = (bbox_int[0], bbox_int[1])
-        right_bottom = (bbox_int[2], bbox_int[3])
-        if colors is not None and len(colors) > 0:
-            bbox_color = text_color = color_val(colors[label])
-        cv2.rectangle(img, left_top, right_bottom, bbox_color, thickness=thickness)
-        label_text = class_names[label] if class_names is not None else f"cls {label}"
-        if len(bbox) > 4:
-            label_text += f"|{bbox[-1]:.02f}"
-        cv2.putText(
-            img,
-            label_text,
-            (bbox_int[0], bbox_int[1] - 2),
-            cv2.FONT_HERSHEY_TRIPLEX,
-            font_scale,
-            text_color,
-            4,
-        )
-
-    if out_file is not None:
-        imwrite(img, out_file)
-    return img
app/detector/__init__.py CHANGED
@@ -1,4 +1,4 @@
 from .yolov8.YOLOv8 import YOLOv8
 
-model_path = "./model"
+model_path = "./model/best20231112.onnx"
 detector = YOLOv8(model_path)
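
The module-level detector now points straight at the exported ONNX file. For reference, a minimal sketch of loading that file with onnxruntime the same way `initialize_model` does in `YOLOv8.py` below; the input/output introspection lines are illustrative assumptions, not part of this commit:

# Sketch only: mirrors the InferenceSession call in YOLOv8.initialize_model.
# The introspection prints are illustrative assumptions.
import onnxruntime

session = onnxruntime.InferenceSession(
    "./model/best20231112.onnx", providers=onnxruntime.get_available_providers()
)
print([inp.name for inp in session.get_inputs()])   # model input tensor name(s)
print([out.name for out in session.get_outputs()])  # raw YOLOv8 prediction output(s)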
app/detector/yolov8/YOLOv8.py CHANGED
@@ -3,7 +3,7 @@ import cv2
 import numpy as np
 import onnxruntime
 
-from yolov8.utils import xywh2xyxy, draw_detections, multiclass_nms
+from app.detector.yolov8.utils import xywh2xyxy, draw_detections, multiclass_nms
 
 
 class YOLOv8:
@@ -17,6 +17,9 @@ class YOLOv8:
     def __call__(self, image):
         return self.detect_objects(image)
 
+    def set_conf_threshold(self, conf_thres):
+        self.conf_threshold = conf_thres
+
     def initialize_model(self, path):
         self.session = onnxruntime.InferenceSession(
            path, providers=onnxruntime.get_available_providers()
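
The new `set_conf_threshold` setter lets callers change the confidence cutoff per request instead of fixing it at construction time. A hedged usage sketch follows; only `__call__`, the setter, and `draw_detections` (used in `app/routers/image.py`) are visible in this commit, so the return value of `detector(image)` is an assumption based on typical YOLOv8 ONNX wrappers:

# Sketch only: per-request threshold adjustment, as inference_image uses it.
import cv2
from app.detector import detector

image = cv2.imread("demo.jpg")
detector.set_conf_threshold(0.5)             # tighten the cutoff for this call
detections = detector(image)                 # __call__ -> detect_objects(image); shape assumed
annotated = detector.draw_detections(image)  # annotated copy, as used by the /image route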
app/detector/yolov8/utils.py CHANGED
@@ -1,89 +1,9 @@
 from typing import Tuple
 import numpy as np
 import cv2
+from app.constants import class_names
 
-class_names = [
-    "person",
-    "bicycle",
-    "car",
-    "motorcycle",
-    "airplane",
-    "bus",
-    "train",
-    "truck",
-    "boat",
-    "traffic light",
-    "fire hydrant",
-    "stop sign",
-    "parking meter",
-    "bench",
-    "bird",
-    "cat",
-    "dog",
-    "horse",
-    "sheep",
-    "cow",
-    "elephant",
-    "bear",
-    "zebra",
-    "giraffe",
-    "backpack",
-    "umbrella",
-    "handbag",
-    "tie",
-    "suitcase",
-    "frisbee",
-    "skis",
-    "snowboard",
-    "sports ball",
-    "kite",
-    "baseball bat",
-    "baseball glove",
-    "skateboard",
-    "surfboard",
-    "tennis racket",
-    "bottle",
-    "wine glass",
-    "cup",
-    "fork",
-    "knife",
-    "spoon",
-    "bowl",
-    "banana",
-    "apple",
-    "sandwich",
-    "orange",
-    "broccoli",
-    "carrot",
-    "hot dog",
-    "pizza",
-    "donut",
-    "cake",
-    "chair",
-    "couch",
-    "potted plant",
-    "bed",
-    "dining table",
-    "toilet",
-    "tv",
-    "laptop",
-    "mouse",
-    "remote",
-    "keyboard",
-    "cell phone",
-    "microwave",
-    "oven",
-    "toaster",
-    "sink",
-    "refrigerator",
-    "book",
-    "clock",
-    "vase",
-    "scissors",
-    "teddy bear",
-    "hair drier",
-    "toothbrush",
-]
+class_names = class_names
 
 # Create a list of colors for each class where each color is a tuple of 3 integer values
 rng = np.random.default_rng(3)
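
With this change the drawing helpers and the API label boxes from the single ten-class list in `app.constants`. The per-class color table implied by the seeded generator might look like the sketch below; the exact `uniform()` expression is an assumption, since the line after `rng` is not shown in this diff:

# Sketch only: a deterministic per-class color table, as the comment above
# rng suggests; the uniform() call is an assumption.
import numpy as np
from app.constants import class_names

rng = np.random.default_rng(3)  # fixed seed -> stable colors across runs
colors = rng.uniform(0, 255, size=(len(class_names), 3))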
app/routers/image.py CHANGED
@@ -1,11 +1,8 @@
 import cv2
-import numpy as np
 
-from fastapi import APIRouter, File, Response, WebSocket, WebSocketDisconnect
-from app.constants import classNames, colors
-from app import detector
+from fastapi import APIRouter, File, Response
+from app.detector import detector
 from mmcv import imfrombytes
-from app.custom_mmcv.main import imshow_det_bboxes
 from app import logger
 
 router = APIRouter(prefix="/image", tags=["Image"])
@@ -15,14 +12,11 @@ router = APIRouter(prefix="/image", tags=["Image"])
 async def handleImageRequest(
     file: bytes = File(...),
     threshold: float = 0.3,
-    raw: bool = False,
 ):
     try:
         img = imfrombytes(file, cv2.IMREAD_COLOR)
-        if raw:
-            bboxes, labels = inferenceImage(img, threshold, raw)
-            return {"bboxes": bboxes.tolist(), "labels": labels.tolist()}
-        img = inferenceImage(img, threshold, raw)
+
+        img = inference_image(img, threshold)
     except Exception as e:
         logger.error(e)
         return Response(content="Failed to read image", status_code=400)
@@ -36,24 +30,7 @@ async def handleImageRequest(
     return Response(content=jpeg_bytes, media_type="image/jpeg")
 
 
-def inferenceImage(img, threshold: float, isRaw: bool):
-    bboxes, labels, _ = detector(img)
-    if isRaw:
-        removeIndexs = []
-        for i, bbox in enumerate(bboxes):
-            if bbox[4] < threshold:
-                removeIndexs.append(i)
-
-        bboxes = np.delete(bboxes, removeIndexs, axis=0)
-        labels = np.delete(labels, removeIndexs)
-
-        return bboxes, labels
-    return imshow_det_bboxes(
-        img=img,
-        bboxes=bboxes,
-        labels=labels,
-        class_names=classNames,
-        colors=colors,
-        score_thr=threshold,
-    )
-
+def inference_image(img, threshold):
+    detector.set_conf_threshold(threshold)
+    detector(img)
+    return detector.draw_detections(img)
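
With the `raw` mode removed, the endpoint always responds with an annotated JPEG. A hedged example call; the host and port mirror the URL used in `tests/test_image.py`, and the `requests` usage is illustrative:

# Sketch only: exercising the simplified /image endpoint.
import requests

with open("demo.jpg", "rb") as f:
    resp = requests.post(
        "http://0.0.0.0:3000/image",   # base URL taken from tests/test_image.py
        params={"threshold": 0.3},     # query parameter declared by the route
        files={"file": f},             # maps to the `file: bytes = File(...)` parameter
    )
resp.raise_for_status()
with open("annotated.jpg", "wb") as out:
    out.write(resp.content)            # image/jpeg body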
app/routers/video.py CHANGED
@@ -9,7 +9,6 @@ import cv2
 from multiprocessing import Process
 from fastapi import (
     APIRouter,
-    Depends,
     HTTPException,
     UploadFile,
     BackgroundTasks,
@@ -19,7 +18,7 @@ from firebase_admin import messaging
 from app import db
 from app import supabase
 from app.dependencies import get_current_user
-from app.routers.image import inferenceImage
+from app.routers.image import inference_image
 from google.cloud.firestore_v1.base_query import FieldFilter
 from app import logger
 
@@ -31,7 +30,6 @@ async def handleVideoRequest(
     file: UploadFile,
     background_tasks: BackgroundTasks,
     threshold: float = 0.3,
-    user=Depends(get_current_user),
 ):
     if re.search("^video\/", file.content_type) is None:
         raise HTTPException(
@@ -48,7 +46,7 @@ async def handleVideoRequest(
         async with aiofiles.open(os.path.join(id, "input.mp4"), "wb") as out_file:
             while content := await file.read(1024):
                 await out_file.write(content)
-        background_tasks.add_task(inferenceVideo, artifact_ref.id, id, threshold)
+        await inference_video(artifact_ref.id, id, threshold)
        return id + ".mp4"
    except ValueError as err:
        logger.error(err)
@@ -93,7 +91,7 @@ def inference_frame(inputDir, threshold: float = 0.3):
        if res == False:
            break
 
-       resFram = inference_image(frame, threshold)
+       resFram = inference_image(frame, threshold)
        result.write(resFram)
    cap.release()
    result.release()
@@ -102,9 +100,9 @@
    return thumbnail
 
 
-async def inferenceVideo(artifactId: str, inputDir: str, threshold: float):
+async def inference_video(artifactId: str, inputDir: str, threshold: float):
    try:
-       Process(updateArtifact(artifactId, {"status": "processing"})).start()
+       Process(update_artifact(artifactId, {"status": "processing"})).start()
        thumbnail = inference_frame(inputDir, threshold=threshold)
        createThumbnail(thumbnail, inputDir)
 
@@ -129,7 +127,7 @@ async def inferenceVideo(artifactId: str, inputDir: str, threshold: float):
        except Exception as e:
            print(e)
 
-       updateArtifact(
+       update_artifact(
            artifactId,
            {
                "status": "success",
@@ -143,7 +141,7 @@ async def inferenceVideo(artifactId: str, inputDir: str, threshold: float):
        )
    except:
        Process(
-           updateArtifact(
+           update_artifact(
                artifactId,
                {
                    "status": "fail",
@@ -157,7 +155,7 @@ async def inferenceVideo(artifactId: str, inputDir: str, threshold: float):
        print(e)
 
 
-def updateArtifact(artifactId: str, body):
+def update_artifact(artifactId: str, body):
    artifact_ref = db.collection("artifacts").document(artifactId)
    artifact_snapshot = artifact_ref.get()
    if artifact_snapshot.exists:
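
Note that the upload handler now awaits `inference_video` directly rather than queueing it with `background_tasks.add_task`, so the request does not return until processing completes. For context, the visible body of `update_artifact` completes naturally as sketched below; the final `.update(body)` call is an assumption, since the diff cuts off after the `exists` check:

# Sketch only: plausible completion of update_artifact; the .update(body)
# call is an assumption based on how callers pass status dicts.
def update_artifact(artifactId: str, body):
    artifact_ref = db.collection("artifacts").document(artifactId)
    artifact_snapshot = artifact_ref.get()
    if artifact_snapshot.exists:
        artifact_ref.update(body)  # e.g. {"status": "processing"}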
model/{end2end.onnx → best20231112.onnx} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e0efff65be17a13541a3b93d6b0cde60ed37f996c4f2d57aa38071e48117f217
-size 22245735
+oid sha256:21f0a8a6f0398cef7e73d74919d8a50fabbf5c1f5dd9e4192b129ce53153a317
+size 12119302
model/deploy.json DELETED
@@ -1,16 +0,0 @@
-{
-  "version": "1.2.0",
-  "task": "Detector",
-  "models": [
-    {
-      "name": "rtmdet",
-      "net": "end2end.onnx",
-      "weights": "",
-      "backend": "onnxruntime",
-      "precision": "FP32",
-      "batch_size": 1,
-      "dynamic_shape": true
-    }
-  ],
-  "customs": []
-}
model/detail.json DELETED
@@ -1,68 +0,0 @@
-{
-  "version": "1.2.0",
-  "codebase": {
-    "task": "ObjectDetection",
-    "codebase": "mmdet",
-    "version": "3.1.0",
-    "pth": "rtmdet_tiny_8xb32-300e_coco_20220902_112414-78e30dcc.pth",
-    "config": "rtmdet_tiny_8xb32-300e_coco.py"
-  },
-  "codebase_config": {
-    "type": "mmdet",
-    "task": "ObjectDetection",
-    "model_type": "end2end",
-    "post_processing": {
-      "score_threshold": 0.05,
-      "confidence_threshold": 0.005,
-      "iou_threshold": 0.5,
-      "max_output_boxes_per_class": 200,
-      "pre_top_k": 5000,
-      "keep_top_k": 100,
-      "background_label_id": -1
-    }
-  },
-  "onnx_config": {
-    "type": "onnx",
-    "export_params": true,
-    "keep_initializers_as_inputs": false,
-    "opset_version": 11,
-    "save_file": "end2end.onnx",
-    "input_names": [
-      "input"
-    ],
-    "output_names": [
-      "dets",
-      "labels"
-    ],
-    "input_shape": null,
-    "optimize": true,
-    "dynamic_axes": {
-      "input": {
-        "0": "batch",
-        "2": "height",
-        "3": "width"
-      },
-      "dets": {
-        "0": "batch",
-        "1": "num_dets"
-      },
-      "labels": {
-        "0": "batch",
-        "1": "num_dets"
-      }
-    }
-  },
-  "backend_config": {
-    "type": "onnxruntime",
-    "precision": "fp16",
-    "common_config": {
-      "min_positive_val": 1e-07,
-      "max_finite_val": 10000.0,
-      "keep_io_types": false,
-      "disable_shape_infer": false,
-      "op_block_list": null,
-      "node_block_list": null
-    }
-  },
-  "calib_config": {}
-}
model/pipeline.json DELETED
@@ -1,132 +0,0 @@
-{
-  "pipeline": {
-    "input": [
-      "img"
-    ],
-    "output": [
-      "post_output"
-    ],
-    "tasks": [
-      {
-        "type": "Task",
-        "module": "Transform",
-        "name": "Preprocess",
-        "input": [
-          "img"
-        ],
-        "output": [
-          "prep_output"
-        ],
-        "transforms": [
-          {
-            "type": "LoadImageFromFile",
-            "backend_args": null
-          },
-          {
-            "type": "Resize",
-            "keep_ratio": true,
-            "size": [
-              640,
-              640
-            ]
-          },
-          {
-            "type": "Pad",
-            "size": [
-              640,
-              640
-            ],
-            "pad_val": {
-              "img": [
-                114,
-                114,
-                114
-              ]
-            }
-          },
-          {
-            "type": "Normalize",
-            "to_rgb": false,
-            "mean": [
-              103.53,
-              116.28,
-              123.675
-            ],
-            "std": [
-              57.375,
-              57.12,
-              58.395
-            ]
-          },
-          {
-            "type": "Pad",
-            "size_divisor": 1
-          },
-          {
-            "type": "DefaultFormatBundle"
-          },
-          {
-            "type": "Collect",
-            "meta_keys": [
-              "img_path",
-              "flip",
-              "ori_filename",
-              "pad_param",
-              "pad_shape",
-              "scale_factor",
-              "valid_ratio",
-              "flip_direction",
-              "img_norm_cfg",
-              "img_id",
-              "img_shape",
-              "ori_shape",
-              "filename"
-            ],
-            "keys": [
-              "img"
-            ]
-          }
-        ]
-      },
-      {
-        "name": "rtmdet",
-        "type": "Task",
-        "module": "Net",
-        "is_batched": true,
-        "input": [
-          "prep_output"
-        ],
-        "output": [
-          "infer_output"
-        ],
-        "input_map": {
-          "img": "input"
-        },
-        "output_map": {}
-      },
-      {
-        "type": "Task",
-        "module": "mmdet",
-        "name": "postprocess",
-        "component": "ResizeBBox",
-        "params": {
-          "nms_pre": 1000,
-          "min_bbox_size": 0,
-          "score_thr": 0.3,
-          "nms": {
-            "type": "nms",
-            "iou_threshold": 0.65
-          },
-          "max_per_img": 100
-        },
-        "output": [
-          "post_output"
-        ],
-        "input": [
-          "prep_output",
-          "infer_output"
-        ]
-      }
-    ]
-  }
-}
tests/test_custom_mmcv.py CHANGED
@@ -1,6 +1,6 @@
 from app.custom_mmcv.color import color_val
 from app.custom_mmcv.main import imshow_det_bboxes
-from app.constants import classNames
+from app.constants import class_names
 import mmcv
 import cv2
 import numpy as np
@@ -30,22 +30,22 @@ class TestCustomMMCV():
        image = mmcv.imread('demo.jpg')
        bboxes = np.ones((1,5))
        labels = np.zeros(1, dtype=np.int32)
-       result = imshow_det_bboxes(image, bboxes, labels, class_names=classNames, bbox_color="red", text_color='red')
+       result = imshow_det_bboxes(image, bboxes, labels, class_names=class_names, bbox_color="red", text_color='red')
        assert (result[1,1,:] == (0,0,255)).all()
        with pytest.raises(AssertionError):
            bboxes = np.ones((1,3))
            labels = np.zeros(1, dtype=np.int32)
-           result = imshow_det_bboxes(image, bboxes, labels, class_names=classNames, bbox_color="red",text_color="red")
+           result = imshow_det_bboxes(image, bboxes, labels, class_names=class_names, bbox_color="red",text_color="red")
        with pytest.raises(AssertionError):
            bboxes = np.ones((1,7))
            labels = np.zeros(1, dtype=np.int32)
-           result = imshow_det_bboxes(image, bboxes, labels, class_names=classNames, bbox_color="red",text_color="red")
+           result = imshow_det_bboxes(image, bboxes, labels, class_names=class_names, bbox_color="red",text_color="red")
        with pytest.raises(AssertionError):
            bboxes = np.ones((1,5))
            labels = np.zeros(4, dtype=np.int32)
-           result = imshow_det_bboxes(image, bboxes, labels, class_names=classNames, bbox_color="red",text_color="red")
+           result = imshow_det_bboxes(image, bboxes, labels, class_names=class_names, bbox_color="red",text_color="red")
        with pytest.raises(AssertionError):
            bboxes = np.ones((2,5))
            labels = np.zeros(1, dtype=np.int32)
-           result = imshow_det_bboxes(image, bboxes, labels, class_names=classNames, bbox_color="red",text_color="red")
+           result = imshow_det_bboxes(image, bboxes, labels, class_names=class_names, bbox_color="red",text_color="red")
 
tests/test_image.py CHANGED
@@ -1,7 +1,7 @@
 import os
 import mmcv
 import numpy as np
-from app.routers.image import inferenceImage
+from app.routers.image import inference_image
 import pytest
 import pytest
 from fastapi.testclient import TestClient
@@ -22,9 +22,9 @@ class TestImageRoute():
    img = mmcv.imread('demo.jpg')
    url = "http://0.0.0.0:3000/image"
    def test_inferenceImage(self):
-       bboxes, labels = inferenceImage(mmcv.imread('demo.jpg'), 0.3, True)
+       bboxes, labels = inference_image(mmcv.imread('demo.jpg'), 0.3, True)
        assert len(bboxes.tolist()) > 0 and len(labels.tolist()) > 0 and len(bboxes.tolist()) == len(labels.tolist())
-       result = inferenceImage(self.img, 0.3, False)
+       result = inference_image(self.img, 0.3, False)
        assert type(result) is np.ndarray and result.shape == self.img.shape
    def test_ImageAPI(self, client):
        payload = {}
tests/test_video.py CHANGED
@@ -1,6 +1,6 @@
 from fastapi.testclient import TestClient
 from fastapi.routing import APIRoute
-from app.routers.video import updateArtifact, createThumbnail, inference_frame
+from app.routers.video import update_artifact, createThumbnail, inference_frame
 from app.main import app
 from app.constants import deviceId
 from app import db
@@ -107,7 +107,7 @@ class TestVideoAPI:
        else:
            test_artifact.update({"status": "testing", 'path': '', "thumbnailURL":""})
        # Testing update on each field
-       updateArtifact(test_artifact.id,{"status": "test_done"})
+       update_artifact(test_artifact.id,{"status": "test_done"})
        assert db.collection("artifacts").document('test').get().to_dict()['status'] == 'test_done'
        #Delete data for next time test
        test_artifact.delete()