Andrey committed on
Commit 99d3d67
1 Parent(s): d74b579

Update code following deepsource checks.

Files changed (4)
  1. src/ml_utils.py +8 -7
  2. src/model_architecture.py +0 -5
  3. src/utils.py +1 -1
  4. st_app.py +0 -1
src/ml_utils.py CHANGED
@@ -25,14 +25,16 @@ transforms = A.Compose(
 )
 
 
-def cells_to_bboxes(predictions: torch.tensor, anchors: torch.tensor, s: int, is_preds: bool = True) -> torch.tensor:
+def cells_to_bboxes(
+    predictions: torch.tensor, tensor_anchors: torch.tensor, s: int, is_preds: bool = True
+) -> torch.tensor:
     """
     Scale the predictions coming from the model_files to
     be relative to the entire image such that they for example later
     can be plotted or.
     Args:
         predictions: tensor of size (N, 3, S, S, num_classes+5)
-        anchors: the anchors used for the predictions
+        tensor_anchors: the anchors used for the predictions
         s: the number of cells the image is divided in on the width (and height)
         is_preds: whether the input is predictions or the true bounding boxes
     Returns:
@@ -40,12 +42,12 @@ def cells_to_bboxes(predictions: torch.tensor, anchors: torch.tensor, s: int, is
         object score, bounding box coordinates
     """
     batch_size = predictions.shape[0]
-    num_anchors = len(anchors)
+    num_anchors = len(tensor_anchors)
     box_predictions = predictions[..., 1:5]
     if is_preds:
-        anchors = anchors.reshape(1, len(anchors), 1, 1, 2)
+        tensor_anchors = tensor_anchors.reshape(1, len(tensor_anchors), 1, 1, 2)
         box_predictions[..., 0:2] = torch.sigmoid(box_predictions[..., 0:2])
-        box_predictions[..., 2:] = torch.exp(box_predictions[..., 2:]) * anchors
+        box_predictions[..., 2:] = torch.exp(box_predictions[..., 2:]) * tensor_anchors
         scores = torch.sigmoid(predictions[..., 0:1])
         best_class = torch.argmax(predictions[..., 5:], dim=-1).unsqueeze(-1)
     else:
@@ -79,8 +81,6 @@ def non_max_suppression(
         list: bboxes after performing NMS given a specific IoU threshold
     """
 
-    assert type(bboxes) == list
-
     bboxes = [box for box in bboxes if box[1] > threshold]
     bboxes = sorted(bboxes, key=lambda x: x[1], reverse=True)
     bboxes_after_nms = []
@@ -177,6 +177,7 @@ def predict(
     # postprocess. In fact, we could remove indexing with idx here, as there is a single image.
     # But I prefer to keep it so that this code could be easier changed for cases with batch size > 1
     bboxes: List[List] = [[] for _ in range(1)]
+    idx = 0
     for i in range(3):
         S = logits[i].shape[2]
         # it could be better to initialize anchors inside the function, but I don't want to do it for every prediction.
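
For context, a minimal usage sketch of the renamed cells_to_bboxes signature. The shapes and anchor values below are assumptions inferred from the docstring, not part of the commit; only the reshape-and-broadcast step mirrors the diff.

import torch

# Hypothetical shapes: predictions is (N, 3, S, S, num_classes + 5) and
# tensor_anchors holds one (w, h) pair per anchor at this scale.
num_classes = 12
S = 13
predictions = torch.randn(1, 3, S, S, num_classes + 5)
tensor_anchors = torch.tensor([[0.28, 0.22], [0.38, 0.48], [0.90, 0.78]])

# The reshape from the diff broadcasts the anchors over the S x S grid,
# so the exponentiated width/height predictions are scaled per anchor.
reshaped = tensor_anchors.reshape(1, len(tensor_anchors), 1, 1, 2)
decoded_wh = torch.exp(predictions[..., 3:5]) * reshaped  # (1, 3, S, S, 2)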
src/model_architecture.py CHANGED
@@ -60,8 +60,6 @@ class Net(nn.Module):
         super().__init__()
         self.num_classes = 12
         self.in_channels = 3
-        # self.config = cfg.model_files.params.config
-        # self.config = [i if i[0] != '(' else literal_eval(i) for i in self.config]
         self.config = [
             (32, 3, 1),
             (64, 3, 2),
@@ -88,7 +86,6 @@ class Net(nn.Module):
             (256, 3, 1),
             'S',
         ]
-        # print('self.config', self.config)
         self.layers = self._create_conv_layers()
 
     def forward(self, x):
@@ -98,7 +95,6 @@ class Net(nn.Module):
             if isinstance(layer, ScalePrediction):
                 outputs.append(layer(x))
                 continue
-            # print(layer, x.shape)
             x = layer(x)
 
             if isinstance(layer, ResidualBlock) and layer.num_repeats == 8:
@@ -115,7 +111,6 @@ class Net(nn.Module):
         in_channels = self.in_channels
 
         for module in self.config:
-            # print(module, type(module))
             if isinstance(module, tuple):
                 out_channels, kernel_size, stride = module
                 layers.append(
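
As a side note, a minimal sketch of how a config list in this style is typically consumed: a tuple entry means (out_channels, kernel_size, stride). The build_from_config helper and the plain nn.Conv2d stand-in are illustrative assumptions, not the repository's CNNBlock/ScalePrediction/ResidualBlock classes; string entries such as 'S' are skipped here.

import torch
import torch.nn as nn

def build_from_config(config, in_channels=3):
    # Unpack each (out_channels, kernel_size, stride) tuple into a conv layer.
    layers = []
    for module in config:
        if isinstance(module, tuple):
            out_channels, kernel_size, stride = module
            layers.append(
                nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=kernel_size // 2)
            )
            in_channels = out_channels
    return nn.Sequential(*layers)

backbone = build_from_config([(32, 3, 1), (64, 3, 2)])
print(backbone(torch.randn(1, 3, 416, 416)).shape)  # torch.Size([1, 64, 208, 208])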
src/utils.py CHANGED
@@ -28,7 +28,7 @@ def plot_img_with_rects(
     ax.imshow(img)
 
     # Create a Rectangle patch
-    for _, rect in enumerate([b for b in boxes if b[1] > threshold]):
+    for _, rect in enumerate(b for b in boxes if b[1] > threshold):
         label, _, xc, yc, w, h = rect
         xc, yc, w, h = xc * coef, yc * coef, w * coef, h * coef
         # the coordinates from center-based to left top corner
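
This change is behavior-preserving: enumerate() accepts any iterable, so the generator expression filters the boxes exactly like the list comprehension did, just without building the intermediate list. A small illustration with made-up box tuples:

boxes = [(0, 0.9, 10, 10, 5, 5), (1, 0.2, 20, 20, 5, 5)]
threshold = 0.5
for _, rect in enumerate(b for b in boxes if b[1] > threshold):
    print(rect)  # only the box whose score exceeds the threshold is printed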
st_app.py CHANGED
@@ -2,7 +2,6 @@ import logging
 
 import numpy as np
 import streamlit as st
-import tomli as tomllib
 from PIL import Image
 from streamlit_drawable_canvas import st_canvas
 