Robert001 committed on
Commit
b334e29
1 Parent(s): 32e53ac

first commit

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. annotator/__pycache__/util.cpython-38.pyc +0 -0
  2. annotator/blur/__init__.py +7 -0
  3. annotator/blur/__pycache__/__init__.cpython-38.pyc +0 -0
  4. annotator/canny/__init__.py +16 -0
  5. annotator/canny/__pycache__/__init__.cpython-38.pyc +0 -0
  6. annotator/ckpts/ckpts.txt +1 -0
  7. annotator/grayscale/__init__.py +5 -0
  8. annotator/grayscale/__pycache__/__init__.cpython-38.pyc +0 -0
  9. annotator/hed/__init__.py +107 -0
  10. annotator/hed/__pycache__/__init__.cpython-38.pyc +0 -0
  11. annotator/inpainting/__init__.py +15 -0
  12. annotator/inpainting/__pycache__/__init__.cpython-38.pyc +0 -0
  13. annotator/midas/LICENSE +21 -0
  14. annotator/midas/__init__.py +52 -0
  15. annotator/midas/__pycache__/__init__.cpython-38.pyc +0 -0
  16. annotator/midas/__pycache__/api.cpython-38.pyc +0 -0
  17. annotator/midas/api.py +183 -0
  18. annotator/midas/midas/__init__.py +0 -0
  19. annotator/midas/midas/__pycache__/__init__.cpython-38.pyc +0 -0
  20. annotator/midas/midas/__pycache__/base_model.cpython-38.pyc +0 -0
  21. annotator/midas/midas/__pycache__/blocks.cpython-38.pyc +0 -0
  22. annotator/midas/midas/__pycache__/dpt_depth.cpython-38.pyc +0 -0
  23. annotator/midas/midas/__pycache__/midas_net.cpython-38.pyc +0 -0
  24. annotator/midas/midas/__pycache__/midas_net_custom.cpython-38.pyc +0 -0
  25. annotator/midas/midas/__pycache__/transforms.cpython-38.pyc +0 -0
  26. annotator/midas/midas/__pycache__/vit.cpython-38.pyc +0 -0
  27. annotator/midas/midas/base_model.py +26 -0
  28. annotator/midas/midas/blocks.py +352 -0
  29. annotator/midas/midas/dpt_depth.py +119 -0
  30. annotator/midas/midas/midas_net.py +86 -0
  31. annotator/midas/midas/midas_net_custom.py +138 -0
  32. annotator/midas/midas/transforms.py +244 -0
  33. annotator/midas/midas/vit.py +501 -0
  34. annotator/midas/utils.py +199 -0
  35. annotator/mlsd/LICENSE +201 -0
  36. annotator/mlsd/__init__.py +53 -0
  37. annotator/mlsd/models/mbv2_mlsd_large.py +302 -0
  38. annotator/mlsd/models/mbv2_mlsd_tiny.py +285 -0
  39. annotator/mlsd/utils.py +590 -0
  40. annotator/openpose/LICENSE +108 -0
  41. annotator/openpose/__init__.py +59 -0
  42. annotator/openpose/__pycache__/__init__.cpython-38.pyc +0 -0
  43. annotator/openpose/__pycache__/body.cpython-38.pyc +0 -0
  44. annotator/openpose/__pycache__/hand.cpython-38.pyc +0 -0
  45. annotator/openpose/__pycache__/model.cpython-38.pyc +0 -0
  46. annotator/openpose/__pycache__/util.cpython-38.pyc +0 -0
  47. annotator/openpose/body.py +229 -0
  48. annotator/openpose/hand.py +96 -0
  49. annotator/openpose/model.py +229 -0
  50. annotator/openpose/util.py +175 -0
annotator/__pycache__/util.cpython-38.pyc ADDED
Binary file (1.6 kB).
 
annotator/blur/__init__.py ADDED
@@ -0,0 +1,7 @@
+ import cv2
+
+ class Blurrer:
+     def __call__(self, img, ksize):
+         img_new = cv2.GaussianBlur(img, (ksize, ksize), cv2.BORDER_DEFAULT)
+         img_new = img_new.astype('ubyte')
+         return img_new
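
A minimal usage sketch for the Blurrer above (the input image is a hypothetical example; cv2.GaussianBlur requires ksize to be a positive odd integer):

import numpy as np
from annotator.blur import Blurrer

blur = Blurrer()
img = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)  # hypothetical RGB input
blurred = blur(img, ksize=5)                                    # ksize must be odd
print(blurred.shape, blurred.dtype)                             # (256, 256, 3) uint8
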
annotator/blur/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (507 Bytes).
 
annotator/canny/__init__.py ADDED
@@ -0,0 +1,16 @@
+ '''
+ * Copyright (c) 2023 Salesforce, Inc.
+ * All rights reserved.
+ * SPDX-License-Identifier: Apache License 2.0
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
+ * By Can Qin
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
+ '''
+
+ import cv2
+
+
+ class CannyDetector:
+     def __call__(self, img, low_threshold, high_threshold):
+         return cv2.Canny(img, low_threshold, high_threshold)
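
A minimal usage sketch for the CannyDetector above (thresholds are illustrative; cv2.Canny expects an 8-bit input image):

import numpy as np
from annotator.canny import CannyDetector

apply_canny = CannyDetector()
img = np.random.randint(0, 256, (512, 512, 3), dtype=np.uint8)   # hypothetical RGB input
edges = apply_canny(img, low_threshold=100, high_threshold=200)  # H x W uint8 edge map, values 0 or 255
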
annotator/canny/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (847 Bytes).
 
annotator/ckpts/ckpts.txt ADDED
@@ -0,0 +1 @@
+ Weights here.
annotator/grayscale/__init__.py ADDED
@@ -0,0 +1,5 @@
+ from skimage import color
+
+ class GrayscaleConverter:
+     def __call__(self, img):
+         return (color.rgb2gray(img) * 255.0).astype('ubyte')
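
Note that skimage.color.rgb2gray returns a float image scaled to [0, 1] regardless of the input dtype, which is why the converter multiplies by 255 and casts back to uint8. A minimal usage sketch:

import numpy as np
from annotator.grayscale import GrayscaleConverter

to_gray = GrayscaleConverter()
img = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)  # hypothetical RGB input
gray = to_gray(img)                                           # H x W uint8, single channel
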
annotator/grayscale/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (511 Bytes).
 
annotator/hed/__init__.py ADDED
@@ -0,0 +1,107 @@
+ '''
+ * Copyright (c) 2023 Salesforce, Inc.
+ * All rights reserved.
+ * SPDX-License-Identifier: Apache License 2.0
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
+ * By Can Qin
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
+ '''
+
+ # This is an improved version and model of HED edge detection without GPL contamination
+ # Please use this implementation in your products
+ # This implementation may produce slightly different results from Saining Xie's official implementations,
+ # but it generates smoother edges and is more suitable for ControlNet as well as other image-to-image translations.
+ # Different from official models and other implementations, this is an RGB-input model (rather than BGR)
+ # and in this way it works better for gradio's RGB protocol
+
+ import os
+ import cv2
+ import torch
+ import numpy as np
+
+ from einops import rearrange
+ from annotator.util import annotator_ckpts_path
+
+
+ class DoubleConvBlock(torch.nn.Module):
+     def __init__(self, input_channel, output_channel, layer_number):
+         super().__init__()
+         self.convs = torch.nn.Sequential()
+         self.convs.append(torch.nn.Conv2d(in_channels=input_channel, out_channels=output_channel, kernel_size=(3, 3), stride=(1, 1), padding=1))
+         for i in range(1, layer_number):
+             self.convs.append(torch.nn.Conv2d(in_channels=output_channel, out_channels=output_channel, kernel_size=(3, 3), stride=(1, 1), padding=1))
+         self.projection = torch.nn.Conv2d(in_channels=output_channel, out_channels=1, kernel_size=(1, 1), stride=(1, 1), padding=0)
+
+     def __call__(self, x, down_sampling=False):
+         h = x
+         if down_sampling:
+             h = torch.nn.functional.max_pool2d(h, kernel_size=(2, 2), stride=(2, 2))
+         for conv in self.convs:
+             h = conv(h)
+             h = torch.nn.functional.relu(h)
+         return h, self.projection(h)
+
+
+ class ControlNetHED_Apache2(torch.nn.Module):
+     def __init__(self):
+         super().__init__()
+         self.norm = torch.nn.Parameter(torch.zeros(size=(1, 3, 1, 1)))
+         self.block1 = DoubleConvBlock(input_channel=3, output_channel=64, layer_number=2)
+         self.block2 = DoubleConvBlock(input_channel=64, output_channel=128, layer_number=2)
+         self.block3 = DoubleConvBlock(input_channel=128, output_channel=256, layer_number=3)
+         self.block4 = DoubleConvBlock(input_channel=256, output_channel=512, layer_number=3)
+         self.block5 = DoubleConvBlock(input_channel=512, output_channel=512, layer_number=3)
+
+     def __call__(self, x):
+         h = x - self.norm
+         h, projection1 = self.block1(h)
+         h, projection2 = self.block2(h, down_sampling=True)
+         h, projection3 = self.block3(h, down_sampling=True)
+         h, projection4 = self.block4(h, down_sampling=True)
+         h, projection5 = self.block5(h, down_sampling=True)
+         return projection1, projection2, projection3, projection4, projection5
+
+
+ class HEDdetector:
+     def __init__(self):
+         remote_model_path = "https://huggingface.co/lllyasviel/Annotators/resolve/main/ControlNetHED.pth"
+         modelpath = remote_model_path
+         #modelpath = os.path.join(annotator_ckpts_path, "ControlNetHED.pth")
+         #if not os.path.exists(modelpath):
+         #    from basicsr.utils.download_util import load_file_from_url
+         #    load_file_from_url(remote_model_path, model_dir=annotator_ckpts_path)
+         self.netNetwork = ControlNetHED_Apache2().float().cuda().eval()
+         self.netNetwork.load_state_dict(torch.load(modelpath))
+
+     def __call__(self, input_image):
+         assert input_image.ndim == 3
+         H, W, C = input_image.shape
+         with torch.no_grad():
+             image_hed = torch.from_numpy(input_image.copy()).float().cuda()
+             image_hed = rearrange(image_hed, 'h w c -> 1 c h w')
+             edges = self.netNetwork(image_hed)
+             edges = [e.detach().cpu().numpy().astype(np.float32)[0, 0] for e in edges]
+             edges = [cv2.resize(e, (W, H), interpolation=cv2.INTER_LINEAR) for e in edges]
+             edges = np.stack(edges, axis=2)
+             edge = 1 / (1 + np.exp(-np.mean(edges, axis=2).astype(np.float64)))
+             edge = (edge * 255.0).clip(0, 255).astype(np.uint8)
+             return edge
+
+
+ def nms(x, t, s):
+     x = cv2.GaussianBlur(x.astype(np.float32), (0, 0), s)
+
+     f1 = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]], dtype=np.uint8)
+     f2 = np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], dtype=np.uint8)
+     f3 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.uint8)
+     f4 = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=np.uint8)
+
+     y = np.zeros_like(x)
+
+     for f in [f1, f2, f3, f4]:
+         np.putmask(y, cv2.dilate(x, kernel=f) == x, x)
+
+     z = np.zeros_like(y, dtype=np.uint8)
+     z[y > t] = 255
+     return z
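
A hedged usage sketch for the HEDdetector and nms helper above. It assumes a CUDA-capable GPU (the network and inputs are moved to .cuda()) and that the ControlNetHED.pth checkpoint is available to torch.load; as written, modelpath points at the remote URL, and torch.load expects a local file path or file object, so the weights may need to be downloaded to annotator_ckpts_path first.

import numpy as np
from annotator.hed import HEDdetector, nms

apply_hed = HEDdetector()                        # builds ControlNetHED_Apache2 and loads the checkpoint
img = np.random.randint(0, 256, (512, 512, 3), dtype=np.uint8)   # hypothetical RGB input
soft_edge = apply_hed(img)                       # H x W uint8 soft edge map
hard_edge = nms(soft_edge, t=127, s=3.0)         # thin the edges: Gaussian blur, directional dilation, threshold
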
annotator/hed/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (4.69 kB).
 
annotator/inpainting/__init__.py ADDED
@@ -0,0 +1,15 @@
+ import numpy as np
+
+ class Inpainter:
+     def __call__(self, img, rand_h, rand_h_1, rand_w, rand_w_1):
+         h = img.shape[0]
+         w = img.shape[1]
+         h_new = int(float(h) / 100.0 * float(rand_h))
+         w_new = int(float(w) / 100.0 * float(rand_w))
+         h_new_1 = int(float(h) / 100.0 * float(rand_h_1))
+         w_new_1 = int(float(w) / 100.0 * float(rand_w_1))
+
+         img_new = img
+         img_new[(h-h_new)//2:(h+h_new_1)//2, (w-w_new)//2:(w+w_new_1)//2] = 0
+         img_new = img_new.astype('ubyte')
+         return img_new
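
A quick sketch of what the Inpainter above produces: the four arguments are percentages of the image height and width, and a roughly centered rectangle covering those fractions is zeroed out. Note that img_new = img is only a reference, so the input array is modified in place as well.

import numpy as np
from annotator.inpainting import Inpainter

mask_center = Inpainter()
img = np.full((200, 300, 3), 255, dtype=np.uint8)                 # hypothetical all-white image
out = mask_center(img, rand_h=50, rand_h_1=50, rand_w=40, rand_w_1=40)
# Rows 50:150 and columns 90:210 are now zero: a 100 x 120 black rectangle in the middle.
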
annotator/inpainting/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (764 Bytes).
 
annotator/midas/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2019 Intel ISL (Intel Intelligent Systems Lab)
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
annotator/midas/__init__.py ADDED
@@ -0,0 +1,52 @@
+ '''
+ * Copyright (c) 2023 Salesforce, Inc.
+ * All rights reserved.
+ * SPDX-License-Identifier: Apache License 2.0
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
+ * By Can Qin
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
+ '''
+
+ # Midas Depth Estimation
+ # From https://github.com/isl-org/MiDaS
+ # MIT LICENSE
+
+ import cv2
+ import numpy as np
+ import torch
+
+ from einops import rearrange
+ from .api import MiDaSInference
+
+
+ class MidasDetector:
+     def __init__(self):
+         self.model = MiDaSInference(model_type="dpt_large").cuda()
+
+     def __call__(self, input_image, a=np.pi * 0.2, bg_th=0.02):
+         assert input_image.ndim == 3
+         image_depth = input_image
+         with torch.no_grad():
+             image_depth = torch.from_numpy(image_depth).float().cuda()
+             image_depth = image_depth / 127.5 - 1.0
+             image_depth = rearrange(image_depth, 'h w c -> 1 c h w')
+             depth = self.model(image_depth)[0]
+
+             depth_pt = depth.clone()
+             depth_pt -= torch.min(depth_pt)
+             depth_pt /= torch.max(depth_pt)
+             depth_pt = depth_pt.cpu().numpy()
+             depth_image = (depth_pt * 255.0).clip(0, 255).astype(np.uint8)
+
+             depth_np = depth.cpu().numpy()
+             x = cv2.Sobel(depth_np, cv2.CV_32F, 1, 0, ksize=3)
+             y = cv2.Sobel(depth_np, cv2.CV_32F, 0, 1, ksize=3)
+             z = np.ones_like(x) * a
+             x[depth_pt < bg_th] = 0
+             y[depth_pt < bg_th] = 0
+             normal = np.stack([x, y, z], axis=2)
+             normal /= np.sum(normal ** 2.0, axis=2, keepdims=True) ** 0.5
+             normal_image = (normal * 127.5 + 127.5).clip(0, 255).astype(np.uint8)
+
+             return depth_image, normal_image
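
A hedged usage sketch for the MidasDetector above (assumes a CUDA-capable GPU and that the DPT-Large checkpoint referenced in annotator/midas/api.py can be loaded; the input is an HWC uint8 RGB image, with 384 x 384 being the model's native resolution):

import numpy as np
from annotator.midas import MidasDetector

apply_midas = MidasDetector()
img = np.random.randint(0, 256, (384, 384, 3), dtype=np.uint8)   # hypothetical RGB input
depth_image, normal_image = apply_midas(img)   # uint8 depth map and uint8 pseudo normal map
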
annotator/midas/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (1.92 kB).
 
annotator/midas/__pycache__/api.cpython-38.pyc ADDED
Binary file (4.14 kB).
 
annotator/midas/api.py ADDED
@@ -0,0 +1,183 @@
1
+ '''
2
+ * Copyright (c) 2023 Salesforce, Inc.
3
+ * All rights reserved.
4
+ * SPDX-License-Identifier: Apache License 2.0
5
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
6
+ * By Can Qin
7
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
8
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
9
+ '''
10
+
11
+ # based on https://github.com/isl-org/MiDaS
12
+
13
+ import cv2
14
+ import os
15
+ import torch
16
+ import torch.nn as nn
17
+ from torchvision.transforms import Compose
18
+
19
+ from .midas.dpt_depth import DPTDepthModel
20
+ from .midas.midas_net import MidasNet
21
+ from .midas.midas_net_custom import MidasNet_small
22
+ from .midas.transforms import Resize, NormalizeImage, PrepareForNet
23
+ from annotator.util import annotator_ckpts_path
24
+
25
+
26
+ ISL_PATHS = {
27
+ "dpt_large": os.path.join(annotator_ckpts_path, "dpt_large_384.pt"),
28
+ "dpt_hybrid": os.path.join(annotator_ckpts_path, "dpt_hybrid-midas-501f0c75.pt"),
29
+ "midas_v21": "",
30
+ "midas_v21_small": "",
31
+ }
32
+
33
+ # remote_model_path = "https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/dpt_hybrid-midas-501f0c75.pt"
34
+ remote_model_path = "https://huggingface.co/Salesforce/UniControl/blob/main/annotator/ckpts/dpt_large_384.pt"
35
+
36
+ def disabled_train(self, mode=True):
37
+ """Overwrite model.train with this function to make sure train/eval mode
38
+ does not change anymore."""
39
+ return self
40
+
41
+
42
+ def load_midas_transform(model_type):
43
+ # https://github.com/isl-org/MiDaS/blob/master/run.py
44
+ # load transform only
45
+ if model_type == "dpt_large": # DPT-Large
46
+ net_w, net_h = 384, 384
47
+ resize_mode = "minimal"
48
+ normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
49
+
50
+ elif model_type == "dpt_hybrid": # DPT-Hybrid
51
+ net_w, net_h = 384, 384
52
+ resize_mode = "minimal"
53
+ normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
54
+
55
+ elif model_type == "midas_v21":
56
+ net_w, net_h = 384, 384
57
+ resize_mode = "upper_bound"
58
+ normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
59
+
60
+ elif model_type == "midas_v21_small":
61
+ net_w, net_h = 256, 256
62
+ resize_mode = "upper_bound"
63
+ normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
64
+
65
+ else:
66
+ assert False, f"model_type '{model_type}' not implemented, use: --model_type large"
67
+
68
+ transform = Compose(
69
+ [
70
+ Resize(
71
+ net_w,
72
+ net_h,
73
+ resize_target=None,
74
+ keep_aspect_ratio=True,
75
+ ensure_multiple_of=32,
76
+ resize_method=resize_mode,
77
+ image_interpolation_method=cv2.INTER_CUBIC,
78
+ ),
79
+ normalization,
80
+ PrepareForNet(),
81
+ ]
82
+ )
83
+
84
+ return transform
85
+
86
+
87
+ def load_model(model_type):
88
+ # https://github.com/isl-org/MiDaS/blob/master/run.py
89
+ # load network
90
+ model_path = ISL_PATHS[model_type]
91
+ if model_type == "dpt_large": # DPT-Large
92
+ # if not os.path.exists(model_path):
93
+ # from basicsr.utils.download_util import load_file_from_url
94
+ # load_file_from_url(remote_model_path, model_dir=annotator_ckpts_path)
95
+ model_path = remote_model_path
96
+ model = DPTDepthModel(
97
+ path=model_path,
98
+ backbone="vitl16_384",
99
+ non_negative=True,
100
+ )
101
+ net_w, net_h = 384, 384
102
+ resize_mode = "minimal"
103
+ normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
104
+
105
+ elif model_type == "dpt_hybrid": # DPT-Hybrid
106
+ if not os.path.exists(model_path):
107
+ from basicsr.utils.download_util import load_file_from_url
108
+ load_file_from_url(remote_model_path, model_dir=annotator_ckpts_path)
109
+
110
+ model = DPTDepthModel(
111
+ path=model_path,
112
+ backbone="vitb_rn50_384",
113
+ non_negative=True,
114
+ )
115
+ net_w, net_h = 384, 384
116
+ resize_mode = "minimal"
117
+ normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
118
+
119
+ elif model_type == "midas_v21":
120
+ model = MidasNet(model_path, non_negative=True)
121
+ net_w, net_h = 384, 384
122
+ resize_mode = "upper_bound"
123
+ normalization = NormalizeImage(
124
+ mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
125
+ )
126
+
127
+ elif model_type == "midas_v21_small":
128
+ model = MidasNet_small(model_path, features=64, backbone="efficientnet_lite3", exportable=True,
129
+ non_negative=True, blocks={'expand': True})
130
+ net_w, net_h = 256, 256
131
+ resize_mode = "upper_bound"
132
+ normalization = NormalizeImage(
133
+ mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
134
+ )
135
+
136
+ else:
137
+ print(f"model_type '{model_type}' not implemented, use: --model_type large")
138
+ assert False
139
+
140
+ transform = Compose(
141
+ [
142
+ Resize(
143
+ net_w,
144
+ net_h,
145
+ resize_target=None,
146
+ keep_aspect_ratio=True,
147
+ ensure_multiple_of=32,
148
+ resize_method=resize_mode,
149
+ image_interpolation_method=cv2.INTER_CUBIC,
150
+ ),
151
+ normalization,
152
+ PrepareForNet(),
153
+ ]
154
+ )
155
+
156
+ return model.eval(), transform
157
+
158
+
159
+ class MiDaSInference(nn.Module):
160
+ MODEL_TYPES_TORCH_HUB = [
161
+ "DPT_Large",
162
+ "DPT_Hybrid",
163
+ "MiDaS_small"
164
+ ]
165
+ MODEL_TYPES_ISL = [
166
+ "dpt_large",
167
+ "dpt_hybrid",
168
+ "midas_v21",
169
+ "midas_v21_small",
170
+ ]
171
+
172
+ def __init__(self, model_type):
173
+ super().__init__()
174
+ assert (model_type in self.MODEL_TYPES_ISL)
175
+ model, _ = load_model(model_type)
176
+ self.model = model
177
+ self.model.train = disabled_train
178
+
179
+ def forward(self, x):
180
+ with torch.no_grad():
181
+ prediction = self.model(x)
182
+ return prediction
183
+
annotator/midas/midas/__init__.py ADDED
File without changes
annotator/midas/midas/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (126 Bytes).
 
annotator/midas/midas/__pycache__/base_model.cpython-38.pyc ADDED
Binary file (1.03 kB).
 
annotator/midas/midas/__pycache__/blocks.cpython-38.pyc ADDED
Binary file (7.72 kB).
 
annotator/midas/midas/__pycache__/dpt_depth.cpython-38.pyc ADDED
Binary file (3.21 kB).
 
annotator/midas/midas/__pycache__/midas_net.cpython-38.pyc ADDED
Binary file (2.67 kB).
 
annotator/midas/midas/__pycache__/midas_net_custom.cpython-38.pyc ADDED
Binary file (3.79 kB).
 
annotator/midas/midas/__pycache__/transforms.cpython-38.pyc ADDED
Binary file (6.05 kB).
 
annotator/midas/midas/__pycache__/vit.cpython-38.pyc ADDED
Binary file (10.1 kB).
 
annotator/midas/midas/base_model.py ADDED
@@ -0,0 +1,26 @@
+ '''
+ * Copyright (c) 2023 Salesforce, Inc.
+ * All rights reserved.
+ * SPDX-License-Identifier: Apache License 2.0
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
+ * By Can Qin
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
+ '''
+
+ import torch
+
+
+ class BaseModel(torch.nn.Module):
+     def load(self, path):
+         """Load model from file.
+
+         Args:
+             path (str): file path
+         """
+         parameters = torch.load(path, map_location=torch.device('cpu'))
+
+         if "optimizer" in parameters:
+             parameters = parameters["model"]
+
+         self.load_state_dict(parameters)
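
A small illustration of the checkpoint-unwrapping logic in BaseModel.load above: training-style checkpoints that carry an "optimizer" entry keep the network weights under "model", and load() detects that case before calling load_state_dict. The module and file name below are hypothetical.

import torch

net = torch.nn.Linear(4, 2)                                            # hypothetical stand-in module
torch.save({"model": net.state_dict(), "optimizer": {}}, "ckpt.pth")   # hypothetical checkpoint file

checkpoint = torch.load("ckpt.pth", map_location=torch.device('cpu'))
state = checkpoint["model"] if "optimizer" in checkpoint else checkpoint
net.load_state_dict(state)
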
annotator/midas/midas/blocks.py ADDED
@@ -0,0 +1,352 @@
1
+ '''
2
+ * Copyright (c) 2023 Salesforce, Inc.
3
+ * All rights reserved.
4
+ * SPDX-License-Identifier: Apache License 2.0
5
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
6
+ * By Can Qin
7
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
8
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
9
+ '''
10
+
11
+ import torch
12
+ import torch.nn as nn
13
+
14
+ from .vit import (
15
+ _make_pretrained_vitb_rn50_384,
16
+ _make_pretrained_vitl16_384,
17
+ _make_pretrained_vitb16_384,
18
+ forward_vit,
19
+ )
20
+
21
+ def _make_encoder(backbone, features, use_pretrained, groups=1, expand=False, exportable=True, hooks=None, use_vit_only=False, use_readout="ignore",):
22
+ if backbone == "vitl16_384":
23
+ pretrained = _make_pretrained_vitl16_384(
24
+ use_pretrained, hooks=hooks, use_readout=use_readout
25
+ )
26
+ scratch = _make_scratch(
27
+ [256, 512, 1024, 1024], features, groups=groups, expand=expand
28
+ ) # ViT-L/16 - 85.0% Top1 (backbone)
29
+ elif backbone == "vitb_rn50_384":
30
+ pretrained = _make_pretrained_vitb_rn50_384(
31
+ use_pretrained,
32
+ hooks=hooks,
33
+ use_vit_only=use_vit_only,
34
+ use_readout=use_readout,
35
+ )
36
+ scratch = _make_scratch(
37
+ [256, 512, 768, 768], features, groups=groups, expand=expand
38
+ ) # ViT-H/16 - 85.0% Top1 (backbone)
39
+ elif backbone == "vitb16_384":
40
+ pretrained = _make_pretrained_vitb16_384(
41
+ use_pretrained, hooks=hooks, use_readout=use_readout
42
+ )
43
+ scratch = _make_scratch(
44
+ [96, 192, 384, 768], features, groups=groups, expand=expand
45
+ ) # ViT-B/16 - 84.6% Top1 (backbone)
46
+ elif backbone == "resnext101_wsl":
47
+ pretrained = _make_pretrained_resnext101_wsl(use_pretrained)
48
+ scratch = _make_scratch([256, 512, 1024, 2048], features, groups=groups, expand=expand) # efficientnet_lite3
49
+ elif backbone == "efficientnet_lite3":
50
+ pretrained = _make_pretrained_efficientnet_lite3(use_pretrained, exportable=exportable)
51
+ scratch = _make_scratch([32, 48, 136, 384], features, groups=groups, expand=expand) # efficientnet_lite3
52
+ else:
53
+ print(f"Backbone '{backbone}' not implemented")
54
+ assert False
55
+
56
+ return pretrained, scratch
57
+
58
+
59
+ def _make_scratch(in_shape, out_shape, groups=1, expand=False):
60
+ scratch = nn.Module()
61
+
62
+ out_shape1 = out_shape
63
+ out_shape2 = out_shape
64
+ out_shape3 = out_shape
65
+ out_shape4 = out_shape
66
+ if expand==True:
67
+ out_shape1 = out_shape
68
+ out_shape2 = out_shape*2
69
+ out_shape3 = out_shape*4
70
+ out_shape4 = out_shape*8
71
+
72
+ scratch.layer1_rn = nn.Conv2d(
73
+ in_shape[0], out_shape1, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
74
+ )
75
+ scratch.layer2_rn = nn.Conv2d(
76
+ in_shape[1], out_shape2, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
77
+ )
78
+ scratch.layer3_rn = nn.Conv2d(
79
+ in_shape[2], out_shape3, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
80
+ )
81
+ scratch.layer4_rn = nn.Conv2d(
82
+ in_shape[3], out_shape4, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
83
+ )
84
+
85
+ return scratch
86
+
87
+
88
+ def _make_pretrained_efficientnet_lite3(use_pretrained, exportable=False):
89
+ efficientnet = torch.hub.load(
90
+ "rwightman/gen-efficientnet-pytorch",
91
+ "tf_efficientnet_lite3",
92
+ pretrained=use_pretrained,
93
+ exportable=exportable
94
+ )
95
+ return _make_efficientnet_backbone(efficientnet)
96
+
97
+
98
+ def _make_efficientnet_backbone(effnet):
99
+ pretrained = nn.Module()
100
+
101
+ pretrained.layer1 = nn.Sequential(
102
+ effnet.conv_stem, effnet.bn1, effnet.act1, *effnet.blocks[0:2]
103
+ )
104
+ pretrained.layer2 = nn.Sequential(*effnet.blocks[2:3])
105
+ pretrained.layer3 = nn.Sequential(*effnet.blocks[3:5])
106
+ pretrained.layer4 = nn.Sequential(*effnet.blocks[5:9])
107
+
108
+ return pretrained
109
+
110
+
111
+ def _make_resnet_backbone(resnet):
112
+ pretrained = nn.Module()
113
+ pretrained.layer1 = nn.Sequential(
114
+ resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1
115
+ )
116
+
117
+ pretrained.layer2 = resnet.layer2
118
+ pretrained.layer3 = resnet.layer3
119
+ pretrained.layer4 = resnet.layer4
120
+
121
+ return pretrained
122
+
123
+
124
+ def _make_pretrained_resnext101_wsl(use_pretrained):
125
+ resnet = torch.hub.load("facebookresearch/WSL-Images", "resnext101_32x8d_wsl")
126
+ return _make_resnet_backbone(resnet)
127
+
128
+
129
+
130
+ class Interpolate(nn.Module):
131
+ """Interpolation module.
132
+ """
133
+
134
+ def __init__(self, scale_factor, mode, align_corners=False):
135
+ """Init.
136
+
137
+ Args:
138
+ scale_factor (float): scaling
139
+ mode (str): interpolation mode
140
+ """
141
+ super(Interpolate, self).__init__()
142
+
143
+ self.interp = nn.functional.interpolate
144
+ self.scale_factor = scale_factor
145
+ self.mode = mode
146
+ self.align_corners = align_corners
147
+
148
+ def forward(self, x):
149
+ """Forward pass.
150
+
151
+ Args:
152
+ x (tensor): input
153
+
154
+ Returns:
155
+ tensor: interpolated data
156
+ """
157
+
158
+ x = self.interp(
159
+ x, scale_factor=self.scale_factor, mode=self.mode, align_corners=self.align_corners
160
+ )
161
+
162
+ return x
163
+
164
+
165
+ class ResidualConvUnit(nn.Module):
166
+ """Residual convolution module.
167
+ """
168
+
169
+ def __init__(self, features):
170
+ """Init.
171
+
172
+ Args:
173
+ features (int): number of features
174
+ """
175
+ super().__init__()
176
+
177
+ self.conv1 = nn.Conv2d(
178
+ features, features, kernel_size=3, stride=1, padding=1, bias=True
179
+ )
180
+
181
+ self.conv2 = nn.Conv2d(
182
+ features, features, kernel_size=3, stride=1, padding=1, bias=True
183
+ )
184
+
185
+ self.relu = nn.ReLU(inplace=True)
186
+
187
+ def forward(self, x):
188
+ """Forward pass.
189
+
190
+ Args:
191
+ x (tensor): input
192
+
193
+ Returns:
194
+ tensor: output
195
+ """
196
+ out = self.relu(x)
197
+ out = self.conv1(out)
198
+ out = self.relu(out)
199
+ out = self.conv2(out)
200
+
201
+ return out + x
202
+
203
+
204
+ class FeatureFusionBlock(nn.Module):
205
+ """Feature fusion block.
206
+ """
207
+
208
+ def __init__(self, features):
209
+ """Init.
210
+
211
+ Args:
212
+ features (int): number of features
213
+ """
214
+ super(FeatureFusionBlock, self).__init__()
215
+
216
+ self.resConfUnit1 = ResidualConvUnit(features)
217
+ self.resConfUnit2 = ResidualConvUnit(features)
218
+
219
+ def forward(self, *xs):
220
+ """Forward pass.
221
+
222
+ Returns:
223
+ tensor: output
224
+ """
225
+ output = xs[0]
226
+
227
+ if len(xs) == 2:
228
+ output += self.resConfUnit1(xs[1])
229
+
230
+ output = self.resConfUnit2(output)
231
+
232
+ output = nn.functional.interpolate(
233
+ output, scale_factor=2, mode="bilinear", align_corners=True
234
+ )
235
+
236
+ return output
237
+
238
+
239
+
240
+
241
+ class ResidualConvUnit_custom(nn.Module):
242
+ """Residual convolution module.
243
+ """
244
+
245
+ def __init__(self, features, activation, bn):
246
+ """Init.
247
+
248
+ Args:
249
+ features (int): number of features
250
+ """
251
+ super().__init__()
252
+
253
+ self.bn = bn
254
+
255
+ self.groups=1
256
+
257
+ self.conv1 = nn.Conv2d(
258
+ features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups
259
+ )
260
+
261
+ self.conv2 = nn.Conv2d(
262
+ features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups
263
+ )
264
+
265
+ if self.bn==True:
266
+ self.bn1 = nn.BatchNorm2d(features)
267
+ self.bn2 = nn.BatchNorm2d(features)
268
+
269
+ self.activation = activation
270
+
271
+ self.skip_add = nn.quantized.FloatFunctional()
272
+
273
+ def forward(self, x):
274
+ """Forward pass.
275
+
276
+ Args:
277
+ x (tensor): input
278
+
279
+ Returns:
280
+ tensor: output
281
+ """
282
+
283
+ out = self.activation(x)
284
+ out = self.conv1(out)
285
+ if self.bn==True:
286
+ out = self.bn1(out)
287
+
288
+ out = self.activation(out)
289
+ out = self.conv2(out)
290
+ if self.bn==True:
291
+ out = self.bn2(out)
292
+
293
+ if self.groups > 1:
294
+ out = self.conv_merge(out)
295
+
296
+ return self.skip_add.add(out, x)
297
+
298
+ # return out + x
299
+
300
+
301
+ class FeatureFusionBlock_custom(nn.Module):
302
+ """Feature fusion block.
303
+ """
304
+
305
+ def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True):
306
+ """Init.
307
+
308
+ Args:
309
+ features (int): number of features
310
+ """
311
+ super(FeatureFusionBlock_custom, self).__init__()
312
+
313
+ self.deconv = deconv
314
+ self.align_corners = align_corners
315
+
316
+ self.groups=1
317
+
318
+ self.expand = expand
319
+ out_features = features
320
+ if self.expand==True:
321
+ out_features = features//2
322
+
323
+ self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1)
324
+
325
+ self.resConfUnit1 = ResidualConvUnit_custom(features, activation, bn)
326
+ self.resConfUnit2 = ResidualConvUnit_custom(features, activation, bn)
327
+
328
+ self.skip_add = nn.quantized.FloatFunctional()
329
+
330
+ def forward(self, *xs):
331
+ """Forward pass.
332
+
333
+ Returns:
334
+ tensor: output
335
+ """
336
+ output = xs[0]
337
+
338
+ if len(xs) == 2:
339
+ res = self.resConfUnit1(xs[1])
340
+ output = self.skip_add.add(output, res)
341
+ # output += res
342
+
343
+ output = self.resConfUnit2(output)
344
+
345
+ output = nn.functional.interpolate(
346
+ output, scale_factor=2, mode="bilinear", align_corners=self.align_corners
347
+ )
348
+
349
+ output = self.out_conv(output)
350
+
351
+ return output
352
+
annotator/midas/midas/dpt_depth.py ADDED
@@ -0,0 +1,119 @@
1
+ '''
2
+ * Copyright (c) 2023 Salesforce, Inc.
3
+ * All rights reserved.
4
+ * SPDX-License-Identifier: Apache License 2.0
5
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
6
+ * By Can Qin
7
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
8
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
9
+ '''
10
+
11
+ import torch
12
+ import torch.nn as nn
13
+ import torch.nn.functional as F
14
+
15
+ from .base_model import BaseModel
16
+ from .blocks import (
17
+ FeatureFusionBlock,
18
+ FeatureFusionBlock_custom,
19
+ Interpolate,
20
+ _make_encoder,
21
+ forward_vit,
22
+ )
23
+
24
+
25
+ def _make_fusion_block(features, use_bn):
26
+ return FeatureFusionBlock_custom(
27
+ features,
28
+ nn.ReLU(False),
29
+ deconv=False,
30
+ bn=use_bn,
31
+ expand=False,
32
+ align_corners=True,
33
+ )
34
+
35
+
36
+ class DPT(BaseModel):
37
+ def __init__(
38
+ self,
39
+ head,
40
+ features=256,
41
+ backbone="vitb_rn50_384",
42
+ readout="project",
43
+ channels_last=False,
44
+ use_bn=False,
45
+ ):
46
+
47
+ super(DPT, self).__init__()
48
+
49
+ self.channels_last = channels_last
50
+
51
+ hooks = {
52
+ "vitb_rn50_384": [0, 1, 8, 11],
53
+ "vitb16_384": [2, 5, 8, 11],
54
+ "vitl16_384": [5, 11, 17, 23],
55
+ }
56
+
57
+ # Instantiate backbone and reassemble blocks
58
+ self.pretrained, self.scratch = _make_encoder(
59
+ backbone,
60
+ features,
61
+ False, # Set to true of you want to train from scratch, uses ImageNet weights
62
+ groups=1,
63
+ expand=False,
64
+ exportable=False,
65
+ hooks=hooks[backbone],
66
+ use_readout=readout,
67
+ )
68
+
69
+ self.scratch.refinenet1 = _make_fusion_block(features, use_bn)
70
+ self.scratch.refinenet2 = _make_fusion_block(features, use_bn)
71
+ self.scratch.refinenet3 = _make_fusion_block(features, use_bn)
72
+ self.scratch.refinenet4 = _make_fusion_block(features, use_bn)
73
+
74
+ self.scratch.output_conv = head
75
+
76
+
77
+ def forward(self, x):
78
+ if self.channels_last == True:
79
+ x.contiguous(memory_format=torch.channels_last)
80
+
81
+ layer_1, layer_2, layer_3, layer_4 = forward_vit(self.pretrained, x)
82
+
83
+ layer_1_rn = self.scratch.layer1_rn(layer_1)
84
+ layer_2_rn = self.scratch.layer2_rn(layer_2)
85
+ layer_3_rn = self.scratch.layer3_rn(layer_3)
86
+ layer_4_rn = self.scratch.layer4_rn(layer_4)
87
+
88
+ path_4 = self.scratch.refinenet4(layer_4_rn)
89
+ path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
90
+ path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
91
+ path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
92
+
93
+ out = self.scratch.output_conv(path_1)
94
+
95
+ return out
96
+
97
+
98
+ class DPTDepthModel(DPT):
99
+ def __init__(self, path=None, non_negative=True, **kwargs):
100
+ features = kwargs["features"] if "features" in kwargs else 256
101
+
102
+ head = nn.Sequential(
103
+ nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1),
104
+ Interpolate(scale_factor=2, mode="bilinear", align_corners=True),
105
+ nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1),
106
+ nn.ReLU(True),
107
+ nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
108
+ nn.ReLU(True) if non_negative else nn.Identity(),
109
+ nn.Identity(),
110
+ )
111
+
112
+ super().__init__(head, **kwargs)
113
+
114
+ if path is not None:
115
+ self.load(path)
116
+
117
+ def forward(self, x):
118
+ return super().forward(x).squeeze(dim=1)
119
+
annotator/midas/midas/midas_net.py ADDED
@@ -0,0 +1,86 @@
1
+ '''
2
+ * Copyright (c) 2023 Salesforce, Inc.
3
+ * All rights reserved.
4
+ * SPDX-License-Identifier: Apache License 2.0
5
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
6
+ * By Can Qin
7
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
8
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
9
+ '''
10
+
11
+ """MidashNet: Network for monocular depth estimation trained by mixing several datasets.
12
+ This file contains code that is adapted from
13
+ https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py
14
+ """
15
+ import torch
16
+ import torch.nn as nn
17
+
18
+ from .base_model import BaseModel
19
+ from .blocks import FeatureFusionBlock, Interpolate, _make_encoder
20
+
21
+
22
+ class MidasNet(BaseModel):
23
+ """Network for monocular depth estimation.
24
+ """
25
+
26
+ def __init__(self, path=None, features=256, non_negative=True):
27
+ """Init.
28
+
29
+ Args:
30
+ path (str, optional): Path to saved model. Defaults to None.
31
+ features (int, optional): Number of features. Defaults to 256.
32
+ backbone (str, optional): Backbone network for encoder. Defaults to resnet50
33
+ """
34
+ print("Loading weights: ", path)
35
+
36
+ super(MidasNet, self).__init__()
37
+
38
+ use_pretrained = False if path is None else True
39
+
40
+ self.pretrained, self.scratch = _make_encoder(backbone="resnext101_wsl", features=features, use_pretrained=use_pretrained)
41
+
42
+ self.scratch.refinenet4 = FeatureFusionBlock(features)
43
+ self.scratch.refinenet3 = FeatureFusionBlock(features)
44
+ self.scratch.refinenet2 = FeatureFusionBlock(features)
45
+ self.scratch.refinenet1 = FeatureFusionBlock(features)
46
+
47
+ self.scratch.output_conv = nn.Sequential(
48
+ nn.Conv2d(features, 128, kernel_size=3, stride=1, padding=1),
49
+ Interpolate(scale_factor=2, mode="bilinear"),
50
+ nn.Conv2d(128, 32, kernel_size=3, stride=1, padding=1),
51
+ nn.ReLU(True),
52
+ nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
53
+ nn.ReLU(True) if non_negative else nn.Identity(),
54
+ )
55
+
56
+ if path:
57
+ self.load(path)
58
+
59
+ def forward(self, x):
60
+ """Forward pass.
61
+
62
+ Args:
63
+ x (tensor): input data (image)
64
+
65
+ Returns:
66
+ tensor: depth
67
+ """
68
+
69
+ layer_1 = self.pretrained.layer1(x)
70
+ layer_2 = self.pretrained.layer2(layer_1)
71
+ layer_3 = self.pretrained.layer3(layer_2)
72
+ layer_4 = self.pretrained.layer4(layer_3)
73
+
74
+ layer_1_rn = self.scratch.layer1_rn(layer_1)
75
+ layer_2_rn = self.scratch.layer2_rn(layer_2)
76
+ layer_3_rn = self.scratch.layer3_rn(layer_3)
77
+ layer_4_rn = self.scratch.layer4_rn(layer_4)
78
+
79
+ path_4 = self.scratch.refinenet4(layer_4_rn)
80
+ path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
81
+ path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
82
+ path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
83
+
84
+ out = self.scratch.output_conv(path_1)
85
+
86
+ return torch.squeeze(out, dim=1)
annotator/midas/midas/midas_net_custom.py ADDED
@@ -0,0 +1,138 @@
1
+ '''
2
+ * Copyright (c) 2023 Salesforce, Inc.
3
+ * All rights reserved.
4
+ * SPDX-License-Identifier: Apache License 2.0
5
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
6
+ * By Can Qin
7
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
8
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
9
+ '''
10
+
11
+ """MidashNet: Network for monocular depth estimation trained by mixing several datasets.
12
+ This file contains code that is adapted from
13
+ https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py
14
+ """
15
+ import torch
16
+ import torch.nn as nn
17
+
18
+ from .base_model import BaseModel
19
+ from .blocks import FeatureFusionBlock, FeatureFusionBlock_custom, Interpolate, _make_encoder
20
+
21
+
22
+ class MidasNet_small(BaseModel):
23
+ """Network for monocular depth estimation.
24
+ """
25
+
26
+ def __init__(self, path=None, features=64, backbone="efficientnet_lite3", non_negative=True, exportable=True, channels_last=False, align_corners=True,
27
+ blocks={'expand': True}):
28
+ """Init.
29
+
30
+ Args:
31
+ path (str, optional): Path to saved model. Defaults to None.
32
+ features (int, optional): Number of features. Defaults to 256.
33
+ backbone (str, optional): Backbone network for encoder. Defaults to resnet50
34
+ """
35
+ print("Loading weights: ", path)
36
+
37
+ super(MidasNet_small, self).__init__()
38
+
39
+ use_pretrained = False if path else True
40
+
41
+ self.channels_last = channels_last
42
+ self.blocks = blocks
43
+ self.backbone = backbone
44
+
45
+ self.groups = 1
46
+
47
+ features1=features
48
+ features2=features
49
+ features3=features
50
+ features4=features
51
+ self.expand = False
52
+ if "expand" in self.blocks and self.blocks['expand'] == True:
53
+ self.expand = True
54
+ features1=features
55
+ features2=features*2
56
+ features3=features*4
57
+ features4=features*8
58
+
59
+ self.pretrained, self.scratch = _make_encoder(self.backbone, features, use_pretrained, groups=self.groups, expand=self.expand, exportable=exportable)
60
+
61
+ self.scratch.activation = nn.ReLU(False)
62
+
63
+ self.scratch.refinenet4 = FeatureFusionBlock_custom(features4, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
64
+ self.scratch.refinenet3 = FeatureFusionBlock_custom(features3, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
65
+ self.scratch.refinenet2 = FeatureFusionBlock_custom(features2, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
66
+ self.scratch.refinenet1 = FeatureFusionBlock_custom(features1, self.scratch.activation, deconv=False, bn=False, align_corners=align_corners)
67
+
68
+
69
+ self.scratch.output_conv = nn.Sequential(
70
+ nn.Conv2d(features, features//2, kernel_size=3, stride=1, padding=1, groups=self.groups),
71
+ Interpolate(scale_factor=2, mode="bilinear"),
72
+ nn.Conv2d(features//2, 32, kernel_size=3, stride=1, padding=1),
73
+ self.scratch.activation,
74
+ nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
75
+ nn.ReLU(True) if non_negative else nn.Identity(),
76
+ nn.Identity(),
77
+ )
78
+
79
+ if path:
80
+ self.load(path)
81
+
82
+
83
+ def forward(self, x):
84
+ """Forward pass.
85
+
86
+ Args:
87
+ x (tensor): input data (image)
88
+
89
+ Returns:
90
+ tensor: depth
91
+ """
92
+ if self.channels_last==True:
93
+ print("self.channels_last = ", self.channels_last)
94
+ x.contiguous(memory_format=torch.channels_last)
95
+
96
+
97
+ layer_1 = self.pretrained.layer1(x)
98
+ layer_2 = self.pretrained.layer2(layer_1)
99
+ layer_3 = self.pretrained.layer3(layer_2)
100
+ layer_4 = self.pretrained.layer4(layer_3)
101
+
102
+ layer_1_rn = self.scratch.layer1_rn(layer_1)
103
+ layer_2_rn = self.scratch.layer2_rn(layer_2)
104
+ layer_3_rn = self.scratch.layer3_rn(layer_3)
105
+ layer_4_rn = self.scratch.layer4_rn(layer_4)
106
+
107
+
108
+ path_4 = self.scratch.refinenet4(layer_4_rn)
109
+ path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
110
+ path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
111
+ path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
112
+
113
+ out = self.scratch.output_conv(path_1)
114
+
115
+ return torch.squeeze(out, dim=1)
116
+
117
+
118
+
119
+ def fuse_model(m):
120
+ prev_previous_type = nn.Identity()
121
+ prev_previous_name = ''
122
+ previous_type = nn.Identity()
123
+ previous_name = ''
124
+ for name, module in m.named_modules():
125
+ if prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d and type(module) == nn.ReLU:
126
+ # print("FUSED ", prev_previous_name, previous_name, name)
127
+ torch.quantization.fuse_modules(m, [prev_previous_name, previous_name, name], inplace=True)
128
+ elif prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d:
129
+ # print("FUSED ", prev_previous_name, previous_name)
130
+ torch.quantization.fuse_modules(m, [prev_previous_name, previous_name], inplace=True)
131
+ # elif previous_type == nn.Conv2d and type(module) == nn.ReLU:
132
+ # print("FUSED ", previous_name, name)
133
+ # torch.quantization.fuse_modules(m, [previous_name, name], inplace=True)
134
+
135
+ prev_previous_type = previous_type
136
+ prev_previous_name = previous_name
137
+ previous_type = type(module)
138
+ previous_name = name
annotator/midas/midas/transforms.py ADDED
@@ -0,0 +1,244 @@
1
+ '''
2
+ * Copyright (c) 2023 Salesforce, Inc.
3
+ * All rights reserved.
4
+ * SPDX-License-Identifier: Apache License 2.0
5
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
6
+ * By Can Qin
7
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
8
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
9
+ '''
10
+
11
+ import numpy as np
12
+ import cv2
13
+ import math
14
+
15
+
16
+ def apply_min_size(sample, size, image_interpolation_method=cv2.INTER_AREA):
17
+ """Rezise the sample to ensure the given size. Keeps aspect ratio.
18
+
19
+ Args:
20
+ sample (dict): sample
21
+ size (tuple): image size
22
+
23
+ Returns:
24
+ tuple: new size
25
+ """
26
+ shape = list(sample["disparity"].shape)
27
+
28
+ if shape[0] >= size[0] and shape[1] >= size[1]:
29
+ return sample
30
+
31
+ scale = [0, 0]
32
+ scale[0] = size[0] / shape[0]
33
+ scale[1] = size[1] / shape[1]
34
+
35
+ scale = max(scale)
36
+
37
+ shape[0] = math.ceil(scale * shape[0])
38
+ shape[1] = math.ceil(scale * shape[1])
39
+
40
+ # resize
41
+ sample["image"] = cv2.resize(
42
+ sample["image"], tuple(shape[::-1]), interpolation=image_interpolation_method
43
+ )
44
+
45
+ sample["disparity"] = cv2.resize(
46
+ sample["disparity"], tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST
47
+ )
48
+ sample["mask"] = cv2.resize(
49
+ sample["mask"].astype(np.float32),
50
+ tuple(shape[::-1]),
51
+ interpolation=cv2.INTER_NEAREST,
52
+ )
53
+ sample["mask"] = sample["mask"].astype(bool)
54
+
55
+ return tuple(shape)
56
+
57
+
58
+ class Resize(object):
59
+ """Resize sample to given size (width, height).
60
+ """
61
+
62
+ def __init__(
63
+ self,
64
+ width,
65
+ height,
66
+ resize_target=True,
67
+ keep_aspect_ratio=False,
68
+ ensure_multiple_of=1,
69
+ resize_method="lower_bound",
70
+ image_interpolation_method=cv2.INTER_AREA,
71
+ ):
72
+ """Init.
73
+
74
+ Args:
75
+ width (int): desired output width
76
+ height (int): desired output height
77
+ resize_target (bool, optional):
78
+ True: Resize the full sample (image, mask, target).
79
+ False: Resize image only.
80
+ Defaults to True.
81
+ keep_aspect_ratio (bool, optional):
82
+ True: Keep the aspect ratio of the input sample.
83
+ Output sample might not have the given width and height, and
84
+ resize behaviour depends on the parameter 'resize_method'.
85
+ Defaults to False.
86
+ ensure_multiple_of (int, optional):
87
+ Output width and height is constrained to be multiple of this parameter.
88
+ Defaults to 1.
89
+ resize_method (str, optional):
90
+ "lower_bound": Output will be at least as large as the given size.
91
+ "upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.)
92
+ "minimal": Scale as least as possible. (Output size might be smaller than given size.)
93
+ Defaults to "lower_bound".
94
+ """
95
+ self.__width = width
96
+ self.__height = height
97
+
98
+ self.__resize_target = resize_target
99
+ self.__keep_aspect_ratio = keep_aspect_ratio
100
+ self.__multiple_of = ensure_multiple_of
101
+ self.__resize_method = resize_method
102
+ self.__image_interpolation_method = image_interpolation_method
103
+
104
+ def constrain_to_multiple_of(self, x, min_val=0, max_val=None):
105
+ y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)
106
+
107
+ if max_val is not None and y > max_val:
108
+ y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int)
109
+
110
+ if y < min_val:
111
+ y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int)
112
+
113
+ return y
114
+
115
+ def get_size(self, width, height):
116
+ # determine new height and width
117
+ scale_height = self.__height / height
118
+ scale_width = self.__width / width
119
+
120
+ if self.__keep_aspect_ratio:
121
+ if self.__resize_method == "lower_bound":
122
+ # scale such that output size is lower bound
123
+ if scale_width > scale_height:
124
+ # fit width
125
+ scale_height = scale_width
126
+ else:
127
+ # fit height
128
+ scale_width = scale_height
129
+ elif self.__resize_method == "upper_bound":
130
+ # scale such that output size is upper bound
131
+ if scale_width < scale_height:
132
+ # fit width
133
+ scale_height = scale_width
134
+ else:
135
+ # fit height
136
+ scale_width = scale_height
137
+ elif self.__resize_method == "minimal":
138
+ # scale as least as possbile
139
+ if abs(1 - scale_width) < abs(1 - scale_height):
140
+ # fit width
141
+ scale_height = scale_width
142
+ else:
143
+ # fit height
144
+ scale_width = scale_height
145
+ else:
146
+ raise ValueError(
147
+ f"resize_method {self.__resize_method} not implemented"
148
+ )
149
+
150
+ if self.__resize_method == "lower_bound":
151
+ new_height = self.constrain_to_multiple_of(
152
+ scale_height * height, min_val=self.__height
153
+ )
154
+ new_width = self.constrain_to_multiple_of(
155
+ scale_width * width, min_val=self.__width
156
+ )
157
+ elif self.__resize_method == "upper_bound":
158
+ new_height = self.constrain_to_multiple_of(
159
+ scale_height * height, max_val=self.__height
160
+ )
161
+ new_width = self.constrain_to_multiple_of(
162
+ scale_width * width, max_val=self.__width
163
+ )
164
+ elif self.__resize_method == "minimal":
165
+ new_height = self.constrain_to_multiple_of(scale_height * height)
166
+ new_width = self.constrain_to_multiple_of(scale_width * width)
167
+ else:
168
+ raise ValueError(f"resize_method {self.__resize_method} not implemented")
169
+
170
+ return (new_width, new_height)
171
+
172
+ def __call__(self, sample):
173
+ width, height = self.get_size(
174
+ sample["image"].shape[1], sample["image"].shape[0]
175
+ )
176
+
177
+ # resize sample
178
+ sample["image"] = cv2.resize(
179
+ sample["image"],
180
+ (width, height),
181
+ interpolation=self.__image_interpolation_method,
182
+ )
183
+
184
+ if self.__resize_target:
185
+ if "disparity" in sample:
186
+ sample["disparity"] = cv2.resize(
187
+ sample["disparity"],
188
+ (width, height),
189
+ interpolation=cv2.INTER_NEAREST,
190
+ )
191
+
192
+ if "depth" in sample:
193
+ sample["depth"] = cv2.resize(
194
+ sample["depth"], (width, height), interpolation=cv2.INTER_NEAREST
195
+ )
196
+
197
+ sample["mask"] = cv2.resize(
198
+ sample["mask"].astype(np.float32),
199
+ (width, height),
200
+ interpolation=cv2.INTER_NEAREST,
201
+ )
202
+ sample["mask"] = sample["mask"].astype(bool)
203
+
204
+ return sample
205
+
206
+
207
+ class NormalizeImage(object):
208
+ """Normlize image by given mean and std.
209
+ """
210
+
211
+ def __init__(self, mean, std):
212
+ self.__mean = mean
213
+ self.__std = std
214
+
215
+ def __call__(self, sample):
216
+ sample["image"] = (sample["image"] - self.__mean) / self.__std
217
+
218
+ return sample
219
+
220
+
221
+ class PrepareForNet(object):
222
+ """Prepare sample for usage as network input.
223
+ """
224
+
225
+ def __init__(self):
226
+ pass
227
+
228
+ def __call__(self, sample):
229
+ image = np.transpose(sample["image"], (2, 0, 1))
230
+ sample["image"] = np.ascontiguousarray(image).astype(np.float32)
231
+
232
+ if "mask" in sample:
233
+ sample["mask"] = sample["mask"].astype(np.float32)
234
+ sample["mask"] = np.ascontiguousarray(sample["mask"])
235
+
236
+ if "disparity" in sample:
237
+ disparity = sample["disparity"].astype(np.float32)
238
+ sample["disparity"] = np.ascontiguousarray(disparity)
239
+
240
+ if "depth" in sample:
241
+ depth = sample["depth"].astype(np.float32)
242
+ sample["depth"] = np.ascontiguousarray(depth)
243
+
244
+ return sample
annotator/midas/midas/vit.py ADDED
@@ -0,0 +1,501 @@
1
+ '''
2
+ * Copyright (c) 2023 Salesforce, Inc.
3
+ * All rights reserved.
4
+ * SPDX-License-Identifier: Apache License 2.0
5
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
6
+ * By Can Qin
7
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
8
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
9
+ '''
10
+
11
+ import torch
12
+ import torch.nn as nn
13
+ import timm
14
+ import types
15
+ import math
16
+ import torch.nn.functional as F
17
+
18
+
19
+ class Slice(nn.Module):
20
+ def __init__(self, start_index=1):
21
+ super(Slice, self).__init__()
22
+ self.start_index = start_index
23
+
24
+ def forward(self, x):
25
+ return x[:, self.start_index :]
26
+
27
+
28
+ class AddReadout(nn.Module):
29
+ def __init__(self, start_index=1):
30
+ super(AddReadout, self).__init__()
31
+ self.start_index = start_index
32
+
33
+ def forward(self, x):
34
+ if self.start_index == 2:
35
+ readout = (x[:, 0] + x[:, 1]) / 2
36
+ else:
37
+ readout = x[:, 0]
38
+ return x[:, self.start_index :] + readout.unsqueeze(1)
39
+
40
+
41
+ class ProjectReadout(nn.Module):
42
+ def __init__(self, in_features, start_index=1):
43
+ super(ProjectReadout, self).__init__()
44
+ self.start_index = start_index
45
+
46
+ self.project = nn.Sequential(nn.Linear(2 * in_features, in_features), nn.GELU())
47
+
48
+ def forward(self, x):
49
+ readout = x[:, 0].unsqueeze(1).expand_as(x[:, self.start_index :])
50
+ features = torch.cat((x[:, self.start_index :], readout), -1)
51
+
52
+ return self.project(features)
53
+
54
+
55
+ class Transpose(nn.Module):
56
+ def __init__(self, dim0, dim1):
57
+ super(Transpose, self).__init__()
58
+ self.dim0 = dim0
59
+ self.dim1 = dim1
60
+
61
+ def forward(self, x):
62
+ x = x.transpose(self.dim0, self.dim1)
63
+ return x
64
+
65
+
66
+ def forward_vit(pretrained, x):
67
+ b, c, h, w = x.shape
68
+
69
+ glob = pretrained.model.forward_flex(x)
70
+
71
+ layer_1 = pretrained.activations["1"]
72
+ layer_2 = pretrained.activations["2"]
73
+ layer_3 = pretrained.activations["3"]
74
+ layer_4 = pretrained.activations["4"]
75
+
76
+ layer_1 = pretrained.act_postprocess1[0:2](layer_1)
77
+ layer_2 = pretrained.act_postprocess2[0:2](layer_2)
78
+ layer_3 = pretrained.act_postprocess3[0:2](layer_3)
79
+ layer_4 = pretrained.act_postprocess4[0:2](layer_4)
80
+
81
+ unflatten = nn.Sequential(
82
+ nn.Unflatten(
83
+ 2,
84
+ torch.Size(
85
+ [
86
+ h // pretrained.model.patch_size[1],
87
+ w // pretrained.model.patch_size[0],
88
+ ]
89
+ ),
90
+ )
91
+ )
92
+
93
+ if layer_1.ndim == 3:
94
+ layer_1 = unflatten(layer_1)
95
+ if layer_2.ndim == 3:
96
+ layer_2 = unflatten(layer_2)
97
+ if layer_3.ndim == 3:
98
+ layer_3 = unflatten(layer_3)
99
+ if layer_4.ndim == 3:
100
+ layer_4 = unflatten(layer_4)
101
+
102
+ layer_1 = pretrained.act_postprocess1[3 : len(pretrained.act_postprocess1)](layer_1)
103
+ layer_2 = pretrained.act_postprocess2[3 : len(pretrained.act_postprocess2)](layer_2)
104
+ layer_3 = pretrained.act_postprocess3[3 : len(pretrained.act_postprocess3)](layer_3)
105
+ layer_4 = pretrained.act_postprocess4[3 : len(pretrained.act_postprocess4)](layer_4)
106
+
107
+ return layer_1, layer_2, layer_3, layer_4
108
+
109
+
110
+ def _resize_pos_embed(self, posemb, gs_h, gs_w):
111
+ posemb_tok, posemb_grid = (
112
+ posemb[:, : self.start_index],
113
+ posemb[0, self.start_index :],
114
+ )
115
+
116
+ gs_old = int(math.sqrt(len(posemb_grid)))
117
+
118
+ posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
119
+ posemb_grid = F.interpolate(posemb_grid, size=(gs_h, gs_w), mode="bilinear")
120
+ posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_h * gs_w, -1)
121
+
122
+ posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
123
+
124
+ return posemb
125
+
126
+
127
+ def forward_flex(self, x):
128
+ b, c, h, w = x.shape
129
+
130
+ pos_embed = self._resize_pos_embed(
131
+ self.pos_embed, h // self.patch_size[1], w // self.patch_size[0]
132
+ )
133
+
134
+ B = x.shape[0]
135
+
136
+ if hasattr(self.patch_embed, "backbone"):
137
+ x = self.patch_embed.backbone(x)
138
+ if isinstance(x, (list, tuple)):
139
+ x = x[-1] # last feature if backbone outputs list/tuple of features
140
+
141
+ x = self.patch_embed.proj(x).flatten(2).transpose(1, 2)
142
+
143
+ if getattr(self, "dist_token", None) is not None:
144
+ cls_tokens = self.cls_token.expand(
145
+ B, -1, -1
146
+ ) # stole cls_tokens impl from Phil Wang, thanks
147
+ dist_token = self.dist_token.expand(B, -1, -1)
148
+ x = torch.cat((cls_tokens, dist_token, x), dim=1)
149
+ else:
150
+ cls_tokens = self.cls_token.expand(
151
+ B, -1, -1
152
+ ) # stole cls_tokens impl from Phil Wang, thanks
153
+ x = torch.cat((cls_tokens, x), dim=1)
154
+
155
+ x = x + pos_embed
156
+ x = self.pos_drop(x)
157
+
158
+ for blk in self.blocks:
159
+ x = blk(x)
160
+
161
+ x = self.norm(x)
162
+
163
+ return x
164
+
165
+
166
+ activations = {}
167
+
168
+
169
+ def get_activation(name):
170
+ def hook(model, input, output):
171
+ activations[name] = output
172
+
173
+ return hook
174
+
175
+
176
+ def get_readout_oper(vit_features, features, use_readout, start_index=1):
177
+ if use_readout == "ignore":
178
+ readout_oper = [Slice(start_index)] * len(features)
179
+ elif use_readout == "add":
180
+ readout_oper = [AddReadout(start_index)] * len(features)
181
+ elif use_readout == "project":
182
+ readout_oper = [
183
+ ProjectReadout(vit_features, start_index) for out_feat in features
184
+ ]
185
+ else:
186
+ assert (
187
+ False
188
+ ), "wrong operation for readout token, use_readout can be 'ignore', 'add', or 'project'"
189
+
190
+ return readout_oper
191
+
192
+
193
+ def _make_vit_b16_backbone(
194
+ model,
195
+ features=[96, 192, 384, 768],
196
+ size=[384, 384],
197
+ hooks=[2, 5, 8, 11],
198
+ vit_features=768,
199
+ use_readout="ignore",
200
+ start_index=1,
201
+ ):
202
+ pretrained = nn.Module()
203
+
204
+ pretrained.model = model
205
+ pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1"))
206
+ pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2"))
207
+ pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3"))
208
+ pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4"))
209
+
210
+ pretrained.activations = activations
211
+
212
+ readout_oper = get_readout_oper(vit_features, features, use_readout, start_index)
213
+
214
+ # 32, 48, 136, 384
215
+ pretrained.act_postprocess1 = nn.Sequential(
216
+ readout_oper[0],
217
+ Transpose(1, 2),
218
+ nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
219
+ nn.Conv2d(
220
+ in_channels=vit_features,
221
+ out_channels=features[0],
222
+ kernel_size=1,
223
+ stride=1,
224
+ padding=0,
225
+ ),
226
+ nn.ConvTranspose2d(
227
+ in_channels=features[0],
228
+ out_channels=features[0],
229
+ kernel_size=4,
230
+ stride=4,
231
+ padding=0,
232
+ bias=True,
233
+ dilation=1,
234
+ groups=1,
235
+ ),
236
+ )
237
+
238
+ pretrained.act_postprocess2 = nn.Sequential(
239
+ readout_oper[1],
240
+ Transpose(1, 2),
241
+ nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
242
+ nn.Conv2d(
243
+ in_channels=vit_features,
244
+ out_channels=features[1],
245
+ kernel_size=1,
246
+ stride=1,
247
+ padding=0,
248
+ ),
249
+ nn.ConvTranspose2d(
250
+ in_channels=features[1],
251
+ out_channels=features[1],
252
+ kernel_size=2,
253
+ stride=2,
254
+ padding=0,
255
+ bias=True,
256
+ dilation=1,
257
+ groups=1,
258
+ ),
259
+ )
260
+
261
+ pretrained.act_postprocess3 = nn.Sequential(
262
+ readout_oper[2],
263
+ Transpose(1, 2),
264
+ nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
265
+ nn.Conv2d(
266
+ in_channels=vit_features,
267
+ out_channels=features[2],
268
+ kernel_size=1,
269
+ stride=1,
270
+ padding=0,
271
+ ),
272
+ )
273
+
274
+ pretrained.act_postprocess4 = nn.Sequential(
275
+ readout_oper[3],
276
+ Transpose(1, 2),
277
+ nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
278
+ nn.Conv2d(
279
+ in_channels=vit_features,
280
+ out_channels=features[3],
281
+ kernel_size=1,
282
+ stride=1,
283
+ padding=0,
284
+ ),
285
+ nn.Conv2d(
286
+ in_channels=features[3],
287
+ out_channels=features[3],
288
+ kernel_size=3,
289
+ stride=2,
290
+ padding=1,
291
+ ),
292
+ )
293
+
294
+ pretrained.model.start_index = start_index
295
+ pretrained.model.patch_size = [16, 16]
296
+
297
+ # We inject this function into the VisionTransformer instances so that
298
+ # we can use it with interpolated position embeddings without modifying the library source.
299
+ pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model)
300
+ pretrained.model._resize_pos_embed = types.MethodType(
301
+ _resize_pos_embed, pretrained.model
302
+ )
303
+
304
+ return pretrained
305
+
306
+
307
+ def _make_pretrained_vitl16_384(pretrained, use_readout="ignore", hooks=None):
308
+ model = timm.create_model("vit_large_patch16_384", pretrained=pretrained)
309
+
310
+ hooks = [5, 11, 17, 23] if hooks is None else hooks
311
+ return _make_vit_b16_backbone(
312
+ model,
313
+ features=[256, 512, 1024, 1024],
314
+ hooks=hooks,
315
+ vit_features=1024,
316
+ use_readout=use_readout,
317
+ )
318
+
319
+
320
+ def _make_pretrained_vitb16_384(pretrained, use_readout="ignore", hooks=None):
321
+ model = timm.create_model("vit_base_patch16_384", pretrained=pretrained)
322
+
323
+ hooks = [2, 5, 8, 11] if hooks is None else hooks
324
+ return _make_vit_b16_backbone(
325
+ model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout
326
+ )
327
+
328
+
329
+ def _make_pretrained_deitb16_384(pretrained, use_readout="ignore", hooks=None):
330
+ model = timm.create_model("vit_deit_base_patch16_384", pretrained=pretrained)
331
+
332
+ hooks = [2, 5, 8, 11] if hooks is None else hooks
333
+ return _make_vit_b16_backbone(
334
+ model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout
335
+ )
336
+
337
+
338
+ def _make_pretrained_deitb16_distil_384(pretrained, use_readout="ignore", hooks=None):
339
+ model = timm.create_model(
340
+ "vit_deit_base_distilled_patch16_384", pretrained=pretrained
341
+ )
342
+
343
+ hooks = [2, 5, 8, 11] if hooks is None else hooks
344
+ return _make_vit_b16_backbone(
345
+ model,
346
+ features=[96, 192, 384, 768],
347
+ hooks=hooks,
348
+ use_readout=use_readout,
349
+ start_index=2,
350
+ )
351
+
352
+
353
+ def _make_vit_b_rn50_backbone(
354
+ model,
355
+ features=[256, 512, 768, 768],
356
+ size=[384, 384],
357
+ hooks=[0, 1, 8, 11],
358
+ vit_features=768,
359
+ use_vit_only=False,
360
+ use_readout="ignore",
361
+ start_index=1,
362
+ ):
363
+ pretrained = nn.Module()
364
+
365
+ pretrained.model = model
366
+
367
+ if use_vit_only:
368
+ pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1"))
369
+ pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2"))
370
+ else:
371
+ pretrained.model.patch_embed.backbone.stages[0].register_forward_hook(
372
+ get_activation("1")
373
+ )
374
+ pretrained.model.patch_embed.backbone.stages[1].register_forward_hook(
375
+ get_activation("2")
376
+ )
377
+
378
+ pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3"))
379
+ pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4"))
380
+
381
+ pretrained.activations = activations
382
+
383
+ readout_oper = get_readout_oper(vit_features, features, use_readout, start_index)
384
+
385
+ if use_vit_only:
386
+ pretrained.act_postprocess1 = nn.Sequential(
387
+ readout_oper[0],
388
+ Transpose(1, 2),
389
+ nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
390
+ nn.Conv2d(
391
+ in_channels=vit_features,
392
+ out_channels=features[0],
393
+ kernel_size=1,
394
+ stride=1,
395
+ padding=0,
396
+ ),
397
+ nn.ConvTranspose2d(
398
+ in_channels=features[0],
399
+ out_channels=features[0],
400
+ kernel_size=4,
401
+ stride=4,
402
+ padding=0,
403
+ bias=True,
404
+ dilation=1,
405
+ groups=1,
406
+ ),
407
+ )
408
+
409
+ pretrained.act_postprocess2 = nn.Sequential(
410
+ readout_oper[1],
411
+ Transpose(1, 2),
412
+ nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
413
+ nn.Conv2d(
414
+ in_channels=vit_features,
415
+ out_channels=features[1],
416
+ kernel_size=1,
417
+ stride=1,
418
+ padding=0,
419
+ ),
420
+ nn.ConvTranspose2d(
421
+ in_channels=features[1],
422
+ out_channels=features[1],
423
+ kernel_size=2,
424
+ stride=2,
425
+ padding=0,
426
+ bias=True,
427
+ dilation=1,
428
+ groups=1,
429
+ ),
430
+ )
431
+ else:
432
+ pretrained.act_postprocess1 = nn.Sequential(
433
+ nn.Identity(), nn.Identity(), nn.Identity()
434
+ )
435
+ pretrained.act_postprocess2 = nn.Sequential(
436
+ nn.Identity(), nn.Identity(), nn.Identity()
437
+ )
438
+
439
+ pretrained.act_postprocess3 = nn.Sequential(
440
+ readout_oper[2],
441
+ Transpose(1, 2),
442
+ nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
443
+ nn.Conv2d(
444
+ in_channels=vit_features,
445
+ out_channels=features[2],
446
+ kernel_size=1,
447
+ stride=1,
448
+ padding=0,
449
+ ),
450
+ )
451
+
452
+ pretrained.act_postprocess4 = nn.Sequential(
453
+ readout_oper[3],
454
+ Transpose(1, 2),
455
+ nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
456
+ nn.Conv2d(
457
+ in_channels=vit_features,
458
+ out_channels=features[3],
459
+ kernel_size=1,
460
+ stride=1,
461
+ padding=0,
462
+ ),
463
+ nn.Conv2d(
464
+ in_channels=features[3],
465
+ out_channels=features[3],
466
+ kernel_size=3,
467
+ stride=2,
468
+ padding=1,
469
+ ),
470
+ )
471
+
472
+ pretrained.model.start_index = start_index
473
+ pretrained.model.patch_size = [16, 16]
474
+
475
+ # We inject this function into the VisionTransformer instances so that
476
+ # we can use it with interpolated position embeddings without modifying the library source.
477
+ pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model)
478
+
479
+ # We inject this function into the VisionTransformer instances so that
480
+ # we can use it with interpolated position embeddings without modifying the library source.
481
+ pretrained.model._resize_pos_embed = types.MethodType(
482
+ _resize_pos_embed, pretrained.model
483
+ )
484
+
485
+ return pretrained
486
+
487
+
488
+ def _make_pretrained_vitb_rn50_384(
489
+ pretrained, use_readout="ignore", hooks=None, use_vit_only=False
490
+ ):
491
+ model = timm.create_model("vit_base_resnet50_384", pretrained=pretrained)
492
+
493
+ hooks = [0, 1, 8, 11] if hooks is None else hooks
494
+ return _make_vit_b_rn50_backbone(
495
+ model,
496
+ features=[256, 512, 768, 768],
497
+ size=[384, 384],
498
+ hooks=hooks,
499
+ use_vit_only=use_vit_only,
500
+ use_readout=use_readout,
501
+ )
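For reference, a minimal sketch of how the backbone builders above pair with forward_vit, assuming the timm release this repo targets still registers the "vit_base_resnet50_384" model name and that this file is importable as annotator.midas.midas.vit (both assumptions, not guaranteed by this commit):

    import torch
    from annotator.midas.midas.vit import _make_pretrained_vitb_rn50_384, forward_vit

    # Build the hybrid ResNet50+ViT backbone; pretrained=False avoids a weight download here.
    backbone = _make_pretrained_vitb_rn50_384(pretrained=False, use_readout="project")

    # forward_flex resizes the position embedding to the input resolution before running the blocks.
    x = torch.randn(1, 3, 384, 384)
    with torch.no_grad():
        layer_1, layer_2, layer_3, layer_4 = forward_vit(backbone, x)
    print([tuple(t.shape) for t in (layer_1, layer_2, layer_3, layer_4)])  # four multi-scale feature maps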
annotator/midas/utils.py ADDED
@@ -0,0 +1,199 @@
1
+ '''
2
+ * Copyright (c) 2023 Salesforce, Inc.
3
+ * All rights reserved.
4
+ * SPDX-License-Identifier: Apache License 2.0
5
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
6
+ * By Can Qin
7
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
8
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
9
+ '''
10
+
11
+ """Utils for monoDepth."""
12
+ import sys
13
+ import re
14
+ import numpy as np
15
+ import cv2
16
+ import torch
17
+
18
+
19
+ def read_pfm(path):
20
+ """Read pfm file.
21
+
22
+ Args:
23
+ path (str): path to file
24
+
25
+ Returns:
26
+ tuple: (data, scale)
27
+ """
28
+ with open(path, "rb") as file:
29
+
30
+ color = None
31
+ width = None
32
+ height = None
33
+ scale = None
34
+ endian = None
35
+
36
+ header = file.readline().rstrip()
37
+ if header.decode("ascii") == "PF":
38
+ color = True
39
+ elif header.decode("ascii") == "Pf":
40
+ color = False
41
+ else:
42
+ raise Exception("Not a PFM file: " + path)
43
+
44
+ dim_match = re.match(r"^(\d+)\s(\d+)\s$", file.readline().decode("ascii"))
45
+ if dim_match:
46
+ width, height = list(map(int, dim_match.groups()))
47
+ else:
48
+ raise Exception("Malformed PFM header.")
49
+
50
+ scale = float(file.readline().decode("ascii").rstrip())
51
+ if scale < 0:
52
+ # little-endian
53
+ endian = "<"
54
+ scale = -scale
55
+ else:
56
+ # big-endian
57
+ endian = ">"
58
+
59
+ data = np.fromfile(file, endian + "f")
60
+ shape = (height, width, 3) if color else (height, width)
61
+
62
+ data = np.reshape(data, shape)
63
+ data = np.flipud(data)
64
+
65
+ return data, scale
66
+
67
+
68
+ def write_pfm(path, image, scale=1):
69
+ """Write pfm file.
70
+
71
+ Args:
72
+ path (str): path to file
73
+ image (array): data
74
+ scale (int, optional): Scale. Defaults to 1.
75
+ """
76
+
77
+ with open(path, "wb") as file:
78
+ color = None
79
+
80
+ if image.dtype.name != "float32":
81
+ raise Exception("Image dtype must be float32.")
82
+
83
+ image = np.flipud(image)
84
+
85
+ if len(image.shape) == 3 and image.shape[2] == 3: # color image
86
+ color = True
87
+ elif (
88
+ len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1
89
+ ): # greyscale
90
+ color = False
91
+ else:
92
+ raise Exception("Image must have H x W x 3, H x W x 1 or H x W dimensions.")
93
+
94
+ file.write("PF\n" if color else "Pf\n".encode())
95
+ file.write("%d %d\n".encode() % (image.shape[1], image.shape[0]))
96
+
97
+ endian = image.dtype.byteorder
98
+
99
+ if endian == "<" or endian == "=" and sys.byteorder == "little":
100
+ scale = -scale
101
+
102
+ file.write("%f\n".encode() % scale)
103
+
104
+ image.tofile(file)
105
+
106
+
107
+ def read_image(path):
108
+ """Read image and output RGB image (0-1).
109
+
110
+ Args:
111
+ path (str): path to file
112
+
113
+ Returns:
114
+ array: RGB image (0-1)
115
+ """
116
+ img = cv2.imread(path)
117
+
118
+ if img.ndim == 2:
119
+ img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
120
+
121
+ img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255.0
122
+
123
+ return img
124
+
125
+
126
+ def resize_image(img):
127
+ """Resize image and make it fit for network.
128
+
129
+ Args:
130
+ img (array): image
131
+
132
+ Returns:
133
+ tensor: data ready for network
134
+ """
135
+ height_orig = img.shape[0]
136
+ width_orig = img.shape[1]
137
+
138
+ if width_orig > height_orig:
139
+ scale = width_orig / 384
140
+ else:
141
+ scale = height_orig / 384
142
+
143
+ height = (np.ceil(height_orig / scale / 32) * 32).astype(int)
144
+ width = (np.ceil(width_orig / scale / 32) * 32).astype(int)
145
+
146
+ img_resized = cv2.resize(img, (width, height), interpolation=cv2.INTER_AREA)
147
+
148
+ img_resized = (
149
+ torch.from_numpy(np.transpose(img_resized, (2, 0, 1))).contiguous().float()
150
+ )
151
+ img_resized = img_resized.unsqueeze(0)
152
+
153
+ return img_resized
154
+
155
+
156
+ def resize_depth(depth, width, height):
157
+ """Resize depth map and bring to CPU (numpy).
158
+
159
+ Args:
160
+ depth (tensor): depth
161
+ width (int): image width
162
+ height (int): image height
163
+
164
+ Returns:
165
+ array: processed depth
166
+ """
167
+ depth = torch.squeeze(depth[0, :, :, :]).to("cpu")
168
+
169
+ depth_resized = cv2.resize(
170
+ depth.numpy(), (width, height), interpolation=cv2.INTER_CUBIC
171
+ )
172
+
173
+ return depth_resized
174
+
175
+ def write_depth(path, depth, bits=1):
176
+ """Write depth map to pfm and png file.
177
+
178
+ Args:
179
+ path (str): filepath without extension
180
+ depth (array): depth
181
+ """
182
+ write_pfm(path + ".pfm", depth.astype(np.float32))
183
+
184
+ depth_min = depth.min()
185
+ depth_max = depth.max()
186
+
187
+ max_val = (2**(8*bits))-1
188
+
189
+ if depth_max - depth_min > np.finfo("float").eps:
190
+ out = max_val * (depth - depth_min) / (depth_max - depth_min)
191
+ else:
192
+ out = np.zeros(depth.shape, dtype=depth.dtype)
193
+
194
+ if bits == 1:
195
+ cv2.imwrite(path + ".png", out.astype("uint8"))
196
+ elif bits == 2:
197
+ cv2.imwrite(path + ".png", out.astype("uint16"))
198
+
199
+ return
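For reference, a minimal sketch of the pipeline these helpers support, where `model` and "input.jpg" are placeholders (any MiDaS-style network that returns a 1xH'xW' depth prediction, and any image on disk):

    import torch
    from annotator.midas.utils import read_image, resize_image, resize_depth, write_depth

    img = read_image("input.jpg")         # RGB float array scaled to [0, 1]
    sample = resize_image(img)            # 1x3xH'xW' tensor; longer side ~384, both sides multiples of 32

    with torch.no_grad():
        prediction = model(sample)        # placeholder MiDaS-style model, output shape 1xH'xW'

    # resize_depth expects a 4-D tensor, so add a channel dim before resizing back to the source size.
    depth = resize_depth(prediction.unsqueeze(1), img.shape[1], img.shape[0])
    write_depth("output", depth, bits=2)  # writes output.pfm plus a 16-bit output.png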
annotator/mlsd/LICENSE ADDED
@@ -0,0 +1,201 @@
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "{}"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright 2021-present NAVER Corp.
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
annotator/mlsd/__init__.py ADDED
@@ -0,0 +1,53 @@
1
+ '''
2
+ * Copyright (c) 2023 Salesforce, Inc.
3
+ * All rights reserved.
4
+ * SPDX-License-Identifier: Apache License 2.0
5
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
6
+ * By Can Qin
7
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
8
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
9
+ '''
10
+
11
+ # MLSD Line Detection
12
+ # From https://github.com/navervision/mlsd
13
+ # Apache-2.0 license
14
+
15
+ import cv2
16
+ import numpy as np
17
+ import torch
18
+ import os
19
+
20
+ from einops import rearrange
21
+ from .models.mbv2_mlsd_tiny import MobileV2_MLSD_Tiny
22
+ from .models.mbv2_mlsd_large import MobileV2_MLSD_Large
23
+ from .utils import pred_lines
24
+
25
+ from annotator.util import annotator_ckpts_path
26
+
27
+
28
+ remote_model_path = "https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/mlsd_large_512_fp32.pth"
29
+
30
+
31
+ class MLSDdetector:
32
+ def __init__(self):
33
+ model_path = os.path.join(annotator_ckpts_path, "mlsd_large_512_fp32.pth")
34
+ if not os.path.exists(model_path):
35
+ from basicsr.utils.download_util import load_file_from_url
36
+ load_file_from_url(remote_model_path, model_dir=annotator_ckpts_path)
37
+ model = MobileV2_MLSD_Large()
38
+ model.load_state_dict(torch.load(model_path), strict=True)
39
+ self.model = model.cuda().eval()
40
+
41
+ def __call__(self, input_image, thr_v, thr_d):
42
+ assert input_image.ndim == 3
43
+ img = input_image
44
+ img_output = np.zeros_like(img)
45
+ try:
46
+ with torch.no_grad():
47
+ lines = pred_lines(img, self.model, [img.shape[0], img.shape[1]], thr_v, thr_d)
48
+ for line in lines:
49
+ x_start, y_start, x_end, y_end = [int(val) for val in line]
50
+ cv2.line(img_output, (x_start, y_start), (x_end, y_end), [255, 255, 255], 1)
51
+ except Exception:
52
+ pass
53
+ return img_output[:, :, 0]
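For reference, a minimal usage sketch for the detector above, assuming a CUDA device is available (the constructor calls .cuda()) and the checkpoint can be fetched on first use; "room.jpg" is a placeholder input path:

    import cv2
    from annotator.mlsd import MLSDdetector

    detector = MLSDdetector()                         # fetches mlsd_large_512_fp32.pth if missing
    image = cv2.imread("room.jpg")                    # HxWx3 uint8 image
    line_map = detector(image, thr_v=0.1, thr_d=0.1)  # score and distance thresholds passed to pred_lines
    cv2.imwrite("lines.png", line_map)                # single-channel map with detected segments drawn in white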
annotator/mlsd/models/mbv2_mlsd_large.py ADDED
@@ -0,0 +1,302 @@
1
+ '''
2
+ * Copyright (c) 2023 Salesforce, Inc.
3
+ * All rights reserved.
4
+ * SPDX-License-Identifier: Apache License 2.0
5
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
6
+ * By Can Qin
7
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
8
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
9
+ '''
10
+
11
+ import os
12
+ import sys
13
+ import torch
14
+ import torch.nn as nn
15
+ import torch.utils.model_zoo as model_zoo
16
+ from torch.nn import functional as F
17
+
18
+
19
+ class BlockTypeA(nn.Module):
20
+ def __init__(self, in_c1, in_c2, out_c1, out_c2, upscale = True):
21
+ super(BlockTypeA, self).__init__()
22
+ self.conv1 = nn.Sequential(
23
+ nn.Conv2d(in_c2, out_c2, kernel_size=1),
24
+ nn.BatchNorm2d(out_c2),
25
+ nn.ReLU(inplace=True)
26
+ )
27
+ self.conv2 = nn.Sequential(
28
+ nn.Conv2d(in_c1, out_c1, kernel_size=1),
29
+ nn.BatchNorm2d(out_c1),
30
+ nn.ReLU(inplace=True)
31
+ )
32
+ self.upscale = upscale
33
+
34
+ def forward(self, a, b):
35
+ b = self.conv1(b)
36
+ a = self.conv2(a)
37
+ if self.upscale:
38
+ b = F.interpolate(b, scale_factor=2.0, mode='bilinear', align_corners=True)
39
+ return torch.cat((a, b), dim=1)
40
+
41
+
42
+ class BlockTypeB(nn.Module):
43
+ def __init__(self, in_c, out_c):
44
+ super(BlockTypeB, self).__init__()
45
+ self.conv1 = nn.Sequential(
46
+ nn.Conv2d(in_c, in_c, kernel_size=3, padding=1),
47
+ nn.BatchNorm2d(in_c),
48
+ nn.ReLU()
49
+ )
50
+ self.conv2 = nn.Sequential(
51
+ nn.Conv2d(in_c, out_c, kernel_size=3, padding=1),
52
+ nn.BatchNorm2d(out_c),
53
+ nn.ReLU()
54
+ )
55
+
56
+ def forward(self, x):
57
+ x = self.conv1(x) + x
58
+ x = self.conv2(x)
59
+ return x
60
+
61
+ class BlockTypeC(nn.Module):
62
+ def __init__(self, in_c, out_c):
63
+ super(BlockTypeC, self).__init__()
64
+ self.conv1 = nn.Sequential(
65
+ nn.Conv2d(in_c, in_c, kernel_size=3, padding=5, dilation=5),
66
+ nn.BatchNorm2d(in_c),
67
+ nn.ReLU()
68
+ )
69
+ self.conv2 = nn.Sequential(
70
+ nn.Conv2d(in_c, in_c, kernel_size=3, padding=1),
71
+ nn.BatchNorm2d(in_c),
72
+ nn.ReLU()
73
+ )
74
+ self.conv3 = nn.Conv2d(in_c, out_c, kernel_size=1)
75
+
76
+ def forward(self, x):
77
+ x = self.conv1(x)
78
+ x = self.conv2(x)
79
+ x = self.conv3(x)
80
+ return x
81
+
82
+ def _make_divisible(v, divisor, min_value=None):
83
+ """
84
+ This function is taken from the original tf repo.
85
+ It ensures that all layers have a channel number that is divisible by 8
86
+ It can be seen here:
87
+ https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
88
+ :param v:
89
+ :param divisor:
90
+ :param min_value:
91
+ :return:
92
+ """
93
+ if min_value is None:
94
+ min_value = divisor
95
+ new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
96
+ # Make sure that round down does not go down by more than 10%.
97
+ if new_v < 0.9 * v:
98
+ new_v += divisor
99
+ return new_v
100
+
101
+
102
+ class ConvBNReLU(nn.Sequential):
103
+ def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):
104
+ self.channel_pad = out_planes - in_planes
105
+ self.stride = stride
106
+ #padding = (kernel_size - 1) // 2
107
+
108
+ # TFLite uses slightly different padding than PyTorch
109
+ if stride == 2:
110
+ padding = 0
111
+ else:
112
+ padding = (kernel_size - 1) // 2
113
+
114
+ super(ConvBNReLU, self).__init__(
115
+ nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False),
116
+ nn.BatchNorm2d(out_planes),
117
+ nn.ReLU6(inplace=True)
118
+ )
119
+ self.max_pool = nn.MaxPool2d(kernel_size=stride, stride=stride)
120
+
121
+
122
+ def forward(self, x):
123
+ # TFLite uses different padding
124
+ if self.stride == 2:
125
+ x = F.pad(x, (0, 1, 0, 1), "constant", 0)
126
+ #print(x.shape)
127
+
128
+ for module in self:
129
+ if not isinstance(module, nn.MaxPool2d):
130
+ x = module(x)
131
+ return x
132
+
133
+
134
+ class InvertedResidual(nn.Module):
135
+ def __init__(self, inp, oup, stride, expand_ratio):
136
+ super(InvertedResidual, self).__init__()
137
+ self.stride = stride
138
+ assert stride in [1, 2]
139
+
140
+ hidden_dim = int(round(inp * expand_ratio))
141
+ self.use_res_connect = self.stride == 1 and inp == oup
142
+
143
+ layers = []
144
+ if expand_ratio != 1:
145
+ # pw
146
+ layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
147
+ layers.extend([
148
+ # dw
149
+ ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim),
150
+ # pw-linear
151
+ nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
152
+ nn.BatchNorm2d(oup),
153
+ ])
154
+ self.conv = nn.Sequential(*layers)
155
+
156
+ def forward(self, x):
157
+ if self.use_res_connect:
158
+ return x + self.conv(x)
159
+ else:
160
+ return self.conv(x)
161
+
162
+
163
+ class MobileNetV2(nn.Module):
164
+ def __init__(self, pretrained=True):
165
+ """
166
+ MobileNet V2 main class
167
+ Args:
168
+ num_classes (int): Number of classes
169
+ width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
170
+ inverted_residual_setting: Network structure
171
+ round_nearest (int): Round the number of channels in each layer to be a multiple of this number
172
+ Set to 1 to turn off rounding
173
+ block: Module specifying inverted residual building block for mobilenet
174
+ """
175
+ super(MobileNetV2, self).__init__()
176
+
177
+ block = InvertedResidual
178
+ input_channel = 32
179
+ last_channel = 1280
180
+ width_mult = 1.0
181
+ round_nearest = 8
182
+
183
+ inverted_residual_setting = [
184
+ # t, c, n, s
185
+ [1, 16, 1, 1],
186
+ [6, 24, 2, 2],
187
+ [6, 32, 3, 2],
188
+ [6, 64, 4, 2],
189
+ [6, 96, 3, 1],
190
+ #[6, 160, 3, 2],
191
+ #[6, 320, 1, 1],
192
+ ]
193
+
194
+ # only check the first element, assuming user knows t,c,n,s are required
195
+ if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
196
+ raise ValueError("inverted_residual_setting should be non-empty "
197
+ "or a 4-element list, got {}".format(inverted_residual_setting))
198
+
199
+ # building first layer
200
+ input_channel = _make_divisible(input_channel * width_mult, round_nearest)
201
+ self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
202
+ features = [ConvBNReLU(4, input_channel, stride=2)]
203
+ # building inverted residual blocks
204
+ for t, c, n, s in inverted_residual_setting:
205
+ output_channel = _make_divisible(c * width_mult, round_nearest)
206
+ for i in range(n):
207
+ stride = s if i == 0 else 1
208
+ features.append(block(input_channel, output_channel, stride, expand_ratio=t))
209
+ input_channel = output_channel
210
+
211
+ self.features = nn.Sequential(*features)
212
+ self.fpn_selected = [1, 3, 6, 10, 13]
213
+ # weight initialization
214
+ for m in self.modules():
215
+ if isinstance(m, nn.Conv2d):
216
+ nn.init.kaiming_normal_(m.weight, mode='fan_out')
217
+ if m.bias is not None:
218
+ nn.init.zeros_(m.bias)
219
+ elif isinstance(m, nn.BatchNorm2d):
220
+ nn.init.ones_(m.weight)
221
+ nn.init.zeros_(m.bias)
222
+ elif isinstance(m, nn.Linear):
223
+ nn.init.normal_(m.weight, 0, 0.01)
224
+ nn.init.zeros_(m.bias)
225
+ if pretrained:
226
+ self._load_pretrained_model()
227
+
228
+ def _forward_impl(self, x):
229
+ # This exists since TorchScript doesn't support inheritance, so the superclass method
230
+ # (this one) needs to have a name other than `forward` that can be accessed in a subclass
231
+ fpn_features = []
232
+ for i, f in enumerate(self.features):
233
+ if i > self.fpn_selected[-1]:
234
+ break
235
+ x = f(x)
236
+ if i in self.fpn_selected:
237
+ fpn_features.append(x)
238
+
239
+ c1, c2, c3, c4, c5 = fpn_features
240
+ return c1, c2, c3, c4, c5
241
+
242
+
243
+ def forward(self, x):
244
+ return self._forward_impl(x)
245
+
246
+ def _load_pretrained_model(self):
247
+ pretrain_dict = model_zoo.load_url('https://download.pytorch.org/models/mobilenet_v2-b0353104.pth')
248
+ model_dict = {}
249
+ state_dict = self.state_dict()
250
+ for k, v in pretrain_dict.items():
251
+ if k in state_dict:
252
+ model_dict[k] = v
253
+ state_dict.update(model_dict)
254
+ self.load_state_dict(state_dict)
255
+
256
+
257
+ class MobileV2_MLSD_Large(nn.Module):
258
+ def __init__(self):
259
+ super(MobileV2_MLSD_Large, self).__init__()
260
+
261
+ self.backbone = MobileNetV2(pretrained=False)
262
+ ## A, B
263
+ self.block15 = BlockTypeA(in_c1= 64, in_c2= 96,
264
+ out_c1= 64, out_c2=64,
265
+ upscale=False)
266
+ self.block16 = BlockTypeB(128, 64)
267
+
268
+ ## A, B
269
+ self.block17 = BlockTypeA(in_c1 = 32, in_c2 = 64,
270
+ out_c1= 64, out_c2= 64)
271
+ self.block18 = BlockTypeB(128, 64)
272
+
273
+ ## A, B
274
+ self.block19 = BlockTypeA(in_c1=24, in_c2=64,
275
+ out_c1=64, out_c2=64)
276
+ self.block20 = BlockTypeB(128, 64)
277
+
278
+ ## A, B, C
279
+ self.block21 = BlockTypeA(in_c1=16, in_c2=64,
280
+ out_c1=64, out_c2=64)
281
+ self.block22 = BlockTypeB(128, 64)
282
+
283
+ self.block23 = BlockTypeC(64, 16)
284
+
285
+ def forward(self, x):
286
+ c1, c2, c3, c4, c5 = self.backbone(x)
287
+
288
+ x = self.block15(c4, c5)
289
+ x = self.block16(x)
290
+
291
+ x = self.block17(c3, x)
292
+ x = self.block18(x)
293
+
294
+ x = self.block19(c2, x)
295
+ x = self.block20(x)
296
+
297
+ x = self.block21(c1, x)
298
+ x = self.block22(x)
299
+ x = self.block23(x)
300
+ x = x[:, 7:, :, :]
301
+
302
+ return x
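For reference, a quick shape check of the network above: the first backbone conv expects 4 input channels (RGB plus the constant plane that pred_lines concatenates), and the head keeps channels 7: of the 16-channel BlockTypeC output, so the result is a 9-channel map at half the input resolution (channel 0 is the line-center heat map, channels 1-4 the displacements decoded in annotator/mlsd/utils.py):

    import torch
    from annotator.mlsd.models.mbv2_mlsd_large import MobileV2_MLSD_Large

    net = MobileV2_MLSD_Large().eval()   # backbone is built with pretrained=False, so no download happens
    dummy = torch.randn(1, 4, 512, 512)  # 4 channels: RGB + the extra constant plane
    with torch.no_grad():
        out = net(dummy)
    print(out.shape)                     # torch.Size([1, 9, 256, 256])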
annotator/mlsd/models/mbv2_mlsd_tiny.py ADDED
@@ -0,0 +1,285 @@
1
+ '''
2
+ * Copyright (c) 2023 Salesforce, Inc.
3
+ * All rights reserved.
4
+ * SPDX-License-Identifier: Apache License 2.0
5
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
6
+ * By Can Qin
7
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
8
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
9
+ '''
10
+
11
+ import os
12
+ import sys
13
+ import torch
14
+ import torch.nn as nn
15
+ import torch.utils.model_zoo as model_zoo
16
+ from torch.nn import functional as F
17
+
18
+
19
+ class BlockTypeA(nn.Module):
20
+ def __init__(self, in_c1, in_c2, out_c1, out_c2, upscale = True):
21
+ super(BlockTypeA, self).__init__()
22
+ self.conv1 = nn.Sequential(
23
+ nn.Conv2d(in_c2, out_c2, kernel_size=1),
24
+ nn.BatchNorm2d(out_c2),
25
+ nn.ReLU(inplace=True)
26
+ )
27
+ self.conv2 = nn.Sequential(
28
+ nn.Conv2d(in_c1, out_c1, kernel_size=1),
29
+ nn.BatchNorm2d(out_c1),
30
+ nn.ReLU(inplace=True)
31
+ )
32
+ self.upscale = upscale
33
+
34
+ def forward(self, a, b):
35
+ b = self.conv1(b)
36
+ a = self.conv2(a)
37
+ b = F.interpolate(b, scale_factor=2.0, mode='bilinear', align_corners=True)
38
+ return torch.cat((a, b), dim=1)
39
+
40
+
41
+ class BlockTypeB(nn.Module):
42
+ def __init__(self, in_c, out_c):
43
+ super(BlockTypeB, self).__init__()
44
+ self.conv1 = nn.Sequential(
45
+ nn.Conv2d(in_c, in_c, kernel_size=3, padding=1),
46
+ nn.BatchNorm2d(in_c),
47
+ nn.ReLU()
48
+ )
49
+ self.conv2 = nn.Sequential(
50
+ nn.Conv2d(in_c, out_c, kernel_size=3, padding=1),
51
+ nn.BatchNorm2d(out_c),
52
+ nn.ReLU()
53
+ )
54
+
55
+ def forward(self, x):
56
+ x = self.conv1(x) + x
57
+ x = self.conv2(x)
58
+ return x
59
+
60
+ class BlockTypeC(nn.Module):
61
+ def __init__(self, in_c, out_c):
62
+ super(BlockTypeC, self).__init__()
63
+ self.conv1 = nn.Sequential(
64
+ nn.Conv2d(in_c, in_c, kernel_size=3, padding=5, dilation=5),
65
+ nn.BatchNorm2d(in_c),
66
+ nn.ReLU()
67
+ )
68
+ self.conv2 = nn.Sequential(
69
+ nn.Conv2d(in_c, in_c, kernel_size=3, padding=1),
70
+ nn.BatchNorm2d(in_c),
71
+ nn.ReLU()
72
+ )
73
+ self.conv3 = nn.Conv2d(in_c, out_c, kernel_size=1)
74
+
75
+ def forward(self, x):
76
+ x = self.conv1(x)
77
+ x = self.conv2(x)
78
+ x = self.conv3(x)
79
+ return x
80
+
81
+ def _make_divisible(v, divisor, min_value=None):
82
+ """
83
+ This function is taken from the original tf repo.
84
+ It ensures that all layers have a channel number that is divisible by 8
85
+ It can be seen here:
86
+ https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
87
+ :param v:
88
+ :param divisor:
89
+ :param min_value:
90
+ :return:
91
+ """
92
+ if min_value is None:
93
+ min_value = divisor
94
+ new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
95
+ # Make sure that round down does not go down by more than 10%.
96
+ if new_v < 0.9 * v:
97
+ new_v += divisor
98
+ return new_v
99
+
100
+
101
+ class ConvBNReLU(nn.Sequential):
102
+ def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):
103
+ self.channel_pad = out_planes - in_planes
104
+ self.stride = stride
105
+ #padding = (kernel_size - 1) // 2
106
+
107
+ # TFLite uses slightly different padding than PyTorch
108
+ if stride == 2:
109
+ padding = 0
110
+ else:
111
+ padding = (kernel_size - 1) // 2
112
+
113
+ super(ConvBNReLU, self).__init__(
114
+ nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False),
115
+ nn.BatchNorm2d(out_planes),
116
+ nn.ReLU6(inplace=True)
117
+ )
118
+ self.max_pool = nn.MaxPool2d(kernel_size=stride, stride=stride)
119
+
120
+
121
+ def forward(self, x):
122
+ # TFLite uses different padding
123
+ if self.stride == 2:
124
+ x = F.pad(x, (0, 1, 0, 1), "constant", 0)
125
+ #print(x.shape)
126
+
127
+ for module in self:
128
+ if not isinstance(module, nn.MaxPool2d):
129
+ x = module(x)
130
+ return x
131
+
132
+
133
+ class InvertedResidual(nn.Module):
134
+ def __init__(self, inp, oup, stride, expand_ratio):
135
+ super(InvertedResidual, self).__init__()
136
+ self.stride = stride
137
+ assert stride in [1, 2]
138
+
139
+ hidden_dim = int(round(inp * expand_ratio))
140
+ self.use_res_connect = self.stride == 1 and inp == oup
141
+
142
+ layers = []
143
+ if expand_ratio != 1:
144
+ # pw
145
+ layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
146
+ layers.extend([
147
+ # dw
148
+ ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim),
149
+ # pw-linear
150
+ nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
151
+ nn.BatchNorm2d(oup),
152
+ ])
153
+ self.conv = nn.Sequential(*layers)
154
+
155
+ def forward(self, x):
156
+ if self.use_res_connect:
157
+ return x + self.conv(x)
158
+ else:
159
+ return self.conv(x)
160
+
161
+
162
+ class MobileNetV2(nn.Module):
163
+ def __init__(self, pretrained=True):
164
+ """
165
+ MobileNet V2 main class
166
+ Args:
167
+ num_classes (int): Number of classes
168
+ width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
169
+ inverted_residual_setting: Network structure
170
+ round_nearest (int): Round the number of channels in each layer to be a multiple of this number
171
+ Set to 1 to turn off rounding
172
+ block: Module specifying inverted residual building block for mobilenet
173
+ """
174
+ super(MobileNetV2, self).__init__()
175
+
176
+ block = InvertedResidual
177
+ input_channel = 32
178
+ last_channel = 1280
179
+ width_mult = 1.0
180
+ round_nearest = 8
181
+
182
+ inverted_residual_setting = [
183
+ # t, c, n, s
184
+ [1, 16, 1, 1],
185
+ [6, 24, 2, 2],
186
+ [6, 32, 3, 2],
187
+ [6, 64, 4, 2],
188
+ #[6, 96, 3, 1],
189
+ #[6, 160, 3, 2],
190
+ #[6, 320, 1, 1],
191
+ ]
192
+
193
+ # only check the first element, assuming user knows t,c,n,s are required
194
+ if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
195
+ raise ValueError("inverted_residual_setting should be non-empty "
196
+ "or a 4-element list, got {}".format(inverted_residual_setting))
197
+
198
+ # building first layer
199
+ input_channel = _make_divisible(input_channel * width_mult, round_nearest)
200
+ self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
201
+ features = [ConvBNReLU(4, input_channel, stride=2)]
202
+ # building inverted residual blocks
203
+ for t, c, n, s in inverted_residual_setting:
204
+ output_channel = _make_divisible(c * width_mult, round_nearest)
205
+ for i in range(n):
206
+ stride = s if i == 0 else 1
207
+ features.append(block(input_channel, output_channel, stride, expand_ratio=t))
208
+ input_channel = output_channel
209
+ self.features = nn.Sequential(*features)
210
+
211
+ self.fpn_selected = [3, 6, 10]
212
+ # weight initialization
213
+ for m in self.modules():
214
+ if isinstance(m, nn.Conv2d):
215
+ nn.init.kaiming_normal_(m.weight, mode='fan_out')
216
+ if m.bias is not None:
217
+ nn.init.zeros_(m.bias)
218
+ elif isinstance(m, nn.BatchNorm2d):
219
+ nn.init.ones_(m.weight)
220
+ nn.init.zeros_(m.bias)
221
+ elif isinstance(m, nn.Linear):
222
+ nn.init.normal_(m.weight, 0, 0.01)
223
+ nn.init.zeros_(m.bias)
224
+
225
+ #if pretrained:
226
+ # self._load_pretrained_model()
227
+
228
+ def _forward_impl(self, x):
229
+ # This exists since TorchScript doesn't support inheritance, so the superclass method
230
+ # (this one) needs to have a name other than `forward` that can be accessed in a subclass
231
+ fpn_features = []
232
+ for i, f in enumerate(self.features):
233
+ if i > self.fpn_selected[-1]:
234
+ break
235
+ x = f(x)
236
+ if i in self.fpn_selected:
237
+ fpn_features.append(x)
238
+
239
+ c2, c3, c4 = fpn_features
240
+ return c2, c3, c4
241
+
242
+
243
+ def forward(self, x):
244
+ return self._forward_impl(x)
245
+
246
+ def _load_pretrained_model(self):
247
+ pretrain_dict = model_zoo.load_url('https://download.pytorch.org/models/mobilenet_v2-b0353104.pth')
248
+ model_dict = {}
249
+ state_dict = self.state_dict()
250
+ for k, v in pretrain_dict.items():
251
+ if k in state_dict:
252
+ model_dict[k] = v
253
+ state_dict.update(model_dict)
254
+ self.load_state_dict(state_dict)
255
+
256
+
257
+ class MobileV2_MLSD_Tiny(nn.Module):
258
+ def __init__(self):
259
+ super(MobileV2_MLSD_Tiny, self).__init__()
260
+
261
+ self.backbone = MobileNetV2(pretrained=True)
262
+
263
+ self.block12 = BlockTypeA(in_c1= 32, in_c2= 64,
264
+ out_c1= 64, out_c2=64)
265
+ self.block13 = BlockTypeB(128, 64)
266
+
267
+ self.block14 = BlockTypeA(in_c1 = 24, in_c2 = 64,
268
+ out_c1= 32, out_c2= 32)
269
+ self.block15 = BlockTypeB(64, 64)
270
+
271
+ self.block16 = BlockTypeC(64, 16)
272
+
273
+ def forward(self, x):
274
+ c2, c3, c4 = self.backbone(x)
275
+
276
+ x = self.block12(c3, c4)
277
+ x = self.block13(x)
278
+ x = self.block14(c2, x)
279
+ x = self.block15(x)
280
+ x = self.block16(x)
281
+ x = x[:, 7:, :, :]
282
+ #print(x.shape)
283
+ x = F.interpolate(x, scale_factor=2.0, mode='bilinear', align_corners=True)
284
+
285
+ return x
annotator/mlsd/utils.py ADDED
@@ -0,0 +1,590 @@
1
+ '''
2
+ * Copyright (c) 2023 Salesforce, Inc.
3
+ * All rights reserved.
4
+ * SPDX-License-Identifier: Apache License 2.0
5
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
6
+ * By Can Qin
7
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
8
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
9
+ '''
10
+
11
+ '''
12
+ modified by lihaoweicv
13
+ pytorch version
14
+ '''
15
+
16
+ '''
17
+ M-LSD
18
+ Copyright 2021-present NAVER Corp.
19
+ Apache License v2.0
20
+ '''
21
+
22
+ import os
23
+ import numpy as np
24
+ import cv2
25
+ import torch
26
+ from torch.nn import functional as F
27
+
28
+
29
+ def deccode_output_score_and_ptss(tpMap, topk_n = 200, ksize = 5):
30
+ '''
31
+ tpMap:
32
+ center: tpMap[1, 0, :, :]
33
+ displacement: tpMap[1, 1:5, :, :]
34
+ '''
35
+ b, c, h, w = tpMap.shape
36
+ assert b==1, 'only support bsize==1'
37
+ displacement = tpMap[:, 1:5, :, :][0]
38
+ center = tpMap[:, 0, :, :]
39
+ heat = torch.sigmoid(center)
40
+ hmax = F.max_pool2d( heat, (ksize, ksize), stride=1, padding=(ksize-1)//2)
41
+ keep = (hmax == heat).float()
42
+ heat = heat * keep
43
+ heat = heat.reshape(-1, )
44
+
45
+ scores, indices = torch.topk(heat, topk_n, dim=-1, largest=True)
46
+ yy = torch.floor_divide(indices, w).unsqueeze(-1)
47
+ xx = torch.fmod(indices, w).unsqueeze(-1)
48
+ ptss = torch.cat((yy, xx),dim=-1)
49
+
50
+ ptss = ptss.detach().cpu().numpy()
51
+ scores = scores.detach().cpu().numpy()
52
+ displacement = displacement.detach().cpu().numpy()
53
+ displacement = displacement.transpose((1,2,0))
54
+ return ptss, scores, displacement
55
+
56
+
57
+ def pred_lines(image, model,
58
+ input_shape=[512, 512],
59
+ score_thr=0.10,
60
+ dist_thr=20.0):
61
+ h, w, _ = image.shape
62
+ h_ratio, w_ratio = [h / input_shape[0], w / input_shape[1]]
63
+
64
+ resized_image = np.concatenate([cv2.resize(image, (input_shape[1], input_shape[0]), interpolation=cv2.INTER_AREA),
65
+ np.ones([input_shape[0], input_shape[1], 1])], axis=-1)
66
+
67
+ resized_image = resized_image.transpose((2,0,1))
68
+ batch_image = np.expand_dims(resized_image, axis=0).astype('float32')
69
+ batch_image = (batch_image / 127.5) - 1.0
70
+
71
+ batch_image = torch.from_numpy(batch_image).float().cuda()
72
+ outputs = model(batch_image)
73
+ pts, pts_score, vmap = deccode_output_score_and_ptss(outputs, 200, 3)
74
+ start = vmap[:, :, :2]
75
+ end = vmap[:, :, 2:]
76
+ dist_map = np.sqrt(np.sum((start - end) ** 2, axis=-1))
77
+
78
+ segments_list = []
79
+ for center, score in zip(pts, pts_score):
80
+ y, x = center
81
+ distance = dist_map[y, x]
82
+ if score > score_thr and distance > dist_thr:
83
+ disp_x_start, disp_y_start, disp_x_end, disp_y_end = vmap[y, x, :]
84
+ x_start = x + disp_x_start
85
+ y_start = y + disp_y_start
86
+ x_end = x + disp_x_end
87
+ y_end = y + disp_y_end
88
+ segments_list.append([x_start, y_start, x_end, y_end])
89
+
90
+ lines = 2 * np.array(segments_list)  # predictions are at half resolution, scale back up (e.g. 256 -> 512)
91
+ lines[:, 0] = lines[:, 0] * w_ratio
92
+ lines[:, 1] = lines[:, 1] * h_ratio
93
+ lines[:, 2] = lines[:, 2] * w_ratio
94
+ lines[:, 3] = lines[:, 3] * h_ratio
95
+
96
+ return lines
97
+
98
+
99
+ def pred_squares(image,
100
+ model,
101
+ input_shape=[512, 512],
102
+ params={'score': 0.06,
103
+ 'outside_ratio': 0.28,
104
+ 'inside_ratio': 0.45,
105
+ 'w_overlap': 0.0,
106
+ 'w_degree': 1.95,
107
+ 'w_length': 0.0,
108
+ 'w_area': 1.86,
109
+ 'w_center': 0.14}):
110
+ '''
111
+ shape = [height, width]
112
+ '''
113
+ h, w, _ = image.shape
114
+ original_shape = [h, w]
115
+
116
+ resized_image = np.concatenate([cv2.resize(image, (input_shape[0], input_shape[1]), interpolation=cv2.INTER_AREA),
117
+ np.ones([input_shape[0], input_shape[1], 1])], axis=-1)
118
+ resized_image = resized_image.transpose((2, 0, 1))
119
+ batch_image = np.expand_dims(resized_image, axis=0).astype('float32')
120
+ batch_image = (batch_image / 127.5) - 1.0
121
+
122
+ batch_image = torch.from_numpy(batch_image).float().cuda()
123
+ outputs = model(batch_image)
124
+
125
+ pts, pts_score, vmap = deccode_output_score_and_ptss(outputs, 200, 3)
126
+ start = vmap[:, :, :2] # (x, y)
127
+ end = vmap[:, :, 2:] # (x, y)
128
+ dist_map = np.sqrt(np.sum((start - end) ** 2, axis=-1))
129
+
130
+ junc_list = []
131
+ segments_list = []
132
+ for junc, score in zip(pts, pts_score):
133
+ y, x = junc
134
+ distance = dist_map[y, x]
135
+ if score > params['score'] and distance > 20.0:
136
+ junc_list.append([x, y])
137
+ disp_x_start, disp_y_start, disp_x_end, disp_y_end = vmap[y, x, :]
138
+ d_arrow = 1.0
139
+ x_start = x + d_arrow * disp_x_start
140
+ y_start = y + d_arrow * disp_y_start
141
+ x_end = x + d_arrow * disp_x_end
142
+ y_end = y + d_arrow * disp_y_end
143
+ segments_list.append([x_start, y_start, x_end, y_end])
144
+
145
+ segments = np.array(segments_list)
146
+
147
+ ####### post processing for squares
148
+ # 1. get unique lines
149
+ point = np.array([[0, 0]])
150
+ point = point[0]
151
+ start = segments[:, :2]
152
+ end = segments[:, 2:]
153
+ diff = start - end
154
+ a = diff[:, 1]
155
+ b = -diff[:, 0]
156
+ c = a * start[:, 0] + b * start[:, 1]
157
+
158
+ d = np.abs(a * point[0] + b * point[1] - c) / np.sqrt(a ** 2 + b ** 2 + 1e-10)
159
+ theta = np.arctan2(diff[:, 0], diff[:, 1]) * 180 / np.pi
160
+ theta[theta < 0.0] += 180
161
+ hough = np.concatenate([d[:, None], theta[:, None]], axis=-1)
162
+
163
+ d_quant = 1
164
+ theta_quant = 2
165
+ hough[:, 0] //= d_quant
166
+ hough[:, 1] //= theta_quant
167
+ _, indices, counts = np.unique(hough, axis=0, return_index=True, return_counts=True)
168
+
169
+ acc_map = np.zeros([512 // d_quant + 1, 360 // theta_quant + 1], dtype='float32')
170
+ idx_map = np.zeros([512 // d_quant + 1, 360 // theta_quant + 1], dtype='int32') - 1
171
+ yx_indices = hough[indices, :].astype('int32')
172
+ acc_map[yx_indices[:, 0], yx_indices[:, 1]] = counts
173
+ idx_map[yx_indices[:, 0], yx_indices[:, 1]] = indices
174
+
175
+ acc_map_np = acc_map
176
+ # acc_map = acc_map[None, :, :, None]
177
+ #
178
+ # ### fast suppression using tensorflow op
179
+ # acc_map = tf.constant(acc_map, dtype=tf.float32)
180
+ # max_acc_map = tf.keras.layers.MaxPool2D(pool_size=(5, 5), strides=1, padding='same')(acc_map)
181
+ # acc_map = acc_map * tf.cast(tf.math.equal(acc_map, max_acc_map), tf.float32)
182
+ # flatten_acc_map = tf.reshape(acc_map, [1, -1])
183
+ # topk_values, topk_indices = tf.math.top_k(flatten_acc_map, k=len(pts))
184
+ # _, h, w, _ = acc_map.shape
185
+ # y = tf.expand_dims(topk_indices // w, axis=-1)
186
+ # x = tf.expand_dims(topk_indices % w, axis=-1)
187
+ # yx = tf.concat([y, x], axis=-1)
188
+
189
+ ### fast suppression using pytorch op
190
+ acc_map = torch.from_numpy(acc_map_np).unsqueeze(0).unsqueeze(0)
191
+ _,_, h, w = acc_map.shape
192
+ max_acc_map = F.max_pool2d(acc_map,kernel_size=5, stride=1, padding=2)
193
+ acc_map = acc_map * ( (acc_map == max_acc_map).float() )
194
+ flatten_acc_map = acc_map.reshape([-1, ])
195
+
196
+ scores, indices = torch.topk(flatten_acc_map, len(pts), dim=-1, largest=True)
197
+ yy = torch.div(indices, w, rounding_mode='floor').unsqueeze(-1)
198
+ xx = torch.fmod(indices, w).unsqueeze(-1)
199
+ yx = torch.cat((yy, xx), dim=-1)
200
+
201
+ yx = yx.detach().cpu().numpy()
202
+
203
+ topk_values = scores.detach().cpu().numpy()
204
+ indices = idx_map[yx[:, 0], yx[:, 1]]
205
+ basis = 5 // 2
206
+
207
+ merged_segments = []
208
+ for yx_pt, max_indice, value in zip(yx, indices, topk_values):
209
+ y, x = yx_pt
210
+ if max_indice == -1 or value == 0:
211
+ continue
212
+ segment_list = []
213
+ for y_offset in range(-basis, basis + 1):
214
+ for x_offset in range(-basis, basis + 1):
215
+ indice = idx_map[y + y_offset, x + x_offset]
216
+ cnt = int(acc_map_np[y + y_offset, x + x_offset])
217
+ if indice != -1:
218
+ segment_list.append(segments[indice])
219
+ if cnt > 1:
220
+ check_cnt = 1
221
+ current_hough = hough[indice]
222
+ for new_indice, new_hough in enumerate(hough):
223
+ if (current_hough == new_hough).all() and indice != new_indice:
224
+ segment_list.append(segments[new_indice])
225
+ check_cnt += 1
226
+ if check_cnt == cnt:
227
+ break
228
+ group_segments = np.array(segment_list).reshape([-1, 2])
229
+ sorted_group_segments = np.sort(group_segments, axis=0)
230
+ x_min, y_min = sorted_group_segments[0, :]
231
+ x_max, y_max = sorted_group_segments[-1, :]
232
+
233
+ deg = theta[max_indice]
234
+ if deg >= 90:
235
+ merged_segments.append([x_min, y_max, x_max, y_min])
236
+ else:
237
+ merged_segments.append([x_min, y_min, x_max, y_max])
238
+
239
+ # 2. get intersections
240
+ new_segments = np.array(merged_segments) # (x1, y1, x2, y2)
241
+ start = new_segments[:, :2] # (x1, y1)
242
+ end = new_segments[:, 2:] # (x2, y2)
243
+ new_centers = (start + end) / 2.0
244
+ diff = start - end
245
+ dist_segments = np.sqrt(np.sum(diff ** 2, axis=-1))
246
+
247
+ # ax + by = c
248
+ a = diff[:, 1]
249
+ b = -diff[:, 0]
250
+ c = a * start[:, 0] + b * start[:, 1]
251
+ pre_det = a[:, None] * b[None, :]
252
+ det = pre_det - np.transpose(pre_det)
253
+
254
+ pre_inter_y = a[:, None] * c[None, :]
255
+ inter_y = (pre_inter_y - np.transpose(pre_inter_y)) / (det + 1e-10)
256
+ pre_inter_x = c[:, None] * b[None, :]
257
+ inter_x = (pre_inter_x - np.transpose(pre_inter_x)) / (det + 1e-10)
258
+ inter_pts = np.concatenate([inter_x[:, :, None], inter_y[:, :, None]], axis=-1).astype('int32')
259
+
260
+ # 3. get corner information
261
+ # 3.1 get distance
262
+ '''
263
+ dist_segments:
264
+ | dist(0), dist(1), dist(2), ...|
265
+ dist_inter_to_segment1:
266
+ | dist(inter,0), dist(inter,0), dist(inter,0), ... |
267
+ | dist(inter,1), dist(inter,1), dist(inter,1), ... |
268
+ ...
269
+ dist_inter_to_segment2:
270
+ | dist(inter,0), dist(inter,1), dist(inter,2), ... |
271
+ | dist(inter,0), dist(inter,1), dist(inter,2), ... |
272
+ ...
273
+ '''
274
+
275
+ dist_inter_to_segment1_start = np.sqrt(
276
+ np.sum(((inter_pts - start[:, None, :]) ** 2), axis=-1, keepdims=True)) # [n_batch, n_batch, 1]
277
+ dist_inter_to_segment1_end = np.sqrt(
278
+ np.sum(((inter_pts - end[:, None, :]) ** 2), axis=-1, keepdims=True)) # [n_batch, n_batch, 1]
279
+ dist_inter_to_segment2_start = np.sqrt(
280
+ np.sum(((inter_pts - start[None, :, :]) ** 2), axis=-1, keepdims=True)) # [n_batch, n_batch, 1]
281
+ dist_inter_to_segment2_end = np.sqrt(
282
+ np.sum(((inter_pts - end[None, :, :]) ** 2), axis=-1, keepdims=True)) # [n_batch, n_batch, 1]
283
+
284
+ # sort ascending
285
+ dist_inter_to_segment1 = np.sort(
286
+ np.concatenate([dist_inter_to_segment1_start, dist_inter_to_segment1_end], axis=-1),
287
+ axis=-1) # [n_batch, n_batch, 2]
288
+ dist_inter_to_segment2 = np.sort(
289
+ np.concatenate([dist_inter_to_segment2_start, dist_inter_to_segment2_end], axis=-1),
290
+ axis=-1) # [n_batch, n_batch, 2]
291
+
292
+ # 3.2 get degree
293
+ inter_to_start = new_centers[:, None, :] - inter_pts
294
+ deg_inter_to_start = np.arctan2(inter_to_start[:, :, 1], inter_to_start[:, :, 0]) * 180 / np.pi
295
+ deg_inter_to_start[deg_inter_to_start < 0.0] += 360
296
+ inter_to_end = new_centers[None, :, :] - inter_pts
297
+ deg_inter_to_end = np.arctan2(inter_to_end[:, :, 1], inter_to_end[:, :, 0]) * 180 / np.pi
298
+ deg_inter_to_end[deg_inter_to_end < 0.0] += 360
299
+
300
+ '''
301
+ B -- G
302
+ | |
303
+ C -- R
304
+ B : blue / G: green / C: cyan / R: red
305
+
306
+ 0 -- 1
307
+ | |
308
+ 3 -- 2
309
+ '''
310
+ # rename variables
311
+ deg1_map, deg2_map = deg_inter_to_start, deg_inter_to_end
312
+ # sort deg ascending
313
+ deg_sort = np.sort(np.concatenate([deg1_map[:, :, None], deg2_map[:, :, None]], axis=-1), axis=-1)
314
+
315
+ deg_diff_map = np.abs(deg1_map - deg2_map)
316
+ # we only consider the smaller angle of the intersection
317
+ deg_diff_map[deg_diff_map > 180] = 360 - deg_diff_map[deg_diff_map > 180]
318
+
319
+ # define available degree range
320
+ deg_range = [60, 120]
321
+
322
+ corner_dict = {corner_info: [] for corner_info in range(4)}
323
+ inter_points = []
324
+ for i in range(inter_pts.shape[0]):
325
+ for j in range(i + 1, inter_pts.shape[1]):
326
+ # i, j > line index, always i < j
327
+ x, y = inter_pts[i, j, :]
328
+ deg1, deg2 = deg_sort[i, j, :]
329
+ deg_diff = deg_diff_map[i, j]
330
+
331
+ check_degree = deg_diff > deg_range[0] and deg_diff < deg_range[1]
332
+
333
+ outside_ratio = params['outside_ratio'] # over ratio >>> drop it!
334
+ inside_ratio = params['inside_ratio'] # over ratio >>> drop it!
335
+ check_distance = ((dist_inter_to_segment1[i, j, 1] >= dist_segments[i] and \
336
+ dist_inter_to_segment1[i, j, 0] <= dist_segments[i] * outside_ratio) or \
337
+ (dist_inter_to_segment1[i, j, 1] <= dist_segments[i] and \
338
+ dist_inter_to_segment1[i, j, 0] <= dist_segments[i] * inside_ratio)) and \
339
+ ((dist_inter_to_segment2[i, j, 1] >= dist_segments[j] and \
340
+ dist_inter_to_segment2[i, j, 0] <= dist_segments[j] * outside_ratio) or \
341
+ (dist_inter_to_segment2[i, j, 1] <= dist_segments[j] and \
342
+ dist_inter_to_segment2[i, j, 0] <= dist_segments[j] * inside_ratio))
343
+
344
+ if check_degree and check_distance:
345
+ corner_info = None
346
+
347
+ if (deg1 >= 0 and deg1 <= 45 and deg2 >= 45 and deg2 <= 120) or \
348
+ (deg2 >= 315 and deg1 >= 45 and deg1 <= 120):
349
+ corner_info, color_info = 0, 'blue'
350
+ elif (deg1 >= 45 and deg1 <= 125 and deg2 >= 125 and deg2 <= 225):
351
+ corner_info, color_info = 1, 'green'
352
+ elif (deg1 >= 125 and deg1 <= 225 and deg2 >= 225 and deg2 <= 315):
353
+ corner_info, color_info = 2, 'black'
354
+ elif (deg1 >= 0 and deg1 <= 45 and deg2 >= 225 and deg2 <= 315) or \
355
+ (deg2 >= 315 and deg1 >= 225 and deg1 <= 315):
356
+ corner_info, color_info = 3, 'cyan'
357
+ else:
358
+ corner_info, color_info = 4, 'red' # we don't use it
359
+ continue
360
+
361
+ corner_dict[corner_info].append([x, y, i, j])
362
+ inter_points.append([x, y])
363
+
364
+ square_list = []
365
+ connect_list = []
366
+ segments_list = []
367
+ for corner0 in corner_dict[0]:
368
+ for corner1 in corner_dict[1]:
369
+ connect01 = False
370
+ for corner0_line in corner0[2:]:
371
+ if corner0_line in corner1[2:]:
372
+ connect01 = True
373
+ break
374
+ if connect01:
375
+ for corner2 in corner_dict[2]:
376
+ connect12 = False
377
+ for corner1_line in corner1[2:]:
378
+ if corner1_line in corner2[2:]:
379
+ connect12 = True
380
+ break
381
+ if connect12:
382
+ for corner3 in corner_dict[3]:
383
+ connect23 = False
384
+ for corner2_line in corner2[2:]:
385
+ if corner2_line in corner3[2:]:
386
+ connect23 = True
387
+ break
388
+ if connect23:
389
+ for corner3_line in corner3[2:]:
390
+ if corner3_line in corner0[2:]:
391
+ # SQUARE!!!
392
+ '''
393
+ 0 -- 1
394
+ | |
395
+ 3 -- 2
396
+ square_list:
397
+ order: 0 > 1 > 2 > 3
398
+ | x0, y0, x1, y1, x2, y2, x3, y3 |
399
+ | x0, y0, x1, y1, x2, y2, x3, y3 |
400
+ ...
401
+ connect_list:
402
+ order: 01 > 12 > 23 > 30
403
+ | line_idx01, line_idx12, line_idx23, line_idx30 |
404
+ | line_idx01, line_idx12, line_idx23, line_idx30 |
405
+ ...
406
+ segments_list:
407
+ order: 0 > 1 > 2 > 3
408
+ | line_idx0_i, line_idx0_j, line_idx1_i, line_idx1_j, line_idx2_i, line_idx2_j, line_idx3_i, line_idx3_j |
409
+ | line_idx0_i, line_idx0_j, line_idx1_i, line_idx1_j, line_idx2_i, line_idx2_j, line_idx3_i, line_idx3_j |
410
+ ...
411
+ '''
412
+ square_list.append(corner0[:2] + corner1[:2] + corner2[:2] + corner3[:2])
413
+ connect_list.append([corner0_line, corner1_line, corner2_line, corner3_line])
414
+ segments_list.append(corner0[2:] + corner1[2:] + corner2[2:] + corner3[2:])
415
+
416
+ def check_outside_inside(segments_info, connect_idx):
417
+ # return 'outside or inside', min distance, cover_param, peri_param
418
+ if connect_idx == segments_info[0]:
419
+ check_dist_mat = dist_inter_to_segment1
420
+ else:
421
+ check_dist_mat = dist_inter_to_segment2
422
+
423
+ i, j = segments_info
424
+ min_dist, max_dist = check_dist_mat[i, j, :]
425
+ connect_dist = dist_segments[connect_idx]
426
+ if max_dist > connect_dist:
427
+ return 'outside', min_dist, 0, 1
428
+ else:
429
+ return 'inside', min_dist, -1, -1
430
+
431
+ top_square = None
432
+
433
+ try:
434
+ map_size = input_shape[0] / 2
435
+ squares = np.array(square_list).reshape([-1, 4, 2])
436
+ score_array = []
437
+ connect_array = np.array(connect_list)
438
+ segments_array = np.array(segments_list).reshape([-1, 4, 2])
439
+
440
+ # get degree of corners:
441
+ squares_rollup = np.roll(squares, 1, axis=1)
442
+ squares_rolldown = np.roll(squares, -1, axis=1)
443
+ vec1 = squares_rollup - squares
444
+ normalized_vec1 = vec1 / (np.linalg.norm(vec1, axis=-1, keepdims=True) + 1e-10)
445
+ vec2 = squares_rolldown - squares
446
+ normalized_vec2 = vec2 / (np.linalg.norm(vec2, axis=-1, keepdims=True) + 1e-10)
447
+ inner_products = np.sum(normalized_vec1 * normalized_vec2, axis=-1) # [n_squares, 4]
448
+ squares_degree = np.arccos(inner_products) * 180 / np.pi # [n_squares, 4]
449
+
450
+ # get square score
451
+ overlap_scores = []
452
+ degree_scores = []
453
+ length_scores = []
454
+
455
+ for connects, segments, square, degree in zip(connect_array, segments_array, squares, squares_degree):
456
+ '''
457
+ 0 -- 1
458
+ | |
459
+ 3 -- 2
460
+
461
+ # segments: [4, 2]
462
+ # connects: [4]
463
+ '''
464
+
465
+ ###################################### OVERLAP SCORES
466
+ cover = 0
467
+ perimeter = 0
468
+ # check 0 > 1 > 2 > 3
469
+ square_length = []
470
+
471
+ for start_idx in range(4):
472
+ end_idx = (start_idx + 1) % 4
473
+
474
+ connect_idx = connects[start_idx] # segment idx of segment01
475
+ start_segments = segments[start_idx]
476
+ end_segments = segments[end_idx]
477
+
478
+ start_point = square[start_idx]
479
+ end_point = square[end_idx]
480
+
481
+ # check whether outside or inside
482
+ start_position, start_min, start_cover_param, start_peri_param = check_outside_inside(start_segments,
483
+ connect_idx)
484
+ end_position, end_min, end_cover_param, end_peri_param = check_outside_inside(end_segments, connect_idx)
485
+
486
+ cover += dist_segments[connect_idx] + start_cover_param * start_min + end_cover_param * end_min
487
+ perimeter += dist_segments[connect_idx] + start_peri_param * start_min + end_peri_param * end_min
488
+
489
+ square_length.append(
490
+ dist_segments[connect_idx] + start_peri_param * start_min + end_peri_param * end_min)
491
+
492
+ overlap_scores.append(cover / perimeter)
493
+ ######################################
494
+ ###################################### DEGREE SCORES
495
+ '''
496
+ deg0 vs deg2
497
+ deg1 vs deg3
498
+ '''
499
+ deg0, deg1, deg2, deg3 = degree
500
+ deg_ratio1 = deg0 / deg2
501
+ if deg_ratio1 > 1.0:
502
+ deg_ratio1 = 1 / deg_ratio1
503
+ deg_ratio2 = deg1 / deg3
504
+ if deg_ratio2 > 1.0:
505
+ deg_ratio2 = 1 / deg_ratio2
506
+ degree_scores.append((deg_ratio1 + deg_ratio2) / 2)
507
+ ######################################
508
+ ###################################### LENGTH SCORES
509
+ '''
510
+ len0 vs len2
511
+ len1 vs len3
512
+ '''
513
+ len0, len1, len2, len3 = square_length
514
+ len_ratio1 = len0 / len2 if len2 > len0 else len2 / len0
515
+ len_ratio2 = len1 / len3 if len3 > len1 else len3 / len1
516
+ length_scores.append((len_ratio1 + len_ratio2) / 2)
517
+
518
+ ######################################
519
+
520
+ overlap_scores = np.array(overlap_scores)
521
+ overlap_scores /= np.max(overlap_scores)
522
+
523
+ degree_scores = np.array(degree_scores)
524
+ # degree_scores /= np.max(degree_scores)
525
+
526
+ length_scores = np.array(length_scores)
527
+
528
+ ###################################### AREA SCORES
529
+ area_scores = np.reshape(squares, [-1, 4, 2])
530
+ area_x = area_scores[:, :, 0]
531
+ area_y = area_scores[:, :, 1]
532
+ correction = area_x[:, -1] * area_y[:, 0] - area_y[:, -1] * area_x[:, 0]
533
+ area_scores = np.sum(area_x[:, :-1] * area_y[:, 1:], axis=-1) - np.sum(area_y[:, :-1] * area_x[:, 1:], axis=-1)
534
+ area_scores = 0.5 * np.abs(area_scores + correction)
535
+ area_scores /= (map_size * map_size) # np.max(area_scores)
536
+ ######################################
537
+
538
+ ###################################### CENTER SCORES
539
+ centers = np.array([[256 // 2, 256 // 2]], dtype='float32') # [1, 2]
540
+ # squares: [n, 4, 2]
541
+ square_centers = np.mean(squares, axis=1) # [n, 2]
542
+ center2center = np.sqrt(np.sum((centers - square_centers) ** 2))
543
+ center_scores = center2center / (map_size / np.sqrt(2.0))
544
+
545
+ '''
546
+ score_w = [overlap, degree, area, center, length]
547
+ '''
548
+ score_w = [0.0, 1.0, 10.0, 0.5, 1.0]
549
+ score_array = params['w_overlap'] * overlap_scores \
550
+ + params['w_degree'] * degree_scores \
551
+ + params['w_area'] * area_scores \
552
+ - params['w_center'] * center_scores \
553
+ + params['w_length'] * length_scores
554
+
555
+ best_square = []
556
+
557
+ sorted_idx = np.argsort(score_array)[::-1]
558
+ score_array = score_array[sorted_idx]
559
+ squares = squares[sorted_idx]
560
+
561
+ except Exception as e:
562
+ pass
563
+
564
+ '''return list
565
+ merged_lines, squares, scores
566
+ '''
567
+
568
+ try:
569
+ new_segments[:, 0] = new_segments[:, 0] * 2 / input_shape[1] * original_shape[1]
570
+ new_segments[:, 1] = new_segments[:, 1] * 2 / input_shape[0] * original_shape[0]
571
+ new_segments[:, 2] = new_segments[:, 2] * 2 / input_shape[1] * original_shape[1]
572
+ new_segments[:, 3] = new_segments[:, 3] * 2 / input_shape[0] * original_shape[0]
573
+ except:
574
+ new_segments = []
575
+
576
+ try:
577
+ squares[:, :, 0] = squares[:, :, 0] * 2 / input_shape[1] * original_shape[1]
578
+ squares[:, :, 1] = squares[:, :, 1] * 2 / input_shape[0] * original_shape[0]
579
+ except:
580
+ squares = []
581
+ score_array = []
582
+
583
+ try:
584
+ inter_points = np.array(inter_points)
585
+ inter_points[:, 0] = inter_points[:, 0] * 2 / input_shape[1] * original_shape[1]
586
+ inter_points[:, 1] = inter_points[:, 1] * 2 / input_shape[0] * original_shape[0]
587
+ except:
588
+ inter_points = []
589
+
590
+ return new_segments, squares, score_array, inter_points
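The same caveats apply to the hedged sketch below for `pred_squares`: `image` and `model` are assumed exactly as in the `pred_lines` sketch earlier, and only the highest-scoring square is drawn.

# Hedged usage sketch -- `image` and `model` as in the pred_lines sketch above.
import cv2
import numpy as np

segments, squares, scores, inter_points = pred_squares(image, model, input_shape=[512, 512])
# `squares` is sorted best-first; each entry holds four (x, y) corners in original-image
# coordinates, ordered 0 -> 1 -> 2 -> 3 as in the docstrings above.
if len(squares) > 0:
    best = np.round(np.array(squares[0])).astype(np.int32).reshape(-1, 1, 2)
    cv2.polylines(image, [best], isClosed=True, color=(0, 0, 255), thickness=2)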
annotator/openpose/LICENSE ADDED
@@ -0,0 +1,108 @@
1
+ OPENPOSE: MULTIPERSON KEYPOINT DETECTION
2
+ SOFTWARE LICENSE AGREEMENT
3
+ ACADEMIC OR NON-PROFIT ORGANIZATION NONCOMMERCIAL RESEARCH USE ONLY
4
+
5
+ BY USING OR DOWNLOADING THE SOFTWARE, YOU ARE AGREEING TO THE TERMS OF THIS LICENSE AGREEMENT. IF YOU DO NOT AGREE WITH THESE TERMS, YOU MAY NOT USE OR DOWNLOAD THE SOFTWARE.
6
+
7
+ This is a license agreement ("Agreement") between your academic institution or non-profit organization or self (called "Licensee" or "You" in this Agreement) and Carnegie Mellon University (called "Licensor" in this Agreement). All rights not specifically granted to you in this Agreement are reserved for Licensor.
8
+
9
+ RESERVATION OF OWNERSHIP AND GRANT OF LICENSE:
10
+ Licensor retains exclusive ownership of any copy of the Software (as defined below) licensed under this Agreement and hereby grants to Licensee a personal, non-exclusive,
11
+ non-transferable license to use the Software for noncommercial research purposes, without the right to sublicense, pursuant to the terms and conditions of this Agreement. As used in this Agreement, the term "Software" means (i) the actual copy of all or any portion of code for program routines made accessible to Licensee by Licensor pursuant to this Agreement, inclusive of backups, updates, and/or merged copies permitted hereunder or subsequently supplied by Licensor, including all or any file structures, programming instructions, user interfaces and screen formats and sequences as well as any and all documentation and instructions related to it, and (ii) all or any derivatives and/or modifications created or made by You to any of the items specified in (i).
12
+
13
+ CONFIDENTIALITY: Licensee acknowledges that the Software is proprietary to Licensor, and as such, Licensee agrees to receive all such materials in confidence and use the Software only in accordance with the terms of this Agreement. Licensee agrees to use reasonable effort to protect the Software from unauthorized use, reproduction, distribution, or publication.
14
+
15
+ COPYRIGHT: The Software is owned by Licensor and is protected by United
16
+ States copyright laws and applicable international treaties and/or conventions.
17
+
18
+ PERMITTED USES: The Software may be used for your own noncommercial internal research purposes. You understand and agree that Licensor is not obligated to implement any suggestions and/or feedback you might provide regarding the Software, but to the extent Licensor does so, you are not entitled to any compensation related thereto.
19
+
20
+ DERIVATIVES: You may create derivatives of or make modifications to the Software, however, You agree that all and any such derivatives and modifications will be owned by Licensor and become a part of the Software licensed to You under this Agreement. You may only use such derivatives and modifications for your own noncommercial internal research purposes, and you may not otherwise use, distribute or copy such derivatives and modifications in violation of this Agreement.
21
+
22
+ BACKUPS: If Licensee is an organization, it may make that number of copies of the Software necessary for internal noncommercial use at a single site within its organization provided that all information appearing in or on the original labels, including the copyright and trademark notices are copied onto the labels of the copies.
23
+
24
+ USES NOT PERMITTED: You may not distribute, copy or use the Software except as explicitly permitted herein. Licensee has not been granted any trademark license as part of this Agreement and may not use the name or mark “OpenPose", "Carnegie Mellon" or any renditions thereof without the prior written permission of Licensor.
25
+
26
+ You may not sell, rent, lease, sublicense, lend, time-share or transfer, in whole or in part, or provide third parties access to prior or present versions (or any parts thereof) of the Software.
27
+
28
+ ASSIGNMENT: You may not assign this Agreement or your rights hereunder without the prior written consent of Licensor. Any attempted assignment without such consent shall be null and void.
29
+
30
+ TERM: The term of the license granted by this Agreement is from Licensee's acceptance of this Agreement by downloading the Software or by using the Software until terminated as provided below.
31
+
32
+ The Agreement automatically terminates without notice if you fail to comply with any provision of this Agreement. Licensee may terminate this Agreement by ceasing using the Software. Upon any termination of this Agreement, Licensee will delete any and all copies of the Software. You agree that all provisions which operate to protect the proprietary rights of Licensor shall remain in force should breach occur and that the obligation of confidentiality described in this Agreement is binding in perpetuity and, as such, survives the term of the Agreement.
33
+
34
+ FEE: Provided Licensee abides completely by the terms and conditions of this Agreement, there is no fee due to Licensor for Licensee's use of the Software in accordance with this Agreement.
35
+
36
+ DISCLAIMER OF WARRANTIES: THE SOFTWARE IS PROVIDED "AS-IS" WITHOUT WARRANTY OF ANY KIND INCLUDING ANY WARRANTIES OF PERFORMANCE OR MERCHANTABILITY OR FITNESS FOR A PARTICULAR USE OR PURPOSE OR OF NON-INFRINGEMENT. LICENSEE BEARS ALL RISK RELATING TO QUALITY AND PERFORMANCE OF THE SOFTWARE AND RELATED MATERIALS.
37
+
38
+ SUPPORT AND MAINTENANCE: No Software support or training by the Licensor is provided as part of this Agreement.
39
+
40
+ EXCLUSIVE REMEDY AND LIMITATION OF LIABILITY: To the maximum extent permitted under applicable law, Licensor shall not be liable for direct, indirect, special, incidental, or consequential damages or lost profits related to Licensee's use of and/or inability to use the Software, even if Licensor is advised of the possibility of such damage.
41
+
42
+ EXPORT REGULATION: Licensee agrees to comply with any and all applicable
43
+ U.S. export control laws, regulations, and/or other laws related to embargoes and sanction programs administered by the Office of Foreign Assets Control.
44
+
45
+ SEVERABILITY: If any provision(s) of this Agreement shall be held to be invalid, illegal, or unenforceable by a court or other tribunal of competent jurisdiction, the validity, legality and enforceability of the remaining provisions shall not in any way be affected or impaired thereby.
46
+
47
+ NO IMPLIED WAIVERS: No failure or delay by Licensor in enforcing any right or remedy under this Agreement shall be construed as a waiver of any future or other exercise of such right or remedy by Licensor.
48
+
49
+ GOVERNING LAW: This Agreement shall be construed and enforced in accordance with the laws of the Commonwealth of Pennsylvania without reference to conflict of laws principles. You consent to the personal jurisdiction of the courts of this County and waive their rights to venue outside of Allegheny County, Pennsylvania.
50
+
51
+ ENTIRE AGREEMENT AND AMENDMENTS: This Agreement constitutes the sole and entire agreement between Licensee and Licensor as to the matter set forth herein and supersedes any previous agreements, understandings, and arrangements between the parties relating hereto.
52
+
53
+
54
+
55
+ ************************************************************************
56
+
57
+ THIRD-PARTY SOFTWARE NOTICES AND INFORMATION
58
+
59
+ This project incorporates material from the project(s) listed below (collectively, "Third Party Code"). This Third Party Code is licensed to you under their original license terms set forth below. We reserves all other rights not expressly granted, whether by implication, estoppel or otherwise.
60
+
61
+ 1. Caffe, version 1.0.0, (https://github.com/BVLC/caffe/)
62
+
63
+ COPYRIGHT
64
+
65
+ All contributions by the University of California:
66
+ Copyright (c) 2014-2017 The Regents of the University of California (Regents)
67
+ All rights reserved.
68
+
69
+ All other contributions:
70
+ Copyright (c) 2014-2017, the respective contributors
71
+ All rights reserved.
72
+
73
+ Caffe uses a shared copyright model: each contributor holds copyright over
74
+ their contributions to Caffe. The project versioning records all such
75
+ contribution and copyright details. If a contributor wants to further mark
76
+ their specific copyright on a particular contribution, they should indicate
77
+ their copyright solely in the commit message of the change when it is
78
+ committed.
79
+
80
+ LICENSE
81
+
82
+ Redistribution and use in source and binary forms, with or without
83
+ modification, are permitted provided that the following conditions are met:
84
+
85
+ 1. Redistributions of source code must retain the above copyright notice, this
86
+ list of conditions and the following disclaimer.
87
+ 2. Redistributions in binary form must reproduce the above copyright notice,
88
+ this list of conditions and the following disclaimer in the documentation
89
+ and/or other materials provided with the distribution.
90
+
91
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
92
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
93
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
94
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
95
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
96
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
97
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
98
+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
99
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
100
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
101
+
102
+ CONTRIBUTION AGREEMENT
103
+
104
+ By contributing to the BVLC/caffe repository through pull-request, comment,
105
+ or otherwise, the contributor releases their content to the
106
+ license and copyright terms herein.
107
+
108
+ ************END OF THIRD-PARTY SOFTWARE NOTICES AND INFORMATION**********
annotator/openpose/__init__.py ADDED
@@ -0,0 +1,59 @@
1
+ '''
2
+ * Copyright (c) 2023 Salesforce, Inc.
3
+ * All rights reserved.
4
+ * SPDX-License-Identifier: Apache License 2.0
5
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
6
+ * By Can Qin
7
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
8
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
9
+ '''
10
+
11
+ # Openpose
12
+ # Original from CMU https://github.com/CMU-Perceptual-Computing-Lab/openpose
13
+ # 2nd Edited by https://github.com/Hzzone/pytorch-openpose
14
+ # 3rd Edited by ControlNet
15
+
16
+ import os
17
+ os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
18
+
19
+ import torch
20
+ import numpy as np
21
+ from . import util
22
+ from .body import Body
23
+ from .hand import Hand
24
+ from annotator.util import annotator_ckpts_path
25
+
26
+
27
+ body_model_path = "https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/body_pose_model.pth"
28
+ hand_model_path = "https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/hand_pose_model.pth"
29
+
30
+
31
+ class OpenposeDetector:
32
+ def __init__(self):
33
+ # body_modelpath = os.path.join(annotator_ckpts_path, "body_pose_model.pth")
34
+ # hand_modelpath = os.path.join(annotator_ckpts_path, "hand_pose_model.pth")
35
+
36
+ # if not os.path.exists(hand_modelpath):
37
+ # from basicsr.utils.download_util import load_file_from_url
38
+ # load_file_from_url(body_model_path, model_dir=annotator_ckpts_path)
39
+ # load_file_from_url(hand_model_path, model_dir=annotator_ckpts_path)
40
+
41
+ self.body_estimation = Body(body_modelpath)
42
+ self.hand_estimation = Hand(hand_modelpath)
43
+
44
+ def __call__(self, oriImg, hand=False):
45
+ oriImg = oriImg[:, :, ::-1].copy()
46
+ with torch.no_grad():
47
+ candidate, subset = self.body_estimation(oriImg)
48
+ canvas = np.zeros_like(oriImg)
49
+ canvas = util.draw_bodypose(canvas, candidate, subset)
50
+ if hand:
51
+ hands_list = util.handDetect(candidate, subset, oriImg)
52
+ all_hand_peaks = []
53
+ for x, y, w, is_left in hands_list:
54
+ peaks = self.hand_estimation(oriImg[y:y+w, x:x+w, :])
55
+ peaks[:, 0] = np.where(peaks[:, 0] == 0, peaks[:, 0], peaks[:, 0] + x)
56
+ peaks[:, 1] = np.where(peaks[:, 1] == 0, peaks[:, 1], peaks[:, 1] + y)
57
+ all_hand_peaks.append(peaks)
58
+ canvas = util.draw_handpose(canvas, all_hand_peaks)
59
+ return canvas, dict(candidate=candidate.tolist(), subset=subset.tolist())
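A hedged usage sketch for the detector above (not part of the committed file); the image path is an assumption, and the input is given in RGB because `__call__` flips the channels internally.

# Hedged usage sketch -- file paths are assumptions.
import cv2
from annotator.openpose import OpenposeDetector

detector = OpenposeDetector()                    # loads body/hand weights from annotator_ckpts_path
img_rgb = cv2.imread('person.jpg')[:, :, ::-1]   # cv2 loads BGR; the detector expects RGB and flips internally
canvas, pose = detector(img_rgb, hand=True)      # canvas: rendered skeleton, pose: raw candidate/subset lists
print(f"{len(pose['subset'])} person(s) detected")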
annotator/openpose/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (2.4 kB). View file
 
annotator/openpose/__pycache__/body.cpython-38.pyc ADDED
Binary file (7.69 kB). View file
 
annotator/openpose/__pycache__/hand.cpython-38.pyc ADDED
Binary file (3.43 kB). View file
 
annotator/openpose/__pycache__/model.cpython-38.pyc ADDED
Binary file (6.4 kB). View file
 
annotator/openpose/__pycache__/util.cpython-38.pyc ADDED
Binary file (5.24 kB). View file
 
annotator/openpose/body.py ADDED
@@ -0,0 +1,229 @@
1
+ '''
2
+ * Copyright (c) 2023 Salesforce, Inc.
3
+ * All rights reserved.
4
+ * SPDX-License-Identifier: Apache License 2.0
5
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
6
+ * By Can Qin
7
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
8
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
9
+ '''
10
+
11
+ import cv2
12
+ import numpy as np
13
+ import math
14
+ import time
15
+ from scipy.ndimage import gaussian_filter  # scipy.ndimage.filters is deprecated and removed in recent SciPy
16
+ import matplotlib.pyplot as plt
17
+ import matplotlib
18
+ import torch
19
+ from torchvision import transforms
20
+
21
+ from . import util
22
+ from .model import bodypose_model
23
+
24
+ class Body(object):
25
+ def __init__(self, model_path):
26
+ self.model = bodypose_model()
27
+ if torch.cuda.is_available():
28
+ self.model = self.model.cuda()
29
+ print('cuda')
30
+ model_dict = util.transfer(self.model, torch.load(model_path))
31
+ self.model.load_state_dict(model_dict)
32
+ self.model.eval()
33
+
34
+ def __call__(self, oriImg):
35
+ # scale_search = [0.5, 1.0, 1.5, 2.0]
36
+ scale_search = [0.5]
37
+ boxsize = 368
38
+ stride = 8
39
+ padValue = 128
40
+ thre1 = 0.1
41
+ thre2 = 0.05
42
+ multiplier = [x * boxsize / oriImg.shape[0] for x in scale_search]
43
+ heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 19))
44
+ paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38))
45
+
46
+ for m in range(len(multiplier)):
47
+ scale = multiplier[m]
48
+ imageToTest = cv2.resize(oriImg, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
49
+ imageToTest_padded, pad = util.padRightDownCorner(imageToTest, stride, padValue)
50
+ im = np.transpose(np.float32(imageToTest_padded[:, :, :, np.newaxis]), (3, 2, 0, 1)) / 256 - 0.5
51
+ im = np.ascontiguousarray(im)
52
+
53
+ data = torch.from_numpy(im).float()
54
+ if torch.cuda.is_available():
55
+ data = data.cuda()
56
+ # data = data.permute([2, 0, 1]).unsqueeze(0).float()
57
+ with torch.no_grad():
58
+ Mconv7_stage6_L1, Mconv7_stage6_L2 = self.model(data)
59
+ Mconv7_stage6_L1 = Mconv7_stage6_L1.cpu().numpy()
60
+ Mconv7_stage6_L2 = Mconv7_stage6_L2.cpu().numpy()
61
+
62
+ # extract outputs, resize, and remove padding
63
+ # heatmap = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[1]].data), (1, 2, 0)) # output 1 is heatmaps
64
+ heatmap = np.transpose(np.squeeze(Mconv7_stage6_L2), (1, 2, 0)) # output 1 is heatmaps
65
+ heatmap = cv2.resize(heatmap, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
66
+ heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
67
+ heatmap = cv2.resize(heatmap, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)
68
+
69
+ # paf = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[0]].data), (1, 2, 0)) # output 0 is PAFs
70
+ paf = np.transpose(np.squeeze(Mconv7_stage6_L1), (1, 2, 0)) # output 0 is PAFs
71
+ paf = cv2.resize(paf, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
72
+ paf = paf[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
73
+ paf = cv2.resize(paf, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)
74
+
75
+ heatmap_avg = heatmap_avg + heatmap / len(multiplier)  # accumulate the per-scale average (the old `+=` doubled previous sums)
+ paf_avg = paf_avg + paf / len(multiplier)
77
+
78
+ all_peaks = []
79
+ peak_counter = 0
80
+
81
+ for part in range(18):
82
+ map_ori = heatmap_avg[:, :, part]
83
+ one_heatmap = gaussian_filter(map_ori, sigma=3)
84
+
85
+ map_left = np.zeros(one_heatmap.shape)
86
+ map_left[1:, :] = one_heatmap[:-1, :]
87
+ map_right = np.zeros(one_heatmap.shape)
88
+ map_right[:-1, :] = one_heatmap[1:, :]
89
+ map_up = np.zeros(one_heatmap.shape)
90
+ map_up[:, 1:] = one_heatmap[:, :-1]
91
+ map_down = np.zeros(one_heatmap.shape)
92
+ map_down[:, :-1] = one_heatmap[:, 1:]
93
+
94
+ peaks_binary = np.logical_and.reduce(
95
+ (one_heatmap >= map_left, one_heatmap >= map_right, one_heatmap >= map_up, one_heatmap >= map_down, one_heatmap > thre1))
96
+ peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0])) # note reverse
97
+ peaks_with_score = [x + (map_ori[x[1], x[0]],) for x in peaks]
98
+ peak_id = range(peak_counter, peak_counter + len(peaks))
99
+ peaks_with_score_and_id = [peaks_with_score[i] + (peak_id[i],) for i in range(len(peak_id))]
100
+
101
+ all_peaks.append(peaks_with_score_and_id)
102
+ peak_counter += len(peaks)
103
+
104
+ # find connection in the specified sequence, center 29 is in the position 15
105
+ limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], \
106
+ [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], \
107
+ [1, 16], [16, 18], [3, 17], [6, 18]]
108
+ # the middle joints heatmap correpondence
109
+ mapIdx = [[31, 32], [39, 40], [33, 34], [35, 36], [41, 42], [43, 44], [19, 20], [21, 22], \
110
+ [23, 24], [25, 26], [27, 28], [29, 30], [47, 48], [49, 50], [53, 54], [51, 52], \
111
+ [55, 56], [37, 38], [45, 46]]
112
+
113
+ connection_all = []
114
+ special_k = []
115
+ mid_num = 10
116
+
117
+ for k in range(len(mapIdx)):
118
+ score_mid = paf_avg[:, :, [x - 19 for x in mapIdx[k]]]
119
+ candA = all_peaks[limbSeq[k][0] - 1]
120
+ candB = all_peaks[limbSeq[k][1] - 1]
121
+ nA = len(candA)
122
+ nB = len(candB)
123
+ indexA, indexB = limbSeq[k]
124
+ if (nA != 0 and nB != 0):
125
+ connection_candidate = []
126
+ for i in range(nA):
127
+ for j in range(nB):
128
+ vec = np.subtract(candB[j][:2], candA[i][:2])
129
+ norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1])
130
+ norm = max(0.001, norm)
131
+ vec = np.divide(vec, norm)
132
+
133
+ startend = list(zip(np.linspace(candA[i][0], candB[j][0], num=mid_num), \
134
+ np.linspace(candA[i][1], candB[j][1], num=mid_num)))
135
+
136
+ vec_x = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 0] \
137
+ for I in range(len(startend))])
138
+ vec_y = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 1] \
139
+ for I in range(len(startend))])
140
+
141
+ score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(vec_y, vec[1])
142
+ score_with_dist_prior = sum(score_midpts) / len(score_midpts) + min(
143
+ 0.5 * oriImg.shape[0] / norm - 1, 0)
144
+ criterion1 = len(np.nonzero(score_midpts > thre2)[0]) > 0.8 * len(score_midpts)
145
+ criterion2 = score_with_dist_prior > 0
146
+ if criterion1 and criterion2:
147
+ connection_candidate.append(
148
+ [i, j, score_with_dist_prior, score_with_dist_prior + candA[i][2] + candB[j][2]])
149
+
150
+ connection_candidate = sorted(connection_candidate, key=lambda x: x[2], reverse=True)
151
+ connection = np.zeros((0, 5))
152
+ for c in range(len(connection_candidate)):
153
+ i, j, s = connection_candidate[c][0:3]
154
+ if (i not in connection[:, 3] and j not in connection[:, 4]):
155
+ connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j]])
156
+ if (len(connection) >= min(nA, nB)):
157
+ break
158
+
159
+ connection_all.append(connection)
160
+ else:
161
+ special_k.append(k)
162
+ connection_all.append([])
163
+
164
+ # last number in each row is the total parts number of that person
165
+ # the second last number in each row is the score of the overall configuration
166
+ subset = -1 * np.ones((0, 20))
167
+ candidate = np.array([item for sublist in all_peaks for item in sublist])
168
+
169
+ for k in range(len(mapIdx)):
170
+ if k not in special_k:
171
+ partAs = connection_all[k][:, 0]
172
+ partBs = connection_all[k][:, 1]
173
+ indexA, indexB = np.array(limbSeq[k]) - 1
174
+
175
+ for i in range(len(connection_all[k])): # = 1:size(temp,1)
176
+ found = 0
177
+ subset_idx = [-1, -1]
178
+ for j in range(len(subset)): # 1:size(subset,1):
179
+ if subset[j][indexA] == partAs[i] or subset[j][indexB] == partBs[i]:
180
+ subset_idx[found] = j
181
+ found += 1
182
+
183
+ if found == 1:
184
+ j = subset_idx[0]
185
+ if subset[j][indexB] != partBs[i]:
186
+ subset[j][indexB] = partBs[i]
187
+ subset[j][-1] += 1
188
+ subset[j][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
189
+ elif found == 2: # if found 2 and disjoint, merge them
190
+ j1, j2 = subset_idx
191
+ membership = ((subset[j1] >= 0).astype(int) + (subset[j2] >= 0).astype(int))[:-2]
192
+ if len(np.nonzero(membership == 2)[0]) == 0: # merge
193
+ subset[j1][:-2] += (subset[j2][:-2] + 1)
194
+ subset[j1][-2:] += subset[j2][-2:]
195
+ subset[j1][-2] += connection_all[k][i][2]
196
+ subset = np.delete(subset, j2, 0)
197
+ else: # as like found == 1
198
+ subset[j1][indexB] = partBs[i]
199
+ subset[j1][-1] += 1
200
+ subset[j1][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
201
+
202
+ # if find no partA in the subset, create a new subset
203
+ elif not found and k < 17:
204
+ row = -1 * np.ones(20)
205
+ row[indexA] = partAs[i]
206
+ row[indexB] = partBs[i]
207
+ row[-1] = 2
208
+ row[-2] = sum(candidate[connection_all[k][i, :2].astype(int), 2]) + connection_all[k][i][2]
209
+ subset = np.vstack([subset, row])
210
+ # delete some rows of subset which has few parts occur
211
+ deleteIdx = []
212
+ for i in range(len(subset)):
213
+ if subset[i][-1] < 4 or subset[i][-2] / subset[i][-1] < 0.4:
214
+ deleteIdx.append(i)
215
+ subset = np.delete(subset, deleteIdx, axis=0)
216
+
217
+ # subset: n*20 array, 0-17 is the index in candidate, 18 is the total score, 19 is the total parts
218
+ # candidate: x, y, score, id
219
+ return candidate, subset
220
+
221
+ if __name__ == "__main__":
222
+ body_estimation = Body('../model/body_pose_model.pth')
223
+
224
+ test_image = '../images/ski.jpg'
225
+ oriImg = cv2.imread(test_image) # B,G,R order
226
+ candidate, subset = body_estimation(oriImg)
227
+ canvas = util.draw_bodypose(oriImg, candidate, subset)
228
+ plt.imshow(canvas[:, :, [2, 1, 0]])
229
+ plt.show()
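The `(candidate, subset)` format documented in the comments above is easy to misread, so here is a hedged helper sketch (not part of the committed file) that unpacks it into per-person keypoint arrays.

# Hedged sketch: convert (candidate, subset) into per-person (18, 3) arrays of (x, y, score).
import numpy as np

def subset_to_keypoints(candidate, subset):
    people = []
    for person in subset:
        kps = np.full((18, 3), np.nan, dtype=np.float32)   # NaN marks joints that were not found
        for joint in range(18):
            idx = int(person[joint])
            if idx >= 0:                                    # -1 means the joint is missing
                kps[joint] = candidate[idx][:3]             # candidate rows are (x, y, score, id)
        people.append(kps)
    return people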
annotator/openpose/hand.py ADDED
@@ -0,0 +1,96 @@
1
+ '''
2
+ * Copyright (c) 2023 Salesforce, Inc.
3
+ * All rights reserved.
4
+ * SPDX-License-Identifier: Apache License 2.0
5
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
6
+ * By Can Qin
7
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
8
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
9
+ '''
10
+
11
+ import cv2
12
+ import json
13
+ import numpy as np
14
+ import math
15
+ import time
16
+ from scipy.ndimage import gaussian_filter  # scipy.ndimage.filters is deprecated and removed in recent SciPy
17
+ import matplotlib.pyplot as plt
18
+ import matplotlib
19
+ import torch
20
+ from skimage.measure import label
21
+
22
+ from .model import handpose_model
23
+ from . import util
24
+
25
+ class Hand(object):
26
+ def __init__(self, model_path):
27
+ self.model = handpose_model()
28
+ if torch.cuda.is_available():
29
+ self.model = self.model.cuda()
30
+ print('cuda')
31
+ model_dict = util.transfer(self.model, torch.load(model_path))
32
+ self.model.load_state_dict(model_dict)
33
+ self.model.eval()
34
+
35
+ def __call__(self, oriImg):
36
+ scale_search = [0.5, 1.0, 1.5, 2.0]
37
+ # scale_search = [0.5]
38
+ boxsize = 368
39
+ stride = 8
40
+ padValue = 128
41
+ thre = 0.05
42
+ multiplier = [x * boxsize / oriImg.shape[0] for x in scale_search]
43
+ heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 22))
44
+ # paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38))
45
+
46
+ for m in range(len(multiplier)):
47
+ scale = multiplier[m]
48
+ imageToTest = cv2.resize(oriImg, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
49
+ imageToTest_padded, pad = util.padRightDownCorner(imageToTest, stride, padValue)
50
+ im = np.transpose(np.float32(imageToTest_padded[:, :, :, np.newaxis]), (3, 2, 0, 1)) / 256 - 0.5
51
+ im = np.ascontiguousarray(im)
52
+
53
+ data = torch.from_numpy(im).float()
54
+ if torch.cuda.is_available():
55
+ data = data.cuda()
56
+ # data = data.permute([2, 0, 1]).unsqueeze(0).float()
57
+ with torch.no_grad():
58
+ output = self.model(data).cpu().numpy()
59
+ # output = self.model(data).numpy()
60
+
61
+ # extract outputs, resize, and remove padding
62
+ heatmap = np.transpose(np.squeeze(output), (1, 2, 0)) # output 1 is heatmaps
63
+ heatmap = cv2.resize(heatmap, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
64
+ heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
65
+ heatmap = cv2.resize(heatmap, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)
66
+
67
+ heatmap_avg += heatmap / len(multiplier)
68
+
69
+ all_peaks = []
70
+ for part in range(21):
71
+ map_ori = heatmap_avg[:, :, part]
72
+ one_heatmap = gaussian_filter(map_ori, sigma=3)
73
+ binary = np.ascontiguousarray(one_heatmap > thre, dtype=np.uint8)
74
+ # all responses are below the threshold
75
+ if np.sum(binary) == 0:
76
+ all_peaks.append([0, 0])
77
+ continue
78
+ label_img, label_numbers = label(binary, return_num=True, connectivity=binary.ndim)
79
+ max_index = np.argmax([np.sum(map_ori[label_img == i]) for i in range(1, label_numbers + 1)]) + 1
80
+ label_img[label_img != max_index] = 0
81
+ map_ori[label_img == 0] = 0
82
+
83
+ y, x = util.npmax(map_ori)
84
+ all_peaks.append([x, y])
85
+ return np.array(all_peaks)
86
+
87
+ if __name__ == "__main__":
88
+ hand_estimation = Hand('../model/hand_pose_model.pth')
89
+
90
+ # test_image = '../images/hand.jpg'
91
+ test_image = '../images/hand.jpg'
92
+ oriImg = cv2.imread(test_image) # B,G,R order
93
+ peaks = hand_estimation(oriImg)
94
+ canvas = util.draw_handpose(oriImg, peaks, True)
95
+ cv2.imshow('', canvas)
96
+ cv2.waitKey(0)
annotator/openpose/model.py ADDED
@@ -0,0 +1,229 @@
1
+ '''
2
+ * Copyright (c) 2023 Salesforce, Inc.
3
+ * All rights reserved.
4
+ * SPDX-License-Identifier: Apache License 2.0
5
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
6
+ * By Can Qin
7
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
8
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
9
+ '''
10
+
11
+ import torch
+ import torch.nn as nn
+ from collections import OrderedDict
16
+
17
+ def make_layers(block, no_relu_layers):
18
+ layers = []
19
+ for layer_name, v in block.items():
20
+ if 'pool' in layer_name:
21
+ layer = nn.MaxPool2d(kernel_size=v[0], stride=v[1],
22
+ padding=v[2])
23
+ layers.append((layer_name, layer))
24
+ else:
25
+ conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1],
26
+ kernel_size=v[2], stride=v[3],
27
+ padding=v[4])
28
+ layers.append((layer_name, conv2d))
29
+ if layer_name not in no_relu_layers:
30
+ layers.append(('relu_'+layer_name, nn.ReLU(inplace=True)))
31
+
32
+ return nn.Sequential(OrderedDict(layers))
33
+
34
+ class bodypose_model(nn.Module):
35
+ def __init__(self):
36
+ super(bodypose_model, self).__init__()
37
+
38
+ # these layers have no relu layer
39
+ no_relu_layers = ['conv5_5_CPM_L1', 'conv5_5_CPM_L2', 'Mconv7_stage2_L1',\
+ 'Mconv7_stage2_L2', 'Mconv7_stage3_L1', 'Mconv7_stage3_L2',\
+ 'Mconv7_stage4_L1', 'Mconv7_stage4_L2', 'Mconv7_stage5_L1',\
+ 'Mconv7_stage5_L2', 'Mconv7_stage6_L1', 'Mconv7_stage6_L2']  # final L1/L2 prediction layers of every stage carry no ReLU (the last entry was a duplicated _L1)
43
+ blocks = {}
44
+ block0 = OrderedDict([
45
+ ('conv1_1', [3, 64, 3, 1, 1]),
46
+ ('conv1_2', [64, 64, 3, 1, 1]),
47
+ ('pool1_stage1', [2, 2, 0]),
48
+ ('conv2_1', [64, 128, 3, 1, 1]),
49
+ ('conv2_2', [128, 128, 3, 1, 1]),
50
+ ('pool2_stage1', [2, 2, 0]),
51
+ ('conv3_1', [128, 256, 3, 1, 1]),
52
+ ('conv3_2', [256, 256, 3, 1, 1]),
53
+ ('conv3_3', [256, 256, 3, 1, 1]),
54
+ ('conv3_4', [256, 256, 3, 1, 1]),
55
+ ('pool3_stage1', [2, 2, 0]),
56
+ ('conv4_1', [256, 512, 3, 1, 1]),
57
+ ('conv4_2', [512, 512, 3, 1, 1]),
58
+ ('conv4_3_CPM', [512, 256, 3, 1, 1]),
59
+ ('conv4_4_CPM', [256, 128, 3, 1, 1])
60
+ ])
61
+
62
+
63
+ # Stage 1
64
+ block1_1 = OrderedDict([
65
+ ('conv5_1_CPM_L1', [128, 128, 3, 1, 1]),
66
+ ('conv5_2_CPM_L1', [128, 128, 3, 1, 1]),
67
+ ('conv5_3_CPM_L1', [128, 128, 3, 1, 1]),
68
+ ('conv5_4_CPM_L1', [128, 512, 1, 1, 0]),
69
+ ('conv5_5_CPM_L1', [512, 38, 1, 1, 0])
70
+ ])
71
+
72
+ block1_2 = OrderedDict([
73
+ ('conv5_1_CPM_L2', [128, 128, 3, 1, 1]),
74
+ ('conv5_2_CPM_L2', [128, 128, 3, 1, 1]),
75
+ ('conv5_3_CPM_L2', [128, 128, 3, 1, 1]),
76
+ ('conv5_4_CPM_L2', [128, 512, 1, 1, 0]),
77
+ ('conv5_5_CPM_L2', [512, 19, 1, 1, 0])
78
+ ])
79
+ blocks['block1_1'] = block1_1
80
+ blocks['block1_2'] = block1_2
81
+
82
+ self.model0 = make_layers(block0, no_relu_layers)
83
+
84
+ # Stages 2 - 6
85
+ for i in range(2, 7):
86
+ blocks['block%d_1' % i] = OrderedDict([
87
+ ('Mconv1_stage%d_L1' % i, [185, 128, 7, 1, 3]),
88
+ ('Mconv2_stage%d_L1' % i, [128, 128, 7, 1, 3]),
89
+ ('Mconv3_stage%d_L1' % i, [128, 128, 7, 1, 3]),
90
+ ('Mconv4_stage%d_L1' % i, [128, 128, 7, 1, 3]),
91
+ ('Mconv5_stage%d_L1' % i, [128, 128, 7, 1, 3]),
92
+ ('Mconv6_stage%d_L1' % i, [128, 128, 1, 1, 0]),
93
+ ('Mconv7_stage%d_L1' % i, [128, 38, 1, 1, 0])
94
+ ])
95
+
96
+ blocks['block%d_2' % i] = OrderedDict([
97
+ ('Mconv1_stage%d_L2' % i, [185, 128, 7, 1, 3]),
98
+ ('Mconv2_stage%d_L2' % i, [128, 128, 7, 1, 3]),
99
+ ('Mconv3_stage%d_L2' % i, [128, 128, 7, 1, 3]),
100
+ ('Mconv4_stage%d_L2' % i, [128, 128, 7, 1, 3]),
101
+ ('Mconv5_stage%d_L2' % i, [128, 128, 7, 1, 3]),
102
+ ('Mconv6_stage%d_L2' % i, [128, 128, 1, 1, 0]),
103
+ ('Mconv7_stage%d_L2' % i, [128, 19, 1, 1, 0])
104
+ ])
105
+
106
+ for k in blocks.keys():
107
+ blocks[k] = make_layers(blocks[k], no_relu_layers)
108
+
109
+ self.model1_1 = blocks['block1_1']
110
+ self.model2_1 = blocks['block2_1']
111
+ self.model3_1 = blocks['block3_1']
112
+ self.model4_1 = blocks['block4_1']
113
+ self.model5_1 = blocks['block5_1']
114
+ self.model6_1 = blocks['block6_1']
115
+
116
+ self.model1_2 = blocks['block1_2']
117
+ self.model2_2 = blocks['block2_2']
118
+ self.model3_2 = blocks['block3_2']
119
+ self.model4_2 = blocks['block4_2']
120
+ self.model5_2 = blocks['block5_2']
121
+ self.model6_2 = blocks['block6_2']
122
+
123
+
124
+ def forward(self, x):
125
+
126
+ out1 = self.model0(x)
127
+
128
+ out1_1 = self.model1_1(out1)
129
+ out1_2 = self.model1_2(out1)
130
+ out2 = torch.cat([out1_1, out1_2, out1], 1)
131
+
132
+ out2_1 = self.model2_1(out2)
133
+ out2_2 = self.model2_2(out2)
134
+ out3 = torch.cat([out2_1, out2_2, out1], 1)
135
+
136
+ out3_1 = self.model3_1(out3)
137
+ out3_2 = self.model3_2(out3)
138
+ out4 = torch.cat([out3_1, out3_2, out1], 1)
139
+
140
+ out4_1 = self.model4_1(out4)
141
+ out4_2 = self.model4_2(out4)
142
+ out5 = torch.cat([out4_1, out4_2, out1], 1)
143
+
144
+ out5_1 = self.model5_1(out5)
145
+ out5_2 = self.model5_2(out5)
146
+ out6 = torch.cat([out5_1, out5_2, out1], 1)
147
+
148
+ out6_1 = self.model6_1(out6)
149
+ out6_2 = self.model6_2(out6)
150
+
151
+ return out6_1, out6_2
152
+
153
+ class handpose_model(nn.Module):
154
+ def __init__(self):
155
+ super(handpose_model, self).__init__()
156
+
157
+ # these layers have no relu layer
158
+ no_relu_layers = ['conv6_2_CPM', 'Mconv7_stage2', 'Mconv7_stage3',\
159
+ 'Mconv7_stage4', 'Mconv7_stage5', 'Mconv7_stage6']
160
+ # stage 1
161
+ block1_0 = OrderedDict([
162
+ ('conv1_1', [3, 64, 3, 1, 1]),
163
+ ('conv1_2', [64, 64, 3, 1, 1]),
164
+ ('pool1_stage1', [2, 2, 0]),
165
+ ('conv2_1', [64, 128, 3, 1, 1]),
166
+ ('conv2_2', [128, 128, 3, 1, 1]),
167
+ ('pool2_stage1', [2, 2, 0]),
168
+ ('conv3_1', [128, 256, 3, 1, 1]),
169
+ ('conv3_2', [256, 256, 3, 1, 1]),
170
+ ('conv3_3', [256, 256, 3, 1, 1]),
171
+ ('conv3_4', [256, 256, 3, 1, 1]),
172
+ ('pool3_stage1', [2, 2, 0]),
173
+ ('conv4_1', [256, 512, 3, 1, 1]),
174
+ ('conv4_2', [512, 512, 3, 1, 1]),
175
+ ('conv4_3', [512, 512, 3, 1, 1]),
176
+ ('conv4_4', [512, 512, 3, 1, 1]),
177
+ ('conv5_1', [512, 512, 3, 1, 1]),
178
+ ('conv5_2', [512, 512, 3, 1, 1]),
179
+ ('conv5_3_CPM', [512, 128, 3, 1, 1])
180
+ ])
181
+
182
+ block1_1 = OrderedDict([
183
+ ('conv6_1_CPM', [128, 512, 1, 1, 0]),
184
+ ('conv6_2_CPM', [512, 22, 1, 1, 0])
185
+ ])
186
+
187
+ blocks = {}
188
+ blocks['block1_0'] = block1_0
189
+ blocks['block1_1'] = block1_1
190
+
191
+ # stage 2-6
192
+ for i in range(2, 7):
193
+ blocks['block%d' % i] = OrderedDict([
194
+ ('Mconv1_stage%d' % i, [150, 128, 7, 1, 3]),
195
+ ('Mconv2_stage%d' % i, [128, 128, 7, 1, 3]),
196
+ ('Mconv3_stage%d' % i, [128, 128, 7, 1, 3]),
197
+ ('Mconv4_stage%d' % i, [128, 128, 7, 1, 3]),
198
+ ('Mconv5_stage%d' % i, [128, 128, 7, 1, 3]),
199
+ ('Mconv6_stage%d' % i, [128, 128, 1, 1, 0]),
200
+ ('Mconv7_stage%d' % i, [128, 22, 1, 1, 0])
201
+ ])
202
+
203
+ for k in blocks.keys():
204
+ blocks[k] = make_layers(blocks[k], no_relu_layers)
205
+
206
+ self.model1_0 = blocks['block1_0']
207
+ self.model1_1 = blocks['block1_1']
208
+ self.model2 = blocks['block2']
209
+ self.model3 = blocks['block3']
210
+ self.model4 = blocks['block4']
211
+ self.model5 = blocks['block5']
212
+ self.model6 = blocks['block6']
213
+
214
+ def forward(self, x):
215
+ out1_0 = self.model1_0(x)
216
+ out1_1 = self.model1_1(out1_0)
217
+ concat_stage2 = torch.cat([out1_1, out1_0], 1)
218
+ out_stage2 = self.model2(concat_stage2)
219
+ concat_stage3 = torch.cat([out_stage2, out1_0], 1)
220
+ out_stage3 = self.model3(concat_stage3)
221
+ concat_stage4 = torch.cat([out_stage3, out1_0], 1)
222
+ out_stage4 = self.model4(concat_stage4)
223
+ concat_stage5 = torch.cat([out_stage4, out1_0], 1)
224
+ out_stage5 = self.model5(concat_stage5)
225
+ concat_stage6 = torch.cat([out_stage5, out1_0], 1)
226
+ out_stage6 = self.model6(concat_stage6)
227
+ return out_stage6
228
+
229
+
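As a quick sanity check on the two architectures above, the hedged sketch below (not part of the committed file) runs a dummy tensor through them; with three stride-2 pools the spatial size drops by 8, and the channel counts follow the final 1x1 convolutions (38 PAF / 19 heatmap channels for the body model, 22 for the hand model).

# Hedged shape-check sketch.
import torch

body = bodypose_model().eval()
hand = handpose_model().eval()
x = torch.zeros(1, 3, 368, 368)                 # the boxsize used elsewhere in this package
with torch.no_grad():
    pafs, heatmaps = body(x)                    # stage-6 L1 (PAFs) and L2 (part heatmaps)
    hand_maps = hand(x)
print(pafs.shape, heatmaps.shape, hand_maps.shape)
# expected: torch.Size([1, 38, 46, 46]) torch.Size([1, 19, 46, 46]) torch.Size([1, 22, 46, 46])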
annotator/openpose/util.py ADDED
@@ -0,0 +1,175 @@
1
+ '''
2
+ * Copyright (c) 2023 Salesforce, Inc.
3
+ * All rights reserved.
4
+ * SPDX-License-Identifier: Apache License 2.0
5
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
6
+ * By Can Qin
7
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
8
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
9
+ '''
10
+
11
+
12
+ import math
13
+ import numpy as np
14
+ import matplotlib
15
+ import cv2
16
+
17
+
18
+ def padRightDownCorner(img, stride, padValue):
19
+ h = img.shape[0]
20
+ w = img.shape[1]
21
+
22
+ pad = 4 * [None]
23
+ pad[0] = 0 # up
24
+ pad[1] = 0 # left
25
+ pad[2] = 0 if (h % stride == 0) else stride - (h % stride) # down
26
+ pad[3] = 0 if (w % stride == 0) else stride - (w % stride) # right
27
+
28
+ img_padded = img
29
+ pad_up = np.tile(img_padded[0:1, :, :]*0 + padValue, (pad[0], 1, 1))
30
+ img_padded = np.concatenate((pad_up, img_padded), axis=0)
31
+ pad_left = np.tile(img_padded[:, 0:1, :]*0 + padValue, (1, pad[1], 1))
32
+ img_padded = np.concatenate((pad_left, img_padded), axis=1)
33
+ pad_down = np.tile(img_padded[-2:-1, :, :]*0 + padValue, (pad[2], 1, 1))
34
+ img_padded = np.concatenate((img_padded, pad_down), axis=0)
35
+ pad_right = np.tile(img_padded[:, -2:-1, :]*0 + padValue, (1, pad[3], 1))
36
+ img_padded = np.concatenate((img_padded, pad_right), axis=1)
37
+
38
+ return img_padded, pad
39
+
40
+ # transfer caffe model to pytorch which will match the layer name
41
+ def transfer(model, model_weights):
42
+ transfered_model_weights = {}
43
+ for weights_name in model.state_dict().keys():
44
+ transfered_model_weights[weights_name] = model_weights['.'.join(weights_name.split('.')[1:])]
45
+ return transfered_model_weights
46
+
+ # draw the body keypoints and limbs
+ def draw_bodypose(canvas, candidate, subset):
+     stickwidth = 4
+     limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], \
+                [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], \
+                [1, 16], [16, 18], [3, 17], [6, 18]]
+
+     colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \
+               [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \
+               [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
+     # keypoints: one colored dot per detected joint
+     for i in range(18):
+         for n in range(len(subset)):
+             index = int(subset[n][i])
+             if index == -1:
+                 continue
+             x, y = candidate[index][0:2]
+             cv2.circle(canvas, (int(x), int(y)), 4, colors[i], thickness=-1)
+     # limbs: a filled ellipse between each connected keypoint pair, alpha-blended onto the canvas
+     for i in range(17):
+         for n in range(len(subset)):
+             index = subset[n][np.array(limbSeq[i]) - 1]
+             if -1 in index:
+                 continue
+             cur_canvas = canvas.copy()
+             Y = candidate[index.astype(int), 0]
+             X = candidate[index.astype(int), 1]
+             mX = np.mean(X)
+             mY = np.mean(Y)
+             length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5
+             angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
+             polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1)
+             cv2.fillConvexPoly(cur_canvas, polygon, colors[i])
+             canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0)
+     # plt.imsave("preview.jpg", canvas[:, :, [2, 1, 0]])
+     # plt.imshow(canvas[:, :, [2, 1, 0]])
+     return canvas
+
+
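A hedged toy example of calling the renderer above. The [x, y, score, id] row layout of `candidate` and the 20-column `subset` rows are the conventions used by the body estimator elsewhere in this package, stated here as assumptions rather than read from this file:

import numpy as np

# candidate: one row per detected keypoint, [x, y, score, id]
# subset: one row per person, 18 indices into candidate (-1 = missing) followed by score fields
canvas = np.zeros((256, 256, 3), dtype=np.uint8)
candidate = np.array([[100.0, 80.0, 0.9, 0],    # keypoint 1 (e.g. nose)
                      [110.0, 120.0, 0.8, 1]])  # keypoint 2 (e.g. neck)
subset = -1 * np.ones((1, 20))
subset[0, 0] = 0   # person 0 uses candidate row 0 for keypoint 1
subset[0, 1] = 1   # and candidate row 1 for keypoint 2
canvas = draw_bodypose(canvas, candidate, subset)

In the real pipeline, `candidate` and `subset` would come from the body estimator rather than being hand-built like this.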
+ # draw the hand keypoints and finger edges (note kept from the original code: images drawn with plain OpenCV are not great)
+ def draw_handpose(canvas, all_hand_peaks, show_number=False):
+     edges = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], \
+              [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]]
+
+     for peaks in all_hand_peaks:
+         # skip an edge if either endpoint has a zero coordinate (the "missing keypoint" marker)
+         for ie, e in enumerate(edges):
+             if np.sum(np.all(peaks[e], axis=1)==0)==0:
+                 x1, y1 = peaks[e[0]]
+                 x2, y2 = peaks[e[1]]
+                 cv2.line(canvas, (x1, y1), (x2, y2), matplotlib.colors.hsv_to_rgb([ie/float(len(edges)), 1.0, 1.0])*255, thickness=2)
+
+         for i, keypoint in enumerate(peaks):
+             x, y = keypoint
+             cv2.circle(canvas, (x, y), 4, (0, 0, 255), thickness=-1)
+             if show_number:
+                 cv2.putText(canvas, str(i), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 0), lineType=cv2.LINE_AA)
+     return canvas
+
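A hedged sketch of drawing one hand; `peaks` is assumed to be a (21, 2) integer array of pixel coordinates, one row per hand keypoint, which is the shape the edge list above expects:

import numpy as np

canvas = np.zeros((256, 256, 3), dtype=np.uint8)
rng = np.random.default_rng(0)
peaks = rng.integers(low=10, high=246, size=(21, 2))   # toy keypoints; a zero coordinate would mean "missing"
canvas = draw_handpose(canvas, [peaks], show_number=True)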
+ # detect hands from the body pose keypoints
+ # please refer to https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/src/openpose/hand/handDetector.cpp
+ def handDetect(candidate, subset, oriImg):
+     # right hand: wrist 4, elbow 3, shoulder 2
+     # left hand: wrist 7, elbow 6, shoulder 5
+     ratioWristElbow = 0.33
+     detect_result = []
+     image_height, image_width = oriImg.shape[0:2]
+     for person in subset.astype(int):
+         # an arm is usable only if its wrist, elbow and shoulder were all detected (no -1 index)
+         has_left = np.sum(person[[5, 6, 7]] == -1) == 0
+         has_right = np.sum(person[[2, 3, 4]] == -1) == 0
+         if not (has_left or has_right):
+             continue
+         hands = []
+         # left hand
+         if has_left:
+             left_shoulder_index, left_elbow_index, left_wrist_index = person[[5, 6, 7]]
+             x1, y1 = candidate[left_shoulder_index][:2]
+             x2, y2 = candidate[left_elbow_index][:2]
+             x3, y3 = candidate[left_wrist_index][:2]
+             hands.append([x1, y1, x2, y2, x3, y3, True])
+         # right hand
+         if has_right:
+             right_shoulder_index, right_elbow_index, right_wrist_index = person[[2, 3, 4]]
+             x1, y1 = candidate[right_shoulder_index][:2]
+             x2, y2 = candidate[right_elbow_index][:2]
+             x3, y3 = candidate[right_wrist_index][:2]
+             hands.append([x1, y1, x2, y2, x3, y3, False])
+
+         for x1, y1, x2, y2, x3, y3, is_left in hands:
+             # pos_hand = pos_wrist + ratio * (pos_wrist - pos_elbow) = (1 + ratio) * pos_wrist - ratio * pos_elbow
+             # handRectangle.x = posePtr[wrist*3] + ratioWristElbow * (posePtr[wrist*3] - posePtr[elbow*3]);
+             # handRectangle.y = posePtr[wrist*3+1] + ratioWristElbow * (posePtr[wrist*3+1] - posePtr[elbow*3+1]);
+             # const auto distanceWristElbow = getDistance(poseKeypoints, person, wrist, elbow);
+             # const auto distanceElbowShoulder = getDistance(poseKeypoints, person, elbow, shoulder);
+             # handRectangle.width = 1.5f * fastMax(distanceWristElbow, 0.9f * distanceElbowShoulder);
+             x = x3 + ratioWristElbow * (x3 - x2)
+             y = y3 + ratioWristElbow * (y3 - y2)
+             distanceWristElbow = math.sqrt((x3 - x2) ** 2 + (y3 - y2) ** 2)
+             distanceElbowShoulder = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
+             width = 1.5 * max(distanceWristElbow, 0.9 * distanceElbowShoulder)
+             # x, y refer to the center --> offset to the top-left point
+             # handRectangle.x -= handRectangle.width / 2.f;
+             # handRectangle.y -= handRectangle.height / 2.f;
+             x -= width / 2
+             y -= width / 2  # width = height
+             # clip the box so it does not overflow the image
+             if x < 0: x = 0
+             if y < 0: y = 0
+             width1 = width
+             width2 = width
+             if x + width > image_width: width1 = image_width - x
+             if y + width > image_height: width2 = image_height - y
+             width = min(width1, width2)
+             # keep only hand boxes that are at least 20 pixels wide
+             if width >= 20:
+                 detect_result.append([int(x), int(y), int(width), is_left])
+
+     '''
+     return value: [[x, y, w, True if left hand else False]].
+     width = height since the network requires a square input.
+     x, y is the coordinate of the top-left corner of the box.
+     '''
+     return detect_result
+
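A hedged sketch of how these boxes are typically consumed: each [x, y, w] square is cropped from the original image and handed to the hand keypoint estimator; the estimator call is an assumption about code outside this file:

# candidate/subset come from the body estimator; oriImg is the original BGR image
hands_list = handDetect(candidate, subset, oriImg)
all_hand_peaks = []
for x, y, w, is_left in hands_list:
    hand_patch = oriImg[y:y + w, x:x + w, :]   # square crop, width == height
    # peaks = hand_estimation(hand_patch)      # assumed hand estimator from hand.py
    # peaks[:, 0] += x; peaks[:, 1] += y       # shift detected keypoints back to full-image coordinates
    # all_hand_peaks.append(peaks)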
+ # return the (row, column) index of the maximum value of a 2D array
+ def npmax(array):
+     arrayindex = array.argmax(1)
+     arrayvalue = array.max(1)
+     i = arrayvalue.argmax()
+     j = arrayindex[i]
+     return i, j
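For example, a quick sanity check of `npmax` on a toy array:

import numpy as np

a = np.array([[1, 5, 3],
              [2, 4, 9]])
i, j = npmax(a)
# i == 1, j == 2: the global maximum (9) sits at row 1, column 2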