Vincentqyw committed on
Commit
a80d6bb
1 Parent(s): 814990b

update: features and matchers

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. third_party/ALIKE/LICENSE +29 -0
  2. third_party/ALIKE/README.md +131 -0
  3. third_party/ALIKE/alike.py +143 -0
  4. third_party/ALIKE/alnet.py +164 -0
  5. third_party/ALIKE/assets/ALIKE_code.zip +3 -0
  6. third_party/ALIKE/assets/alike.png +3 -0
  7. third_party/ALIKE/assets/kitti.gif +3 -0
  8. third_party/ALIKE/assets/kitti/000100.png +3 -0
  9. third_party/ALIKE/assets/kitti/000101.png +3 -0
  10. third_party/ALIKE/assets/kitti/000102.png +3 -0
  11. third_party/ALIKE/assets/kitti/000103.png +3 -0
  12. third_party/ALIKE/assets/kitti/000104.png +3 -0
  13. third_party/ALIKE/assets/kitti/000105.png +3 -0
  14. third_party/ALIKE/assets/kitti/000106.png +3 -0
  15. third_party/ALIKE/assets/kitti/000107.png +3 -0
  16. third_party/ALIKE/assets/kitti/000108.png +3 -0
  17. third_party/ALIKE/assets/kitti/000109.png +3 -0
  18. third_party/ALIKE/assets/kitti/000110.png +3 -0
  19. third_party/ALIKE/assets/kitti/000111.png +3 -0
  20. third_party/ALIKE/assets/kitti/000112.png +3 -0
  21. third_party/ALIKE/assets/kitti/000113.png +3 -0
  22. third_party/ALIKE/assets/kitti/000114.png +3 -0
  23. third_party/ALIKE/assets/kitti/000115.png +3 -0
  24. third_party/ALIKE/assets/kitti/000116.png +3 -0
  25. third_party/ALIKE/assets/kitti/000117.png +3 -0
  26. third_party/ALIKE/assets/kitti/000118.png +3 -0
  27. third_party/ALIKE/assets/kitti/000119.png +3 -0
  28. third_party/ALIKE/assets/tum.gif +3 -0
  29. third_party/ALIKE/assets/tum/1311868169.163498.png +3 -0
  30. third_party/ALIKE/assets/tum/1311868169.263274.png +3 -0
  31. third_party/ALIKE/assets/tum/1311868169.363470.png +3 -0
  32. third_party/ALIKE/assets/tum/1311868169.463229.png +3 -0
  33. third_party/ALIKE/assets/tum/1311868169.563501.png +3 -0
  34. third_party/ALIKE/assets/tum/1311868169.663240.png +3 -0
  35. third_party/ALIKE/assets/tum/1311868169.763417.png +3 -0
  36. third_party/ALIKE/assets/tum/1311868169.863396.png +3 -0
  37. third_party/ALIKE/assets/tum/1311868169.963415.png +3 -0
  38. third_party/ALIKE/assets/tum/1311868170.063469.png +3 -0
  39. third_party/ALIKE/assets/tum/1311868170.163416.png +3 -0
  40. third_party/ALIKE/assets/tum/1311868170.263521.png +3 -0
  41. third_party/ALIKE/assets/tum/1311868170.363400.png +3 -0
  42. third_party/ALIKE/assets/tum/1311868170.463383.png +3 -0
  43. third_party/ALIKE/assets/tum/1311868170.563345.png +3 -0
  44. third_party/ALIKE/assets/tum/1311868170.663430.png +3 -0
  45. third_party/ALIKE/assets/tum/1311868170.763453.png +3 -0
  46. third_party/ALIKE/assets/tum/1311868170.863446.png +3 -0
  47. third_party/ALIKE/assets/tum/1311868170.963440.png +3 -0
  48. third_party/ALIKE/assets/tum/1311868171.063438.png +3 -0
  49. third_party/ALIKE/demo.py +167 -0
  50. third_party/ALIKE/hseq/cache/alike-l-ms.npy +3 -0
third_party/ALIKE/LICENSE ADDED
@@ -0,0 +1,29 @@
+ BSD 3-Clause License
+
+ Copyright (c) 2022, Zhao Xiaoming
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice, this
+    list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the copyright holder nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
third_party/ALIKE/README.md ADDED
@@ -0,0 +1,131 @@
+ # News
+
+ - [ALIKED](https://github.com/Shiaoming/ALIKED) has been released.
+ - The [ALIKE training code](https://github.com/Shiaoming/ALIKE/raw/main/assets/ALIKE_code.zip) has been released.
+
+ # ALIKE: Accurate and Lightweight Keypoint Detection and Descriptor Extraction
+
+ ALIKE applies a differentiable keypoint detection module to detect accurate sub-pixel keypoints. The network runs at 95 frames per second for 640x480 images on an NVIDIA Titan X (Pascal) GPU while achieving performance comparable to the state of the art, which benefits real-time applications on resource-limited platforms and devices. Technical details are described in [this paper](https://arxiv.org/pdf/2112.02906.pdf).
+
+ > ```
+ > Xiaoming Zhao, Xingming Wu, Jinyu Miao, Weihai Chen, Peter C. Y. Chen, Zhengguo Li, "ALIKE: Accurate and Lightweight Keypoint
+ > Detection and Descriptor Extraction," IEEE Transactions on Multimedia, 2022.
+ > ```
+
+ ![](./assets/alike.png)
+
+ If you use ALIKE in an academic work, please cite:
+
+ ```
+ @article{Zhao2023ALIKED,
+     title = {ALIKED: A Lighter Keypoint and Descriptor Extraction Network via Deformable Transformation},
+     url = {https://arxiv.org/pdf/2304.03608.pdf},
+     doi = {10.1109/TIM.2023.3271000},
+     journal = {IEEE Transactions on Instrumentation & Measurement},
+     author = {Zhao, Xiaoming and Wu, Xingming and Chen, Weihai and Chen, Peter C. Y. and Xu, Qingsong and Li, Zhengguo},
+     year = {2023},
+     volume = {72},
+     pages = {1-16},
+ }
+
+ @article{Zhao2022ALIKE,
+     title = {ALIKE: Accurate and Lightweight Keypoint Detection and Descriptor Extraction},
+     url = {http://arxiv.org/abs/2112.02906},
+     doi = {10.1109/TMM.2022.3155927},
+     journal = {IEEE Transactions on Multimedia},
+     author = {Zhao, Xiaoming and Wu, Xingming and Miao, Jinyu and Chen, Weihai and Chen, Peter C. Y. and Li, Zhengguo},
+     month = mar,
+     year = {2022},
+ }
+ ```
+
+ ## 1. Prerequisites
+
+ The required packages are listed in `requirements.txt`:
+
+ ```shell
+ pip install -r requirements.txt
+ ```
+
+ ## 2. Models
+
+ Off-the-shelf weights for the four ALIKE variants are provided in `models/`.
+
+ ## 3. Run demo
+
+ ```shell
+ $ python demo.py -h
+ usage: demo.py [-h] [--model {alike-t,alike-s,alike-n,alike-l}]
+                [--device DEVICE] [--top_k TOP_K] [--scores_th SCORES_TH]
+                [--n_limit N_LIMIT] [--no_display] [--no_sub_pixel]
+                input
+
+ ALike Demo.
+
+ positional arguments:
+   input                 Image directory or movie file or "camera0" (for
+                         webcam0).
+
+ optional arguments:
+   -h, --help            show this help message and exit
+   --model {alike-t,alike-s,alike-n,alike-l}
+                         The model configuration
+   --device DEVICE       Running device (default: cuda).
+   --top_k TOP_K         Detect top K keypoints. -1 for threshold based mode,
+                         >0 for top K mode. (default: -1)
+   --scores_th SCORES_TH
+                         Detector score threshold (default: 0.2).
+   --n_limit N_LIMIT     Maximum number of keypoints to be detected (default:
+                         5000).
+   --no_display          Do not display images to screen. Useful if running
+                         remotely (default: False).
+   --no_sub_pixel        Do not detect sub-pixel keypoints (default: False).
+ ```
+
+ ## 4. Examples
+
+ ### KITTI example
+ ```shell
+ python demo.py assets/kitti
+ ```
+ ![](./assets/kitti.gif)
+
+ ### TUM example
+ ```shell
+ python demo.py assets/tum
+ ```
+ ![](./assets/tum.gif)
+
+ ## 5. Efficiency and performance
+
+ | Models       | Parameters | GFLOPs (640x480) | MHA@3 on HPatches | mAA(10°) on [IMW2020-test](https://www.cs.ubc.ca/research/image-matching-challenge/2021/leaderboard) (Stereo) |
+ |:------------:|:----------:|:----------------:|:-----------------:|:---------------------------------------------------------------:|
+ | D2-Net(MS)   | 7653KB     | 889.40           | 38.33%            | 12.27% |
+ | LF-Net(MS)   | 2642KB     | 24.37            | 57.78%            | 23.44% |
+ | SuperPoint   | 1301KB     | 26.11            | 70.19%            | 28.97% |
+ | R2D2(MS)     | 484KB      | 464.55           | 71.48%            | 39.02% |
+ | ASLFeat(MS)  | 823KB      | 77.58            | 73.52%            | 33.65% |
+ | DISK         | 1092KB     | 98.97            | 70.56%            | 51.22% |
+ | ALike-N      | 318KB      | 7.909            | 75.74%            | 47.18% |
+ | ALike-L      | 653KB      | 19.685           | 76.85%            | 49.58% |
+
+ ### Evaluation on HPatches
+
+ - Download [hpatches-sequences-release](https://hpatches.github.io/) and put it into `hseq/hpatches-sequences-release`.
+ - Remove the unreliable sequences, as in D2-Net.
+ - Run the following command to evaluate the performance:
+ ```shell
+ python hseq/eval.py
+ ```
+
+ For more details, please refer to the [paper](https://arxiv.org/abs/2112.02906).
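Besides the `demo.py` command line shown in the README, the model added in this commit can be driven directly from Python. The sketch below is a minimal example based on `alike.py` and `demo.py` from this commit; it assumes the script sits next to `alike.py`, and that the `soft_detect` module and the weight files referenced in `configs` (under `models/`, not shown in this 50-file view) are present.

```python
# Minimal programmatic use of ALike (sketch, not part of the upstream README).
import cv2
from alike import ALike, configs

# 'alike-t' is the smallest variant; configs also provides alike-s/n/l.
model = ALike(**configs['alike-t'], device='cpu', top_k=-1, scores_th=0.2, n_limit=5000)

img_bgr = cv2.imread('assets/kitti/000100.png')      # OpenCV loads images as BGR
img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)   # ALike.forward expects an RGB HxWx3 array

pred = model(img_rgb, sub_pixel=True)                # dict with keypoints/descriptors/scores/time
print(pred['keypoints'].shape, pred['descriptors'].shape, pred['time'])
```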
third_party/ALIKE/alike.py ADDED
@@ -0,0 +1,143 @@
+ import logging
+ import os
+ import cv2
+ import torch
+ from copy import deepcopy
+ import torch.nn.functional as F
+ from torchvision.transforms import ToTensor
+ import math
+
+ from alnet import ALNet
+ from soft_detect import DKD
+ import time
+
+ configs = {
+     'alike-t': {'c1': 8, 'c2': 16, 'c3': 32, 'c4': 64, 'dim': 64, 'single_head': True, 'radius': 2,
+                 'model_path': os.path.join(os.path.split(__file__)[0], 'models', 'alike-t.pth')},
+     'alike-s': {'c1': 8, 'c2': 16, 'c3': 48, 'c4': 96, 'dim': 96, 'single_head': True, 'radius': 2,
+                 'model_path': os.path.join(os.path.split(__file__)[0], 'models', 'alike-s.pth')},
+     'alike-n': {'c1': 16, 'c2': 32, 'c3': 64, 'c4': 128, 'dim': 128, 'single_head': True, 'radius': 2,
+                 'model_path': os.path.join(os.path.split(__file__)[0], 'models', 'alike-n.pth')},
+     'alike-l': {'c1': 32, 'c2': 64, 'c3': 128, 'c4': 128, 'dim': 128, 'single_head': False, 'radius': 2,
+                 'model_path': os.path.join(os.path.split(__file__)[0], 'models', 'alike-l.pth')},
+ }
+
+
+ class ALike(ALNet):
+     def __init__(self,
+                  # ================================== feature encoder
+                  c1: int = 32, c2: int = 64, c3: int = 128, c4: int = 128, dim: int = 128,
+                  single_head: bool = False,
+                  # ================================== detect parameters
+                  radius: int = 2,
+                  top_k: int = 500, scores_th: float = 0.5,
+                  n_limit: int = 5000,
+                  device: str = 'cpu',
+                  model_path: str = ''
+                  ):
+         super().__init__(c1, c2, c3, c4, dim, single_head)
+         self.radius = radius
+         self.top_k = top_k
+         self.n_limit = n_limit
+         self.scores_th = scores_th
+         self.dkd = DKD(radius=self.radius, top_k=self.top_k,
+                        scores_th=self.scores_th, n_limit=self.n_limit)
+         self.device = device
+
+         if model_path != '':
+             state_dict = torch.load(model_path, self.device)
+             self.load_state_dict(state_dict)
+             self.to(self.device)
+             self.eval()
+             logging.info(f'Loaded model parameters from {model_path}')
+             logging.info(
+                 f"Number of model parameters: {sum(p.numel() for p in self.parameters() if p.requires_grad) / 1e3}KB")
+
+     def extract_dense_map(self, image, ret_dict=False):
+         # ====================================================
+         # check image size, which should be an integer multiple of 2^5;
+         # if it is not, pad the image with zeros
+         device = image.device
+         b, c, h, w = image.shape
+         h_ = math.ceil(h / 32) * 32 if h % 32 != 0 else h
+         w_ = math.ceil(w / 32) * 32 if w % 32 != 0 else w
+         if h_ != h:
+             h_padding = torch.zeros(b, c, h_ - h, w, device=device)
+             image = torch.cat([image, h_padding], dim=2)
+         if w_ != w:
+             w_padding = torch.zeros(b, c, h_, w_ - w, device=device)
+             image = torch.cat([image, w_padding], dim=3)
+         # ====================================================
+
+         scores_map, descriptor_map = super().forward(image)
+
+         # ====================================================
+         if h_ != h or w_ != w:
+             descriptor_map = descriptor_map[:, :, :h, :w]
+             scores_map = scores_map[:, :, :h, :w]  # Bx1xHxW
+         # ====================================================
+
+         # BxCxHxW
+         descriptor_map = torch.nn.functional.normalize(descriptor_map, p=2, dim=1)
+
+         if ret_dict:
+             return {'descriptor_map': descriptor_map, 'scores_map': scores_map, }
+         else:
+             return descriptor_map, scores_map
+
+     def forward(self, img, image_size_max=99999, sort=False, sub_pixel=False):
+         """
+         :param img: np.array HxWx3, RGB
+         :param image_size_max: maximum image size; larger images will be resized
+         :param sort: sort keypoints by scores
+         :param sub_pixel: whether to use sub-pixel accuracy
+         :return: a dictionary with 'keypoints', 'descriptors', 'scores', 'scores_map', and 'time'
+         """
+         H, W, three = img.shape
+         assert three == 3, "input image shape should be [HxWx3]"
+
+         # ==================== image size constraint
+         image = deepcopy(img)
+         max_hw = max(H, W)
+         if max_hw > image_size_max:
+             ratio = float(image_size_max / max_hw)
+             image = cv2.resize(image, dsize=None, fx=ratio, fy=ratio)
+
+         # ==================== convert image to tensor
+         image = torch.from_numpy(image).to(self.device).to(torch.float32).permute(2, 0, 1)[None] / 255.0
+
+         # ==================== extract keypoints
+         start = time.time()
+
+         with torch.no_grad():
+             descriptor_map, scores_map = self.extract_dense_map(image)
+             keypoints, descriptors, scores, _ = self.dkd(scores_map, descriptor_map,
+                                                          sub_pixel=sub_pixel)
+             keypoints, descriptors, scores = keypoints[0], descriptors[0], scores[0]
+             keypoints = (keypoints + 1) / 2 * keypoints.new_tensor([[W - 1, H - 1]])
+
+             if sort:
+                 indices = torch.argsort(scores, descending=True)
+                 keypoints = keypoints[indices]
+                 descriptors = descriptors[indices]
+                 scores = scores[indices]
+
+         end = time.time()
+
+         return {'keypoints': keypoints.cpu().numpy(),
+                 'descriptors': descriptors.cpu().numpy(),
+                 'scores': scores.cpu().numpy(),
+                 'scores_map': scores_map.cpu().numpy(),
+                 'time': end - start, }
+
+
+ if __name__ == '__main__':
+     import numpy as np
+     from thop import profile
+
+     net = ALike(c1=32, c2=64, c3=128, c4=128, dim=128, single_head=False)
+
+     image = np.random.random((640, 480, 3)).astype(np.float32)
+     flops, params = profile(net, inputs=(image, 9999, False), verbose=False)
+     print('{:<30} {:<8} GFLops'.format('Computational complexity: ', flops / 1e9))
+     print('{:<30} {:<8} KB'.format('Number of parameters: ', params / 1e3))
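The last step of `ALike.forward` maps the detector's normalized keypoint coordinates in [-1, 1] to pixel coordinates via `(keypoints + 1) / 2 * [W - 1, H - 1]`. The sketch below just illustrates that mapping; the inverse function is my own addition (it is not part of this file) and is only included to show how one could go back to the normalized convention.

```python
# Illustration of the normalized <-> pixel coordinate convention used above (sketch).
import torch

def normalized_to_pixel(kpts_norm: torch.Tensor, W: int, H: int) -> torch.Tensor:
    # kpts_norm: Nx2 (x, y) in [-1, 1], as produced by the DKD detector
    return (kpts_norm + 1) / 2 * kpts_norm.new_tensor([[W - 1, H - 1]])

def pixel_to_normalized(kpts_px: torch.Tensor, W: int, H: int) -> torch.Tensor:
    # Inverse mapping (assumed helper, not in alike.py)
    return kpts_px / kpts_px.new_tensor([[W - 1, H - 1]]) * 2 - 1

kpts = torch.tensor([[-1.0, -1.0], [0.0, 0.0], [1.0, 1.0]])
px = normalized_to_pixel(kpts, W=640, H=480)   # [[0, 0], [319.5, 239.5], [639, 479]]
assert torch.allclose(pixel_to_normalized(px, 640, 480), kpts)
```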
third_party/ALIKE/alnet.py ADDED
@@ -0,0 +1,164 @@
+ import torch
+ from torch import nn
+ from torchvision.models import resnet
+ from typing import Optional, Callable
+
+
+ class ConvBlock(nn.Module):
+     def __init__(self, in_channels, out_channels,
+                  gate: Optional[Callable[..., nn.Module]] = None,
+                  norm_layer: Optional[Callable[..., nn.Module]] = None):
+         super().__init__()
+         if gate is None:
+             self.gate = nn.ReLU(inplace=True)
+         else:
+             self.gate = gate
+         if norm_layer is None:
+             norm_layer = nn.BatchNorm2d
+         self.conv1 = resnet.conv3x3(in_channels, out_channels)
+         self.bn1 = norm_layer(out_channels)
+         self.conv2 = resnet.conv3x3(out_channels, out_channels)
+         self.bn2 = norm_layer(out_channels)
+
+     def forward(self, x):
+         x = self.gate(self.bn1(self.conv1(x)))  # B x in_channels x H x W
+         x = self.gate(self.bn2(self.conv2(x)))  # B x out_channels x H x W
+         return x
+
+
+ # copied from torchvision/models/resnet.py#27 -> BasicBlock
+ class ResBlock(nn.Module):
+     expansion: int = 1
+
+     def __init__(
+             self,
+             inplanes: int,
+             planes: int,
+             stride: int = 1,
+             downsample: Optional[nn.Module] = None,
+             groups: int = 1,
+             base_width: int = 64,
+             dilation: int = 1,
+             gate: Optional[Callable[..., nn.Module]] = None,
+             norm_layer: Optional[Callable[..., nn.Module]] = None
+     ) -> None:
+         super(ResBlock, self).__init__()
+         if gate is None:
+             self.gate = nn.ReLU(inplace=True)
+         else:
+             self.gate = gate
+         if norm_layer is None:
+             norm_layer = nn.BatchNorm2d
+         if groups != 1 or base_width != 64:
+             raise ValueError('ResBlock only supports groups=1 and base_width=64')
+         if dilation > 1:
+             raise NotImplementedError("Dilation > 1 not supported in ResBlock")
+         # Both self.conv1 and self.downsample layers downsample the input when stride != 1
+         self.conv1 = resnet.conv3x3(inplanes, planes, stride)
+         self.bn1 = norm_layer(planes)
+         self.conv2 = resnet.conv3x3(planes, planes)
+         self.bn2 = norm_layer(planes)
+         self.downsample = downsample
+         self.stride = stride
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         identity = x
+
+         out = self.conv1(x)
+         out = self.bn1(out)
+         out = self.gate(out)
+
+         out = self.conv2(out)
+         out = self.bn2(out)
+
+         if self.downsample is not None:
+             identity = self.downsample(x)
+
+         out += identity
+         out = self.gate(out)
+
+         return out
+
+
+ class ALNet(nn.Module):
+     def __init__(self, c1: int = 32, c2: int = 64, c3: int = 128, c4: int = 128, dim: int = 128,
+                  single_head: bool = True,
+                  ):
+         super().__init__()
+
+         self.gate = nn.ReLU(inplace=True)
+
+         self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
+         self.pool4 = nn.MaxPool2d(kernel_size=4, stride=4)
+
+         self.block1 = ConvBlock(3, c1, self.gate, nn.BatchNorm2d)
+
+         self.block2 = ResBlock(inplanes=c1, planes=c2, stride=1,
+                                downsample=nn.Conv2d(c1, c2, 1),
+                                gate=self.gate,
+                                norm_layer=nn.BatchNorm2d)
+         self.block3 = ResBlock(inplanes=c2, planes=c3, stride=1,
+                                downsample=nn.Conv2d(c2, c3, 1),
+                                gate=self.gate,
+                                norm_layer=nn.BatchNorm2d)
+         self.block4 = ResBlock(inplanes=c3, planes=c4, stride=1,
+                                downsample=nn.Conv2d(c3, c4, 1),
+                                gate=self.gate,
+                                norm_layer=nn.BatchNorm2d)
+
+         # ================================== feature aggregation
+         self.conv1 = resnet.conv1x1(c1, dim // 4)
+         self.conv2 = resnet.conv1x1(c2, dim // 4)
+         self.conv3 = resnet.conv1x1(c3, dim // 4)
+         self.conv4 = resnet.conv1x1(dim, dim // 4)
+         self.upsample2 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
+         self.upsample4 = nn.Upsample(scale_factor=4, mode='bilinear', align_corners=True)
+         self.upsample8 = nn.Upsample(scale_factor=8, mode='bilinear', align_corners=True)
+         self.upsample32 = nn.Upsample(scale_factor=32, mode='bilinear', align_corners=True)
+
+         # ================================== detector and descriptor head
+         self.single_head = single_head
+         if not self.single_head:
+             self.convhead1 = resnet.conv1x1(dim, dim)
+         self.convhead2 = resnet.conv1x1(dim, dim + 1)
+
+     def forward(self, image):
+         # ================================== feature encoder
+         x1 = self.block1(image)  # B x c1 x H x W
+         x2 = self.pool2(x1)
+         x2 = self.block2(x2)  # B x c2 x H/2 x W/2
+         x3 = self.pool4(x2)
+         x3 = self.block3(x3)  # B x c3 x H/8 x W/8
+         x4 = self.pool4(x3)
+         x4 = self.block4(x4)  # B x dim x H/32 x W/32
+
+         # ================================== feature aggregation
+         x1 = self.gate(self.conv1(x1))  # B x dim//4 x H x W
+         x2 = self.gate(self.conv2(x2))  # B x dim//4 x H//2 x W//2
+         x3 = self.gate(self.conv3(x3))  # B x dim//4 x H//8 x W//8
+         x4 = self.gate(self.conv4(x4))  # B x dim//4 x H//32 x W//32
+         x2_up = self.upsample2(x2)  # B x dim//4 x H x W
+         x3_up = self.upsample8(x3)  # B x dim//4 x H x W
+         x4_up = self.upsample32(x4)  # B x dim//4 x H x W
+         x1234 = torch.cat([x1, x2_up, x3_up, x4_up], dim=1)
+
+         # ================================== detector and descriptor head
+         if not self.single_head:
+             x1234 = self.gate(self.convhead1(x1234))
+         x = self.convhead2(x1234)  # B x dim+1 x H x W
+
+         descriptor_map = x[:, :-1, :, :]
+         scores_map = torch.sigmoid(x[:, -1, :, :]).unsqueeze(1)
+
+         return scores_map, descriptor_map
+
+
+ if __name__ == '__main__':
+     from thop import profile
+
+     net = ALNet(c1=16, c2=32, c3=64, c4=128, dim=128, single_head=True)
+
+     image = torch.randn(1, 3, 640, 480)
+     flops, params = profile(net, inputs=(image,), verbose=False)
+     print('{:<30} {:<8} GFLops'.format('Computational complexity: ', flops / 1e9))
+     print('{:<30} {:<8} KB'.format('Number of parameters: ', params / 1e3))
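The shape comments in `ALNet.forward` above describe a Bx1xHxW score map and a BxdimxHxW descriptor map when the input height and width are multiples of 32 (otherwise the padding in `alike.py` takes care of it). The following sketch is a quick, self-contained sanity check of those shapes using the `alike-t` channel configuration; the specific input size (64x96) is just an example I picked.

```python
# Sanity-check sketch of ALNet output shapes (not part of alnet.py).
import torch
from alnet import ALNet

# Channel sizes taken from the 'alike-t' entry in alike.py's configs.
net = ALNet(c1=8, c2=16, c3=32, c4=64, dim=64, single_head=True).eval()

with torch.no_grad():
    scores_map, descriptor_map = net(torch.randn(1, 3, 64, 96))  # H and W multiples of 32

print(scores_map.shape)       # torch.Size([1, 1, 64, 96])
print(descriptor_map.shape)   # torch.Size([1, 64, 64, 96])
```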
third_party/ALIKE/assets/ALIKE_code.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:891e8431c047e7aeed77c9e5f64ffeed262d92389d8ae6235dde0964a9048a08
+ size 62774
third_party/ALIKE/assets/alike.png ADDED

Git LFS Details

  • SHA256: d35e59f8e4d9c34b0e2686ecd5ca5414fe975b81553e4968eccc4bff1535c2d4
  • Pointer size: 131 Bytes
  • Size of remote file: 162 kB
third_party/ALIKE/assets/kitti.gif ADDED

Git LFS Details

  • SHA256: 0b05e4dc0000b9abf53183a3ebdfc0b95a92513952e235ea24f27f2945389ea1
  • Pointer size: 132 Bytes
  • Size of remote file: 7.03 MB
third_party/ALIKE/assets/kitti/000100.png ADDED

Git LFS Details

  • SHA256: c8d4a81ad91c7945cabd15de286aacf27ab661163b5eee0177128721782d5405
  • Pointer size: 131 Bytes
  • Size of remote file: 273 kB
third_party/ALIKE/assets/kitti/000101.png ADDED

Git LFS Details

  • SHA256: 539c684432726e903191a2471c8dae8c4b0012b88e1b3af7590de08c24890327
  • Pointer size: 131 Bytes
  • Size of remote file: 272 kB
third_party/ALIKE/assets/kitti/000102.png ADDED

Git LFS Details

  • SHA256: 5bbc9a5b04bd425a5e146f3ba114027041086477a5fa123a50463932ab62617e
  • Pointer size: 131 Bytes
  • Size of remote file: 270 kB
third_party/ALIKE/assets/kitti/000103.png ADDED

Git LFS Details

  • SHA256: 2041e633aeb85022b1222277cace17132bed09ca19856d1e6787984b05d61339
  • Pointer size: 131 Bytes
  • Size of remote file: 271 kB
third_party/ALIKE/assets/kitti/000104.png ADDED

Git LFS Details

  • SHA256: 6ca8a30c0edb7d2c6d6e5c2f5317bdffdae2269157d69e71f9602e0bbf2090ab
  • Pointer size: 131 Bytes
  • Size of remote file: 271 kB
third_party/ALIKE/assets/kitti/000105.png ADDED

Git LFS Details

  • SHA256: b8bca67672e8b2181b193f0577a9a3b42b64df9bb57d98608dbdbb54e79925bd
  • Pointer size: 131 Bytes
  • Size of remote file: 270 kB
third_party/ALIKE/assets/kitti/000106.png ADDED

Git LFS Details

  • SHA256: 2ccc83d57703afdcda4afd746dd99458b425fbc11ce3155583abde25e988e389
  • Pointer size: 131 Bytes
  • Size of remote file: 269 kB
third_party/ALIKE/assets/kitti/000107.png ADDED

Git LFS Details

  • SHA256: 980f4c74ac9117020f954cc75718cf0a09baeb30894aea123db59f9e4555ecef
  • Pointer size: 131 Bytes
  • Size of remote file: 269 kB
third_party/ALIKE/assets/kitti/000108.png ADDED

Git LFS Details

  • SHA256: c7c2234c8ba8c056c452a0d625db6eac09c8963b0c5e8a5d0b1c3af15a4b7516
  • Pointer size: 131 Bytes
  • Size of remote file: 271 kB
third_party/ALIKE/assets/kitti/000109.png ADDED

Git LFS Details

  • SHA256: 6a34b9639806e7deefe1cb24ae7b376343d394d2d032f95e763e4b6921cd61c7
  • Pointer size: 131 Bytes
  • Size of remote file: 276 kB
third_party/ALIKE/assets/kitti/000110.png ADDED

Git LFS Details

  • SHA256: 6af1b3e55b9c1eac208c887c44592f93e8ae7cc0196acaa2639c265f8bf959e3
  • Pointer size: 131 Bytes
  • Size of remote file: 275 kB
third_party/ALIKE/assets/kitti/000111.png ADDED

Git LFS Details

  • SHA256: 215ed5306f4976458110836a620dcf55030d8dd20618e6365d60176988c1cfa6
  • Pointer size: 131 Bytes
  • Size of remote file: 276 kB
third_party/ALIKE/assets/kitti/000112.png ADDED

Git LFS Details

  • SHA256: 8a265252457871d4dd2f17c42eafa1c0da99df90d103c653c8097aad26073d22
  • Pointer size: 131 Bytes
  • Size of remote file: 276 kB
third_party/ALIKE/assets/kitti/000113.png ADDED

Git LFS Details

  • SHA256: c83f220b29b5d04ead44c9304f9eccde3a4ff4e60627d7014f8fe424afb873f4
  • Pointer size: 131 Bytes
  • Size of remote file: 276 kB
third_party/ALIKE/assets/kitti/000114.png ADDED

Git LFS Details

  • SHA256: 1abad021db35c21f2e9ac0ce7e54a5721eec3ff32bc4ce820f5b7091af4d6fac
  • Pointer size: 131 Bytes
  • Size of remote file: 276 kB
third_party/ALIKE/assets/kitti/000115.png ADDED

Git LFS Details

  • SHA256: 6be815b2b0aa8aa3dc47e314ed6645eeb474996e9a920fab2abe8a35fb3ea089
  • Pointer size: 131 Bytes
  • Size of remote file: 274 kB
third_party/ALIKE/assets/kitti/000116.png ADDED

Git LFS Details

  • SHA256: 96b8df04ee570d877a04e43f1f4c30abc7e7383b24ce70a1a83a82dcbd863293
  • Pointer size: 131 Bytes
  • Size of remote file: 271 kB
third_party/ALIKE/assets/kitti/000117.png ADDED

Git LFS Details

  • SHA256: f32567394c096442df0c768822af1e21f2163f373eec94b7a36f2941ae08b199
  • Pointer size: 131 Bytes
  • Size of remote file: 267 kB
third_party/ALIKE/assets/kitti/000118.png ADDED

Git LFS Details

  • SHA256: b76476a8856d33960302b29cbd339c8bc513c52e7b81b21ba7d9f07dd0e4b096
  • Pointer size: 131 Bytes
  • Size of remote file: 268 kB
third_party/ALIKE/assets/kitti/000119.png ADDED

Git LFS Details

  • SHA256: c818d19b8a1ce7051b006361bc14f638d8df2989b0bba8a96472e8551e02e5d1
  • Pointer size: 131 Bytes
  • Size of remote file: 270 kB
third_party/ALIKE/assets/tum.gif ADDED

Git LFS Details

  • SHA256: df6ecf9666386bfa5925c8e57d196f15c077d550eb84dd392f5f49b90e86a5dc
  • Pointer size: 132 Bytes
  • Size of remote file: 4.04 MB
third_party/ALIKE/assets/tum/1311868169.163498.png ADDED

Git LFS Details

  • SHA256: 20bc06c1249727c16efc812082454bc8305438f756bcc95f913b9f79819f08e3
  • Pointer size: 131 Bytes
  • Size of remote file: 512 kB
third_party/ALIKE/assets/tum/1311868169.263274.png ADDED

Git LFS Details

  • SHA256: 0954d005c8f9ab146718f52601136c513b96a4414b0a0cbc02a01184686fb01e
  • Pointer size: 131 Bytes
  • Size of remote file: 516 kB
third_party/ALIKE/assets/tum/1311868169.363470.png ADDED

Git LFS Details

  • SHA256: 1d2681bb2b8a907d53469d9e67f6d1809b9ec435ec210622bf255c66c8918efd
  • Pointer size: 131 Bytes
  • Size of remote file: 506 kB
third_party/ALIKE/assets/tum/1311868169.463229.png ADDED

Git LFS Details

  • SHA256: ba2cd89601523665d0bee9dd3ea2117d9249e7ea4c7b43753298c1bab74cd532
  • Pointer size: 131 Bytes
  • Size of remote file: 509 kB
third_party/ALIKE/assets/tum/1311868169.563501.png ADDED

Git LFS Details

  • SHA256: 0a0239c7cb08fefbe4f5ec87f1c5e5fd5a32be11349744dc45158caa7d403744
  • Pointer size: 131 Bytes
  • Size of remote file: 526 kB
third_party/ALIKE/assets/tum/1311868169.663240.png ADDED

Git LFS Details

  • SHA256: 6e538c9dbaf4242072949920b3105ccdcfac68af955d623a701b9eea0e6e0f6f
  • Pointer size: 131 Bytes
  • Size of remote file: 521 kB
third_party/ALIKE/assets/tum/1311868169.763417.png ADDED

Git LFS Details

  • SHA256: 22a4fadfc031c36efd4cee5f70d0b501557bf820fa4b39a1c77f4268d0c12e86
  • Pointer size: 131 Bytes
  • Size of remote file: 544 kB
third_party/ALIKE/assets/tum/1311868169.863396.png ADDED

Git LFS Details

  • SHA256: eae0ee5be82b14aa1ed19e0b20a72bc37964c64732c7016739a5b30158453049
  • Pointer size: 131 Bytes
  • Size of remote file: 549 kB
third_party/ALIKE/assets/tum/1311868169.963415.png ADDED

Git LFS Details

  • SHA256: a590b6fdb98c4a4ee8e13aafcd9d2392c78a7881b4cc7fd1109231adc3cc8b91
  • Pointer size: 131 Bytes
  • Size of remote file: 541 kB
third_party/ALIKE/assets/tum/1311868170.063469.png ADDED

Git LFS Details

  • SHA256: 3d2d6058e036b307efa7d6008a02103b9c31ed8d0edd4b2f1e9ad49717b89684
  • Pointer size: 131 Bytes
  • Size of remote file: 550 kB
third_party/ALIKE/assets/tum/1311868170.163416.png ADDED

Git LFS Details

  • SHA256: 741d1e0ede775dd4b7054314c1a95ed3e5116792245b9eb1a5e2492ffe4d935c
  • Pointer size: 131 Bytes
  • Size of remote file: 550 kB
third_party/ALIKE/assets/tum/1311868170.263521.png ADDED

Git LFS Details

  • SHA256: 04ce12ed16c6fa89a9fdb3b64e7471335d13b82b84c7a554b3f9fd08f6e254a0
  • Pointer size: 131 Bytes
  • Size of remote file: 546 kB
third_party/ALIKE/assets/tum/1311868170.363400.png ADDED

Git LFS Details

  • SHA256: fb6be184df6fd2ca2e287bc64ada937ce2cec3f5d90e15c244fffa8aa44b11b1
  • Pointer size: 131 Bytes
  • Size of remote file: 545 kB
third_party/ALIKE/assets/tum/1311868170.463383.png ADDED

Git LFS Details

  • SHA256: d82953d4580894111f15a5b57e0059dca0baf02e788e0726a2849647cf570b63
  • Pointer size: 131 Bytes
  • Size of remote file: 542 kB
third_party/ALIKE/assets/tum/1311868170.563345.png ADDED

Git LFS Details

  • SHA256: d498847d7b8bc2389550941b01e95b1bf6459c70ff645d9893637d59e129ae29
  • Pointer size: 131 Bytes
  • Size of remote file: 549 kB
third_party/ALIKE/assets/tum/1311868170.663430.png ADDED

Git LFS Details

  • SHA256: b299c55e430afecb9f5d0ff6e1485ce72d90f5ddf1ec1a186fbcb2b110e035f2
  • Pointer size: 131 Bytes
  • Size of remote file: 541 kB
third_party/ALIKE/assets/tum/1311868170.763453.png ADDED

Git LFS Details

  • SHA256: 8073cc59711d6bea5038b698fb74eaa72eeca663dcc35850e0b334e234605385
  • Pointer size: 131 Bytes
  • Size of remote file: 541 kB
third_party/ALIKE/assets/tum/1311868170.863446.png ADDED

Git LFS Details

  • SHA256: 70b27a2d1c9e30ad0b164af13eb992b9c54c11aa7b408221515b6b106de87763
  • Pointer size: 131 Bytes
  • Size of remote file: 544 kB
third_party/ALIKE/assets/tum/1311868170.963440.png ADDED

Git LFS Details

  • SHA256: 36c02db5125b37725ce2c6fb502ba80e3ff85755dabf1a21d952e186480b8e56
  • Pointer size: 131 Bytes
  • Size of remote file: 535 kB
third_party/ALIKE/assets/tum/1311868171.063438.png ADDED

Git LFS Details

  • SHA256: f54d76a6b4bb8d3fb81c257920ddffdf75480bba34d506b481ee6dfaff894ecf
  • Pointer size: 131 Bytes
  • Size of remote file: 536 kB
third_party/ALIKE/demo.py ADDED
@@ -0,0 +1,167 @@
+ import copy
+ import os
+ import cv2
+ import glob
+ import logging
+ import argparse
+ import numpy as np
+ from tqdm import tqdm
+ from alike import ALike, configs
+
+
+ class ImageLoader(object):
+     def __init__(self, filepath: str):
+         self.N = 3000
+         if filepath.startswith('camera'):
+             camera = int(filepath[6:])
+             self.cap = cv2.VideoCapture(camera)
+             if not self.cap.isOpened():
+                 raise IOError(f"Can't open camera {camera}!")
+             logging.info(f'Opened camera {camera}')
+             self.mode = 'camera'
+         elif os.path.exists(filepath):
+             if os.path.isfile(filepath):
+                 self.cap = cv2.VideoCapture(filepath)
+                 if not self.cap.isOpened():
+                     raise IOError(f"Can't open video {filepath}!")
+                 rate = self.cap.get(cv2.CAP_PROP_FPS)
+                 self.N = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) - 1
+                 duration = self.N / rate
+                 logging.info(f'Opened video {filepath}')
+                 logging.info(f'Frames: {self.N}, FPS: {rate}, Duration: {duration}s')
+                 self.mode = 'video'
+             else:
+                 self.images = glob.glob(os.path.join(filepath, '*.png')) + \
+                               glob.glob(os.path.join(filepath, '*.jpg')) + \
+                               glob.glob(os.path.join(filepath, '*.ppm'))
+                 self.images.sort()
+                 self.N = len(self.images)
+                 logging.info(f'Loading {self.N} images')
+                 self.mode = 'images'
+         else:
+             raise IOError('Error filepath (camerax/path of images/path of videos): ', filepath)
+
+     def __getitem__(self, item):
+         if self.mode == 'camera' or self.mode == 'video':
+             if item > self.N:
+                 return None
+             ret, img = self.cap.read()
+             if not ret:
+                 raise IOError("Can't read image from camera")
+             if self.mode == 'video':
+                 self.cap.set(cv2.CAP_PROP_POS_FRAMES, item)
+         elif self.mode == 'images':
+             filename = self.images[item]
+             img = cv2.imread(filename)
+             if img is None:
+                 raise Exception('Error reading image %s' % filename)
+         return img
+
+     def __len__(self):
+         return self.N
+
+
+ class SimpleTracker(object):
+     def __init__(self):
+         self.pts_prev = None
+         self.desc_prev = None
+
+     def update(self, img, pts, desc):
+         N_matches = 0
+         if self.pts_prev is None:
+             self.pts_prev = pts
+             self.desc_prev = desc
+
+             out = copy.deepcopy(img)
+             for pt1 in pts:
+                 p1 = (int(round(pt1[0])), int(round(pt1[1])))
+                 cv2.circle(out, p1, 1, (0, 0, 255), -1, lineType=16)
+         else:
+             matches = self.mnn_mather(self.desc_prev, desc)
+             mpts1, mpts2 = self.pts_prev[matches[:, 0]], pts[matches[:, 1]]
+             N_matches = len(matches)
+
+             out = copy.deepcopy(img)
+             for pt1, pt2 in zip(mpts1, mpts2):
+                 p1 = (int(round(pt1[0])), int(round(pt1[1])))
+                 p2 = (int(round(pt2[0])), int(round(pt2[1])))
+                 cv2.line(out, p1, p2, (0, 255, 0), lineType=16)
+                 cv2.circle(out, p2, 1, (0, 0, 255), -1, lineType=16)
+
+             self.pts_prev = pts
+             self.desc_prev = desc
+
+         return out, N_matches
+
+     def mnn_mather(self, desc1, desc2):
+         sim = desc1 @ desc2.transpose()
+         sim[sim < 0.9] = 0
+         nn12 = np.argmax(sim, axis=1)
+         nn21 = np.argmax(sim, axis=0)
+         ids1 = np.arange(0, sim.shape[0])
+         mask = (ids1 == nn21[nn12])
+         matches = np.stack([ids1[mask], nn12[mask]])
+         return matches.transpose()
+
+
+ if __name__ == '__main__':
+     parser = argparse.ArgumentParser(description='ALike Demo.')
+     parser.add_argument('input', type=str, default='',
+                         help='Image directory or movie file or "camera0" (for webcam0).')
+     parser.add_argument('--model', choices=['alike-t', 'alike-s', 'alike-n', 'alike-l'], default="alike-t",
+                         help="The model configuration")
+     parser.add_argument('--device', type=str, default='cuda', help="Running device (default: cuda).")
+     parser.add_argument('--top_k', type=int, default=-1,
+                         help='Detect top K keypoints. -1 for threshold based mode, >0 for top K mode. (default: -1)')
+     parser.add_argument('--scores_th', type=float, default=0.2,
+                         help='Detector score threshold (default: 0.2).')
+     parser.add_argument('--n_limit', type=int, default=5000,
+                         help='Maximum number of keypoints to be detected (default: 5000).')
+     parser.add_argument('--no_display', action='store_true',
+                         help='Do not display images to screen. Useful if running remotely (default: False).')
+     parser.add_argument('--no_sub_pixel', action='store_true',
+                         help='Do not detect sub-pixel keypoints (default: False).')
+     args = parser.parse_args()
+
+     logging.basicConfig(level=logging.INFO)
+
+     image_loader = ImageLoader(args.input)
+     model = ALike(**configs[args.model],
+                   device=args.device,
+                   top_k=args.top_k,
+                   scores_th=args.scores_th,
+                   n_limit=args.n_limit)
+     tracker = SimpleTracker()
+
+     if not args.no_display:
+         logging.info("Press 'q' to stop!")
+         cv2.namedWindow(args.model)
+
+     runtime = []
+     progress_bar = tqdm(image_loader)
+     for img in progress_bar:
+         if img is None:
+             break
+
+         img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+         pred = model(img_rgb, sub_pixel=not args.no_sub_pixel)
+         kpts = pred['keypoints']
+         desc = pred['descriptors']
+         runtime.append(pred['time'])
+
+         out, N_matches = tracker.update(img, kpts, desc)
+
+         ave_fps = (1. / np.stack(runtime)).mean()
+         status = f"Fps:{ave_fps:.1f}, Keypoints/Matches: {len(kpts)}/{N_matches}"
+         progress_bar.set_description(status)
+
+         if not args.no_display:
+             cv2.setWindowTitle(args.model, args.model + ': ' + status)
+             cv2.imshow(args.model, out)
+             if cv2.waitKey(1) == ord('q'):
+                 break
+
+     logging.info('Finished!')
+     if not args.no_display:
+         logging.info('Press any key to exit!')
+         cv2.waitKey()
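`SimpleTracker.mnn_mather` performs mutual-nearest-neighbour matching on the L2-normalized ALIKE descriptors (cosine similarity with a 0.9 threshold), and `update` draws the resulting correspondences. The sketch below is my own usage example of those two components on a pair of still images from this commit's assets, without the display loop; it assumes `demo.py`, `alike.py`, the `soft_detect` module, and the `alike-t` weights are importable from the working directory.

```python
# Sketch: matching two frames with the demo components, outside the demo loop.
import cv2
from alike import ALike, configs
from demo import SimpleTracker  # the __main__ guard prevents the demo from running on import

model = ALike(**configs['alike-t'], device='cpu')
tracker = SimpleTracker()

n_matches = 0
for path in ['assets/kitti/000100.png', 'assets/kitti/000101.png']:
    img = cv2.imread(path)                                   # BGR for drawing
    pred = model(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))       # RGB for the network
    vis, n_matches = tracker.update(img, pred['keypoints'], pred['descriptors'])

print('mutual nearest-neighbour matches between the two frames:', n_matches)
```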
third_party/ALIKE/hseq/cache/alike-l-ms.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1350ab826afdd9b7542a556e2fda9ad9f94388a875c8edb7874e4bcdfebc63ca
+ size 13124