YannisK committed
Commit 689e965
1 Parent(s): dca7dd8
.ipynb_checkpoints/app-checkpoint.py ADDED
@@ -0,0 +1,140 @@
+import gradio as gr
+
+import cv2
+
+import torch
+
+import matplotlib.pyplot as plt
+from matplotlib import cm
+from matplotlib import colors
+from mpl_toolkits.axes_grid1 import ImageGrid
+
+from torchvision import transforms
+
+import fire_network
+
+import numpy as np
+
+from PIL import Image
+
+# Possible scales for multiscale inference
+scales = [2.0, 1.414, 1.0, 0.707, 0.5, 0.353, 0.25]
+
+device = 'cpu'
+
+# Load the FIRe network
+state = torch.load('fire.pth', map_location='cpu')
+state['net_params']['pretrained'] = None  # no need for the ImageNet-pretrained weights
+net = fire_network.init_network(**state['net_params']).to(device)
+net.load_state_dict(state['state_dict'])
+
+transform = transforms.Compose([
+    transforms.Resize(1024),
+    transforms.ToTensor(),
+    transforms.Normalize(**dict(zip(["mean", "std"], net.runtime['mean_std'])))
+])
+
+# Indices of the super-features to visualize
+sf_idx_ = [55, 14, 5, 4, 52, 57, 40, 9]
+
+col = plt.get_cmap('tab10')
+
+
+def generate_matching_superfeatures(im1, im2, scale_id=6, threshold=50):
+    im1_tensor = transform(im1).unsqueeze(0)
+    im2_tensor = transform(im2).unsqueeze(0)
+
+    # PIL (RGB) -> OpenCV (BGR) copies to draw on
+    im1_cv = np.array(im1)[:, :, ::-1].copy()
+    im2_cv = np.array(im2)[:, :, ::-1].copy()
+
+    # extract features
+    with torch.no_grad():
+        output1 = net.get_superfeatures(im1_tensor.to(device), scales=[scales[scale_id]])
+        feats1 = output1[0][0]
+        attns1 = output1[1][0]
+        strengths1 = output1[2][0]
+
+        output2 = net.get_superfeatures(im2_tensor.to(device), scales=[scales[scale_id]])
+        feats2 = output2[0][0]
+        attns2 = output2[1][0]
+        strengths2 = output2[2][0]
+
+    print(feats1.shape, feats2.shape)
+    print(attns1.shape, attns2.shape)
+    print(strengths1.shape, strengths2.shape)
+
+    # Store all binary SF attention maps to show them all at once at the end
+    all_att_bin1 = []
+    all_att_bin2 = []
+    for n, i in enumerate(sf_idx_):
+        # normalize each attention channel to [0, 255], then binarize at the threshold
+        att_heat = np.array(attns1[0, i, :, :].numpy(), dtype=np.float32)
+        att_heat = np.uint8(att_heat / np.max(att_heat) * 255.0)
+        att_heat_bin = np.where(att_heat > threshold, 255, 0).astype(np.uint8)  # uint8 so cv2.resize accepts it
+        all_att_bin1.append(att_heat_bin)
+
+        att_heat = np.array(attns2[0, i, :, :].numpy(), dtype=np.float32)
+        att_heat = np.uint8(att_heat / np.max(att_heat) * 255.0)
+        att_heat_bin = np.where(att_heat > threshold, 255, 0).astype(np.uint8)
+        all_att_bin2.append(att_heat_bin)
+
+    # Paint each binary mask onto the first image with a distinct tab10 color
+    fin_img = []
+    img1rsz = np.copy(im1_cv)
+    for j, att in enumerate(all_att_bin1):
+        att = cv2.resize(att, im1.size, interpolation=cv2.INTER_NEAREST)
+        mask2d = zip(*np.where(att == 255))
+        for m, n in mask2d:
+            col_ = col.colors[j] if j < 7 else col.colors[j + 1]
+            if j == 0: col_ = col.colors[9]
+            col_ = 255 * np.array(colors.to_rgba(col_))[:3]
+            img1rsz[m, n, :] = col_[::-1]  # RGB -> BGR
+    fin_img.append(img1rsz)
+
+    # ... and likewise onto the second image
+    img2rsz = np.copy(im2_cv)
+    for j, att in enumerate(all_att_bin2):
+        att = cv2.resize(att, im2.size, interpolation=cv2.INTER_NEAREST)
+        mask2d = zip(*np.where(att == 255))
+        for m, n in mask2d:
+            col_ = col.colors[j] if j < 7 else col.colors[j + 1]
+            if j == 0: col_ = col.colors[9]
+            col_ = 255 * np.array(colors.to_rgba(col_))[:3]
+            img2rsz[m, n, :] = col_[::-1]  # RGB -> BGR
+    fin_img.append(img2rsz)
+
+    # Stack the two annotated images vertically in one matplotlib figure
+    fig = plt.figure()
+    grid = ImageGrid(fig, 111, nrows_ncols=(2, 1), axes_pad=0.1)
+    for ax, img in zip(grid, fin_img):
+        ax.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
+        ax.axis('scaled')
+        ax.axis('off')
+    plt.tight_layout()
+    return fig
+
+
+# GRADIO APP
+title = "Visualizing Super-features"
+description = "TBD"
+article = "<p style='text-align: center'><a href='https://github.com/naver/fire' target='_blank'>Original Github Repo</a></p>"
+
+css = ".input_image {height: 600px !important} .output_image {height: 1200px !important}"
+
+iface = gr.Interface(
+    fn=generate_matching_superfeatures,
+    inputs=[
+        gr.inputs.Image(shape=(1024, 1024), type="pil"),
+        gr.inputs.Image(shape=(1024, 1024), type="pil"),
+        gr.inputs.Slider(minimum=0, maximum=6, step=1, default=2, label="Scale"),  # index into the scales list
+        gr.inputs.Slider(minimum=1, maximum=255, step=25, default=50, label="Binarization Threshold")],
+    outputs="plot",
+    enable_queue=True,
+    title=title,
+    description=description,
+    article=article,
+    css=css,
+    examples=[["chateau_1.png", "chateau_2.png", 6, 50]],
+)
+iface.launch()
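Aside on the visualization step above: each selected super-feature attention channel is normalized to [0, 255], binarized at the slider threshold, resized to the input image with nearest-neighbor interpolation (to keep the mask binary), and painted onto the image in a distinct color. A minimal self-contained sketch of that step, assuming a single 2-D float attention map; the helper name binarize_and_overlay is hypothetical, not a function from this repo:

import cv2
import numpy as np

def binarize_and_overlay(att, image_bgr, color_bgr, threshold=50):
    # hypothetical helper: threshold one attention map and paint it onto an image
    att = np.float32(att)
    att = np.uint8(att / att.max() * 255.0)                  # normalize to [0, 255]
    mask = np.where(att > threshold, 255, 0).astype(np.uint8)
    # nearest-neighbor resize keeps the mask strictly binary
    mask = cv2.resize(mask, (image_bgr.shape[1], image_bgr.shape[0]),
                      interpolation=cv2.INTER_NEAREST)
    out = image_bgr.copy()
    out[mask == 255] = color_bgr                             # paint masked pixels
    return out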
__pycache__/fire_network.cpython-37.pyc ADDED
Binary file (4.67 kB)

__pycache__/lit.cpython-37.pyc ADDED
Binary file (2.59 kB)

app.py CHANGED
@@ -52,12 +52,12 @@ def generate_matching_superfeatures(im1, im2, scale_id=6, threshold=50):
 
     # extract features
     with torch.no_grad():
-        output1 = net.get_superfeatures(im1_tensor.to(device), scales=[scale_id])
+        output1 = net.get_superfeatures(im1_tensor.to(device), scales=[scales[scale_id]])
         feats1 = output1[0][0]
         attns1 = output1[1][0]
         strengths1 = output1[2][0]
 
-        output2 = net.get_superfeatures(im2_tensor.to(device), scales=[scale_id])
+        output2 = net.get_superfeatures(im2_tensor.to(device), scales=[scales[scale_id]])
         feats2 = output2[0][0]
         attns2 = output2[1][0]
         strengths2 = output2[2][0]
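Why this change: the scale_id coming from the Gradio slider is an index into the scales list declared at the top of the file, while net.get_superfeatures takes actual scale factors; passing the raw index requested scales of 1x to 7x instead of the intended multiscale values. A small illustration of the corrected mapping, using the values from that scales list:

scales = [2.0, 1.414, 1.0, 0.707, 0.5, 0.353, 0.25]

# slider index -> scale factor actually passed to net.get_superfeatures
for scale_id, s in enumerate(scales):
    print(f"scale_id {scale_id} -> scale {s}")  # e.g. scale_id 6 -> scale 0.25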
gradio_queue.db ADDED
File without changes

gradio_queue.db-journal ADDED
Binary file (512 Bytes)

how/__pycache__/__init__.cpython-37.pyc ADDED
Binary file (296 Bytes)

how/layers/__pycache__/__init__.cpython-37.pyc ADDED
Binary file (312 Bytes)

how/layers/__pycache__/attention.cpython-37.pyc ADDED
Binary file (674 Bytes)

how/layers/__pycache__/dim_reduction.cpython-37.pyc ADDED
Binary file (1.5 kB)

how/layers/__pycache__/functional.cpython-37.pyc ADDED
Binary file (2.29 kB)

how/layers/__pycache__/pooling.cpython-37.pyc ADDED
Binary file (959 Bytes)

how/networks/__pycache__/__init__.cpython-37.pyc ADDED
Binary file (213 Bytes)

how/networks/__pycache__/how_net.cpython-37.pyc ADDED
Binary file (4.01 kB)

how/utils/__pycache__/__init__.cpython-37.pyc ADDED
Binary file (202 Bytes)

how/utils/__pycache__/whitening.cpython-37.pyc ADDED
Binary file (1.27 kB)