YannisK committed
Commit f4b82b2
Parent: 1a2db09
Files changed (3):
  1. app.py +36 -12
  2. fire_network.py +1 -3
  3. requirements.txt +1 -0
app.py CHANGED
@@ -1,23 +1,46 @@
 import gradio as gr
 
-def greet(name):
-    return "Hello " + name + "!!"
+import torch
 
+from how.networks import how_net
 
-# Model to use
-net_path = 'fire.pth'
+import fire_network
 
-# CPU / GPU
-device = 'cpu'
 
-# Images will be downscaled to this size prior processing with the network
-image_size = 1024
+# Possible Scales for multiscale inference
+scales = [2.0, 1.414, 1.0, 0.707, 0.5, 0.353, 0.25]
+infer_opts = {"scales": scales, "features_num": 1000}
+
+
+# Load net
+state = torch.load('fire.pth', map_location='cpu')
+state['net_params']['pretrained'] = None  # no need for imagenet pretrained model
+net = fire_network.init_network(**state['net_params']).to(device)
+net.load_state_dict(state['state_dict'])
+
+transforms_ = transforms.Compose([
+    transforms.Resize(1024),
+    transforms.ToTensor(),
+    transforms.Normalize(**dict(zip(["mean", "std"], net.runtime['mean_std'])))
+])
+
 
 # Wrapper
 def generate_matching_superfeatures(im1, im2, scale=6):
 
-    # Possible Scales for multiscale inference
-    scales = [2.0, 1.414, 1.0, 0.707, 0.5, 0.353, 0.25]
+    # extract features
+    with torch.no_grad():
+        output1 = net.get_superfeatures(im1.to(device), scales=scales)
+        feats1 = output1[0]
+        attns1 = output1[1]
+        strengths1 = output1[2]
+
+        output2 = net.get_superfeatures(im2.to(device), scales=scales)
+        feats2 = output2[0]
+        attns2 = output2[1]
+        strengths2 = output2[2]
+
+
 
 
 # GRADIO APP
@@ -31,12 +54,13 @@ iface = gr.Interface(
     inputs=[
         gr.inputs.Image(shape=(240, 240), type="pil"),
         gr.inputs.Image(shape=(240, 240), type="pil"),
-        gr.inputs.Slider(minimum=1, maximum=7, step=1, default=2, label="Scale")],
+        gr.inputs.Slider(minimum=1, maximum=7, step=1, default=2, label="Scale"),
+        gr.inputs.Slider(minimum=1, maximum=255, step=25, default=50, label="Binarization Threshold")],
     outputs="plot",
     enable_queue=True,
     title=title,
     description=description,
     article=article,
-    examples=[["chateau_1.png", "chateau_2.png", 6]],
+    examples=[["chateau_1.png", "chateau_2.png", 6, 50]],
 )
 iface.launch()
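As committed, the new app.py leaves two names undefined: `device` (its `device = 'cpu'` assignment is deleted in the first hunk) and `transforms` (no `torchvision.transforms` import is added). The wrapper also still accepts three parameters while the interface now supplies four inputs, and `get_superfeatures` is called on the PIL images directly rather than on the output of the new `transforms_` pipeline. A minimal sketch of the missing glue, reusing the module-level `net`, `scales`, and `transforms_` from this commit; the `threshold` parameter and the batch-dimension handling are assumptions, not part of the commit:

import torch
from torchvision import transforms  # required by the transforms_ pipeline above

device = 'cpu'  # assumption: keep CPU inference, as in the revision being replaced

# Assumption: one parameter per Gradio input, so the new
# "Binarization Threshold" slider needs a fourth argument.
def generate_matching_superfeatures(im1, im2, scale=6, threshold=50):
    # Gradio hands over PIL images; preprocess them and add a batch
    # dimension before moving them to the target device.
    t1 = transforms_(im1).unsqueeze(0).to(device)
    t2 = transforms_(im2).unsqueeze(0).to(device)
    with torch.no_grad():
        feats1, attns1, strengths1 = net.get_superfeatures(t1, scales=scales)
        feats2, attns2, strengths2 = net.get_superfeatures(t2, scales=scales)
    ...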
fire_network.py CHANGED
@@ -6,8 +6,6 @@ import torch
 from torch import nn
 import torchvision
 
-from cirtorch.networks import imageretrievalnet
-
 from how import layers
 from how.layers import functional as HF
 
@@ -103,7 +101,7 @@ def init_network(architecture, pretrained, skip_layer, dim_reduction, lit, runtime):
 
     if skip_layer > 0:
         features = features[:-skip_layer]
-        backbone_dim = imageretrievalnet.OUTPUT_DIM[architecture] // (2 ** skip_layer)
+        backbone_dim = 2048 // (2 ** skip_layer)
 
     att_layer = layers.attention.L2Attention()
 
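The `backbone_dim` change hard-codes 2048, the final channel width of the ResNet50/101 backbones that cirtorch's `imageretrievalnet.OUTPUT_DIM` used to supply, which is what lets the cirtorch dependency be dropped. It silently assumes a ResNet50-class architecture, though; if other backbones can reach `init_network`, a small local table preserves the lookup. A sketch, with the `_OUTPUT_DIM` dict an illustration rather than part of the commit:

# Hypothetical stand-in for cirtorch's imageretrievalnet.OUTPUT_DIM,
# listing the final channel width of common torchvision ResNets.
_OUTPUT_DIM = {
    'resnet18': 512,
    'resnet34': 512,
    'resnet50': 2048,
    'resnet101': 2048,
    'resnet152': 2048,
}

if skip_layer > 0:
    features = features[:-skip_layer]
    # Each skipped final stage halves the channel width of the features.
    backbone_dim = _OUTPUT_DIM[architecture] // (2 ** skip_layer)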
requirements.txt CHANGED
@@ -3,3 +3,4 @@ pyaml
 matplotlib
 torch==1.10.2
 torchvision==0.11.3
+opencv-python==4.5.5
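Nothing in this commit imports OpenCV yet, so the pin is presumably groundwork for an upcoming visualization step. A quick post-install sanity check (illustrative, not part of the commit):

import cv2

# cv2.__version__ is a plain version string such as '4.5.5'.
assert cv2.__version__.startswith("4.5.5"), cv2.__version__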