Cherie Ho committed on
Commit
b684d11
1 Parent(s): e46cbce

added legend and more examples

Browse files
app.py CHANGED
@@ -24,7 +24,7 @@ description = """
24
  <a href="https://github.com/MapItAnywhere/MapItAnywhere" target="_blank">Code</a>
25
  </h3>
26
  <p align="center">
27
- Mapper generates birds-eye-view maps from in-the-wild monocular first-person view images. You can try our demo by uploading your images or using the examples provided. Tip: You can also try out images across the world using <a href="https://www.mapillary.com/app" target="_blank">Mapillary</a> &#128521;
28
  </p>
29
  """
30
 
@@ -119,7 +119,7 @@ def run(input_img):
119
 
120
  # TODO: add legend here
121
 
122
- plot_images([image, rgb_prediction], titles=["Input Image", "Prediction"], pad=2, adaptive=True)
123
 
124
  return plt.gcf()
125
 
@@ -128,6 +128,8 @@ examples = [
128
  ["examples/left_crossing.jpg"],
129
  ["examples/crossing.jpg"],
130
  ["examples/two_roads.jpg"],
 
 
131
  ["examples/night_road.jpg"],
132
  ["examples/night_crossing.jpg"],
133
  ]
@@ -142,4 +144,4 @@ demo = gr.Interface(
142
  ],
143
  description=description,
144
  examples=examples)
145
- demo.launch(share=False, server_name="0.0.0.0")
 
24
  <a href="https://github.com/MapItAnywhere/MapItAnywhere" target="_blank">Code</a>
25
  </h3>
26
  <p align="center">
27
+ Mapper generates birds-eye-view maps from in-the-wild monocular first-person view images. You can try our demo by uploading your images or using the examples provided. Tip: You can also try out images across the world using <a href="https://www.mapillary.com/app" target="_blank">Mapillary</a> &#128521; Also try out some examples that are taken in cities we have not trained on!
28
  </p>
29
  """
30
 
 
119
 
120
  # TODO: add legend here
121
 
122
+ plot_images([image, rgb_prediction], titles=["Input Image", "Top-Down Prediction"], pad=2, adaptive=True)
123
 
124
  return plt.gcf()
125
 
 
128
  ["examples/left_crossing.jpg"],
129
  ["examples/crossing.jpg"],
130
  ["examples/two_roads.jpg"],
131
+ ["examples/japan_narrow_road.jpeg"],
132
+ ["examples/zurich_crossing.jpg"],
133
  ["examples/night_road.jpg"],
134
  ["examples/night_crossing.jpg"],
135
  ]
 
144
  ],
145
  description=description,
146
  examples=examples)
147
+ demo.launch(share=True, server_name="0.0.0.0")
examples/japan_narrow_road.jpeg ADDED
examples/zurich_crossing.jpg ADDED

Git LFS Details

  • SHA256: 4455256942e35620ce38f606712e24ebb4e8633210fbc65bda9ffce5b63e5daa
  • Pointer size: 132 Bytes
  • Size of remote file: 1.57 MB
mapper/utils/viz_2d.py CHANGED
@@ -7,7 +7,7 @@
7
  import numpy as np
8
  import torch
9
  import matplotlib.pyplot as plt
10
-
11
 
12
  def features_to_RGB(*Fs, masks=None, skip=1):
13
  """Project a list of d-dimensional feature maps to RGB colors using PCA."""
@@ -133,4 +133,17 @@ def plot_images(imgs, titles=None, cmaps="gray", dpi=100, pad=0.5, adaptive=True
133
  spine.set_visible(False)
134
  if titles:
135
  ax[i].set_title(titles[i])
 
 
 
 
 
 
 
 
 
 
 
 
 
136
  fig.tight_layout(pad=pad)
 
7
  import numpy as np
8
  import torch
9
  import matplotlib.pyplot as plt
10
+ import matplotlib.patches as mpatches
11
 
12
  def features_to_RGB(*Fs, masks=None, skip=1):
13
  """Project a list of d-dimensional feature maps to RGB colors using PCA."""
 
133
  spine.set_visible(False)
134
  if titles:
135
  ax[i].set_title(titles[i])
136
+
137
+ # Create legend
138
+ class_colors = {
139
+ 'Road': (68, 68, 68), # 0: Black
140
+ 'Crossing': (244, 162, 97), # 1: Red
141
+ 'Sidewalk': (233, 196, 106), # 2: Yellow
142
+ 'Building': (231, 111, 81), # 5: Magenta
143
+ 'Terrain': (42, 157, 143), # 7: Cyan
144
+ 'Parking': (204, 204, 204), # 8: Dark Grey
145
+ }
146
+ patches = [mpatches.Patch(color=[c/255.0 for c in color], label=label) for label, color in class_colors.items()]
147
+ plt.legend(handles=patches, loc='upper center', bbox_to_anchor=(0.5, -0.05), ncol=3)
148
+
149
  fig.tight_layout(pad=pad)