freealise committed
Commit
8157cf8
1 Parent(s): 76b41b4

Update app.py

Files changed (1): app.py (+15, -10)
app.py CHANGED
@@ -208,7 +208,7 @@ def depth_edges_mask(depth):
 mask = depth_grad > 0.05
 return mask
 
-def pano_depth_to_world_points(depth, location, displace):
+def pano_depth_to_world_points(depth, location):
 """
 360 depth to world points
 given 2D depth is an equirectangular projection of a spherical image
@@ -240,9 +240,9 @@ def pano_depth_to_world_points(depth, location, displace):
 d_lat = lat + j/2 * np.pi / depth.shape[0] + location["pitch"] / 180 * np.pi
 
 # Convert to cartesian coordinates
-x = radius * np.cos(d_lon) * np.sin(d_lat) + location["lat"]*displace
+x = radius * np.cos(d_lon) * np.sin(d_lat)
 y = radius * np.cos(d_lat)
-z = radius * np.sin(d_lon) * np.sin(d_lat) + location["lng"]*displace
+z = radius * np.sin(d_lon) * np.sin(d_lat)
 
 pts = np.stack([x, y, z], axis=1)
 uvs = np.stack([lon, lat], axis=1)
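The edit above only drops the location["lat"]*displace and location["lng"]*displace offsets; the spherical projection itself is unchanged. As a rough, self-contained sketch of that projection (not the repo's code; the helper name, array shapes and default arguments are assumptions), the depth-to-points step now amounts to:

import numpy as np

def equirect_depth_to_points(depth, heading_deg=0.0, pitch_deg=0.0):
    # depth: (H, W) array of radii sampled from an equirectangular depth map (assumed shape).
    h, w = depth.shape
    lon = (np.arange(w) / w - 0.5) * 2.0 * np.pi + np.deg2rad(heading_deg)  # longitude per column
    lat = (np.arange(h) / h) * np.pi + np.deg2rad(pitch_deg)                # latitude per row
    lon, lat = np.meshgrid(lon, lat)
    x = depth * np.cos(lon) * np.sin(lat)   # no location-based displacement any more
    y = depth * np.cos(lat)
    z = depth * np.sin(lon) * np.sin(lat)
    return np.stack([x, y, z], axis=-1)     # (H, W, 3) world points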
@@ -258,7 +258,7 @@ def pano_depth_to_world_points(depth, location, displace):
 def rgb2gray(rgb):
 return np.dot(rgb[...,:3], [0.333, 0.333, 0.333])
 
-def get_mesh(image, depth, blur_data, loadall, displace):
+def get_mesh(image, depth, blur_data, loadall):
 global locations
 global mesh
 global scene
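The rgb2gray context line above mixes the three channels with equal 0.333 weights (a plain mean rather than perceptual luminance weights), which is what get_mesh uses to collapse the colour depth frame to a single channel before projecting it. A tiny usage sketch with made-up data:

import numpy as np

def rgb2gray(rgb):
    # Equal-weight channel mix, as in app.py.
    return np.dot(rgb[..., :3], [0.333, 0.333, 0.333])

depth_rgb = np.full((2, 3, 3), 100.0)  # dummy 2x3 RGB frame with R = G = B = 100
gray = rgb2gray(depth_rgb)             # shape (2, 3); every value is 100 * 0.999 = 99.9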
@@ -274,7 +274,7 @@ def get_mesh(image, depth, blur_data, loadall, displace):
 gdepth = rgb2gray(depthc)
 
 print('depth to gray - ok')
-points = pano_depth_to_world_points(gdepth, locations[fnum], displace)
+points = pano_depth_to_world_points(gdepth, locations[fnum])
 pts3d = points[0]
 uv = points[1]
 print('radius from depth - ok')
@@ -605,7 +605,13 @@ with gr.Blocks(css=css) as demo:
 with gr.Accordion(label="Blur levels", open=False):
  blur_in = gr.Textbox(label="Kernel size", show_label=False, value="1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1")
 with gr.Accordion(label="Locations", open=False):
-offset = gr.HTML(value="""<input type='text' id='kbrd' onkeydown='
+offset = gr.HTML(value="""<label for='displace'>Displace</label><input id='displace' type='range' style='width:256px;height:1em;' value='0' min='0' max='255' step='1' oninput='
+try {
+var json = JSON.parse(document.getElementById(\"coords\").innerText.slice(6,-1));
+BABYLON.Engine.LastCreatedScene.getNodes()[parseInt(document.getElementById(\"fnum\").getElementsByTagName(\"input\")[0].value)+1].position.x = this.value * json[parseInt(document.getElementById(\"fnum\").getElementsByTagName(\"input\")[0].value)].lat;
+BABYLON.Engine.LastCreatedScene.getNodes()[parseInt(document.getElementById(\"fnum\").getElementsByTagName(\"input\")[0].value)+1].position.z = this.value * json[parseInt(document.getElementById(\"fnum\").getElementsByTagName(\"input\")[0].value)].lng;
+} catch(e) {alert(e)}
+'/><input type='text' id='kbrd' onkeydown='
 if (BABYLON) {
 if (!BABYLON.Engine.LastCreatedScene.activeCamera.metadata) {
 var evt = document.createEvent(\"Event\");
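This is the counterpart to removing displace from the Python side: displacement is now applied in the browser. The new gr.HTML block injects a Displace range input whose oninput handler re-parses the coords JSON and moves the already-rendered Babylon.js node (position.x / position.z) by value * lat and value * lng, so previewing a displacement no longer requires re-running get_mesh on the server. A minimal sketch of the same pattern (the element id is kept, but the handler body here is a placeholder, not the commit's full script):

import gradio as gr

with gr.Blocks() as demo:
    # A plain HTML slider injected via gr.HTML: its oninput runs entirely client-side,
    # so moving it fires no Gradio event and triggers no mesh rebuild.
    gr.HTML(
        "<label for='displace'>Displace</label>"
        "<input id='displace' type='range' min='0' max='255' step='1' value='0' "
        "oninput='console.log(this.value)'/>"
    )

# demo.launch()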
@@ -720,13 +726,13 @@ with gr.Blocks(css=css) as demo:
 </pre>""")
 selected = gr.Number(elem_id="fnum", value=0, minimum=0, maximum=256, interactive=False)
 output_frame.select(fn=select_frame, inputs=[output_mask], outputs=[output_mask, selected])
-example_coords = """[
+example_coords = """"root=[
  {"lat": -2.0593128411405814e-06, "lng": 7.138250595062345e-07, "heading": 152.70303, "pitch": 2.607833999999997},
  {"lat": 1.5397763348801163e-06, "lng": 1.3651150112536925e-06, "heading": 151.12973, "pitch": 2.8672300000000064},
  {"lat": -2.1552188087525792e-05, "lng": 1.5591533522041345e-05, "heading": 151.41025, "pitch": 3.4802200000000028},
  {"lat": -3.0588534016828817e-06, "lng": 3.549113042566887e-06, "heading": 151.93391, "pitch": 2.843050000000005},
  {"lat": 2.513057799546914e-05, "lng": -2.1219586638920873e-05, "heading": 152.95769, "pitch": 4.233024999999998}
-]"""
+]""""
  coords = gr.JSON(elem_id="coords", value=example_coords, label="Precise coordinates", show_label=False)
 
  html = gr.HTML(value="""<label for='zoom'>Zoom</label><input id='zoom' type='range' style='width:256px;height:1em;' value='0.8' min='0.157' max='1.57' step='0.001' oninput='
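example_coords is now wrapped as the quoted text "root=[...]" instead of a bare JSON array. That appears to pair with the new handler's JSON.parse(...innerText.slice(6,-1)) above: dropping the six leading characters ("root=) and the trailing quote recovers the raw array. A hypothetical Python illustration of that unwrapping (the wrapped string is shortened to a single entry):

import json

wrapped = '"root=[{"lat": -2.0593128411405814e-06, "lng": 7.138250595062345e-07, "heading": 152.70303, "pitch": 2.607833999999997}]"'
raw = wrapped[6:-1]             # same idea as the JS .slice(6, -1)
locations = json.loads(raw)     # -> list of {"lat", "lng", "heading", "pitch"} dicts
print(locations[0]["heading"])  # 152.70303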
@@ -843,7 +849,6 @@ with gr.Blocks(css=css) as demo:
 '/><br/>
 <canvas id='cnv_out'/>""")
 load_all = gr.Checkbox(label="Load all")
-displace = gr.Slider(label="Displace", value=0, maximum=255, minimum=0, step=1)
 render = gr.Button("Render")
 
 def on_submit(uploaded_video,model_type,coordinates):
@@ -881,7 +886,7 @@ with gr.Blocks(css=css) as demo:
 return output_video_path + (locations,)
 
 submit.click(on_submit, inputs=[input_video, model_type, coords], outputs=[processed_video, processed_zip, output_frame, output_mask, coords])
-render.click(partial(get_mesh), inputs=[output_frame, output_mask, blur_in, load_all, displace], outputs=[result])
+render.click(partial(get_mesh), inputs=[output_frame, output_mask, blur_in, load_all], outputs=[result])
 
 example_files = os.listdir('examples')
 example_files.sort()
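For context on the wiring above: render.click passes the four listed component values positionally into get_mesh, and wrapping the callback in partial() with no pre-bound arguments behaves the same as passing get_mesh directly. A stripped-down sketch of the same event wiring (the components here are stand-in Textbox/Checkbox widgets, not the app's real image outputs):

from functools import partial
import gradio as gr

def get_mesh(image, depth, blur_data, loadall):
    # Placeholder body; the real get_mesh builds a textured 3D mesh from the frame and its depth map.
    return f"mesh({image!r}, {depth!r}, blur={blur_data!r}, load_all={loadall})"

with gr.Blocks() as demo:
    output_frame = gr.Textbox(value="frame.png")
    output_mask = gr.Textbox(value="depth.png")
    blur_in = gr.Textbox(value="1 1 1")
    load_all = gr.Checkbox(label="Load all")
    result = gr.Textbox()
    render = gr.Button("Render")
    render.click(partial(get_mesh), inputs=[output_frame, output_mask, blur_in, load_all], outputs=[result])

# demo.launch()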
 