02alexander committed on
Commit
4050bb1
1 Parent(s): b00b3bb
Files changed (1)
  1. app.py +4 -91
app.py CHANGED
@@ -139,12 +139,10 @@ model = model.to(device)
 
 print('Loading Finished!')
 
-
 def check_input_image(input_image):
     if input_image is None:
         raise gr.Error("No image uploaded!")
 
-
 def preprocess(input_image, do_remove_background):
 
     rembg_session = rembg.new_session() if do_remove_background else None
@@ -221,53 +219,16 @@ def _make3d(output_queue: SimpleQueue, images: Image.Image):
     images = rearrange(images, 'c (n h) (m w) -> (n m) c h w', n=3, m=2)    # (6, 3, 320, 320)
 
     input_cameras = get_zero123plus_input_cameras(batch_size=1, radius=4.0).to(device)
-    render_cameras = get_render_cameras(batch_size=1, radius=2.5, is_flexicubes=IS_FLEXICUBES).to(device)
 
     images = images.unsqueeze(0).to(device)
     images = v2.functional.resize(images, (320, 320), interpolation=3, antialias=True).clamp(0, 1)
 
     mesh_fpath = tempfile.NamedTemporaryFile(suffix=f".obj", delete=False).name
-    mesh_basename = os.path.basename(mesh_fpath).split('.')[0]
-    mesh_dirname = os.path.dirname(mesh_fpath)
-    video_fpath = os.path.join(mesh_dirname, f"{mesh_basename}.mp4")
-    mesh_glb_fpath = os.path.join(mesh_dirname, f"{mesh_basename}.glb")
 
     with torch.no_grad():
         # get triplane
         planes = model.forward_planes(images, input_cameras)
 
-        # get video
-        # chunk_size = 20 if IS_FLEXICUBES else 1
-        # render_size = 384
-
-
-        # frames = []
-        # for i in tqdm(range(0, render_cameras.shape[1], chunk_size)):
-        #     if IS_FLEXICUBES:
-        #         frame = model.forward_geometry(
-        #             planes,
-        #             render_cameras[:, i:i+chunk_size],
-        #             render_size=render_size,
-        #         )['img']
-        #     else:
-        #         frame = model.synthesizer(
-        #             planes,
-        #             cameras=render_cameras[:, i:i+chunk_size],
-        #             render_size=render_size,
-        #         )['images_rgb']
-
-        #     frames.append(frame)
-
-        # frames = torch.cat(frames, dim=1)
-
-        # images_to_video(
-        #     frames[0],
-        #     video_fpath,
-        #     fps=30,
-        # )
-
-        # print(f"Video saved to {video_fpath}")
-
         # get mesh
         mesh_out = model.extract_mesh(
             planes,
@@ -288,14 +249,6 @@ def _make3d(output_queue: SimpleQueue, images: Image.Image):
                 ),
             )
         )
-
-        vertices = vertices[:, [1, 2, 0]]
-
-        save_glb(vertices, faces, vertex_colors, mesh_glb_fpath)
-        save_obj(vertices, faces, vertex_colors, mesh_fpath)
-
-        print(f"Mesh saved to {mesh_fpath}")
-
         output_queue.put(("mesh", mesh_out))
 
 def generate_blueprint() -> rrb.Blueprint:
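With the on-disk .obj/.glb export removed, the extracted mesh only leaves `_make3d` through the `("mesh", mesh_out)` tuple pushed onto the queue. The consumer side is not part of this diff; the sketch below is a hypothetical reader, assuming `mesh_out` unpacks to `(vertices, faces, vertex_colors)` torch tensors as in the removed save path, a Rerun SDK of 0.16 or newer (where `rr.Mesh3D` takes `vertex_positions`, `triangle_indices`, `vertex_colors`), and an entity path `"mesh"`.

```python
# Hypothetical consumer of the queue filled by _make3d above; not part of this commit.
import rerun as rr

def log_mesh_from_queue(output_queue):
    kind, mesh_out = output_queue.get()        # blocks until _make3d pushes ("mesh", mesh_out)
    if kind != "mesh":
        return
    # Assumes mesh_out unpacks the same way the removed save_obj/save_glb path used it.
    vertices, faces, vertex_colors = mesh_out
    rr.log(
        "mesh",
        rr.Mesh3D(
            vertex_positions=vertices.cpu().numpy(),
            triangle_indices=faces.cpu().numpy(),
            vertex_colors=vertex_colors.cpu().numpy(),
        ),
    )
```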
@@ -306,7 +259,7 @@ def generate_blueprint() -> rrb.Blueprint:
                 rrb.Spatial2DView(origin="z123image"),
                 rrb.Spatial2DView(origin="preprocessed_image"),
                 rrb.Spatial2DView(origin="mvs/image"),
-                rrb.TensorView(origin="mvs/latents"),
+                rrb.TensorView(origin="mvs/latents", ),
             ),
             column_shares=[1, 1],
         ),
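Only a fragment of `generate_blueprint` appears in this hunk. For context, a self-contained sketch of the layout it implies is shown below; the container nesting and the `rrb.Spatial3DView(origin="mesh")` pane are assumptions, while the 2D views, the `TensorView`, and `column_shares=[1, 1]` come from the diff.

```python
# Sketch of the blueprint shape implied by the hunk above; container nesting and the
# 3D mesh view are assumptions, the individual 2D/tensor views are taken from the diff.
import rerun.blueprint as rrb

def generate_blueprint() -> rrb.Blueprint:
    return rrb.Blueprint(
        rrb.Horizontal(
            rrb.Vertical(
                rrb.Spatial2DView(origin="z123image"),
                rrb.Spatial2DView(origin="preprocessed_image"),
                rrb.Spatial2DView(origin="mvs/image"),
                rrb.TensorView(origin="mvs/latents"),
            ),
            rrb.Spatial3DView(origin="mesh"),   # assumed pane for the reconstructed mesh
            column_shares=[1, 1],
        ),
    )
```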
@@ -351,15 +304,13 @@ def log_to_rr(input_image, do_remove_background, sample_steps, sample_seed):
     # return mesh
 
 _HEADER_ = '''
-<h2><b>Official 🤗 Gradio Demo</b></h2><h2><a href='https://github.com/TencentARC/InstantMesh' target='_blank'><b>InstantMesh: Efficient 3D Mesh Generation from a Single Image with Sparse-view Large Reconstruction Models</b></a></h2>
+<h2><b>Duplicate of the <a href=https://huggingface.co/spaces/TencentARC/InstantMesh>InstantMesh space</a> that uses <a href=https://rerun.io/>Rerun</a> for visualization.</b></h2>
+<h2><a href='https://github.com/TencentARC/InstantMesh' target='_blank'><b>InstantMesh: Efficient 3D Mesh Generation from a Single Image with Sparse-view Large Reconstruction Models</b></a></h2>
 
 **InstantMesh** is a feed-forward framework for efficient 3D mesh generation from a single image based on the LRM/Instant3D architecture.
 
-Code: <a href='https://github.com/TencentARC/InstantMesh' target='_blank'>GitHub</a>. Techenical report: <a href='https://arxiv.org/abs/2404.07191' target='_blank'>ArXiv</a>.
+Technical report: <a href='https://arxiv.org/abs/2404.07191' target='_blank'>ArXiv</a>.
 
-❗️❗️❗️**Important Notes:**
-- Our demo can export a .obj mesh with vertex colors or a .glb mesh now. If you prefer to export a .obj mesh with a **texture map**, please refer to our <a href='https://github.com/TencentARC/InstantMesh?tab=readme-ov-file#running-with-command-line' target='_blank'>Github Repo</a>.
-- The 3D mesh generation results highly depend on the quality of generated multi-view images. Please try a different **seed value** if the result is unsatisfying (Default: 42).
 '''
 
 _CITE_ = r"""
@@ -434,30 +385,6 @@ with gr.Blocks() as demo:
 
             viewer = Rerun(streaming=True, height=800)
 
-            # with gr.Row():
-
-            #     with gr.Column():
-            #         mv_show_images = gr.Image(
-            #             label="Generated Multi-views",
-            #             type="pil",
-            #             width=379,
-            #             interactive=False
-            #         )
-
-            #     with gr.Row():
-            #         with gr.Tab("OBJ"):
-            #             output_model_obj = gr.Model3D(
-            #                 label="Output Model (OBJ Format)",
-            #                 interactive=False,
-            #             )
-            #             gr.Markdown("Note: Downloaded .obj model will be flipped. Export .glb instead or manually flip it before usage.")
-            #         with gr.Tab("GLB"):
-            #             output_model_glb = gr.Model3D(
-            #                 label="Output Model (GLB Format)",
-            #                 interactive=False,
-            #             )
-            #             gr.Markdown("Note: The model shown here has a darker appearance. Download to get correct results.")
-
     with gr.Row():
         gr.Markdown('''Try a different <b>seed value</b> if the result is unsatisfying (Default: 42).''')
 
@@ -470,19 +397,5 @@
         inputs=[input_image, do_remove_background, sample_steps, sample_seed],
         outputs=[viewer]
     )
-    # submit.click(fn=check_input_image, inputs=[input_image]).success(
-    #     fn=preprocess,
-    #     inputs=[input_image, do_remove_background],
-    #     outputs=[processed_image],
-    # ).success(
-    #     fn=generate_mvs,
-    #     inputs=[processed_image, sample_steps, sample_seed],
-    #     outputs=[mv_images, mv_show_images]
-
-    # ).success(
-    #     fn=make3d,
-    #     inputs=[mv_images],
-    #     outputs=[output_model_obj, output_model_glb]
-    # )
 
 demo.launch()
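The new click chain sends everything to the embedded Rerun viewer (`outputs=[viewer]`) instead of returning file paths to `gr.Model3D` components. The body of `log_to_rr` is outside this diff; below is a minimal sketch of the usual `gradio_rerun` streaming pattern, assuming rerun-sdk 0.16 or newer (`rr.thread_local_stream`, `rr.binary_stream`, `rr.send_blueprint`). The entity path and image handling are illustrative only.

```python
# Illustrative streaming callback; the real log_to_rr body is not part of this commit.
import numpy as np
import rerun as rr

@rr.thread_local_stream("InstantMesh")          # one recording stream per Gradio worker thread
def log_to_rr(input_image, do_remove_background, sample_steps, sample_seed):
    stream = rr.binary_stream()                 # in-memory .rrd stream consumed by the viewer
    rr.send_blueprint(generate_blueprint())     # layout defined earlier in app.py
    rr.log("preprocessed_image", rr.Image(np.asarray(input_image)))
    yield stream.read()                         # Gradio forwards the chunk to the Rerun component
```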
 