Chao Xu committed on
Commit 3c3d4fa • 1 Parent(s): 7148e12

empty cache

Files changed (1)
  1. app.py +7 -4
app.py CHANGED
@@ -30,10 +30,7 @@ import torch
 import fire
 import gradio as gr
 import numpy as np
-# import plotly.express as px
 import plotly.graph_objects as go
-# import rich
-import sys
 from functools import partial
 
 from lovely_numpy import lo
@@ -272,12 +269,13 @@ def stage1_run(models, device, cam_vis, tmp_dir,
             output_ims_2 = predict_stage1_gradio(models['turncam'], input_im, save_path=stage1_dir, adjust_set=list(range(4,8)), device=device, ddim_steps=ddim_steps, scale=scale)
         else:
             output_ims_2 = predict_stage1_gradio(models['turncam'], input_im, save_path=stage1_dir, adjust_set=list(range(8,12)), device=device, ddim_steps=ddim_steps, scale=scale)
+        torch.cuda.empty_cache()
         return (90-elev_output, new_fig, *output_ims, *output_ims_2)
     else:
         rerun_idx = [i for i in range(len(btn_retrys)) if btn_retrys[i]]
         # elev_output = estimate_elev(tmp_dir)
         # if elev_output > 75:
-        if 90-elev >75:
+        if 90-elev > 75:
             rerun_idx_in = [i if i < 4 else i+4 for i in rerun_idx]
         else:
             rerun_idx_in = rerun_idx
@@ -290,6 +288,7 @@ def stage1_run(models, device, cam_vis, tmp_dir,
         for idx, view_idx in enumerate(rerun_idx):
             outputs[view_idx] = output_ims[idx]
         reset = [gr.update(value=False)] * 8
+        torch.cuda.empty_cache()
         return (rerun_all, *reset, *outputs)
 
 def stage2_run(models, device, tmp_dir,
@@ -309,6 +308,7 @@ def stage2_run(models, device, tmp_dir,
     dataset = tmp_dir
     main_dir_path = os.path.dirname(os.path.abspath(
         inspect.getfile(inspect.currentframe())))
+    torch.cuda.empty_cache()
     os.chdir(os.path.join(code_dir, 'SparseNeuS_demo_v1/'))
 
     bash_script = f'CUDA_VISIBLE_DEVICES={_GPU_INDEX} python exp_runner_generic_blender_val.py --specific_dataset_name {dataset} --mode export_mesh --conf confs/one2345_lod0_val_demo.conf --is_continue'
@@ -333,6 +333,7 @@ def stage2_run(models, device, tmp_dir,
     mesh.faces = np.fliplr(mesh.faces)
     # Export the mesh as .obj file with colors
     mesh.export(mesh_path, file_type='obj', include_color=True)
+    torch.cuda.empty_cache()
 
     if not is_rerun:
         return (mesh_path)
@@ -344,6 +345,7 @@ def nsfw_check(models, raw_im, device='cuda'):
     (_, has_nsfw_concept) = models['nsfw'](
         images=np.ones((1, 3)), clip_input=safety_checker_input.pixel_values)
     print('has_nsfw_concept:', has_nsfw_concept)
+    del safety_checker_input
     if np.any(has_nsfw_concept):
         print('NSFW content detected.')
         # Define the image size and background color
@@ -372,6 +374,7 @@ def preprocess_run(predictor, models, raw_im, preprocess, *bbox_sliders):
         return check_results
     image_sam = sam_out_nosave(predictor, raw_im.convert("RGB"), *bbox_sliders)
     input_256 = image_preprocess_nosave(image_sam, lower_contrast=preprocess, rescale=True)
+    torch.cuda.empty_cache()
     return input_256
 
 def calc_cam_cone_pts_3d(polar_deg, azimuth_deg, radius_m, fov_deg):
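
The additions follow a common pattern for long-running Gradio demos: after each GPU-heavy stage, drop references to large intermediates and call torch.cuda.empty_cache() so PyTorch's caching allocator hands unused blocks back to the driver. Below is a minimal, self-contained sketch of that pattern; heavy_gpu_stage and run_stage are hypothetical stand-ins for the demo's stage functions (e.g. predict_stage1_gradio), not code from this repository.

# Minimal sketch (not code from this commit): free cached GPU memory between stages.
# `heavy_gpu_stage` is a hypothetical stand-in for a memory-hungry model call such as
# diffusion sampling or SAM segmentation.
import torch

def heavy_gpu_stage(x: torch.Tensor) -> torch.Tensor:
    # Placeholder for a large model invocation.
    return x * 2

def run_stage(x: torch.Tensor) -> torch.Tensor:
    intermediate = heavy_gpu_stage(x)
    result = intermediate.cpu()   # keep only what the caller actually needs
    del intermediate              # drop the reference so the allocator can reclaim it
    if torch.cuda.is_available():
        torch.cuda.empty_cache()  # return cached, unreferenced blocks to the driver
    return result

if __name__ == "__main__":
    device = "cuda" if torch.cuda.is_available() else "cpu"
    print(run_stage(torch.ones(4, device=device)))

Note that empty_cache() cannot release memory still held by live Python references, which is presumably why the commit also adds del safety_checker_input before relying on the cache release. The call mainly reduces fragmentation and frees headroom for other work sharing the GPU; it does not make the demo itself faster.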