02alexander committed
Commit 75eb903
Parent: 9f38f01

remove video generation

Files changed (1): app.py (+19 -22)
app.py CHANGED
@@ -217,7 +217,7 @@ def _make3d(output_queue: SimpleQueue, images: Image.Image):
     print(f'type(images)={type(images)}')
     global model
     if IS_FLEXICUBES:
-        model.init_flexicubes_geometry(device, use_renderer=True)
+        model.init_flexicubes_geometry(device, use_renderer=False)
     model = model.eval()
 
     images = np.asarray(images, dtype=np.float32) / 255.0
@@ -245,30 +245,27 @@ def _make3d(output_queue: SimpleQueue, images: Image.Image):
     planes = model.forward_planes(images, input_cameras)
     print(f'type(planes)={type(planes)}')
 
-    # # get video
-    chunk_size = 20 if IS_FLEXICUBES else 1
-    render_size = 384
+    # get video
+    # chunk_size = 20 if IS_FLEXICUBES else 1
+    # render_size = 384
 
-    print(f'IS_FLEXICUBES={IS_FLEXICUBES}')
 
     # frames = []
-    for i in tqdm(range(0, render_cameras.shape[1], chunk_size)):
-        if IS_FLEXICUBES:
-            frame = model.forward_geometry(
-                planes,
-                render_cameras[:, i:i+chunk_size],
-                render_size=render_size,
-            )['img']
-        else:
-            frame = model.synthesizer(
-                planes,
-                cameras=render_cameras[:, i:i+chunk_size],
-                render_size=render_size,
-            )['images_rgb']
-
-        print(f'type(frame)={type(frame)}')
-        output_queue.put(("log", "3dvideo", rr.Image(frame)))
-        # frames.append(frame)
+    # for i in tqdm(range(0, render_cameras.shape[1], chunk_size)):
+    #     if IS_FLEXICUBES:
+    #         frame = model.forward_geometry(
+    #             planes,
+    #             render_cameras[:, i:i+chunk_size],
+    #             render_size=render_size,
+    #         )['img']
+    #     else:
+    #         frame = model.synthesizer(
+    #             planes,
+    #             cameras=render_cameras[:, i:i+chunk_size],
+    #             render_size=render_size,
+    #         )['images_rgb']
+    #
+    #     # frames.append(frame)
 
 
     # frames = torch.cat(frames, dim=1)
 
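For context on what the removed loop was doing: _make3d presumably runs on a worker thread and never displays frames itself; it only pushes ("log", entity_path, archetype) tuples onto output_queue, here ("log", "3dvideo", rr.Image(frame)). Below is a minimal sketch of a matching consumer, assuming a None sentinel marks the end of the stream; drain_logs and the sentinel are illustrative, not code from this Space.

# Hypothetical consumer for the ("log", entity_path, archetype) tuples
# that _make3d puts on output_queue; the name and sentinel are assumptions.
from queue import SimpleQueue

import rerun as rr

def drain_logs(output_queue: SimpleQueue) -> None:
    while True:
        item = output_queue.get()   # blocks until the worker pushes an item
        if item is None:            # assumed end-of-stream sentinel
            return
        op, entity_path, archetype = item
        if op == "log":
            # rr.log records the archetype (e.g. rr.Image(frame)) under the
            # given entity path, so the viewer shows it at "3dvideo".
            rr.log(entity_path, archetype)

With the rendering loop commented out, nothing is pushed for "3dvideo" anymore, which is consistent with the commit also passing use_renderer=False to init_flexicubes_geometry.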