amankishore committed on
Commit a698138
1 Parent(s): f845bf6

Better video

Files changed (1)
  1. app.py +16 -10
app.py CHANGED
@@ -7,6 +7,7 @@ import imageio
 import numpy as np
 import math
 import argparse
+import tempfile
 
 import torch
 import base64
@@ -61,6 +62,14 @@ def decode_latent_images_foo(
     )
     return decoded
 
+def to_video(frames: list[Image.Image], fps: int = 5) -> str:
+    out_file = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False)
+    writer = imageio.get_writer(out_file.name, format='FFMPEG', fps=fps)
+    for frame in frames:
+        writer.append_data(np.asarray(frame))
+    writer.close()
+    return out_file.name
+
 def generate_3D(input, grid_size=64):
     set_state('Entered generate function...')
 
@@ -129,11 +138,8 @@ def generate_3D(input, grid_size=64):
 
     images = decode_latent_images(xm, latents[0], cameras, rendering_mode=render_mode)
 
-    # Convert images to gif
-    images[0].save(f'/tmp/mesh.gif', save_all=True, append_images=images[1:], duration=100, loop=0)
-
 
-    return ply_to_glb('/tmp/mesh.ply', '/tmp/mesh.glb'), '/tmp/mesh.gif', gr.update(value=['/tmp/mesh.glb', '/tmp/mesh.ply'], visible=True)
+    return ply_to_glb('/tmp/mesh.ply', '/tmp/mesh.glb'), to_video(images), gr.update(value=['/tmp/mesh.glb', '/tmp/mesh.ply'], visible=True)
 
 def prepare_img(img):
 
@@ -257,20 +263,20 @@ with block:
     if torch.cuda.is_available():
         gr.Examples(
             examples=[
-                ["images/pumpkin.png"],
-                ["images/fantasy_world.png"],
+                ["a shark"],
+                ["an avocado"],
             ],
-            inputs=[input_image],
+            inputs=[prompt],
             outputs=[model_3d, model_gif, file_out],
             fn=generate_3D,
             cache_examples=True
         )
         gr.Examples(
             examples=[
-                ["a shark"],
-                ["an avocado"],
+                ["images/pumpkin.png"],
+                ["images/fantasy_world.png"],
             ],
-            inputs=[prompt],
+            inputs=[input_image],
             outputs=[model_3d, model_gif, file_out],
             fn=generate_3D,
             cache_examples=True
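
For reference, a minimal standalone sketch of the GIF-to-MP4 change above. It mirrors the to_video helper added in this commit and assumes imageio's 'FFMPEG' format is available (typically via the imageio-ffmpeg package); the placeholder frames in the __main__ block are hypothetical stand-ins for the renders that decode_latent_images produces in app.py.

# Standalone sketch of the commit's MP4 path; assumes imageio-ffmpeg is installed.
import tempfile

import imageio
import numpy as np
from PIL import Image


def to_video(frames: list[Image.Image], fps: int = 5) -> str:
    # Write each PIL frame into a temporary .mp4 and return its path;
    # delete=False keeps the file on disk so Gradio can serve it.
    out_file = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False)
    writer = imageio.get_writer(out_file.name, format='FFMPEG', fps=fps)
    for frame in frames:
        writer.append_data(np.asarray(frame))
    writer.close()
    return out_file.name


if __name__ == '__main__':
    # Hypothetical placeholder frames; in app.py these come from
    # decode_latent_images(xm, latents[0], cameras, ...).
    frames = [Image.new('RGB', (64, 64), (4 * i, 0, 0)) for i in range(40)]
    print(to_video(frames))

Because the temporary file is created with delete=False, each call leaves an .mp4 in the system temp directory until it is cleaned up, which keeps the path valid for the Gradio output component.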