Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -312,6 +312,7 @@ class ShapERenderer:
         self.diffusion = diffusion_from_config(load_config('diffusion'))
         print("Shap-E models initialized!")
 
+    @spaces.GPU(duration=80)
     def generate_views(self, prompt, guidance_scale=15.0, num_steps=64):
         try:
             torch.cuda.empty_cache()  # Clear GPU memory before generation
@@ -322,22 +323,22 @@ class ShapERenderer:
 
             with torch.amp.autocast('cuda'):  # Use automatic mixed precision
                 # spaces duration is 20 seconds, so we need to be careful here
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+                latents = sample_latents(
+                    batch_size=batch_size,
+                    model=self.model,
+                    diffusion=self.diffusion,
+                    guidance_scale=guidance_scale,
+                    model_kwargs=dict(texts=[prompt] * batch_size),
+                    progress=True,
+                    clip_denoised=True,
+                    use_fp16=True,
+                    use_karras=True,
+                    karras_steps=num_steps,
+                    sigma_min=1e-3,
+                    sigma_max=160,
+                    s_churn=0,
+                )
 
             # Render the 6 views we need with specific viewing angles
             size = 320  # Size of each rendered image
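
The substantive change is the spaces.GPU(duration=80) decorator on generate_views, which asks ZeroGPU for an explicit 80-second GPU window per call instead of the short default allocation the in-code comment worries about. Below is a minimal sketch of how such a decorated generation path typically fits together on a ZeroGPU Space, following the stock shap-e example API (load_model, sample_latents, create_pan_cameras, decode_latent_images); the class layout, the self.xm transmitter handle, the batch_size value, and the rendering step are assumptions for illustration, not the Space's actual code:

import spaces
import torch

from shap_e.diffusion.gaussian_diffusion import diffusion_from_config
from shap_e.diffusion.sample import sample_latents
from shap_e.models.download import load_config, load_model
from shap_e.util.notebooks import create_pan_cameras, decode_latent_images


class ShapERendererSketch:
    """Illustrative stand-in for the Space's ShapERenderer (assumed layout)."""

    def __init__(self, device='cuda'):
        self.device = torch.device(device)
        self.xm = load_model('transmitter', device=self.device)   # latent decoder/renderer (assumed)
        self.model = load_model('text300M', device=self.device)   # text-conditioned prior
        self.diffusion = diffusion_from_config(load_config('diffusion'))

    @spaces.GPU(duration=80)  # request an 80 s ZeroGPU window for this call
    def generate_views(self, prompt, guidance_scale=15.0, num_steps=64):
        torch.cuda.empty_cache()  # clear GPU memory before generation
        batch_size = 1            # assumed; the diff does not show where batch_size is set
        with torch.amp.autocast('cuda'):  # automatic mixed precision
            latents = sample_latents(
                batch_size=batch_size,
                model=self.model,
                diffusion=self.diffusion,
                guidance_scale=guidance_scale,
                model_kwargs=dict(texts=[prompt] * batch_size),
                progress=True,
                clip_denoised=True,
                use_fp16=True,
                use_karras=True,
                karras_steps=num_steps,
                sigma_min=1e-3,
                sigma_max=160,
                s_churn=0,
            )
        # Decode the first latent into a ring of rendered views; the Space itself
        # renders 6 specific angles at size 320, while create_pan_cameras is simply
        # the stock shap-e helper used here for illustration.
        size = 320
        cameras = create_pan_cameras(size, self.device)
        return decode_latent_images(self.xm, latents[0], cameras, rendering_mode='nerf')

The longer window gives 64 Karras sampling steps plus view decoding room to finish before ZeroGPU reclaims the device; the GPU is released when the decorated call returns.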