import torch
from diffusers import ShapEPipeline
from diffusers.utils import export_to_gif
import PIL.Image


def generate_3d_model(prompt, output_path="assistant_3d.gif"):
    """
    Generate a 3D model using ShapE, optimized for CPU usage.
    """
    try:
        # Force CPU and float32 precision (half precision is poorly supported on CPU)
        pipe = ShapEPipeline.from_pretrained(
            "openai/shap-e",
            torch_dtype=torch.float32,
            low_cpu_mem_usage=True
        ).to("cpu")

        # Minimal generation settings to reduce memory usage
        outputs = pipe(
            prompt,
            num_inference_steps=32,  # Reduced from the default of 64
            frame_size=32,           # Smaller frame size
            guidance_scale=10.0,     # Reduced guidance scale
        )
        # ShapE returns one list of frames per prompt; take the frames for our single prompt
        frames = outputs.images[0]

        # Ensure we have PIL images
        if not isinstance(frames[0], PIL.Image.Image):
            frames = [PIL.Image.fromarray(img) for img in frames]

        # Save as GIF
        gif_path = export_to_gif(frames, output_path)
        print(f"Successfully created GIF at: {gif_path}")
        return gif_path
    except Exception as e:
        print(f"Error during generation: {e}")
        print(f"Error type: {type(e)}")
        print(f"Full error details: {str(e)}")
        raise


if __name__ == "__main__":
    prompt = "A gentle AI voice assistant constructed from a circle ring and 3 lines that fly alongside the circle"  # Simplified prompt
    try:
        generate_3d_model(prompt)
    except Exception as e:
        print(f"Generation failed: {e}")