import torch
from diffusers import ShapEPipeline
from diffusers.utils import export_to_gif
import PIL.Image
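

# This script uses the diffusers ShapEPipeline with the "openai/shap-e" checkpoint
# to turn a text prompt into rendered 3D views exported as a GIF. The generation
# settings below are deliberately scaled down so it can run on CPU-only machines.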
def generate_3d_model(prompt, output_path="assistant_3d.gif"):
    """
    Generate a 3D model using Shap-E, with settings scaled down for CPU usage.
    """
    try:
        # Force CPU; keep float32, since half precision is poorly supported on CPU
        pipe = ShapEPipeline.from_pretrained(
            "openai/shap-e",
            torch_dtype=torch.float32,
            low_cpu_mem_usage=True,
        ).to("cpu")

        # Minimal generation settings to reduce memory usage
        outputs = pipe(
            prompt,
            num_inference_steps=32,  # Reduced from the default
            frame_size=32,           # Smaller frame size
            guidance_scale=10.0,     # Reduced guidance scale
        )
        # The pipeline returns one list of frames per prompt; take the first
        frames = outputs.images[0]

        # Ensure we have PIL images before exporting
        if not isinstance(frames[0], PIL.Image.Image):
            frames = [PIL.Image.fromarray(img) for img in frames]

        # Save the rendered frames as a GIF
        gif_path = export_to_gif(frames, output_path)
        print(f"Successfully created GIF at: {gif_path}")
        return gif_path
    except Exception as e:
        print(f"Error during generation: {e}")
        print(f"Error type: {type(e)}")
        print(f"Full error details: {str(e)}")
        raise

if __name__ == "__main__":
    # Simplified prompt
    prompt = "A gentle AI voice assistant constructed from a circle ring and 3 lines that fly alongside the circle"
    try:
        generate_3d_model(prompt)
    except Exception as e:
        print(f"Generation failed: {e}")