alpha111 committed
Commit
b71b94c
1 Parent(s): d09ec35

Update README.md

Files changed (1)
  1. README.md +37 -11
README.md CHANGED
@@ -1,11 +1,37 @@
- ---
- language:
- - en
- - bg
- metrics:
- - accuracy
- - character
- pipeline_tag: image-to-3d
- tags:
- - music
- ---
+
+ import torch
+ import requests
+ from PIL import Image
+ import numpy as np
+ from torchvision.utils import make_grid, save_image
+ from diffusers import DiffusionPipeline  # only tested on diffusers[torch]==0.19.3; may conflict with newer versions of diffusers
+
+ def load_wonder3d_pipeline():
+     pipeline = DiffusionPipeline.from_pretrained(
+         'flamehaze1115/wonder3d-v1.0',  # or use local checkpoint './ckpts'
+         custom_pipeline='flamehaze1115/wonder3d-pipeline',
+         torch_dtype=torch.float16
+     )
+     # enable xformers memory-efficient attention
+     pipeline.unet.enable_xformers_memory_efficient_attention()
+     if torch.cuda.is_available():
+         pipeline.to('cuda:0')
+     return pipeline
+
+ pipeline = load_wonder3d_pipeline()
+
+ # Download an example image.
+ cond = Image.open(requests.get("https://d.skis.ltd/nrp/sample-data/lysol.png", stream=True).raw)
+
+ # The object should be located in the center and resized to 80% of the image height.
+ # Drop the alpha channel so the conditioning image is RGB.
+ cond = Image.fromarray(np.array(cond)[:, :, :3])
+
+ # Run the pipeline!
+ images = pipeline(cond, num_inference_steps=20, output_type='pt', guidance_scale=1.0).images
+
+ # Arrange the generated views into a single grid image, 6 views per row.
+ result = make_grid(images, nrow=6, padding=0)  # make_grid has no `ncol` argument; the row count follows from the number of views
+
+ save_image(result, 'result.png')
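
The comment in the snippet above says the object should be centered and scaled to roughly 80% of the image height. A minimal preprocessing sketch for that step is given below; it is not part of the committed README, it assumes the conditioning PNG has a transparent (alpha) background, and the helper name, canvas size, and white fill are hypothetical choices.

    def center_and_resize(img, size=256):
        # Hypothetical helper, not from the README: place the object on a square
        # white canvas, centered and at ~80% of the canvas height (scaled by height only).
        rgba = img.convert('RGBA')
        bbox = rgba.split()[-1].getbbox() or (0, 0, rgba.width, rgba.height)  # bounding box from the alpha channel
        obj = rgba.crop(bbox)
        target_h = int(size * 0.8)  # 80% of image height, per the comment above
        scale = target_h / obj.height
        obj = obj.resize((max(1, int(obj.width * scale)), target_h))
        canvas = Image.new('RGBA', (size, size), (255, 255, 255, 255))
        canvas.paste(obj, ((size - obj.width) // 2, (size - obj.height) // 2), obj)
        return canvas

If used, it would be applied to `cond` before the alpha channel is dropped, e.g. `cond = center_and_resize(cond)`.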
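
The committed example only writes the combined grid. As a small follow-up sketch (again not part of the diff above), the views returned by the pipeline can also be saved one file each; the filename pattern is a hypothetical choice.

    # Assumes `images` from the pipeline call above: one 3xHxW tensor per view,
    # with values already in [0, 1] because output_type='pt'.
    for i, view in enumerate(images):
        save_image(view, f'view_{i:02d}.png')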