Wording in the code example

#2
by multimodalart (HF staff) - opened
Files changed (1)
  1. README.md +24 -20
README.md CHANGED
@@ -65,7 +65,9 @@ aesthetic prompts. Specifically, Stable Cascade (30 inference steps) was compare
 steps), SDXL (50 inference steps), SDXL Turbo (1 inference step) and Würstchen v2 (30 inference steps).
 
 ## Code Example
-Iinstall `diffusers` from this branch while the PR is WIP.
+
+**⚠️ Important**: For the code below to work, you have to install `diffusers` from this branch while the PR is WIP.
+
 ```shell
 pip install git+https://github.com/kashif/diffusers.git@wuerstchen-v3
 ```
@@ -75,31 +77,33 @@ import torch
 from diffusers import StableCascadeDecoderPipeline, StableCascadePriorPipeline
 
 device = "cuda"
-dtype = torch.bfloat16
 num_images_per_prompt = 2
 
-prior = StableCascadePriorPipeline.from_pretrained("stabilityai/stable-cascade-prior", torch_dtype=dtype).to(device)
-decoder = StableCascadeDecoderPipeline.from_pretrained("stabilityai/stable-cascade", torch_dtype=dtype).to(device)
+prior = StableCascadePriorPipeline.from_pretrained("stabilityai/stable-cascade-prior", torch_dtype=torch.bfloat16).to(device)
+decoder = StableCascadeDecoderPipeline.from_pretrained("stabilityai/stable-cascade", torch_dtype=torch.float16).to(device)
 
 prompt = "Anthropomorphic cat dressed as a pilot"
 negative_prompt = ""
 
-with torch.cuda.amp.autocast(dtype=dtype):
-    prior_output = prior(
-        prompt=prompt,
-        height=1024,
-        width=1024,
-        negative_prompt=negative_prompt,
-        guidance_scale=4.0,
-        num_images_per_prompt=num_images_per_prompt,
-    )
-    decoder_output = decoder(
-        image_embeddings=prior_output.image_embeddings,
-        prompt=prompt,
-        negative_prompt=negative_prompt,
-        guidance_scale=0.0,
-        output_type="pil",
-    ).images
+prior_output = prior(
+    prompt=prompt,
+    height=1024,
+    width=1024,
+    negative_prompt=negative_prompt,
+    guidance_scale=4.0,
+    num_images_per_prompt=num_images_per_prompt,
+    num_inference_steps=20
+)
+decoder_output = decoder(
+    image_embeddings=prior_output.image_embeddings.half(),
+    prompt=prompt,
+    negative_prompt=negative_prompt,
+    guidance_scale=0.0,
+    output_type="pil",
+    num_inference_steps=10
+).images
+
+#Now decoder_output is a list with your PIL images
 ```
 
 ## Uses
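
For anyone trying the updated snippet: as the new comment in the diff notes, `decoder_output` is a list of PIL images. A minimal sketch of using that result is below; the loop and the filenames are illustrative only and are not part of the README change.

```python
# Illustrative only (not part of the diff): each entry in decoder_output is a
# PIL.Image.Image, so it can be saved or displayed directly.
for i, image in enumerate(decoder_output):
    image.save(f"stable_cascade_{i}.png")
```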