Merge branch 'main' of https://huggingface.co/diffusers/tools
- clear_mem.py +21 -0
- run_sdxl_dummy.py +21 -0
- run_sdxl_dummy_flax.py +25 -0
clear_mem.py
ADDED
@@ -0,0 +1,21 @@
#!/usr/bin/env python3
import torch
import gc

# Size of the square test tensor and linear layer (10,000 x 10,000).
shape = 10_000

inputs = torch.ones((shape, shape), device="cuda")


def clear_memory(model):
    # Move the model off the GPU, then release every cached allocation.
    model.to("cpu")
    gc.collect()
    torch.cuda.empty_cache()
    torch.cuda.ipc_collect()
    torch.clear_autocast_cache()


for _ in range(6):
    linear = torch.nn.Linear(shape, shape).to("cuda")
    output = linear(inputs)

    clear_memory(linear)
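
One quick way to confirm that clear_memory really returns memory to the driver is to read the CUDA caching allocator's counters around the call. A minimal sketch, assuming a CUDA device is available; the report helper is added here for illustration and is not part of the script:

import torch  # same import as in the script above

def report(tag):
    # memory_allocated() counts live tensors; memory_reserved() counts cached blocks too.
    allocated = torch.cuda.memory_allocated() // 2**20
    reserved = torch.cuda.memory_reserved() // 2**20
    print(f"{tag}: {allocated} MiB allocated, {reserved} MiB reserved")

Calling report("before") and report("after") around clear_memory(linear) should show the reserved figure drop once empty_cache() runs; the allocated figure stays high as long as output still references GPU memory.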
run_sdxl_dummy.py
ADDED
@@ -0,0 +1,21 @@
#!/usr/bin/env python3
import torch
import numpy as np
from diffusers import StableDiffusionXLPipeline

path = "hf-internal-testing/tiny-stable-diffusion-xl-pipe"

pipe = StableDiffusionXLPipeline.from_pretrained(path)
pipe.unet.set_default_attn_processor()

prompt = "An astronaut riding a green horse on Mars"
steps = 3

# Deterministic latents: i / num_elems for i = 0..num_elems-1, reshaped to (batch, channels, width, height).
batch_size, height, width, ch = 1, 32, 32, 4
num_elems = batch_size * height * width * ch
latents = (torch.arange(num_elems) / num_elems)[:, None, None, None].reshape(batch_size, ch, width, height)
print("latents", latents.abs().sum())

image = pipe(prompt, latents=latents, num_inference_steps=steps, output_type="np", guidance_scale=7.5).images[0]

print(np.abs(image).sum())
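
The "latents" printout has a closed form worth noting: the tensor holds i / num_elems for i = 0..num_elems-1, so its absolute sum is (num_elems - 1) / 2 = 2047.5 regardless of framework. A small NumPy sketch of that reference value:

import numpy as np

num_elems = 1 * 32 * 32 * 4  # batch_size * height * width * ch = 4096
ref = np.arange(num_elems).sum() / num_elems  # = (num_elems - 1) / 2
print("expected latents abs-sum:", ref)  # 2047.5

The same value should appear in the Flax script below, which builds its latents identically.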
run_sdxl_dummy_flax.py
ADDED
@@ -0,0 +1,25 @@
#!/usr/bin/env python3
from diffusers import FlaxStableDiffusionXLPipeline
import numpy as np
import jax.numpy as jnp
import jax

path = "hf-internal-testing/tiny-stable-diffusion-xl-pipe"

pipe, params = FlaxStableDiffusionXLPipeline.from_pretrained(path)

prompt = "An astronaut riding a green horse on Mars"
steps = 3

# Same deterministic latents as run_sdxl_dummy.py, built with jnp instead of torch.
batch_size, height, width, ch = 1, 32, 32, 4
num_elems = batch_size * height * width * ch
rng = jax.random.PRNGKey(0)
latents = (jnp.arange(num_elems) / num_elems)[:, None, None, None].reshape(batch_size, ch, width, height)

print("latents", np.abs(np.asarray(latents)).sum())

prompt_embeds = pipe.prepare_inputs(prompt)

image = pipe(prompt_embeds, params, rng, latents=latents, num_inference_steps=steps, output_type="np").images[0]

print(np.abs(np.asarray(image)).sum())
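
Because both scripts feed identical deterministic latents and the same prompt into the same tiny checkpoint, the two final image sums can double as a rough PyTorch/Flax parity check. A minimal sketch of that comparison; the tolerance is an assumption, since tiny-model numerics can drift between frameworks:

import numpy as np

def sums_match(torch_sum, flax_sum, rtol=1e-3):
    # rtol is a guess, not a measured bound.
    return np.isclose(torch_sum, flax_sum, rtol=rtol)

Feed it the two values printed by run_sdxl_dummy.py and this script.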