patrickvonplaten committed
Commit 403db3e (2 parents: d4184d4, 056c912)

Merge branch 'main' of https://huggingface.co/diffusers/tools

Files changed (7):
  1. all_branches.txt +0 -0
  2. all_branches_diffusers.txt +56 -0
  3. cat.pt +0 -3
  4. dog.pt +0 -3
  5. get_model_ids.py +1 -0
  6. run_local.py +4 -2
  7. run_pix2pix0.py +25 -17
all_branches.txt CHANGED
The diff for this file is too large to render. See raw diff
 
all_branches_diffusers.txt ADDED
@@ -0,0 +1,56 @@
+google/ncsnpp-ffhq-1024: ['master']
+google/ncsnpp-ffhq-1024: ['master']
+shalpin87/diffusion_conditional: ['backup']
+CompVis/stable-diffusion-v1-3: ['fp16']
+CompVis/stable-diffusion-v1-1: ['fp16']
+CompVis/stable-diffusion-v1-2: ['fp16']
+CompVis/stable-diffusion-v1-4: ['non-ema', 'onnx', 'bf16', 'flax', 'fp16']
+hakurei/waifu-diffusion: ['fp16']
+rinna/japanese-stable-diffusion: ['fp16']
+naclbit/trinart_stable_diffusion_v2: ['diffusers-95k', 'diffusers-60k', 'diffusers-115k']
+pcuenq/stable-diffusion-v1-4: ['onnx']
+lambdalabs/sd-pokemon-diffusers: ['onnx']
+CompVis/stable-diffusion-v1-5: ['fp16']
+Gazoche/sd-gundam-diffusers: ['epoch-000020', 'epoch-000081', 'epoch-000025']
+runwayml/stable-diffusion-inpainting: ['onnx', 'fp16']
+fusing/sd-inpaint-temp: ['fp16']
+runwayml/stable-diffusion-v1-5: ['onnx', 'fp16', 'non-ema', 'flax', 'bf16']
+ckpt/sd15: ['flax', 'bf16', 'fp16']
+aarondotwork/sd-pokemon-diffusers: ['fp16']
+technillogue/waifu-diffusion: ['fp16']
+DGSpitzer/Cyberpunk-Anime-Diffusion: ['fp16']
+uripper/GIANNIS: ['ONNX', 'Traced', 'ONNX-Q']
+microsoft/vq-diffusion-ithq: ['fp16']
+fusing/rdm: ['fp16']
+CompVis/ldm-super-resolution-4x-openimages: ['fp16']
+lilpotat/f2: ['flax']
+lilpotat/a3: ['flax']
+lilpotat/rbm: ['flax']
+BAAI/AltDiffusion: ['fp16']
+fusing/test: ['fp16']
+stabilityai/stable-diffusion-2: ['fp16', 'bf16']
+stabilityai/stable-diffusion-2-base: ['onnx', 'fp16']
+stabilityai/stable-diffusion-2-depth: ['fp16']
+stabilityai/stable-diffusion-2-inpainting: ['fp16']
+stabilityai/stable-diffusion-x4-upscaler: ['fp16']
+Abhilashvj/openjourney_copy: ['master']
+questcoast/clone-wars-diffusion-v1: ['readme']
+lilpotat/ashleymoore: ['flax']
+jplumail/matthieu-v1-pipe: ['fp16']
+stabilityai/stable-diffusion-2-1: ['bf16', 'fp16']
+stabilityai/stable-diffusion-2-1-base: ['fp16']
+jplumail/matthieu-v2-pipe: ['fp16']
+NickKolok/arikioyami-20221223_1: ['backforward_overfit']
+mann-e/mann-e: ['master']
+DucHaiten/DucHaitenAIart: ['safetensors']
+ShibaDeveloper/olivia-v1.0: ['safetensors']
+cdefghijkl/luber: ['safetensors']
+mddy/abyss2-diffusers: ['master']
+OFA-Sys/small-stable-diffusion-v0: ['onnx']
+timbrooks/instruct-pix2pix: ['fp16']
+DucHaiten/DucHaitenAnime: ['vae']
+neemspees/dnd-maps-2: ['v2', 'v3']
+ruiruin/counmargemodel: ['fp16']
+Nacholmo/AbyssOrangeMix2-hard-vae-swapped: ['fp16']
+Nacholmo/Counterfeit-V2.5-vae-swapped: ['de_vae', 'dvae', 'fp16']
+Nacholmo/VOXO-v0-vtuber-diffusers: ['fp16']
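For reference, a listing like the one above can be produced with huggingface_hub. A minimal sketch, not the repo's actual generator script: it assumes the model ids come from the model_ids.txt file written by get_model_ids.py below, and that only the default "main" branch is filtered out (consistent with "master" branches surviving in the list).

# Hypothetical sketch: enumerate non-default branches per diffusers model.
from huggingface_hub import HfApi

api = HfApi()

with open("./model_ids.txt") as f:
    model_ids = [line.strip() for line in f if line.strip()]

with open("./all_branches_diffusers.txt", "w") as f:
    for model_id in model_ids:
        refs = api.list_repo_refs(model_id)  # all git refs of the model repo
        branches = [b.name for b in refs.branches if b.name != "main"]
        if branches:
            f.write(f"{model_id}: {branches}\n")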
cat.pt DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:aa9441dc014d5e86567c5ef165e10b50d2a7b3a68d90686d0cd1006792adf334
-size 237300

dog.pt DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:becf079d61d7f35727bcc0d8506ddcdcddb61e62d611840ff3d18eca7fb6338c
-size 237300
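The three deleted lines in each file are Git LFS pointers (version, oid, size), not the tensors themselves; the roughly 237 KB payloads lived in LFS storage. Dropping cat.pt and dog.pt is consistent with the run_pix2pix0.py rewrite below, which computes source and target embeddings with pipeline.get_embeds instead of loading these precomputed files.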
get_model_ids.py CHANGED
@@ -4,6 +4,7 @@ from huggingface_hub import HfApi
 api = HfApi()
 out = api.list_models(filter="diffusers")
 model_ids = [x.modelId for x in out]
+print(len(model_ids))
 
 with open("./model_ids.txt", "w") as f:
     for _id in model_ids:
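Pieced together from the hunk, the script after this change plausibly reads as follows; only the lines shown above are certain, and the shebang and loop body are assumptions:

#!/usr/bin/env python3
# Reconstruction of get_model_ids.py; the write call is assumed (cut off by the hunk).
from huggingface_hub import HfApi

api = HfApi()
out = api.list_models(filter="diffusers")  # every Hub model compatible with diffusers
model_ids = [x.modelId for x in out]
print(len(model_ids))  # the newly added count printout

with open("./model_ids.txt", "w") as f:
    for _id in model_ids:
        f.write(_id + "\n")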
run_local.py CHANGED
@@ -1,5 +1,6 @@
 #!/usr/bin/env python3
 from diffusers import StableDiffusionPipeline, DPMSolverSinglestepScheduler, DPMSolverMultistepScheduler, DEISMultistepScheduler, HeunDiscreteScheduler
+from diffusers.schedulers.scheduling_unipc_multistep import UniPCMultistepScheduler
 import time
 import os
 from huggingface_hub import HfApi
@@ -14,12 +15,13 @@ start_time = time.time()
 #pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16, device_map="auto")
 #pipe.scheduler = HeunDiscreteScheduler.from_config(pipe.scheduler.config)
 pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16)
-pipe.scheduler = HeunDiscreteScheduler.from_config(pipe.scheduler.config)
+pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
 pipe = pipe.to("cuda")
 
 prompt = "a highly realistic photo of green turtle"
+
 generator = torch.Generator(device="cuda").manual_seed(0)
-image = pipe(prompt, generator=generator, num_inference_steps=25).images[0]
+image = pipe(prompt, generator=generator, num_inference_steps=15).images[0]
 print("Time", time.time() - start_time)
 
 path = os.path.join(Path.home(), "images", "aa.png")
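The scheduler swap is what lets num_inference_steps drop from 25 to 15: HeunDiscreteScheduler is a second-order method that costs two model evaluations per step, while UniPCMultistepScheduler is a fast multistep solver designed to hold quality at low step counts. A minimal self-contained version of the resulting benchmark; "path" is a placeholder, since its real value is defined outside the hunks shown above:

#!/usr/bin/env python3
# Sketch of run_local.py after the change; UniPCMultistepScheduler is imported
# from the diffusers top level here rather than the submodule path used in the diff.
import time
import torch
from diffusers import StableDiffusionPipeline, UniPCMultistepScheduler

path = "runwayml/stable-diffusion-v1-5"  # assumption: the script's real path is not shown

start_time = time.time()
pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16)
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")

prompt = "a highly realistic photo of green turtle"
generator = torch.Generator(device="cuda").manual_seed(0)
image = pipe(prompt, generator=generator, num_inference_steps=15).images[0]
print("Time", time.time() - start_time)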
run_pix2pix0.py CHANGED
@@ -2,44 +2,52 @@
 from huggingface_hub import HfApi
 import torch
 
-# https://github.com/pix2pixzero/pix2pix-zero/blob/main/src/edit_synthetic.py
 import requests
+from PIL import Image
 
 from diffusers import DDIMScheduler, StableDiffusionPix2PixZeroPipeline
+from diffusers.schedulers.scheduling_ddim_inverse import DDIMInverseScheduler
+from transformers import BlipForConditionalGeneration, BlipProcessor
 
 api = HfApi()
+img_url = "https://github.com/pix2pixzero/pix2pix-zero/raw/main/assets/test_images/cats/cat_6.png"
+raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB').resize((512, 512))
 
-
-def download(embedding_url, local_filepath):
-    r = requests.get(embedding_url)
-    with open(local_filepath, "wb") as f:
-        f.write(r.content)
-
+processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
+model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base", torch_dtype=torch.float16, low_cpu_mem_usage=True)
 
 model_ckpt = "CompVis/stable-diffusion-v1-4"
 pipeline = StableDiffusionPix2PixZeroPipeline.from_pretrained(
-    model_ckpt, conditions_input_image=False, torch_dtype=torch.float16
+    model_ckpt, caption_generator=model, caption_processor=processor, torch_dtype=torch.float16, safety_checker=None,
 )
+pipeline.enable_model_cpu_offload()
+
+caption = pipeline.generate_caption(raw_image)
+
 pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config)
-pipeline.to("cuda")
+pipeline.inverse_scheduler = DDIMInverseScheduler.from_config(pipeline.scheduler.config)
+
+print(caption)
 
-
-prompt = "a high resolution painting of a cat in the style of van gough"
-source_embedding_url = "https://github.com/pix2pixzero/pix2pix-zero/raw/main/assets/embeddings_sd_1.4/cat.pt"
-target_embedding_url = "https://github.com/pix2pixzero/pix2pix-zero/raw/main/assets/embeddings_sd_1.4/dog.pt"
+generator = torch.manual_seed(0)
+inv_latents = pipeline.invert(caption, image=raw_image, generator=generator).latents
 
-for url in [source_embedding_url, target_embedding_url]:
-    download(url, url.split("/")[-1])
+source_prompts = 4 * ["a cat sitting on the street", "a cat playing in the field", "a face of a cat"]
+target_prompts = 4 * ["a dog sitting on the street", "a dog playing in the field", "a face of a dog"]
 
-source_embeds = torch.load(source_embedding_url.split("/")[-1])
-target_embeds = torch.load(target_embedding_url.split("/")[-1])
+source_embeds = pipeline.get_embeds(source_prompts, batch_size=2)
+target_embeds = pipeline.get_embeds(target_prompts, batch_size=2)
 
+
 image = pipeline(
-    prompt,
+    caption,
     source_embeds=source_embeds,
     target_embeds=target_embeds,
     num_inference_steps=50,
     cross_attention_guidance_amount=0.15,
+    generator=generator,
+    latents=inv_latents,
+    negative_prompt=caption,
 ).images[0]
 
 path = "/home/patrick_huggingface_co/images/aa.png"