patrickvonplaten committed on
Commit
866074c
1 Parent(s): 9ae8cf2
all_branches.txt CHANGED
The diff for this file is too large to render. See raw diff
 
all_branches_diffusers.txt ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ google/ncsnpp-ffhq-1024: ['master']
2
+ google/ncsnpp-ffhq-1024: ['master']
3
+ shalpin87/diffusion_conditional: ['backup']
4
+ CompVis/stable-diffusion-v1-3: ['fp16']
5
+ CompVis/stable-diffusion-v1-1: ['fp16']
6
+ CompVis/stable-diffusion-v1-2: ['fp16']
7
+ CompVis/stable-diffusion-v1-4: ['non-ema', 'onnx', 'bf16', 'flax', 'fp16']
8
+ hakurei/waifu-diffusion: ['fp16']
9
+ rinna/japanese-stable-diffusion: ['fp16']
10
+ naclbit/trinart_stable_diffusion_v2: ['diffusers-95k', 'diffusers-60k', 'diffusers-115k']
11
+ pcuenq/stable-diffusion-v1-4: ['onnx']
12
+ lambdalabs/sd-pokemon-diffusers: ['onnx']
13
+ CompVis/stable-diffusion-v1-5: ['fp16']
14
+ Gazoche/sd-gundam-diffusers: ['epoch-000020', 'epoch-000081', 'epoch-000025']
15
+ runwayml/stable-diffusion-inpainting: ['onnx', 'fp16']
16
+ fusing/sd-inpaint-temp: ['fp16']
17
+ runwayml/stable-diffusion-v1-5: ['onnx', 'fp16', 'non-ema', 'flax', 'bf16']
18
+ ckpt/sd15: ['flax', 'bf16', 'fp16']
19
+ aarondotwork/sd-pokemon-diffusers: ['fp16']
20
+ technillogue/waifu-diffusion: ['fp16']
21
+ DGSpitzer/Cyberpunk-Anime-Diffusion: ['fp16']
22
+ uripper/GIANNIS: ['ONNX', 'Traced', 'ONNX-Q']
23
+ microsoft/vq-diffusion-ithq: ['fp16']
24
+ fusing/rdm: ['fp16']
25
+ CompVis/ldm-super-resolution-4x-openimages: ['fp16']
26
+ lilpotat/f2: ['flax']
27
+ lilpotat/a3: ['flax']
28
+ lilpotat/rbm: ['flax']
29
+ BAAI/AltDiffusion: ['fp16']
30
+ fusing/test: ['fp16']
31
+ stabilityai/stable-diffusion-2: ['fp16', 'bf16']
32
+ stabilityai/stable-diffusion-2-base: ['onnx', 'fp16']
33
+ stabilityai/stable-diffusion-2-depth: ['fp16']
34
+ stabilityai/stable-diffusion-2-inpainting: ['fp16']
35
+ stabilityai/stable-diffusion-x4-upscaler: ['fp16']
36
+ Abhilashvj/openjourney_copy: ['master']
37
+ questcoast/clone-wars-diffusion-v1: ['readme']
38
+ lilpotat/ashleymoore: ['flax']
39
+ jplumail/matthieu-v1-pipe: ['fp16']
40
+ stabilityai/stable-diffusion-2-1: ['bf16', 'fp16']
41
+ stabilityai/stable-diffusion-2-1-base: ['fp16']
42
+ jplumail/matthieu-v2-pipe: ['fp16']
43
+ NickKolok/arikioyami-20221223_1: ['backforward_overfit']
44
+ mann-e/mann-e: ['master']
45
+ DucHaiten/DucHaitenAIart: ['safetensors']
46
+ ShibaDeveloper/olivia-v1.0: ['safetensors']
47
+ cdefghijkl/luber: ['safetensors']
48
+ mddy/abyss2-diffusers: ['master']
49
+ OFA-Sys/small-stable-diffusion-v0: ['onnx']
50
+ timbrooks/instruct-pix2pix: ['fp16']
51
+ DucHaiten/DucHaitenAnime: ['vae']
52
+ neemspees/dnd-maps-2: ['v2', 'v3']
53
+ ruiruin/counmargemodel: ['fp16']
54
+ Nacholmo/AbyssOrangeMix2-hard-vae-swapped: ['fp16']
55
+ Nacholmo/Counterfeit-V2.5-vae-swapped: ['de_vae', 'dvae', 'fp16']
56
+ Nacholmo/VOXO-v0-vtuber-diffusers: ['fp16']
get_model_ids.py CHANGED
@@ -4,6 +4,7 @@ from huggingface_hub import HfApi
4
  api = HfApi()
5
  out = api.list_models(filter="diffusers")
6
  model_ids = [x.modelId for x in out]
 
7
 
8
  with open("./model_ids.txt", "w") as f:
9
  for _id in model_ids:
 
4
  api = HfApi()
5
  out = api.list_models(filter="diffusers")
6
  model_ids = [x.modelId for x in out]
7
+ print(len(model_ids))
8
 
9
  with open("./model_ids.txt", "w") as f:
10
  for _id in model_ids:
run_local.py CHANGED
@@ -1,5 +1,6 @@
1
  #!/usr/bin/env python3
2
  from diffusers import StableDiffusionPipeline, DPMSolverSinglestepScheduler, DPMSolverMultistepScheduler, DEISMultistepScheduler, HeunDiscreteScheduler
 
3
  import time
4
  from huggingface_hub import HfApi
5
  import torch
@@ -10,10 +11,11 @@ path = sys.argv[1]
10
  api = HfApi()
11
  start_time = time.time()
12
  pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16)
13
- pipe.scheduler = HeunDiscreteScheduler.from_config(pipe.scheduler.config)
14
  pipe = pipe.to("cuda")
15
 
16
  prompt = "a highly realistic photo of green turtle"
 
17
  generator = torch.Generator(device="cuda").manual_seed(0)
18
  image = pipe(prompt, generator=generator, num_inference_steps=25).images[0]
19
  print("Time", time.time() - start_time)
 
1
  #!/usr/bin/env python3
2
  from diffusers import StableDiffusionPipeline, DPMSolverSinglestepScheduler, DPMSolverMultistepScheduler, DEISMultistepScheduler, HeunDiscreteScheduler
3
+ from diffusers.schedulers.scheduling_unipc_multistep import UniPCMultistepScheduler
4
  import time
5
  from huggingface_hub import HfApi
6
  import torch
 
11
  api = HfApi()
12
  start_time = time.time()
13
  pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16)
14
+ pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
15
  pipe = pipe.to("cuda")
16
 
17
  prompt = "a highly realistic photo of green turtle"
18
+ prompt = "an image of Santa Claus in light blue"
19
  generator = torch.Generator(device="cuda").manual_seed(0)
20
  image = pipe(prompt, generator=generator, num_inference_steps=25).images[0]
21
  print("Time", time.time() - start_time)