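"""Preset definitions (dtypes, VAEs, schedulers, LoRAs) for SDXL, SD1.5, FLUX.1 and
SD3.5, plus the *_set_presets() helpers that return those values to the Gradio UI
components in the order given by the matching *_preset_items list."""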
from sdutils import get_dtypes, SCHEDULER_CONFIG_MAP
import gradio as gr


DEFAULT_DTYPE = get_dtypes()[0]
schedulers = list(SCHEDULER_CONFIG_MAP.keys())


clips = [
    "",
    "openai/clip-vit-large-patch14",
]


t5s = [
    "",
    "https://huggingface.co/camenduru/FLUX.1-dev/blob/main/t5xxl_fp8_e4m3fn.safetensors",
]


sdxl_vaes = [
    "",
    "https://huggingface.co/madebyollin/sdxl-vae-fp16-fix/blob/main/sdxl.vae.safetensors",
    "https://huggingface.co/nubby/blessed-sdxl-vae-fp16-fix/blob/main/sdxl_vae-fp16fix-blessed.safetensors",
    "https://huggingface.co/John6666/safetensors_converting_test/blob/main/xlVAEC_e7.safetensors",
    "https://huggingface.co/John6666/safetensors_converting_test/blob/main/xlVAEC_f1.safetensors",
]


sdxl_loras = [
    "",
    "https://huggingface.co/SPO-Diffusion-Models/SPO-SDXL_4k-p_10ep_LoRA/blob/main/spo_sdxl_10ep_4k-data_lora_diffusers.safetensors",
    "https://huggingface.co/wangfuyun/PCM_Weights/blob/main/sdxl/pcm_sdxl_smallcfg_2step_converted.safetensors",
    "https://huggingface.co/wangfuyun/PCM_Weights/blob/main/sdxl/pcm_sdxl_smallcfg_4step_converted.safetensors",
    "https://huggingface.co/wangfuyun/PCM_Weights/blob/main/sdxl/pcm_sdxl_smallcfg_8step_converted.safetensors",
    "https://huggingface.co/wangfuyun/PCM_Weights/blob/main/sdxl/pcm_sdxl_normalcfg_8step_converted.safetensors",
    "https://huggingface.co/wangfuyun/PCM_Weights/blob/main/sdxl/pcm_sdxl_normalcfg_16step_converted.safetensors",
    "https://huggingface.co/ByteDance/Hyper-SD/blob/main/Hyper-SDXL-1step-lora.safetensors",
    "https://huggingface.co/ByteDance/Hyper-SD/blob/main/Hyper-SDXL-2steps-lora.safetensors",
    "https://huggingface.co/ByteDance/Hyper-SD/blob/main/Hyper-SDXL-4steps-lora.safetensors",
    "https://huggingface.co/ByteDance/Hyper-SD/blob/main/Hyper-SDXL-8steps-CFG-lora.safetensors",
    "https://huggingface.co/ByteDance/Hyper-SD/blob/main/Hyper-SDXL-12steps-CFG-lora.safetensors",
    "https://huggingface.co/latent-consistency/lcm-lora-sdxl/blob/main/pytorch_lora_weights.safetensors",
]


sdxl_preset_items = ["dtype", "vae", "scheduler", "lora1", "lora1s", "lora2", "lora2s", "lora3", "lora3s", "lora4", "lora4s", "lora5", "lora5s"]
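# Each preset below lists its values in the same positional order as sdxl_preset_items;
# the SD1.5, FLUX.1 and SD3.5 presets further down follow the same convention.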
sdxl_preset_dict = {
    "Default": [DEFAULT_DTYPE, "", "Euler a", "", 1.0, "", 1.0, "", 1.0, "", 1.0, "", 1.0],
    "Bake in standard VAE": [DEFAULT_DTYPE, "https://huggingface.co/madebyollin/sdxl-vae-fp16-fix/blob/main/sdxl.vae.safetensors",
                             "Euler a", "", 1.0, "", 1.0, "", 1.0, "", 1.0, "", 1.0],
    "Hyper-SDXL / SPO": [DEFAULT_DTYPE, "https://huggingface.co/madebyollin/sdxl-vae-fp16-fix/blob/main/sdxl.vae.safetensors",
                         "TCD", "https://huggingface.co/ByteDance/Hyper-SD/blob/main/Hyper-SDXL-8steps-CFG-lora.safetensors", 1.0,
                         "https://huggingface.co/SPO-Diffusion-Models/SPO-SDXL_4k-p_10ep_LoRA/blob/main/spo_sdxl_10ep_4k-data_lora_diffusers.safetensors", 1.0,
                         "", 1.0, "", 1.0, "", 1.0],
}


def sdxl_set_presets(preset: str = "Default"):
    """Return the SDXL preset values in the order defined by sdxl_preset_items."""
    p = sdxl_preset_dict.get(preset, sdxl_preset_dict["Default"])
    if len(p) != len(sdxl_preset_items): raise gr.Error("Invalid preset.")
    print("Setting SDXL preset:", ", ".join([f"{x}:{y}" for x, y in zip(sdxl_preset_items, p)]))
    return tuple(p)
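# Rough wiring sketch (hypothetical component names; the actual UI lives in the app
# module that imports this file):
#   sdxl_preset = gr.Radio(choices=list(sdxl_preset_dict.keys()), value="Default")
#   sdxl_preset.change(
#       sdxl_set_presets, [sdxl_preset],
#       [dtype, vae, scheduler, lora1, lora1s, lora2, lora2s,
#        lora3, lora3s, lora4, lora4s, lora5, lora5s],
#   )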


sd15_vaes = [
    "",
    "https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.ckpt",
    "https://huggingface.co/stabilityai/sd-vae-ft-ema-original/resolve/main/vae-ft-ema-560000-ema-pruned.ckpt",
]


sd15_loras = [
    "",
    "https://huggingface.co/SPO-Diffusion-Models/SPO-SD-v1-5_4k-p_10ep_LoRA/blob/main/spo-sd-v1-5_4k-p_10ep_lora_diffusers.safetensors",
]


sd15_preset_items = ["dtype", "vae", "scheduler", "lora1", "lora1s", "lora2", "lora2s", "lora3", "lora3s", "lora4", "lora4s", "lora5", "lora5s", "ema"]
sd15_preset_dict = {
    "Default": [DEFAULT_DTYPE, "", "Euler", "", 1.0, "", 1.0, "", 1.0, "", 1.0, "", 1.0, True],
    "Bake in standard VAE": [DEFAULT_DTYPE, "https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.ckpt",
                             "Euler", "", 1.0, "", 1.0, "", 1.0, "", 1.0, "", 1.0, True],
}


def sd15_set_presets(preset: str = "Default"):
    """Return the SD1.5 preset values in the order defined by sd15_preset_items."""
    p = sd15_preset_dict.get(preset, sd15_preset_dict["Default"])
    if len(p) != len(sd15_preset_items): raise gr.Error("Invalid preset.")
    print("Setting SD1.5 preset:", ", ".join([f"{x}:{y}" for x, y in zip(sd15_preset_items, p)]))
    return tuple(p)


flux_vaes = [
    "",
]


flux_loras = [
    "",
]


flux_preset_items = ["dtype", "vae", "scheduler", "lora1", "lora1s", "lora2", "lora2s", "lora3", "lora3s", "lora4", "lora4s", "lora5", "lora5s", "base_repo"]
flux_preset_dict = {
    "dev": ["bf16", "", "", "", 1.0, "", 1.0, "", 1.0, "", 1.0, "", 1.0, "camenduru/FLUX.1-dev-diffusers"],
    "schnell": ["bf16", "", "", "", 1.0, "", 1.0, "", 1.0, "", 1.0, "", 1.0, "black-forest-labs/FLUX.1-schnell"],
}


def flux_set_presets(preset: str = "dev"):
    """Return the FLUX.1 preset values in the order defined by flux_preset_items."""
    p = flux_preset_dict.get(preset, flux_preset_dict["dev"])
    if len(p) != len(flux_preset_items): raise gr.Error("Invalid preset.")
    print("Setting FLUX.1 preset:", ", ".join([f"{x}:{y}" for x, y in zip(flux_preset_items, p)]))
    return tuple(p)



sd35_vaes = [
    "",
]


sd35_loras = [
    "",
]


sd35_preset_items = ["dtype", "vae", "scheduler", "lora1", "lora1s", "lora2", "lora2s", "lora3", "lora3s", "lora4", "lora4s", "lora5", "lora5s", "base_repo"]
sd35_preset_dict = {
    "Default": ["bf16", "", "", "", 1.0, "", 1.0, "", 1.0, "", 1.0, "", 1.0, "adamo1139/stable-diffusion-3.5-large-ungated"],
}


def sd35_set_presets(preset: str = "Default"):
    """Return the SD3.5 preset values in the order defined by sd35_preset_items."""
    p = sd35_preset_dict.get(preset, sd35_preset_dict["Default"])
    if len(p) != len(sd35_preset_items): raise gr.Error("Invalid preset.")
    print("Setting SD3.5 preset:", ", ".join([f"{x}:{y}" for x, y in zip(sd35_preset_items, p)]))
    return tuple(p)
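

# Minimal smoke test when this module is run directly (a sketch only; in the app these
# helpers are expected to be wired as Gradio event handlers rather than called directly).
if __name__ == "__main__":
    print(sdxl_set_presets("Hyper-SDXL / SPO"))
    print(sd15_set_presets("Bake in standard VAE"))
    print(flux_set_presets("schnell"))
    print(sd35_set_presets())  # unknown or omitted preset names fall back to "Default"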