Spaces: Running on Zero
import os
from importlib import import_module
from importlib.util import find_spec
from types import SimpleNamespace
from warnings import filterwarnings
from diffusers import (
    DDIMScheduler,
    DEISMultistepScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    StableDiffusionXLPipeline,
)
from diffusers.utils import logging as diffusers_logging
from transformers import logging as transformers_logging
# Improved GPU handling and progress bars; set before importing spaces
os.environ["ZEROGPU_V2"] = "1"
# Use the Rust-based hf_transfer downloader only when it is installed;
# enabling it without the package makes hub downloads raise an error
if find_spec("hf_transfer"):
    os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
filterwarnings("ignore", category=FutureWarning, module="diffusers")
filterwarnings("ignore", category=FutureWarning, module="transformers")
diffusers_logging.set_verbosity_error()
transformers_logging.set_verbosity_error()
# Standard refiner structure
_sdxl_refiner_files = [
    "scheduler/scheduler_config.json",
    "text_encoder_2/config.json",
    "text_encoder_2/model.fp16.safetensors",
    "tokenizer_2/merges.txt",
    "tokenizer_2/special_tokens_map.json",
    "tokenizer_2/tokenizer_config.json",
    "tokenizer_2/vocab.json",
    "unet/config.json",
    "unet/diffusion_pytorch_model.fp16.safetensors",
    "vae/config.json",
    "vae/diffusion_pytorch_model.fp16.safetensors",
    "model_index.json",
]
# Standard SDXL structure
_sdxl_files = [
    *_sdxl_refiner_files,
    "text_encoder/config.json",
    "text_encoder/model.fp16.safetensors",
    "tokenizer/merges.txt",
    "tokenizer/special_tokens_map.json",
    "tokenizer/tokenizer_config.json",
    "tokenizer/vocab.json",
]
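# Hypothetical helper (not part of the original Space): a minimal sketch of how
# the file lists above could be consumed, assuming they are meant as per-repo
# download filters so only the fp16 variant is fetched instead of a full snapshot.
def _prefetch(repo_id, filenames, token=None):
    # resolve each listed file into the local Hugging Face cache
    from huggingface_hub import hf_hub_download

    for filename in filenames:
        hf_hub_download(repo_id=repo_id, filename=filename, token=token)
# Example: _prefetch("stabilityai/stable-diffusion-xl-refiner-1.0", _sdxl_refiner_files)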
# Using namespace instead of dataclass for simplicity
Config = SimpleNamespace(
    HF_TOKEN=os.environ.get("HF_TOKEN", None),
    ZERO_GPU=import_module("spaces").config.Config.zero_gpu,
    HF_MODELS={
        "segmind/Segmind-Vega": [*_sdxl_files],
        "stabilityai/stable-diffusion-xl-base-1.0": [*_sdxl_files, "vae_1_0/config.json"],
        "stabilityai/stable-diffusion-xl-refiner-1.0": [*_sdxl_refiner_files],
    },
    PIPELINES={
        "txt2img": StableDiffusionXLPipeline,
        "img2img": StableDiffusionXLImg2ImgPipeline,
    },
    MODEL="segmind/Segmind-Vega",
    MODELS=[
        "cyberdelia/CyberRealsticXL",
        "fluently/Fluently-XL-Final",
        "segmind/Segmind-Vega",
        "SG161222/RealVisXL_V5.0",
        "stabilityai/stable-diffusion-xl-base-1.0",
    ],
    # Single-file model weights
    MODEL_CHECKPOINTS={
        # keep keys lowercase for case-insensitive matching in the loader
        "cyberdelia/cyberrealsticxl": "CyberRealisticXLPlay_V1.0.safetensors",  # upstream repo id misspells "realistic"
        "fluently/fluently-xl-final": "FluentlyXL-Final.safetensors",
        "sg161222/realvisxl_v5.0": "RealVisXL_V5.0_fp16.safetensors",
    },
    VAE_MODEL="madebyollin/sdxl-vae-fp16-fix",
    REFINER_MODEL="stabilityai/stable-diffusion-xl-refiner-1.0",
    SCHEDULER="Euler",
    SCHEDULERS={
        "DDIM": DDIMScheduler,
        "DEIS 2M": DEISMultistepScheduler,
        "DPM++ 2M": DPMSolverMultistepScheduler,
        "Euler": EulerDiscreteScheduler,
        "Euler a": EulerAncestralDiscreteScheduler,
    },
    WIDTH=1024,
    HEIGHT=1024,
    NUM_IMAGES=1,
    SEED=-1,
    GUIDANCE_SCALE=6,
    INFERENCE_STEPS=40,
    DEEPCACHE_INTERVAL=1,
    SCALE=1,
    SCALES=[1, 2, 4],
)
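# Hypothetical usage sketch (not part of the original Space): one way a loader
# could turn Config into a ready pipeline, assuming single-file checkpoints are
# loaded with `from_single_file` and standard repos with `from_pretrained`.
# `_load_pipeline` and its parameters are illustrative, not the Space's actual API.
def _load_pipeline(kind="txt2img", model=None, scheduler=None):
    import torch
    from huggingface_hub import hf_hub_download

    model = model or Config.MODEL
    scheduler = scheduler or Config.SCHEDULER
    pipeline_cls = Config.PIPELINES[kind]
    checkpoint = Config.MODEL_CHECKPOINTS.get(model.lower())
    if checkpoint:
        # single .safetensors checkpoint hosted in the repo
        path = hf_hub_download(repo_id=model, filename=checkpoint, token=Config.HF_TOKEN)
        pipe = pipeline_cls.from_single_file(path, torch_dtype=torch.float16)
    else:
        # standard diffusers repo layout with fp16 weights
        pipe = pipeline_cls.from_pretrained(model, torch_dtype=torch.float16, variant="fp16")
    # swap in the configured scheduler, keeping the pipeline's scheduler config
    pipe.scheduler = Config.SCHEDULERS[scheduler].from_config(pipe.scheduler.config)
    return pipe
# Example: pipe = _load_pipeline("img2img", "SG161222/RealVisXL_V5.0", "DPM++ 2M")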