from __future__ import annotations

import ast
import os
import random
import time
import uuid
from concurrent.futures import ThreadPoolExecutor

import gradio as gr
import numpy as np
import torch
from diffusers import DiffusionPipeline
from huggingface_hub import HfApi, snapshot_download

# save_image() below records images via gr_user_history; this assumes the
# gradio-user-history package (import name: gradio_user_history) is installed.
import gradio_user_history as gr_user_history
MAX_SEED = np.iinfo(np.int32).max
CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES") == "1"
MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "768"))
USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE") == "1"
DTYPE = torch.float32  # full precision; this Space runs on CPU
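
# These can be tuned via environment variables when launching the Space, e.g.:
#   MAX_IMAGE_SIZE=1024 USE_TORCH_COMPILE=1 python app_demo.py
# (invocation shown for illustration; the actual entrypoint may differ).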
api = HfApi()
executor = ThreadPoolExecutor()
model_cache = {}
# Load the community "latent_consistency_txt2img" pipeline, which generates
# images in very few inference steps; the safety checker is disabled for speed.
model_id = "Lykon/dreamshaper-xl-v2-turbo"
custom_pipe = DiffusionPipeline.from_pretrained(
    model_id,
    custom_pipeline="latent_consistency_txt2img",
    custom_revision="main",
    safety_checker=None,
    feature_extractor=None,
)
custom_pipe.to("cpu", DTYPE)  # recent diffusers versions expect positional device/dtype in .to()
pipe = custom_pipe
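
# Hedged sketch: wire up the otherwise-unused USE_TORCH_COMPILE flag. Whether
# torch.compile actually speeds up CPU inference depends on the PyTorch build,
# so treat this as an optional experiment rather than part of the original app.
if USE_TORCH_COMPILE:
    pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)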
def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
return random.randint(0, MAX_SEED) if randomize_seed else seed
def save_image(img, profile: gr.OAuthProfile | None, metadata: dict) -> str:
    """Save one generated image to disk and record it in the user's history."""
    unique_name = f"{uuid.uuid4()}.png"
    img.save(unique_name)
    gr_user_history.save_image(label=metadata["prompt"], image=img, profile=profile, metadata=metadata)
    return unique_name

def save_images(image_array, profile: gr.OAuthProfile | None, metadata: dict) -> list[str]:
    # Save all images concurrently; a local pool avoids shadowing the module-level executor.
    with ThreadPoolExecutor() as pool:
        return list(pool.map(save_image, image_array, [profile] * len(image_array), [metadata] * len(image_array)))
def generate(prompt: str, seed: int = 0, width: int = 512, height: int = 512,
guidance_scale: float = 8.0, num_inference_steps: int = 4,
num_images: int = 1, randomize_seed: bool = False,
progress=gr.Progress(track_tqdm=True),
profile: gr.OAuthProfile | None = None) -> tuple[list[str], int]:
seed = randomize_seed_fn(seed, randomize_seed)
torch.manual_seed(seed)
start_time = time.time()
    # lcm_origin_steps is an argument specific to the latent_consistency_txt2img
    # community pipeline loaded above.
    outputs = pipe(prompt=prompt, negative_prompt="", height=height, width=width,
                   guidance_scale=guidance_scale, num_inference_steps=num_inference_steps,
                   num_images_per_prompt=num_images, output_type="pil", lcm_origin_steps=50).images
print(f"Generation took {time.time() - start_time:.2f} seconds")
paths = save_images(outputs, profile, metadata={"prompt": prompt, "seed": seed,
"width": width, "height": height,
"guidance_scale": guidance_scale,
"num_inference_steps": num_inference_steps})
return paths, seed
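
# Hypothetical direct call, bypassing the UI (prompt and seed are made up):
#   paths, used_seed = generate("a watercolor fox", seed=42, num_images=1)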
def validate_and_list_models(hfuser):
    """Return the text-to-image model ids published by the given Hub user."""
    try:
        models = api.list_models(author=hfuser)
        return [model.modelId for model in models if model.pipeline_tag == "text-to-image"]
    except Exception:
        # Unknown user, network failure, etc.: treat as "no models found".
        return []
def parse_user_model_dict(user_model_dict_str):
    """Parse a {username: [model ids]} mapping from its string form; return {} on bad input."""
    try:
        data = ast.literal_eval(user_model_dict_str)
        if isinstance(data, dict) and all(isinstance(v, list) for v in data.values()):
            return data
        return {}
    except Exception:
        return {}
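
# Example of the accepted shape (names are placeholders):
#   parse_user_model_dict("{'someuser': ['model-a', 'model-b']}")
#   -> {'someuser': ['model-a', 'model-b']}; anything else yields {}.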
def load_model(model_id):
    """Download a model snapshot into the local cache, reusing it if already fetched."""
    if model_id in model_cache:
        return f"{model_id} loaded from cache"
    try:
        path = snapshot_download(repo_id=model_id, cache_dir="model_cache", token=os.getenv("HF_TOKEN"))
        model_cache[model_id] = path
        return f"{model_id} loaded successfully"
    except Exception as e:
        return f"{model_id} failed to load: {e}"
def run_models(models, parallel):
    """Download the selected models, optionally in parallel, with one status line each."""
    if parallel:
        futures = [executor.submit(load_model, m) for m in models]
        results = [f.result() for f in futures]
    else:
        results = [load_model(m) for m in models]
    # Join so the multi-line status renders cleanly in the output Textbox.
    return "\n".join(results)
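
# Note: snapshot_download is I/O-bound, so thread-based parallelism genuinely
# overlaps the downloads; CPython's GIL is released during network waits.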
with gr.Blocks() as demo:
with gr.Row():
gr.HTML("""
<p id="project-links" align="center">
<a href='https://huggingface.co/spaces/charliebaby2023/Fast_Stable_diffusion_CPU/edit/main/app_demo.py'>Edit this app_demo py file</a>
<p> this is currently running the Lykon/dreamshaper-xl-v2-turbo model</p>
<p><fast stable diffusion, CPU</p>
</p>
""")
with gr.Column(scale=1):
with gr.Row():
hfuser_input = gr.Textbox(label="Hugging Face Username")
            hfuser_models = gr.Dropdown(label="Models from User", choices=[], value=None, multiselect=True, visible=False)
user_model_dict = gr.Textbox(visible=False, label="Dict Input (e.g., {'username': ['model1', 'model2']})")
with gr.Row():
run_btn = gr.Button("Load Models")
with gr.Column(scale=3):
with gr.Row():
parallel_toggle = gr.Checkbox(label="Load in Parallel", value=True)
with gr.Row():
output = gr.Textbox(label="Output", lines=3)
def update_models(hfuser):
if hfuser:
models = validate_and_list_models(hfuser)
label = f"Models found: {len(models)}"
return gr.update(choices=models, label=label, visible=bool(models))
return gr.update(choices=[], label='', visible=False)
def update_from_dict(dict_str):
parsed = parse_user_model_dict(dict_str)
if not parsed:
return gr.update(), gr.update()
        # Use the first username in the mapping to drive both fields.
        hfuser = next(iter(parsed))
        models = parsed[hfuser]
label = f"Models found: {len(models)}"
return gr.update(value=hfuser), gr.update(choices=models, value=models, label=label)
hfuser_input.change(update_models, hfuser_input, hfuser_models)
user_model_dict.change(update_from_dict, user_model_dict, [hfuser_input, hfuser_models])
run_btn.click(run_models, [hfuser_models, parallel_toggle], output)
with gr.Group():
with gr.Row():
prompt = gr.Text(placeholder="Enter your prompt", show_label=False, container=False)
run_button = gr.Button("Run", scale=0)
gallery = gr.Gallery(label="Generated images", show_label=False, elem_id="gallery")
with gr.Accordion("Advanced options", open=False):
seed = gr.Slider(0, MAX_SEED, value=0, step=1, randomize=True, label="Seed")
randomize_seed = gr.Checkbox(label="Randomize seed across runs", value=True)
with gr.Row():
width = gr.Slider(256, MAX_IMAGE_SIZE, value=512, step=32, label="Width")
height = gr.Slider(256, MAX_IMAGE_SIZE, value=512, step=32, label="Height")
with gr.Row():
guidance_scale = gr.Slider(2.0, 14.0, value=8.0, step=0.1, label="Guidance Scale")
num_inference_steps = gr.Slider(1, 8, value=4, step=1, label="Inference Steps")
num_images = gr.Slider(1, 8, value=1, step=1, label="Number of Images")
run_button.click(
fn=generate,
inputs=[prompt, seed, width, height, guidance_scale, num_inference_steps, num_images, randomize_seed],
outputs=[gallery, seed]
    )