import gradio as gr
from all_models import models
from externalmod import gr_Interface_load, save_image, randomize_seed
import asyncio
import os
from threading import RLock
from datetime import datetime
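# Default positive / negative prompts shown in the prompt boxes on startup.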
preSetPrompt = "High fashion studio foto shoot. tall slender 18+ caucasian woman. gorgeous face. high waist sexy bodysuit. photorealistic. f1.4"
negPreSetPrompt = "[deformed | disfigured], poorly drawn, [bad : wrong] anatomy, [extra | missing | floating | disconnected] limb, (mutated hands and fingers), blurry, text, fuzziness"
lock = RLock()
HF_TOKEN = os.environ.get("HF_TOKEN") if os.environ.get("HF_TOKEN") else None # If private or gated models aren't used, ENV setting is unnecessary.
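# Timestamp helper, used when composing filenames for saved images.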
def get_current_time():
    now = datetime.now()
    current_time = now.strftime("%y-%m-%d %H:%M:%S")
    return current_time

def load_fn(models):
    global models_load
    models_load = {}
    for model in models:
        if model not in models_load.keys():
            try:
                m = gr_Interface_load(f'models/{model}', hf_token=HF_TOKEN)
            except Exception as error:
                print(error)
                m = gr.Interface(lambda: None, ['text'], ['image'])
            models_load.update({model: m})
load_fn(models)
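# Number of models shown at once, max images in the single-model tab,
# per-request timeout in seconds, and the seed range used by the sliders below.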
num_models = 12
max_images = 12
inference_timeout = 400
default_models = models[:num_models]
MAX_SEED = 2**32-1
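# Helpers that pad the model selection to exactly num_models entries ('NA' = empty slot),
# toggle visibility of the matching image boxes, and pick a random set of models.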
def extend_choices(choices):
    return choices[:num_models] + (num_models - len(choices[:num_models])) * ['NA']

def update_imgbox(choices):
    choices_plus = extend_choices(choices[:num_models])
    return [gr.Image(None, label=m, visible=(m != 'NA')) for m in choices_plus]

def random_choices():
    import random
    random.seed()
    return random.choices(models, k=num_models)
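# Run one model asynchronously: build kwargs from the non-zero UI values, randomize the
# seed if it is -1, execute the model call in a worker thread with a timeout, and save
# the resulting image as a PNG.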
async def infer(model_str, prompt, nprompt="", height=0, width=0, steps=0, cfg=0, seed=-1, timeout=inference_timeout):
    kwargs = {}
    if height > 0: kwargs["height"] = height
    if width > 0: kwargs["width"] = width
    if steps > 0: kwargs["num_inference_steps"] = steps
    if cfg > 0: kwargs["guidance_scale"] = cfg
    if seed == -1:
        theSeed = randomize_seed()
    else:
        theSeed = seed
    kwargs["seed"] = theSeed
    task = asyncio.create_task(asyncio.to_thread(models_load[model_str].fn, prompt=prompt, negative_prompt=nprompt, **kwargs, token=HF_TOKEN))
    await asyncio.sleep(0)
    try:
        result = await asyncio.wait_for(task, timeout=timeout)
    except asyncio.TimeoutError as e:
        print(e)
        print(f"infer: Task timed out: {model_str}")
        if not task.done(): task.cancel()
        result = None
        raise Exception(f"Task timed out: {model_str}") from e
    except Exception as e:
        print(e)
        print(f"infer: exception: {model_str}")
        if not task.done(): task.cancel()
        result = None
        raise Exception(f"Task failed: {model_str}") from e
    if task.done() and result is not None and not isinstance(result, tuple):
        with lock:
            png_path = model_str.replace("/", "_") + " - " + get_current_time() + "_" + str(theSeed) + ".png"
            image = save_image(result, png_path, model_str, prompt, nprompt, height, width, steps, cfg, theSeed)
            return image
    return None
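# Synchronous wrapper around infer() for Gradio callbacks; runs it in a fresh event loop
# and surfaces failures to the UI as gr.Error.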
def gen_fn(model_str, prompt, nprompt="", height=0, width=0, steps=0, cfg=0, seed=-1):
    loop = asyncio.new_event_loop()
    try:
        result = loop.run_until_complete(infer(model_str, prompt, nprompt,
                                                height, width, steps, cfg, seed, inference_timeout))
    except (Exception, asyncio.CancelledError) as e:
        print(e)
        print(f"gen_fn: Task aborted: {model_str}")
        result = None
        raise gr.Error(f"Task aborted: {model_str}, Error: {e}")
    finally:
        loop.close()
    return result
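# Prepend a newly generated image, tagged with its model name, to the shared gallery.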
def add_gallery(image, model_str, gallery):
    if gallery is None: gallery = []
    with lock:
        if image is not None: gallery.insert(0, (image, model_str))
    return gallery
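# Inline JavaScript injected via Blocks(head=...): a MutationObserver watches the rendered
# images and logs whenever an image's src attribute changes.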
JS="""
<script>
/*
function simulateButtonPress_() {
const button = document.getElementById('simulate-button');
if (button) {
button.click(); // Simulate the button press
console.log('Button Pressed!');
}
}
*/
function simulateButtonPress() {
console.log('Button Pressed!');
}
// Function to observe image changes
function observeImageChanges() {
// Select all images with the 'image-monitor' class
const images = document.querySelectorAll('.svelte-1pijsyv');
// Create a MutationObserver to watch for changes in the image src
const observer = new MutationObserver((mutationsList, observer) => {
mutationsList.forEach(mutation => {
if (mutation.type === 'attributes' && mutation.attributeName === 'src') {
// If the image src changes, simulate button press
console.log('Image changed!');
simulateButtonPress();
}
});
});
// Observer options: observe changes to attributes (like src)
const config = { attributes: true };
// Start observing each image
images.forEach(image => {
observer.observe(image, config);
});
}
// Start observing
window.addEventListener('load', () => {
observeImageChanges();
console.log("Yo");
});
</script>
"""
CSS="""
<style>
.image-monitor {
border:1px solid red;
}
/*
.svelte-1pijsyv{
border:1px solid green;
}
*/
.gallery-container{
max-height: 512px;
}
.butt{
background-color:#2b4764 !important
}
.butt:hover{
background-color:#3a6c9f !important;
}
</style>
"""
# with gr.Blocks(fill_width=True, head=js) as demo:
with gr.Blocks(head=CSS + JS) as demo:
    with gr.Tab(str(num_models) + ' Models'):
        with gr.Column(scale=2):
            with gr.Group():
                txt_input = gr.Textbox(label='Your prompt:', value=preSetPrompt, lines=3, autofocus=1)
                neg_input = gr.Textbox(label='Negative prompt:', value=negPreSetPrompt, lines=1)
                with gr.Accordion("Advanced", open=False, visible=True):
                    with gr.Row():
                        width = gr.Slider(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
                        height = gr.Slider(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
                    with gr.Row():
                        steps = gr.Slider(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0)
                        cfg = gr.Slider(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0)
                        seed = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
                        seed_rand = gr.Button("Randomize Seed 🎲", size="sm", variant="secondary")
                        seed_rand.click(randomize_seed, None, [seed], queue=False)
            with gr.Row():
                gen_button = gr.Button(f'Generate up to {int(num_models)} images', variant='primary', scale=3, elem_classes=["butt"])
                random_button = gr.Button('Randomize Models', variant='secondary', scale=1)
        with gr.Column(scale=1):
            with gr.Group():
                with gr.Row():
                    output = [gr.Image(label=m, show_download_button=True, elem_classes=["image-monitor"],
                                       interactive=False, width=112, height=112, show_share_button=False, format="png",
                                       visible=True) for m in default_models]
                    current_models = [gr.Textbox(m, visible=False) for m in default_models]
            with gr.Column(scale=2):
                gallery = gr.Gallery(label="Output", show_download_button=True,
                                     interactive=False, show_share_button=False, container=True, format="png",
                                     preview=True, object_fit="cover", columns=2, rows=2)
        for m, o in zip(current_models, output):
            gen_event = gr.on(triggers=[gen_button.click, txt_input.submit], fn=gen_fn,
                              inputs=[m, txt_input, neg_input, height, width, steps, cfg, seed], outputs=[o],
                              concurrency_limit=None, queue=False)
            o.change(add_gallery, [o, m, gallery], [gallery])
        with gr.Column(scale=4):
            with gr.Accordion('Model selection'):
                model_choice = gr.CheckboxGroup(models, label=f'Choose up to {int(num_models)} different models from the {len(models)} available!', value=default_models, interactive=True)
                model_choice.change(update_imgbox, model_choice, output)
                model_choice.change(extend_choices, model_choice, current_models)
        random_button.click(random_choices, None, model_choice)
    with gr.Tab('Single model'):
        with gr.Column(scale=2):
            model_choice2 = gr.Dropdown(models, label='Choose model', value=models[0])
            with gr.Group():
                txt_input2 = gr.Textbox(label='Your prompt:', value=preSetPrompt, lines=3, autofocus=1)
                neg_input2 = gr.Textbox(label='Negative prompt:', value=negPreSetPrompt, lines=1)
                with gr.Accordion("Advanced", open=False, visible=True):
                    with gr.Row():
                        width2 = gr.Slider(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
                        height2 = gr.Slider(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
                    with gr.Row():
                        steps2 = gr.Slider(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0)
                        cfg2 = gr.Slider(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0)
                        seed2 = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
                        seed_rand2 = gr.Button("Randomize Seed", size="sm", variant="secondary")
                        seed_rand2.click(randomize_seed, None, [seed2], queue=False)
                num_images = gr.Slider(1, max_images, value=max_images, step=1, label='Number of images')
            with gr.Row():
                gen_button2 = gr.Button('Let the machine hallucinate', variant='primary', scale=2, elem_classes=["butt"])
        with gr.Column(scale=1):
            with gr.Group():
                with gr.Row():
                    output2 = [gr.Image(label='', show_download_button=True,
                                        interactive=False, width=112, height=112, visible=True, format="png",
                                        show_share_button=False, show_label=False) for _ in range(max_images)]
            with gr.Column(scale=2):
                gallery2 = gr.Gallery(label="Output", show_download_button=True,
                                      interactive=False, show_share_button=True, container=True, format="png",
                                      preview=True, object_fit="cover", columns=2, rows=2)
        for i, o in enumerate(output2):
            img_i = gr.Number(i, visible=False)
            num_images.change(lambda i, n: gr.update(visible=(i < n)), [img_i, num_images], o, queue=False)
            gen_event2 = gr.on(triggers=[gen_button2.click, txt_input2.submit],
                               fn=lambda i, n, m, t1, t2, n1, n2, n3, n4, n5: gen_fn(m, t1, t2, n1, n2, n3, n4, n5) if (i < n) else None,
                               inputs=[img_i, num_images, model_choice2, txt_input2, neg_input2,
                                       height2, width2, steps2, cfg2, seed2], outputs=[o],
                               concurrency_limit=None, queue=False)
            o.change(add_gallery, [o, model_choice2, gallery2], [gallery2])
demo.launch(show_api=False, max_threads=400)