Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -6,21 +6,16 @@ import os
|
|
6 |
from threading import RLock
|
7 |
from datetime import datetime
|
8 |
|
9 |
-
preSetPrompt = "
|
10 |
-
# preSetPrompt = "cute tall slender athletic 20+ nude caucasian woman. gorgeous face. perky tits. gaping outie pussy. pussy juice. sly smile. explicit pose. artistic. photorealistic. cinematic. f1.4"
|
11 |
-
|
12 |
-
# H. R. Giger prompt:
|
13 |
-
# preSetPrompt = "a tall slender athletic caucasian nude 18+ female cyborg. gorgeous face. perky tits. wet skin. sensual expression. she is entangled in rusty chains, rusty barbed wire and electric cables. old dark dusty decaying spaceship designed by h.r. giger. rusty metal dildos. wet tubes and wet plastic hoses. dark, gloomy teal cinematic light. photorealistic."
|
14 |
-
|
15 |
negPreSetPrompt = "[deformed | disfigured], poorly drawn, [bad : wrong] anatomy, [extra | missing | floating | disconnected] limb, (mutated hands and fingers), blurry, text, fuzziness"
|
16 |
|
17 |
lock = RLock()
|
|
|
18 |
HF_TOKEN = os.environ.get("HF_TOKEN") if os.environ.get("HF_TOKEN") else None # If private or gated models aren't used, ENV setting is unnecessary.
|
19 |
|
20 |
def get_current_time():
    """Return the current local time formatted as 'yy-mm-dd HH:MM:SS'.

    Used to build unique, sortable image file names.
    """
    now = datetime.now()
    # Fix: the original referenced the undefined name `now2`, which would
    # raise NameError on every call; `now` is the value computed above.
    current_time = now.strftime("%y-%m-%d %H:%M:%S")
    return current_time
|
25 |
|
26 |
def load_fn(models):
|
@@ -38,9 +33,8 @@ def load_fn(models):
|
|
38 |
|
39 |
load_fn(models)
|
40 |
|
41 |
-
|
42 |
-
|
43 |
-
max_images = 6
|
44 |
inference_timeout = 400
|
45 |
default_models = models[:num_models]
|
46 |
MAX_SEED = 2**32-1
|
@@ -61,21 +55,18 @@ def random_choices():
|
|
61 |
return random.choices(models, k=num_models)
|
62 |
|
63 |
|
64 |
-
# https://huggingface.co/docs/api-inference/detailed_parameters
|
65 |
-
# https://huggingface.co/docs/huggingface_hub/package_reference/inference_client
|
66 |
async def infer(model_str, prompt, nprompt="", height=0, width=0, steps=0, cfg=0, seed=-1, timeout=inference_timeout):
|
67 |
kwargs = {}
|
68 |
if height > 0: kwargs["height"] = height
|
69 |
if width > 0: kwargs["width"] = width
|
70 |
if steps > 0: kwargs["num_inference_steps"] = steps
|
71 |
if cfg > 0: cfg = kwargs["guidance_scale"] = cfg
|
72 |
-
|
73 |
if seed == -1:
|
74 |
theSeed = randomize_seed()
|
75 |
-
kwargs["seed"] = theSeed
|
76 |
else:
|
77 |
-
kwargs["seed"] = seed
|
78 |
theSeed = seed
|
|
|
79 |
|
80 |
task = asyncio.create_task(asyncio.to_thread(models_load[model_str].fn, prompt=prompt, negative_prompt=nprompt, **kwargs, token=HF_TOKEN))
|
81 |
await asyncio.sleep(0)
|
@@ -83,26 +74,23 @@ async def infer(model_str, prompt, nprompt="", height=0, width=0, steps=0, cfg=0
|
|
83 |
result = await asyncio.wait_for(task, timeout=timeout)
|
84 |
except asyncio.TimeoutError as e:
|
85 |
print(e)
|
86 |
-
print(f"Task timed out: {model_str}")
|
87 |
if not task.done(): task.cancel()
|
88 |
result = None
|
89 |
raise Exception(f"Task timed out: {model_str}") from e
|
90 |
except Exception as e:
|
91 |
print(e)
|
|
|
92 |
if not task.done(): task.cancel()
|
93 |
result = None
|
94 |
raise Exception() from e
|
95 |
if task.done() and result is not None and not isinstance(result, tuple):
|
96 |
with lock:
|
97 |
-
# png_path = "img.png"
|
98 |
-
# png_path = get_current_time() + "_" + model_str.replace("/", "_") + ".png"
|
99 |
-
# png_path = model_str.replace("/", "_") + " - " + prompt + " - " + get_current_time() + ".png"
|
100 |
png_path = model_str.replace("/", "_") + " - " + get_current_time() + "_" + str(theSeed) + ".png"
|
101 |
-
image = save_image(result, png_path, model_str, prompt, nprompt, height, width, steps, cfg,
|
102 |
return image
|
103 |
return None
|
104 |
|
105 |
-
|
106 |
def gen_fn(model_str, prompt, nprompt="", height=0, width=0, steps=0, cfg=0, seed=-1):
|
107 |
try:
|
108 |
loop = asyncio.new_event_loop()
|
@@ -110,7 +98,7 @@ def gen_fn(model_str, prompt, nprompt="", height=0, width=0, steps=0, cfg=0, see
|
|
110 |
height, width, steps, cfg, seed, inference_timeout))
|
111 |
except (Exception, asyncio.CancelledError) as e:
|
112 |
print(e)
|
113 |
-
print(f"Task aborted: {model_str}")
|
114 |
result = None
|
115 |
raise gr.Error(f"Task aborted: {model_str}, Error: {e}")
|
116 |
finally:
|
@@ -124,44 +112,88 @@ def add_gallery(image, model_str, gallery):
|
|
124 |
if image is not None: gallery.insert(0, (image, model_str))
|
125 |
return gallery
|
126 |
|
|
|
|
|
127 |
|
128 |
-
|
129 |
-
|
130 |
-
|
131 |
-
|
132 |
-
.
|
133 |
-
|
134 |
-
|
135 |
-
js_func = """
|
136 |
-
function refresh() {
|
137 |
-
const url = new URL(window.location);
|
138 |
-
if (url.searchParams.get('__theme') !== 'dark') {
|
139 |
-
url.searchParams.set('__theme', 'dark');
|
140 |
-
window.location.href = url.href;
|
141 |
}
|
142 |
}
|
143 |
-
|
144 |
|
145 |
-
|
146 |
-
|
147 |
-
|
148 |
-
|
149 |
-
|
150 |
-
|
151 |
-
|
152 |
-
|
153 |
-
|
154 |
-
|
155 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
156 |
});
|
157 |
-
|
158 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
159 |
"""
|
160 |
|
161 |
-
with gr.Blocks(
|
162 |
-
|
163 |
-
gr.
|
164 |
-
with gr.Tab('6 Models'):
|
165 |
with gr.Column(scale=2):
|
166 |
with gr.Group():
|
167 |
txt_input = gr.Textbox(label='Your prompt:', value=preSetPrompt, lines=3, autofocus=1)
|
@@ -177,31 +209,27 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', fill_width=True, css=CSS) as demo:
|
|
177 |
seed_rand = gr.Button("Randomize Seed 🎲", size="sm", variant="secondary")
|
178 |
seed_rand.click(randomize_seed, None, [seed], queue=False)
|
179 |
with gr.Row():
|
180 |
-
gen_button = gr.Button(f'Generate up to {int(num_models)} images', variant='primary', scale=3)
|
181 |
random_button = gr.Button(f'Randomize Models', variant='secondary', scale=1)
|
182 |
-
#stop_button = gr.Button('Stop', variant='stop', interactive=False, scale=1)
|
183 |
-
#gen_button.click(lambda: gr.update(interactive=True), None, stop_button)
|
184 |
-
gr.Markdown("", elem_classes="guide")
|
185 |
|
186 |
with gr.Column(scale=1):
|
187 |
with gr.Group():
|
188 |
with gr.Row():
|
189 |
-
output = [gr.Image(label=m, show_download_button=True, elem_classes="
|
190 |
interactive=False, width=112, height=112, show_share_button=False, format="png",
|
191 |
visible=True) for m in default_models]
|
192 |
current_models = [gr.Textbox(m, visible=False) for m in default_models]
|
193 |
|
194 |
with gr.Column(scale=2):
|
195 |
-
gallery = gr.Gallery(label="Output", show_download_button=True,
|
196 |
interactive=False, show_share_button=False, container=True, format="png",
|
197 |
preview=True, object_fit="cover", columns=2, rows=2)
|
198 |
|
199 |
for m, o in zip(current_models, output):
|
200 |
gen_event = gr.on(triggers=[gen_button.click, txt_input.submit], fn=gen_fn,
|
201 |
inputs=[m, txt_input, neg_input, height, width, steps, cfg, seed], outputs=[o],
|
202 |
-
concurrency_limit=None, queue=False)
|
203 |
o.change(add_gallery, [o, m, gallery], [gallery])
|
204 |
-
#stop_button.click(lambda: gr.update(interactive=False), None, stop_button, cancels=[gen_event])
|
205 |
|
206 |
with gr.Column(scale=4):
|
207 |
with gr.Accordion('Model selection'):
|
@@ -214,8 +242,6 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', fill_width=True, css=CSS) as demo:
|
|
214 |
with gr.Column(scale=2):
|
215 |
model_choice2 = gr.Dropdown(models, label='Choose model', value=models[0])
|
216 |
with gr.Group():
|
217 |
-
# global preSetPrompt
|
218 |
-
# global negPreSetPrompt
|
219 |
txt_input2 = gr.Textbox(label='Your prompt:', value = preSetPrompt, lines=3, autofocus=1)
|
220 |
neg_input2 = gr.Textbox(label='Negative prompt:', value=negPreSetPrompt, lines=1)
|
221 |
with gr.Accordion("Advanced", open=False, visible=True):
|
@@ -230,19 +256,17 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', fill_width=True, css=CSS) as demo:
|
|
230 |
seed_rand2.click(randomize_seed, None, [seed2], queue=False)
|
231 |
num_images = gr.Slider(1, max_images, value=max_images, step=1, label='Number of images')
|
232 |
with gr.Row():
|
233 |
-
gen_button2 = gr.Button('Let the machine halucinate', variant='primary', scale=2)
|
234 |
-
#stop_button2 = gr.Button('Stop', variant='stop', interactive=False, scale=1)
|
235 |
-
#gen_button2.click(lambda: gr.update(interactive=True), None, stop_button2)
|
236 |
|
237 |
with gr.Column(scale=1):
|
238 |
with gr.Group():
|
239 |
with gr.Row():
|
240 |
-
output2 = [gr.Image(label='', show_download_button=True,
|
241 |
interactive=False, width=112, height=112, visible=True, format="png",
|
242 |
show_share_button=False, show_label=False) for _ in range(max_images)]
|
243 |
|
244 |
with gr.Column(scale=2):
|
245 |
-
gallery2 = gr.Gallery(label="Output", show_download_button=True,
|
246 |
interactive=False, show_share_button=True, container=True, format="png",
|
247 |
preview=True, object_fit="cover", columns=2, rows=2)
|
248 |
|
@@ -253,13 +277,7 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', fill_width=True, css=CSS) as demo:
|
|
253 |
fn=lambda i, n, m, t1, t2, n1, n2, n3, n4, n5: gen_fn(m, t1, t2, n1, n2, n3, n4, n5) if (i < n) else None,
|
254 |
inputs=[img_i, num_images, model_choice2, txt_input2, neg_input2,
|
255 |
height2, width2, steps2, cfg2, seed2], outputs=[o],
|
256 |
-
concurrency_limit=None, queue=False)
|
257 |
o.change(add_gallery, [o, model_choice2, gallery2], [gallery2])
|
258 |
-
#stop_button2.click(lambda: gr.update(interactive=False), None, stop_button2, cancels=[gen_event2])
|
259 |
-
|
260 |
-
# gr.Markdown(js_AutoSave)
|
261 |
-
gr.Markdown("")
|
262 |
|
263 |
-
# demo.queue(default_concurrency_limit=200, max_size=200)
|
264 |
demo.launch(show_api=False, max_threads=400)
|
265 |
-
# demo.launch(show_api=False, max_threads=400, js=js_AutoSave)
|
|
|
6 |
from threading import RLock
|
7 |
from datetime import datetime
|
8 |
|
9 |
+
preSetPrompt = "tall slender athletic 18+ caucasian woman. gorgeous face. perky tits. short wet messy hair. open jeans. torn shirt. challenging. explicit. artistic. photorealistic. f1.4"
|
|
|
|
|
|
|
|
|
|
|
10 |
negPreSetPrompt = "[deformed | disfigured], poorly drawn, [bad : wrong] anatomy, [extra | missing | floating | disconnected] limb, (mutated hands and fingers), blurry, text, fuzziness"
|
11 |
|
12 |
lock = RLock()
|
13 |
+
|
14 |
# If private or gated models aren't used, setting the HF_TOKEN env var is unnecessary.
# `or None` collapses both "unset" and "set but empty" to None — exactly what the
# original conditional expression did — while reading the environment only once.
HF_TOKEN = os.environ.get("HF_TOKEN") or None
|
15 |
|
16 |
def get_current_time():
    """Return the current local time as a 'yy-mm-dd HH:MM:SS' string."""
    return datetime.now().strftime("%y-%m-%d %H:%M:%S")
|
20 |
|
21 |
def load_fn(models):
|
|
|
33 |
|
34 |
load_fn(models)
|
35 |
|
36 |
+
num_models = 12
|
37 |
+
max_images = 12
|
|
|
38 |
inference_timeout = 400
|
39 |
default_models = models[:num_models]
|
40 |
MAX_SEED = 2**32-1
|
|
|
55 |
return random.choices(models, k=num_models)
|
56 |
|
57 |
|
|
|
|
|
58 |
async def infer(model_str, prompt, nprompt="", height=0, width=0, steps=0, cfg=0, seed=-1, timeout=inference_timeout):
|
59 |
kwargs = {}
|
60 |
if height > 0: kwargs["height"] = height
|
61 |
if width > 0: kwargs["width"] = width
|
62 |
if steps > 0: kwargs["num_inference_steps"] = steps
|
63 |
if cfg > 0: cfg = kwargs["guidance_scale"] = cfg
|
64 |
+
|
65 |
if seed == -1:
|
66 |
theSeed = randomize_seed()
|
|
|
67 |
else:
|
|
|
68 |
theSeed = seed
|
69 |
+
kwargs["seed"] = theSeed
|
70 |
|
71 |
task = asyncio.create_task(asyncio.to_thread(models_load[model_str].fn, prompt=prompt, negative_prompt=nprompt, **kwargs, token=HF_TOKEN))
|
72 |
await asyncio.sleep(0)
|
|
|
74 |
result = await asyncio.wait_for(task, timeout=timeout)
|
75 |
except asyncio.TimeoutError as e:
|
76 |
print(e)
|
77 |
+
print(f"infer: Task timed out: {model_str}")
|
78 |
if not task.done(): task.cancel()
|
79 |
result = None
|
80 |
raise Exception(f"Task timed out: {model_str}") from e
|
81 |
except Exception as e:
|
82 |
print(e)
|
83 |
+
print(f"infer: exception: {model_str}")
|
84 |
if not task.done(): task.cancel()
|
85 |
result = None
|
86 |
raise Exception() from e
|
87 |
if task.done() and result is not None and not isinstance(result, tuple):
|
88 |
with lock:
|
|
|
|
|
|
|
89 |
png_path = model_str.replace("/", "_") + " - " + get_current_time() + "_" + str(theSeed) + ".png"
|
90 |
+
image = save_image(result, png_path, model_str, prompt, nprompt, height, width, steps, cfg, theSeed)
|
91 |
return image
|
92 |
return None
|
93 |
|
|
|
94 |
def gen_fn(model_str, prompt, nprompt="", height=0, width=0, steps=0, cfg=0, seed=-1):
|
95 |
try:
|
96 |
loop = asyncio.new_event_loop()
|
|
|
98 |
height, width, steps, cfg, seed, inference_timeout))
|
99 |
except (Exception, asyncio.CancelledError) as e:
|
100 |
print(e)
|
101 |
+
print(f"gen_fn: Task aborted: {model_str}")
|
102 |
result = None
|
103 |
raise gr.Error(f"Task aborted: {model_str}, Error: {e}")
|
104 |
finally:
|
|
|
112 |
def add_gallery(image, model_str, gallery):
    """Prepend (image, model_str) to `gallery` in place and return it.

    A `None` image is ignored, so failed generations add no empty slots.
    """
    if image is not None:
        gallery.insert(0, (image, model_str))
    return gallery
|
114 |
|
115 |
+
JS="""
|
116 |
+
<script>
|
117 |
|
118 |
+
/*
|
119 |
+
function simulateButtonPress_() {
|
120 |
+
const button = document.getElementById('simulate-button');
|
121 |
+
if (button) {
|
122 |
+
button.click(); // Simulate the button press
|
123 |
+
console.log('Button Pressed!');
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
124 |
}
|
125 |
}
|
126 |
+
*/
|
127 |
|
128 |
+
function simulateButtonPress() {
|
129 |
+
console.log('Button Pressed!');
|
130 |
+
}
|
131 |
+
|
132 |
+
// Function to observe image changes
|
133 |
+
function observeImageChanges() {
|
134 |
+
// Select all images with the 'image-monitor' class
|
135 |
+
const images = document.querySelectorAll('.svelte-1pijsyv');
|
136 |
+
|
137 |
+
// Create a MutationObserver to watch for changes in the image src
|
138 |
+
const observer = new MutationObserver((mutationsList, observer) => {
|
139 |
+
mutationsList.forEach(mutation => {
|
140 |
+
if (mutation.type === 'attributes' && mutation.attributeName === 'src') {
|
141 |
+
// If the image src changes, simulate button press
|
142 |
+
console.log('Image changed!');
|
143 |
+
simulateButtonPress();
|
144 |
+
}
|
145 |
});
|
146 |
+
});
|
147 |
+
|
148 |
+
// Observer options: observe changes to attributes (like src)
|
149 |
+
const config = { attributes: true };
|
150 |
+
|
151 |
+
// Start observing each image
|
152 |
+
images.forEach(image => {
|
153 |
+
observer.observe(image, config);
|
154 |
+
});
|
155 |
+
}
|
156 |
+
|
157 |
+
// Start observing
|
158 |
+
|
159 |
+
|
160 |
+
window.addEventListener('load', () => {
|
161 |
+
observeImageChanges();
|
162 |
+
console.log("Yo");
|
163 |
+
});
|
164 |
+
|
165 |
+
</script>
|
166 |
+
"""
|
167 |
+
|
168 |
+
CSS="""
|
169 |
+
<style>
|
170 |
+
.image-monitor {
|
171 |
+
border:1px solid red;
|
172 |
+
}
|
173 |
+
|
174 |
+
/*
|
175 |
+
.svelte-1pijsyv{
|
176 |
+
border:1px solid green;
|
177 |
+
}
|
178 |
+
*/
|
179 |
+
|
180 |
+
.gallery-container{
|
181 |
+
max-height: 512px;
|
182 |
+
}
|
183 |
+
|
184 |
+
.butt{
|
185 |
+
background-color:#2b4764 !important
|
186 |
+
}
|
187 |
+
.butt:hover{
|
188 |
+
background-color:#3a6c9f !important;
|
189 |
+
}
|
190 |
+
|
191 |
+
</style>
|
192 |
"""
|
193 |
|
194 |
+
# with gr.Blocks(fill_width=True, head=js) as demo:
|
195 |
+
with gr.Blocks(head=CSS + JS) as demo:
|
196 |
+
with gr.Tab(str(num_models) + ' Models'):
|
|
|
197 |
with gr.Column(scale=2):
|
198 |
with gr.Group():
|
199 |
txt_input = gr.Textbox(label='Your prompt:', value=preSetPrompt, lines=3, autofocus=1)
|
|
|
209 |
seed_rand = gr.Button("Randomize Seed 🎲", size="sm", variant="secondary")
|
210 |
seed_rand.click(randomize_seed, None, [seed], queue=False)
|
211 |
with gr.Row():
|
212 |
+
gen_button = gr.Button(f'Generate up to {int(num_models)} images', variant='primary', scale=3, elem_classes=["butt"])
|
213 |
random_button = gr.Button(f'Randomize Models', variant='secondary', scale=1)
|
|
|
|
|
|
|
214 |
|
215 |
with gr.Column(scale=1):
|
216 |
with gr.Group():
|
217 |
with gr.Row():
|
218 |
+
output = [gr.Image(label=m, show_download_button=True, elem_classes=["image-monitor"],
|
219 |
interactive=False, width=112, height=112, show_share_button=False, format="png",
|
220 |
visible=True) for m in default_models]
|
221 |
current_models = [gr.Textbox(m, visible=False) for m in default_models]
|
222 |
|
223 |
with gr.Column(scale=2):
|
224 |
+
gallery = gr.Gallery(label="Output", show_download_button=True,
|
225 |
interactive=False, show_share_button=False, container=True, format="png",
|
226 |
preview=True, object_fit="cover", columns=2, rows=2)
|
227 |
|
228 |
for m, o in zip(current_models, output):
|
229 |
gen_event = gr.on(triggers=[gen_button.click, txt_input.submit], fn=gen_fn,
|
230 |
inputs=[m, txt_input, neg_input, height, width, steps, cfg, seed], outputs=[o],
|
231 |
+
concurrency_limit=None, queue=False)
|
232 |
o.change(add_gallery, [o, m, gallery], [gallery])
|
|
|
233 |
|
234 |
with gr.Column(scale=4):
|
235 |
with gr.Accordion('Model selection'):
|
|
|
242 |
with gr.Column(scale=2):
|
243 |
model_choice2 = gr.Dropdown(models, label='Choose model', value=models[0])
|
244 |
with gr.Group():
|
|
|
|
|
245 |
txt_input2 = gr.Textbox(label='Your prompt:', value = preSetPrompt, lines=3, autofocus=1)
|
246 |
neg_input2 = gr.Textbox(label='Negative prompt:', value=negPreSetPrompt, lines=1)
|
247 |
with gr.Accordion("Advanced", open=False, visible=True):
|
|
|
256 |
seed_rand2.click(randomize_seed, None, [seed2], queue=False)
|
257 |
num_images = gr.Slider(1, max_images, value=max_images, step=1, label='Number of images')
|
258 |
with gr.Row():
|
259 |
+
# Typo fix in the user-facing button label: "halucinate" -> "hallucinate".
gen_button2 = gr.Button('Let the machine hallucinate', variant='primary', scale=2, elem_classes=["butt"])
|
|
|
|
|
260 |
|
261 |
with gr.Column(scale=1):
|
262 |
with gr.Group():
|
263 |
with gr.Row():
|
264 |
+
output2 = [gr.Image(label='', show_download_button=True,
|
265 |
interactive=False, width=112, height=112, visible=True, format="png",
|
266 |
show_share_button=False, show_label=False) for _ in range(max_images)]
|
267 |
|
268 |
with gr.Column(scale=2):
|
269 |
+
gallery2 = gr.Gallery(label="Output", show_download_button=True,
|
270 |
interactive=False, show_share_button=True, container=True, format="png",
|
271 |
preview=True, object_fit="cover", columns=2, rows=2)
|
272 |
|
|
|
277 |
fn=lambda i, n, m, t1, t2, n1, n2, n3, n4, n5: gen_fn(m, t1, t2, n1, n2, n3, n4, n5) if (i < n) else None,
|
278 |
inputs=[img_i, num_images, model_choice2, txt_input2, neg_input2,
|
279 |
height2, width2, steps2, cfg2, seed2], outputs=[o],
|
280 |
+
concurrency_limit=None, queue=False)
|
281 |
o.change(add_gallery, [o, model_choice2, gallery2], [gallery2])
|
|
|
|
|
|
|
|
|
282 |
|
|
|
283 |
demo.launch(show_api=False, max_threads=400)
|
|