Update app.py
app.py CHANGED
@@ -34,7 +34,6 @@ def load_fn(models):
 
 load_fn(models)
 
-
 num_models = 12
 max_images = 12
 inference_timeout = 400
@@ -57,21 +56,18 @@ def random_choices():
     return random.choices(models, k=num_models)
 
 
-# https://huggingface.co/docs/api-inference/detailed_parameters
-# https://huggingface.co/docs/huggingface_hub/package_reference/inference_client
 async def infer(model_str, prompt, nprompt="", height=0, width=0, steps=0, cfg=0, seed=-1, timeout=inference_timeout):
     kwargs = {}
     if height > 0: kwargs["height"] = height
     if width > 0: kwargs["width"] = width
     if steps > 0: kwargs["num_inference_steps"] = steps
     if cfg > 0: cfg = kwargs["guidance_scale"] = cfg
-
+
     if seed == -1:
         theSeed = randomize_seed()
-        kwargs["seed"] = theSeed
     else:
-        kwargs["seed"] = seed
         theSeed = seed
+    kwargs["seed"] = theSeed
 
     task = asyncio.create_task(asyncio.to_thread(models_load[model_str].fn, prompt=prompt, negative_prompt=nprompt, **kwargs, token=HF_TOKEN))
     await asyncio.sleep(0)
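
Note on the hunk above: moving `kwargs["seed"]` out of both branches means the value passed to the model is always `theSeed`, the same number later embedded in the output filename, whether it was randomized or user-supplied. A minimal sketch of the consolidated logic, assuming a hypothetical `randomize_seed()` that returns a random 32-bit integer:

```python
import random

MAX_SEED = 2**32 - 1  # assumption: the app's randomize_seed() uses a similar bound

def randomize_seed() -> int:
    # hypothetical stand-in for the app's randomize_seed()
    return random.randint(0, MAX_SEED)

def resolve_seed(seed: int = -1) -> int:
    # -1 means "pick a random seed"; any other value is used as-is
    return randomize_seed() if seed == -1 else seed

kwargs = {}
theSeed = resolve_seed(-1)
kwargs["seed"] = theSeed  # set once, after the branch, so it always matches theSeed
print(theSeed, kwargs)
```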
@@ -90,15 +86,11 @@ async def infer(model_str, prompt, nprompt="", height=0, width=0, steps=0, cfg=0
             raise Exception() from e
     if task.done() and result is not None and not isinstance(result, tuple):
         with lock:
-            # png_path = "img.png"
-            # png_path = get_current_time() + "_" + model_str.replace("/", "_") + ".png"
-            # png_path = model_str.replace("/", "_") + " - " + prompt + " - " + get_current_time() + ".png"
             png_path = model_str.replace("/", "_") + " - " + get_current_time() + "_" + str(theSeed) + ".png"
             image = save_image(result, png_path, model_str, prompt, nprompt, height, width, steps, cfg, seed)
             return image
     return None
 
-
 def gen_fn(model_str, prompt, nprompt="", height=0, width=0, steps=0, cfg=0, seed=-1):
     try:
         loop = asyncio.new_event_loop()
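
For context, `gen_fn` (only its opening lines appear in this hunk) wraps the async `infer` so it can be called from a synchronous Gradio callback by spinning up a fresh event loop. A self-contained sketch of that pattern, with a dummy coroutine standing in for `infer`; the timeout handling and the `loop.close()` cleanup are assumptions, since the rest of `gen_fn` is outside this diff:

```python
import asyncio

async def infer_stub(prompt: str) -> str:
    # dummy coroutine standing in for the app's async infer(...)
    await asyncio.sleep(0.1)
    return f"image for: {prompt}"

def gen_fn_sketch(prompt: str):
    # Gradio event handlers are plain functions, so drive the coroutine
    # with a dedicated event loop and block until it finishes.
    loop = asyncio.new_event_loop()
    try:
        return loop.run_until_complete(infer_stub(prompt))
    except Exception:
        return None
    finally:
        loop.close()

print(gen_fn_sketch("a red bicycle"))
```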
@@ -120,49 +112,11 @@ def add_gallery(image, model_str, gallery):
     if image is not None: gallery.insert(0, (image, model_str))
     return gallery
 
-
-CSS="""
-.gradio-container { max-width: 1200px; margin: 0 auto; !important; }
-.output { width=112px; height=112px; max_width=112px; max_height=112px; !important; }
-.gallery { min_width=512px; min_height=512px; max_height=512px; !important; }
-.guide { text-align: center; !important; }
-"""
-
-js_func = """
-function refresh() {
-    const url = new URL(window.location);
-
-    if (url.searchParams.get('__theme') !== 'dark') {
-        url.searchParams.set('__theme', 'dark');
-        window.location.href = url.href;
-    }
-}
-"""
-
-js_AutoSave="""
-
-console.log("Yo");
-
-var img1 = document.querySelector("div#component-355 .svelte-1kpcxni button.svelte-1kpcxni .svelte-1kpcxni img"),
-    observer = new MutationObserver((changes) => {
-        changes.forEach(change => {
-            if(change.attributeName.includes('src')){
-                console.log(img1.src);
-                document.querySelector("div#component-355 .svelte-1kpcxni .svelte-sr71km a.svelte-1s8vnbx button").click();
-            }
-        });
-    });
-observer.observe(img1, {attributes : true});
-
-"""
-
 js="""
 <script>console.log("BOOOOOOOOOOOOOOOOBS");</script>
 """
 
 with gr.Blocks(fill_width=True, head=js) as demo:
-    # with gr.Blocks(fill_width=True, css=CSS) as demo:
-    # with gr.Blocks(theme='JohnSmith9982/small_and_pretty', fill_width=True, css=CSS, js=js_func) as demo:
     gr.HTML("")
     with gr.Tab(str(num_models) + ' Models'):
         with gr.Column(scale=2):
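
The hunk above deletes the now-unused `CSS`, `js_func`, and `js_AutoSave` strings and keeps only the `head=js` injection on the active `gr.Blocks`. For reference, a sketch of how the three `gr.Blocks` options used in this file differ; the selector and messages below are illustrative, not taken from the app:

```python
import gradio as gr

head_html = "<script>console.log('injected into the page head');</script>"  # head=: raw HTML added to <head>
css = ".gradio-container { max-width: 1200px; margin: 0 auto; }"            # css=: stylesheet applied to the app
on_load_js = "() => { console.log('runs once the UI has loaded'); }"        # js=: JS function executed on load

with gr.Blocks(head=head_html, css=css, js=on_load_js) as demo:
    gr.Markdown("head= / css= / js= demo")

if __name__ == "__main__":
    demo.launch()
```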
@@ -182,8 +136,6 @@ with gr.Blocks(fill_width=True, head=js) as demo:
             with gr.Row():
                 gen_button = gr.Button(f'Generate up to {int(num_models)} images', variant='primary', scale=3)
                 random_button = gr.Button(f'Randomize Models', variant='secondary', scale=1)
-                #stop_button = gr.Button('Stop', variant='stop', interactive=False, scale=1)
-                #gen_button.click(lambda: gr.update(interactive=True), None, stop_button)
             gr.Markdown("", elem_classes="guide")
 
         with gr.Column(scale=1):
@@ -202,9 +154,8 @@ with gr.Blocks(fill_width=True, head=js) as demo:
             for m, o in zip(current_models, output):
                 gen_event = gr.on(triggers=[gen_button.click, txt_input.submit], fn=gen_fn,
                                   inputs=[m, txt_input, neg_input, height, width, steps, cfg, seed], outputs=[o],
-                                  concurrency_limit=None, queue=False)
+                                  concurrency_limit=None, queue=False)
                 o.change(add_gallery, [o, m, gallery], [gallery])
-                #stop_button.click(lambda: gr.update(interactive=False), None, stop_button, cancels=[gen_event])
 
         with gr.Column(scale=4):
             with gr.Accordion('Model selection'):
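
The loop above registers one `gen_fn` event per model output and fires it from either the button click or the textbox submit via `gr.on`, with `queue=False` and no concurrency limit so all requests start immediately. A stripped-down sketch of the same wiring; the echo handler and the three dummy models are placeholders, not the app's `gen_fn` or model list:

```python
import gradio as gr

def fake_gen(model_name: str, prompt: str) -> str:
    # placeholder for gen_fn: echoes instead of generating an image
    return f"[{model_name}] {prompt}"

with gr.Blocks() as demo:
    txt = gr.Textbox(label="Prompt")
    btn = gr.Button("Generate")
    models = [gr.Textbox(value=f"model-{i}", visible=False) for i in range(3)]
    outputs = [gr.Textbox(label=f"output {i}") for i in range(3)]
    for m, o in zip(models, outputs):
        # one event per output; both triggers share the same handler
        gr.on(triggers=[btn.click, txt.submit], fn=fake_gen,
              inputs=[m, txt], outputs=[o],
              concurrency_limit=None, queue=False)

if __name__ == "__main__":
    demo.launch()
```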
@@ -217,8 +168,6 @@ with gr.Blocks(fill_width=True, head=js) as demo:
             with gr.Column(scale=2):
                 model_choice2 = gr.Dropdown(models, label='Choose model', value=models[0])
                 with gr.Group():
-                    # global preSetPrompt
-                    # global negPreSetPrompt
                     txt_input2 = gr.Textbox(label='Your prompt:', value = preSetPrompt, lines=3, autofocus=1)
                     neg_input2 = gr.Textbox(label='Negative prompt:', value=negPreSetPrompt, lines=1)
                     with gr.Accordion("Advanced", open=False, visible=True):
@@ -234,8 +183,6 @@ with gr.Blocks(fill_width=True, head=js) as demo:
                         num_images = gr.Slider(1, max_images, value=max_images/2, step=1, label='Number of images')
                 with gr.Row():
                     gen_button2 = gr.Button('Let the machine halucinate', variant='primary', scale=2)
-                    #stop_button2 = gr.Button('Stop', variant='stop', interactive=False, scale=1)
-                    #gen_button2.click(lambda: gr.update(interactive=True), None, stop_button2)
 
             with gr.Column(scale=1):
                 with gr.Group():
@@ -256,13 +203,9 @@ with gr.Blocks(fill_width=True, head=js) as demo:
                     fn=lambda i, n, m, t1, t2, n1, n2, n3, n4, n5: gen_fn(m, t1, t2, n1, n2, n3, n4, n5) if (i < n) else None,
                     inputs=[img_i, num_images, model_choice2, txt_input2, neg_input2,
                             height2, width2, steps2, cfg2, seed2], outputs=[o],
-                    concurrency_limit=None, queue=False)
+                    concurrency_limit=None, queue=False)
                 o.change(add_gallery, [o, model_choice2, gallery2], [gallery2])
-                #stop_button2.click(lambda: gr.update(interactive=False), None, stop_button2, cancels=[gen_event2])
 
-    # gr.Markdown(js_AutoSave)
     gr.Markdown("")
 
-# demo.queue(default_concurrency_limit=200, max_size=200)
 demo.launch(show_api=False, max_threads=400)
-# demo.launch(show_api=False, max_threads=400, js=js_AutoSave)