Uthar committed on
Commit 96a7b08 · verified · 1 Parent(s): 3bcddcc

Update app.py

Files changed (1):
  app.py +117 -76
app.py CHANGED
@@ -10,99 +10,127 @@ preSetPrompt = "High fashion studio foto shoot. tall slender 18+ caucasian woman
 negPreSetPrompt = "[deformed | disfigured], poorly drawn, [bad : wrong] anatomy, [extra | missing | floating | disconnected] limb, (mutated hands and fingers), blurry, text, fuzziness"

 lock = RLock()
-HF_TOKEN = os.environ.get("HF_TOKEN") if os.environ.get("HF_TOKEN") else None
+HF_TOKEN = os.environ.get("HF_TOKEN") if os.environ.get("HF_TOKEN") else None # If private or gated models aren't used, ENV setting is unnecessary.

 def get_current_time():
-    return datetime.now().strftime("%y-%m-%d %H:%M:%S")
+    now = datetime.now()
+    now2 = now
+    current_time = now2.strftime("%y-%m-%d %H:%M:%S")
+    return current_time

 def load_fn(models):
     global models_load
     models_load = {}
     for model in models:
-        if model not in models_load:
+        if model not in models_load.keys():
             try:
                 m = gr_Interface_load(f'models/{model}', hf_token=HF_TOKEN)
             except Exception as error:
                 print(error)
                 m = gr.Interface(lambda: None, ['text'], ['image'])
-            models_load[model] = m
+            models_load.update({model: m})
+

 load_fn(models)

+
 num_models = 6
 max_images = 6
 inference_timeout = 400
 default_models = models[:num_models]
-MAX_SEED = 2**32 - 1
+MAX_SEED = 2**32-1
+

 def extend_choices(choices):
-    return choices[:num_models] + (num_models - len(choices)) * ['NA']
+    return choices[:num_models] + (num_models - len(choices[:num_models])) * ['NA']
+

 def update_imgbox(choices):
-    choices_plus = extend_choices(choices)
-    return [gr.Image(None, label=m, visible=(m != 'NA')) for m in choices_plus]
+    choices_plus = extend_choices(choices[:num_models])
+    return [gr.Image(None, label=m, visible=(m!='NA')) for m in choices_plus]
+

 def random_choices():
     import random
     random.seed()
     return random.choices(models, k=num_models)

+
+# https://huggingface.co/docs/api-inference/detailed_parameters
+# https://huggingface.co/docs/huggingface_hub/package_reference/inference_client
 async def infer(model_str, prompt, nprompt="", height=0, width=0, steps=0, cfg=0, seed=-1, timeout=inference_timeout):
-    kwargs = {"height": height if height > 0 else None, "width": width if width > 0 else None,
-              "num_inference_steps": steps if steps > 0 else None, "guidance_scale": cfg if cfg > 0 else None}
-
+    kwargs = {}
+    if height > 0: kwargs["height"] = height
+    if width > 0: kwargs["width"] = width
+    if steps > 0: kwargs["num_inference_steps"] = steps
+    if cfg > 0: cfg = kwargs["guidance_scale"] = cfg
+
     if seed == -1:
-        kwargs["seed"] = randomize_seed()
-    else:
+        theSeed = randomize_seed()
+        kwargs["seed"] = theSeed
+    else:
         kwargs["seed"] = seed
-
+        theSeed = seed
+
     task = asyncio.create_task(asyncio.to_thread(models_load[model_str].fn, prompt=prompt, negative_prompt=nprompt, **kwargs, token=HF_TOKEN))
-
+    await asyncio.sleep(0)
     try:
         result = await asyncio.wait_for(task, timeout=timeout)
     except asyncio.TimeoutError as e:
+        print(e)
         print(f"Task timed out: {model_str}")
-        task.cancel()
+        if not task.done(): task.cancel()
         result = None
+        raise Exception(f"Task timed out: {model_str}") from e
     except Exception as e:
-        print(f"Error generating image: {model_str} - {e}")
-        task.cancel()
+        print(e)
+        if not task.done(): task.cancel()
         result = None
-
-    if result and not isinstance(result, tuple):
-        png_path = f"{model_str.replace('/', '_')}_{get_current_time()}_{kwargs['seed']}.png"
+        raise Exception() from e
+    if task.done() and result is not None and not isinstance(result, tuple):
         with lock:
-            image = save_image(result, png_path, model_str, prompt, nprompt, height, width, steps, cfg, kwargs["seed"])
+            # png_path = "img.png"
+            # png_path = get_current_time() + "_" + model_str.replace("/", "_") + ".png"
+            # png_path = model_str.replace("/", "_") + " - " + prompt + " - " + get_current_time() + ".png"
+            png_path = model_str.replace("/", "_") + " - " + get_current_time() + "_" + str(theSeed) + ".png"
+            image = save_image(result, png_path, model_str, prompt, nprompt, height, width, steps, cfg, seed)
             return image
     return None

+
 def gen_fn(model_str, prompt, nprompt="", height=0, width=0, steps=0, cfg=0, seed=-1):
-    loop = asyncio.new_event_loop()
     try:
-        result = loop.run_until_complete(infer(model_str, prompt, nprompt, height, width, steps, cfg, seed, inference_timeout))
+        loop = asyncio.new_event_loop()
+        result = loop.run_until_complete(infer(model_str, prompt, nprompt,
+                                               height, width, steps, cfg, seed, inference_timeout))
     except (Exception, asyncio.CancelledError) as e:
-        print(f"Error generating image: {e}")
+        print(e)
+        print(f"Task aborted: {model_str}")
         result = None
         raise gr.Error(f"Task aborted: {model_str}, Error: {e}")
     finally:
         loop.close()
     return result

+
 def add_gallery(image, model_str, gallery):
-    if image is not None:
-        gallery = [(image, model_str)] + gallery[:5] # Keep only the latest 6 images
+    if gallery is None: gallery = []
+    with lock:
+        if image is not None: gallery.insert(0, (image, model_str))
     return gallery

-# Interface Layout
-CSS = """
-.gradio-container { max-width: 1200px; margin: 0 auto; }
-.output { width: 112px; height: 112px; }
-.gallery { min-width: 512px; min-height: 512px; }
+
+CSS="""
+.gradio-container { max-width: 1200px; margin: 0 auto; !important; }
+.output { width=112px; height=112px; max_width=112px; max_height=112px; !important; }
+.gallery { min_width=512px; min_height=512px; max_height=512px; !important; }
+.guide { text-align: center; !important; }
 """

 js_func = """
 function refresh() {
     const url = new URL(window.location);
+
     if (url.searchParams.get('__theme') !== 'dark') {
         url.searchParams.set('__theme', 'dark');
         window.location.href = url.href;
@@ -110,7 +138,30 @@ function refresh() {
     }
 """

-with gr.Blocks(fill_width=True, css=CSS) as demo:
+js_AutoSave="""
+
+console.log("Yo");
+
+var img1 = document.querySelector("div#component-355 .svelte-1kpcxni button.svelte-1kpcxni .svelte-1kpcxni img"),
+observer = new MutationObserver((changes) => {
+    changes.forEach(change => {
+        if(change.attributeName.includes('src')){
+            console.log(img1.src);
+            document.querySelector("div#component-355 .svelte-1kpcxni .svelte-sr71km a.svelte-1s8vnbx button").click();
+        }
+    });
+});
+observer.observe(img1, {attributes : true});
+
+"""
+
+js="""
+<script>console.log("BOOOOOOOOOOOOOOOOBS");</script>
+"""
+
+with gr.Blocks(fill_width=True, js=js) as demo:
+# with gr.Blocks(fill_width=True, css=CSS) as demo:
+# with gr.Blocks(theme='JohnSmith9982/small_and_pretty', fill_width=True, css=CSS, js=js_func) as demo:
     gr.HTML("")
     with gr.Tab('6 Models'):
         with gr.Column(scale=2):
@@ -130,7 +181,8 @@ with gr.Blocks(fill_width=True, css=CSS) as demo:
             with gr.Row():
                 gen_button = gr.Button(f'Generate up to {int(num_models)} images', variant='primary', scale=3)
                 random_button = gr.Button(f'Randomize Models', variant='secondary', scale=1)
-
+                #stop_button = gr.Button('Stop', variant='stop', interactive=False, scale=1)
+                #gen_button.click(lambda: gr.update(interactive=True), None, stop_button)
             gr.Markdown("", elem_classes="guide")

         with gr.Column(scale=1):
@@ -144,43 +196,29 @@ with gr.Blocks(fill_width=True, css=CSS) as demo:
         with gr.Column(scale=2):
             gallery = gr.Gallery(label="Output", show_download_button=True, elem_classes="gallery",
                                  interactive=False, show_share_button=False, container=True, format="png",
-                                 preview=True, object_fit="cover", columns=2, rows=2)
-
-        # Inside the `gr.Blocks` context where the components are defined:
-        for i, o in enumerate(output):
-            # Ensure event handler is inside `gr.Blocks`
-            num_images.change(
-                lambda num_images, i=i: gr.update(visible=(i < num_images)), # `i` corresponds to the index of the image
-                [num_images], # Only num_images needs to be an input
-                o, # Outputs the updated visibility of the image `o`
-                queue=False
-            )
-
-            # Image generation function
-            gen_event2 = gr.on(
-                triggers=[gen_button.click, txt_input.submit],
-                fn=gen_fn,
-                inputs=[i, num_images, model_choice2, txt_input, neg_input, height, width, steps, cfg, seed],
-                outputs=[o]
-            )
-
-            # Update gallery when a new image is generated
-            o.change(add_gallery, [o, model_choice2, gallery], [gallery])
-
-        # Model selection and updating outputs
+                                 preview=True, object_fit="cover", columns=2, rows=2)
+
+        for m, o in zip(current_models, output):
+            gen_event = gr.on(triggers=[gen_button.click, txt_input.submit], fn=gen_fn,
+                              inputs=[m, txt_input, neg_input, height, width, steps, cfg, seed], outputs=[o],
+                              concurrency_limit=None, queue=False) # Be sure to delete ", queue=False" when activating the stop button
+            o.change(add_gallery, [o, m, gallery], [gallery])
+            #stop_button.click(lambda: gr.update(interactive=False), None, stop_button, cancels=[gen_event])
+
         with gr.Column(scale=4):
             with gr.Accordion('Model selection'):
-                model_choice = gr.CheckboxGroup(models, label=f'Choose up to {int(num_models)} different models from the {len(models)} available!', value=default_models, interactive=True)
+                model_choice = gr.CheckboxGroup(models, label = f'Choose up to {int(num_models)} different models from the {len(models)} available!', value=default_models, interactive=True)
                 model_choice.change(update_imgbox, model_choice, output)
                 model_choice.change(extend_choices, model_choice, current_models)
                 random_button.click(random_choices, None, model_choice)

-        # Single model section
     with gr.Tab('Single model'):
         with gr.Column(scale=2):
             model_choice2 = gr.Dropdown(models, label='Choose model', value=models[0])
             with gr.Group():
-                txt_input2 = gr.Textbox(label='Your prompt:', value=preSetPrompt, lines=3, autofocus=1)
+                # global preSetPrompt
+                # global negPreSetPrompt
+                txt_input2 = gr.Textbox(label='Your prompt:', value = preSetPrompt, lines=3, autofocus=1)
                 neg_input2 = gr.Textbox(label='Negative prompt:', value=negPreSetPrompt, lines=1)
                 with gr.Accordion("Advanced", open=False, visible=True):
                     with gr.Row():
@@ -192,11 +230,11 @@ with gr.Blocks(fill_width=True, css=CSS) as demo:
                     seed2 = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
                     seed_rand2 = gr.Button("Randomize Seed", size="sm", variant="secondary")
                     seed_rand2.click(randomize_seed, None, [seed2], queue=False)
-
-                num_images2 = gr.Slider(1, max_images, value=max_images, step=1, label='Number of images')
-
+                num_images = gr.Slider(1, max_images, value=max_images, step=1, label='Number of images')
             with gr.Row():
-                gen_button2 = gr.Button('Let the machine hallucinate', variant='primary', scale=2)
+                gen_button2 = gr.Button('Let the machine halucinate', variant='primary', scale=2)
+                #stop_button2 = gr.Button('Stop', variant='stop', interactive=False, scale=1)
+                #gen_button2.click(lambda: gr.update(interactive=True), None, stop_button2)

         with gr.Column(scale=1):
             with gr.Group():
@@ -208,19 +246,22 @@ with gr.Blocks(fill_width=True, css=CSS) as demo:
         with gr.Column(scale=2):
             gallery2 = gr.Gallery(label="Output", show_download_button=True, elem_classes="gallery",
                                   interactive=False, show_share_button=True, container=True, format="png",
-                                  preview=True, object_fit="cover", columns=2, rows=2)
+                                  preview=True, object_fit="cover", columns=2, rows=2)

         for i, o in enumerate(output2):
             img_i = gr.Number(i, visible=False)
-            num_images2.change(lambda i, n: gr.update(visible=(i < n)), [img_i, num_images2], o, queue=False)
-            gen_event2 = gr.on(
-                triggers=[gen_button2.click, txt_input2.submit],
-                fn=gen_fn,
-                inputs=[img_i, num_images2, model_choice2, txt_input2, neg_input2, height2, width2, steps2, cfg2, seed2],
-                outputs=[o]
-            )
-
+            num_images.change(lambda i, n: gr.update(visible = (i < n)), [img_i, num_images], o, queue=False)
+            gen_event2 = gr.on(triggers=[gen_button2.click, txt_input2.submit],
+                               fn=lambda i, n, m, t1, t2, n1, n2, n3, n4, n5: gen_fn(m, t1, t2, n1, n2, n3, n4, n5) if (i < n) else None,
+                               inputs=[img_i, num_images, model_choice2, txt_input2, neg_input2,
+                                       height2, width2, steps2, cfg2, seed2], outputs=[o],
+                               concurrency_limit=None, queue=False) # Be sure to delete ", queue=False" when activating the stop button
             o.change(add_gallery, [o, model_choice2, gallery2], [gallery2])
+            #stop_button2.click(lambda: gr.update(interactive=False), None, stop_button2, cancels=[gen_event2])
+
+    # gr.Markdown(js_AutoSave)
+    gr.Markdown("")

-demo.launch(show_api=False, max_threads=400)
-
+# demo.queue(default_concurrency_limit=200, max_size=200)
+demo.launch(show_api=False, max_threads=400)
+# demo.launch(show_api=False, max_threads=400, js=js_AutoSave)
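For reference, the rewritten infer() only forwards generation parameters that are explicitly set (values greater than zero) and resolves seed == -1 to a freshly randomized seed, which is also reused in the PNG filename. A minimal standalone sketch of that behaviour follows; the randomize_seed() stand-in below is an assumption, since the app imports its own helper:

import random

MAX_SEED = 2**32 - 1

def randomize_seed() -> int:
    # Stand-in for the app's own randomize_seed() helper (assumed behaviour).
    return random.randint(0, MAX_SEED)

def build_infer_kwargs(height=0, width=0, steps=0, cfg=0, seed=-1):
    # Mirrors the kwargs/seed logic in the updated infer(): unset (<= 0) values are
    # omitted, and seed == -1 is replaced by a randomized seed.
    kwargs = {}
    if height > 0: kwargs["height"] = height
    if width > 0: kwargs["width"] = width
    if steps > 0: kwargs["num_inference_steps"] = steps
    if cfg > 0: kwargs["guidance_scale"] = cfg
    the_seed = randomize_seed() if seed == -1 else seed
    kwargs["seed"] = the_seed
    return kwargs, the_seed

# Example: only height and width are set; steps and cfg fall back to model defaults.
print(build_infer_kwargs(height=1024, width=768))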