Geek7 commited on
Commit
6d51538
1 Parent(s): 5a34e05

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -130
app.py CHANGED
@@ -1,15 +1,13 @@
1
  import gradio as gr
2
  from random import randint
3
  from all_models import models
4
-
5
  from externalmod import gr_Interface_load
6
-
7
  import asyncio
8
  import os
9
  from threading import RLock
10
- lock = RLock()
11
- HF_TOKEN = os.environ.get("HF_TOKEN") if os.environ.get("HF_TOKEN") else None # If private or gated models aren't used, ENV setting is unnecessary.
12
 
 
 
13
 
14
  def load_fn(models):
15
  global models_load
@@ -24,151 +22,39 @@ def load_fn(models):
24
  m = gr.Interface(lambda: None, ['text'], ['image'])
25
  models_load.update({model: m})
26
 
27
-
28
  load_fn(models)
29
 
30
-
31
  num_models = 6
32
  MAX_SEED = 3999999999
33
  default_models = models[:num_models]
34
  inference_timeout = 600
35
- starting_seed = randint(1941, 2024)
36
-
37
- def extend_choices(choices):
38
- return choices[:num_models] + (num_models - len(choices[:num_models])) * ['NA']
39
-
40
-
41
- def update_imgbox(choices):
42
- choices_plus = extend_choices(choices[:num_models])
43
- return [gr.Image(None, label=m, visible=(m!='NA')) for m in choices_plus]
44
-
45
- def gen_fn(model_str, prompt):
46
- if model_str == 'NA':
47
- return None
48
- noise = str('') #str(randint(0, 99999999999))
49
- return models_load[model_str](f'{prompt} {noise}')
50
 
51
  async def infer(model_str, prompt, seed=1, timeout=inference_timeout):
52
- from pathlib import Path
53
- kwargs = {}
54
- noise = ""
55
- kwargs["seed"] = seed
56
- task = asyncio.create_task(asyncio.to_thread(models_load[model_str].fn,
57
- prompt=f'{prompt} {noise}', **kwargs, token=HF_TOKEN))
58
  await asyncio.sleep(0)
59
  try:
60
  result = await asyncio.wait_for(task, timeout=timeout)
61
  except (Exception, asyncio.TimeoutError) as e:
62
  print(e)
63
  print(f"Task timed out: {model_str}")
64
- if not task.done(): task.cancel()
 
65
  result = None
66
  if task.done() and result is not None:
67
  with lock:
68
  png_path = "image.png"
69
  result.save(png_path)
70
- image = str(Path(png_path).resolve())
71
- return image
72
  return None
73
 
74
- def gen_fnseed(model_str, prompt, seed=1):
75
- if model_str == 'NA':
76
- return None
77
- try:
78
- loop = asyncio.new_event_loop()
79
- result = loop.run_until_complete(infer(model_str, prompt, seed, inference_timeout))
80
- except (Exception, asyncio.CancelledError) as e:
81
- print(e)
82
- print(f"Task aborted: {model_str}")
83
- result = None
84
- finally:
85
- loop.close()
86
- return result
87
-
88
- css="""
89
- .wrapper img {font-size: 98% !important; white-space: nowrap !important; text-align: center !important;
90
- display: inline-block !important;}
91
- """
92
-
93
- with gr.Blocks(css=css) as demo:
94
-
95
- with gr.Tab('Toy World'):
96
- txt_input = gr.Textbox(label='Your prompt:', lines=4)
97
- gen_button = gr.Button('Generate up to 6 images in up to 3 minutes total')
98
- #stop_button = gr.Button('Stop', variant = 'secondary', interactive = False)
99
- gen_button.click(lambda s: gr.update(interactive = True), None)
100
- gr.HTML(
101
- """
102
- <div style="text-align: center; max-width: 1200px; margin: 0 auto;">
103
- <div>
104
- <body>
105
- <div class="center"><p style="margin-bottom: 10px; color: #000000;">Scroll down to see more images and select models.</p>
106
- </div>
107
- </body>
108
- </div>
109
- </div>
110
- """
111
- )
112
- with gr.Row():
113
- output = [gr.Image(label = m, min_width=480) for m in default_models]
114
- current_models = [gr.Textbox(m, visible = False) for m in default_models]
115
-
116
- for m, o in zip(current_models, output):
117
- gen_event = gr.on(triggers=[gen_button.click, txt_input.submit], fn=gen_fn,
118
- inputs=[m, txt_input], outputs=[o], concurrency_limit=None, queue=False)
119
- #stop_button.click(lambda s: gr.update(interactive = False), None, stop_button, cancels = [gen_event])
120
- with gr.Accordion('Model selection'):
121
- model_choice = gr.CheckboxGroup(models, label = f'Choose up to {int(num_models)} different models from the {len(models)} available!', value=default_models, interactive=True)
122
- #model_choice = gr.CheckboxGroup(models, label = f'Choose up to {num_models} different models from the 2 available! Untick them to only use one!', value = default_models, multiselect = True, max_choices = num_models, interactive = True, filterable = False)
123
- model_choice.change(update_imgbox, model_choice, output)
124
- model_choice.change(extend_choices, model_choice, current_models)
125
- with gr.Row():
126
- gr.HTML(
127
- """
128
- <div class="footer">
129
- <p> Based on the <a href="https://huggingface.co/spaces/John6666/hfd_test_nostopbutton">Huggingface NoStopButton</a> Space by John6666, <a href="https://huggingface.co/spaces/derwahnsinn/TestGen">TestGen</a> Space by derwahnsinn, the <a href="https://huggingface.co/spaces/RdnUser77/SpacIO_v1">SpacIO</a> Space by RdnUser77 and Omnibus's Maximum Multiplier! For 6 images with the same model check out the <a href="https://huggingface.co/spaces/Yntec/PrintingPress">Printing Press</a>, for the classic UI with prompt enhancer try <a href="https://huggingface.co/spaces/Yntec/blitz_diffusion">Blitz Diffusion!</a>
130
- </p>
131
- """
132
- )
133
- with gr.Tab('🌱 Use seeds!'):
134
- txt_inputseed = gr.Textbox(label='Your prompt:', lines=4)
135
- gen_buttonseed = gr.Button('Generate up to 6 images with the same seed in up to 3 minutes total')
136
- seed = gr.Slider(label="Use a seed to replicate the same image later (maximum 3999999999)", minimum=0, maximum=MAX_SEED, step=1, value=starting_seed, scale=3)
137
- #stop_button = gr.Button('Stop', variant = 'secondary', interactive = False)
138
- gen_buttonseed.click(lambda s: gr.update(interactive = True), None)
139
- gr.HTML(
140
- """
141
- <div style="text-align: center; max-width: 1200px; margin: 0 auto;">
142
- <div>
143
- <body>
144
- <div class="center"><p style="margin-bottom: 10px; color: #000000;">Scroll down to see more images and select models.</p>
145
- </div>
146
- </body>
147
- </div>
148
- </div>
149
- """
150
- )
151
- with gr.Row():
152
- output = [gr.Image(label = m, min_width=480) for m in default_models]
153
- current_models = [gr.Textbox(m, visible = False) for m in default_models]
154
-
155
- for m, o in zip(current_models, output):
156
- gen_eventseed = gr.on(triggers=[gen_buttonseed.click, txt_inputseed.submit], fn=gen_fnseed,
157
- inputs=[m, txt_inputseed, seed], outputs=[o], concurrency_limit=None, queue=False)
158
- #stop_button.click(lambda s: gr.update(interactive = False), None, stop_button, cancels = [gen_event])
159
- with gr.Accordion('Model selection'):
160
- model_choice = gr.CheckboxGroup(models, label = f'Choose up to {int(num_models)} different models from the {len(models)} available!', value=default_models, interactive=True)
161
- #model_choice = gr.CheckboxGroup(models, label = f'Choose up to {num_models} different models from the 2 available! Untick them to only use one!', value = default_models, multiselect = True, max_choices = num_models, interactive = True, filterable = False)
162
- model_choice.change(update_imgbox, model_choice, output)
163
- model_choice.change(extend_choices, model_choice, current_models)
164
- with gr.Row():
165
- gr.HTML(
166
- """
167
- <div class="footer">
168
- <p> Based on the <a href="https://huggingface.co/spaces/John6666/hfd_test_nostopbutton">Huggingface NoStopButton</a> Space by John6666, <a href="https://huggingface.co/spaces/derwahnsinn/TestGen">TestGen</a> Space by derwahnsinn, the <a href="https://huggingface.co/spaces/RdnUser77/SpacIO_v1">SpacIO</a> Space by RdnUser77 and Omnibus's Maximum Multiplier! For 6 images with the same model check out the <a href="https://huggingface.co/spaces/Yntec/PrintingPress">Printing Press</a>, for the classic UI with prompt enhancer try <a href="https://huggingface.co/spaces/Yntec/blitz_diffusion">Blitz Diffusion!</a>
169
- </p>
170
- """
171
- )
172
 
173
- demo.queue(default_concurrency_limit=200, max_size=200)
174
- demo.launch(show_api=False, max_threads=400)
 
 
1
  import gradio as gr
2
  from random import randint
3
  from all_models import models
 
4
  from externalmod import gr_Interface_load
 
5
  import asyncio
6
  import os
7
  from threading import RLock
 
 
8
 
9
# Serializes writes to the shared output image file across worker threads.
lock = RLock()
# Optional HF token for private/gated models; None when the env var is unset.
HF_TOKEN = os.getenv("HF_TOKEN")
11
 
12
  def load_fn(models):
13
  global models_load
 
22
  m = gr.Interface(lambda: None, ['text'], ['image'])
23
  models_load.update({model: m})
24
 
 
25
# Pre-load an interface for every known model once at startup.
load_fn(models)

# Runtime configuration.
num_models = 6                        # how many models are shown/generated per request
MAX_SEED = 3999999999                 # upper bound for user-supplied seeds
default_models = models[:num_models]  # initial selection
inference_timeout = 600               # seconds before a generation task is abandoned
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
 
32
async def infer(model_str, prompt, seed=1, timeout=inference_timeout):
    """Run one text-to-image inference in a worker thread.

    Args:
        model_str: Key into the global ``models_load`` registry.
        prompt: Text prompt forwarded to the model.
        seed: Seed forwarded to the model for reproducibility.
        timeout: Seconds to wait before abandoning the task.

    Returns:
        str | None: Path of the saved PNG on success, otherwise ``None``.
    """
    task = asyncio.create_task(
        asyncio.to_thread(
            models_load[model_str].fn,
            prompt=prompt,
            seed=seed,
            token=HF_TOKEN,
        )
    )
    # Yield control once so the task gets scheduled before we wait on it.
    await asyncio.sleep(0)
    result = None
    try:
        result = await asyncio.wait_for(task, timeout=timeout)
    except asyncio.TimeoutError:
        # wait_for already cancels the awaited task on timeout; the guard
        # below is defensive in case cancellation has not landed yet.
        print(f"Task timed out: {model_str}")
        if not task.done():
            task.cancel()
    except Exception as e:
        # Bug fix: previously every error was reported as a timeout.
        print(e)
        print(f"Task failed: {model_str}")
        if not task.done():
            task.cancel()
    if task.done() and result is not None:
        # All requests share one output path, so serialize the file write.
        with lock:
            png_path = "image.png"
            result.save(png_path)
            return png_path
    return None
50
 
51
# Expose Gradio API
def generate_api(model_str, prompt, seed=1):
    """Synchronous wrapper around ``infer`` for the Gradio endpoint.

    Returns the path to the generated image, or ``None`` when inference
    failed or produced nothing.
    """
    image_path = asyncio.run(infer(model_str, prompt, seed))
    return image_path if image_path else None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
57
 
58
# Launch Gradio API without frontend
iface = gr.Interface(
    fn=generate_api,
    inputs=["text", "text", "number"],
    outputs="file",
)
# NOTE(review): share=True is ignored on hosted Spaces — confirm it is
# intentional for the target deployment.
iface.launch(show_api=True, share=True)