John6666 committed on
Commit
1074823
·
verified ·
1 Parent(s): ef87b38

Upload 2 files

Browse files
Files changed (2) hide show
  1. README.md +1 -1
  2. app.py +27 -18
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🛕🛕
4
  colorFrom: green
5
  colorTo: blue
6
  sdk: gradio
7
- sdk_version: 4.41.0
8
  app_file: app.py
9
  pinned: false
10
  duplicated_from: Yntec/Diffusion80XX
 
4
  colorFrom: green
5
  colorTo: blue
6
  sdk: gradio
7
+ sdk_version: 4.42.0
8
  app_file: app.py
9
  pinned: false
10
  duplicated_from: Yntec/Diffusion80XX
app.py CHANGED
@@ -6,7 +6,7 @@ import asyncio
6
  import os
7
  from threading import RLock
8
  lock = RLock()
9
- HF_TOKEN = os.environ.get("HF_TOKEN") if os.environ.get("HF_TOKEN") else None
10
 
11
 
12
  def load_fn(models):
@@ -29,6 +29,7 @@ num_models = 6
29
  max_images = 6
30
  inference_timeout = 300
31
  default_models = models[:num_models]
 
32
 
33
 
34
  def extend_choices(choices):
@@ -48,7 +49,7 @@ def random_choices():
48
 
49
  # https://huggingface.co/docs/api-inference/detailed_parameters
50
  # https://huggingface.co/docs/huggingface_hub/package_reference/inference_client
51
- async def infer(model_str, prompt, nprompt="", height=None, width=None, steps=None, cfg=None, timeout=inference_timeout):
52
  from pathlib import Path
53
  kwargs = {}
54
  if height is not None and height >= 256: kwargs["height"] = height
@@ -56,9 +57,11 @@ async def infer(model_str, prompt, nprompt="", height=None, width=None, steps=No
56
  if steps is not None and steps >= 1: kwargs["num_inference_steps"] = steps
57
  if cfg is not None and cfg > 0: cfg = kwargs["guidance_scale"] = cfg
58
  noise = ""
59
- rand = randint(1, 500)
60
- for i in range(rand):
61
- noise += " "
 
 
62
  task = asyncio.create_task(asyncio.to_thread(models_load[model_str].fn,
63
  prompt=f'{prompt} {noise}', negative_prompt=nprompt, **kwargs, token=HF_TOKEN))
64
  await asyncio.sleep(0)
@@ -78,13 +81,13 @@ async def infer(model_str, prompt, nprompt="", height=None, width=None, steps=No
78
  return None
79
 
80
 
81
- def gen_fn(model_str, prompt, nprompt="", height=None, width=None, steps=None, cfg=None):
82
  if model_str == 'NA':
83
  return None
84
  try:
85
  loop = asyncio.new_event_loop()
86
  result = loop.run_until_complete(infer(model_str, prompt, nprompt,
87
- height, width, steps, cfg, inference_timeout))
88
  except (Exception, asyncio.CancelledError) as e:
89
  print(e)
90
  print(f"Task aborted: {model_str}")
@@ -115,10 +118,13 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=CSS) as demo:
115
  txt_input = gr.Textbox(label='Your prompt:', lines=4)
116
  neg_input = gr.Textbox(label='Negative prompt:', lines=1)
117
  with gr.Accordion("Advanced", open=False, visible=True):
118
- width = gr.Number(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
119
- height = gr.Number(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
120
- steps = gr.Number(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0)
121
- cfg = gr.Number(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0)
 
 
 
122
  with gr.Row():
123
  gen_button = gr.Button(f'Generate up to {int(num_models)} images in up to 3 minutes total', scale=3)
124
  random_button = gr.Button(f'Random {int(num_models)} 🎲', variant='secondary', scale=1)
@@ -141,7 +147,7 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=CSS) as demo:
141
 
142
  for m, o in zip(current_models, output):
143
  gen_event = gr.on(triggers=[gen_button.click, txt_input.submit], fn=gen_fn,
144
- inputs=[m, txt_input, neg_input, height, width, steps, cfg], outputs=[o])
145
  o.change(add_gallery, [o, m, gallery], [gallery])
146
  stop_button.click(lambda: gr.update(interactive=False), None, stop_button, cancels=[gen_event])
147
 
@@ -159,10 +165,13 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=CSS) as demo:
159
  txt_input2 = gr.Textbox(label='Your prompt:', lines=4)
160
  neg_input2 = gr.Textbox(label='Negative prompt:', lines=1)
161
  with gr.Accordion("Advanced", open=False, visible=True):
162
- width2 = gr.Number(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
163
- height2 = gr.Number(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
164
- steps2 = gr.Number(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0)
165
- cfg2 = gr.Number(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0)
 
 
 
166
  num_images = gr.Slider(1, max_images, value=max_images, step=1, label='Number of images')
167
  with gr.Row():
168
  gen_button2 = gr.Button('Generate', scale=2)
@@ -185,9 +194,9 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=CSS) as demo:
185
  img_i = gr.Number(i, visible = False)
186
  num_images.change(lambda i, n: gr.update(visible = (i < n)), [img_i, num_images], o)
187
  gen_event2 = gr.on(triggers=[gen_button2.click, txt_input2.submit],
188
- fn=lambda i, n, m, t1, t2, n1, n2, n3, n4: gen_fn(m, t1, t2, n1, n2, n3, n4) if (i < n) else None,
189
  inputs=[img_i, num_images, model_choice2, txt_input2, neg_input2,
190
- height2, width2, steps2, cfg2], outputs=[o])
191
  o.change(add_gallery, [o, model_choice2, gallery2], [gallery2])
192
  stop_button2.click(lambda: gr.update(interactive=False), None, stop_button2, cancels=[gen_event2])
193
 
 
6
  import os
7
  from threading import RLock
8
  lock = RLock()
9
+ HF_TOKEN = os.environ.get("HF_TOKEN") if os.environ.get("HF_TOKEN") else None # If private or gated models aren't used, ENV setting is unnecessary.
10
 
11
 
12
  def load_fn(models):
 
29
  max_images = 6
30
  inference_timeout = 300
31
  default_models = models[:num_models]
32
+ MAX_SEED = 2**32-1
33
 
34
 
35
  def extend_choices(choices):
 
49
 
50
  # https://huggingface.co/docs/api-inference/detailed_parameters
51
  # https://huggingface.co/docs/huggingface_hub/package_reference/inference_client
52
+ async def infer(model_str, prompt, nprompt="", height=None, width=None, steps=None, cfg=None, seed=-1, timeout=inference_timeout):
53
  from pathlib import Path
54
  kwargs = {}
55
  if height is not None and height >= 256: kwargs["height"] = height
 
57
  if steps is not None and steps >= 1: kwargs["num_inference_steps"] = steps
58
  if cfg is not None and cfg > 0: cfg = kwargs["guidance_scale"] = cfg
59
  noise = ""
60
+ if seed >= 0: kwargs["seed"] = seed
61
+ else:
62
+ rand = randint(1, 500)
63
+ for i in range(rand):
64
+ noise += " "
65
  task = asyncio.create_task(asyncio.to_thread(models_load[model_str].fn,
66
  prompt=f'{prompt} {noise}', negative_prompt=nprompt, **kwargs, token=HF_TOKEN))
67
  await asyncio.sleep(0)
 
81
  return None
82
 
83
 
84
+ def gen_fn(model_str, prompt, nprompt="", height=None, width=None, steps=None, cfg=None, seed=-1):
85
  if model_str == 'NA':
86
  return None
87
  try:
88
  loop = asyncio.new_event_loop()
89
  result = loop.run_until_complete(infer(model_str, prompt, nprompt,
90
+ height, width, steps, cfg, seed, inference_timeout))
91
  except (Exception, asyncio.CancelledError) as e:
92
  print(e)
93
  print(f"Task aborted: {model_str}")
 
118
  txt_input = gr.Textbox(label='Your prompt:', lines=4)
119
  neg_input = gr.Textbox(label='Negative prompt:', lines=1)
120
  with gr.Accordion("Advanced", open=False, visible=True):
121
+ with gr.Row():
122
+ width = gr.Number(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
123
+ height = gr.Number(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
124
+ with gr.Row():
125
+ steps = gr.Number(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0)
126
+ cfg = gr.Number(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0)
127
+ seed = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
128
  with gr.Row():
129
  gen_button = gr.Button(f'Generate up to {int(num_models)} images in up to 3 minutes total', scale=3)
130
  random_button = gr.Button(f'Random {int(num_models)} 🎲', variant='secondary', scale=1)
 
147
 
148
  for m, o in zip(current_models, output):
149
  gen_event = gr.on(triggers=[gen_button.click, txt_input.submit], fn=gen_fn,
150
+ inputs=[m, txt_input, neg_input, height, width, steps, cfg, seed], outputs=[o])
151
  o.change(add_gallery, [o, m, gallery], [gallery])
152
  stop_button.click(lambda: gr.update(interactive=False), None, stop_button, cancels=[gen_event])
153
 
 
165
  txt_input2 = gr.Textbox(label='Your prompt:', lines=4)
166
  neg_input2 = gr.Textbox(label='Negative prompt:', lines=1)
167
  with gr.Accordion("Advanced", open=False, visible=True):
168
+ with gr.Row():
169
+ width2 = gr.Number(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
170
+ height2 = gr.Number(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
171
+ with gr.Row():
172
+ steps2 = gr.Number(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0)
173
+ cfg2 = gr.Number(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0)
174
+ seed2 = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
175
  num_images = gr.Slider(1, max_images, value=max_images, step=1, label='Number of images')
176
  with gr.Row():
177
  gen_button2 = gr.Button('Generate', scale=2)
 
194
  img_i = gr.Number(i, visible = False)
195
  num_images.change(lambda i, n: gr.update(visible = (i < n)), [img_i, num_images], o)
196
  gen_event2 = gr.on(triggers=[gen_button2.click, txt_input2.submit],
197
+ fn=lambda i, n, m, t1, t2, n1, n2, n3, n4, n5: gen_fn(m, t1, t2, n1, n2, n3, n4, n5) if (i < n) else None,
198
  inputs=[img_i, num_images, model_choice2, txt_input2, neg_input2,
199
+ height2, width2, steps2, cfg2, seed2], outputs=[o])
200
  o.change(add_gallery, [o, model_choice2, gallery2], [gallery2])
201
  stop_button2.click(lambda: gr.update(interactive=False), None, stop_button2, cancels=[gen_event2])
202