John6666 committed
Commit 155d3fa
1 Parent(s): 948f4a3

Upload 2 files

Files changed (2)
  1. app.py +38 -14
  2. externalmod.py +6 -3
app.py CHANGED
@@ -10,7 +10,6 @@ lock = RLock()
 def load_fn(models):
     global models_load
     models_load = {}
-
     for model in models:
         if model not in models_load.keys():
             try:
@@ -26,7 +25,7 @@ load_fn(models)
 
 num_models = 6
 max_images = 6
-timeout = 60
+inference_timeout = 300
 default_models = models[:num_models]
 
 
@@ -39,13 +38,19 @@ def update_imgbox(choices):
     return [gr.Image(None, label = m, visible = (m != 'NA')) for m in choices_plus]
 
 
-async def infer(model_str, prompt, timeout):
-    from PIL import Image
+async def infer(model_str, prompt, nprompt="", height=None, width=None, steps=None, cfg=None, timeout=inference_timeout):
+    from pathlib import Path
+    kwargs = {}
+    if height is not None and height >= 256: kwargs["height"] = height
+    if width is not None and width >= 256: kwargs["width"] = width
+    if steps is not None and steps >= 1: kwargs["num_inference_steps"] = steps
+    if cfg is not None and cfg > 0: cfg = kwargs["guidance_scale"] = cfg
     noise = ""
     rand = randint(1, 500)
     for i in range(rand):
         noise += " "
-    task = asyncio.create_task(asyncio.to_thread(models_load[model_str], f'{prompt} {noise}'))
+    task = asyncio.create_task(asyncio.to_thread(models_load[model_str].fn,
+                                                 prompt=f'{prompt} {noise}', negative_prompt=nprompt, **kwargs))
     await asyncio.sleep(0)
     try:
         result = await asyncio.wait_for(task, timeout=timeout)
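
Two asides on this hunk: the run of 1 to 500 random spaces appended to the prompt is presumably a cache-buster, so repeated identical prompts still trigger fresh generations from the serverless API; and the chained assignment `cfg = kwargs["guidance_scale"] = cfg` re-binds `cfg` to its own value, so a plain `kwargs["guidance_scale"] = cfg` would behave identically.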
@@ -56,17 +61,20 @@ async def infer(model_str, prompt, timeout):
         result = None
     if task.done() and result is not None:
         with lock:
-            image = Image.open(result).convert('RGBA')
+            png_path = "image.png"
+            result.save(png_path)
+            image = str(Path(png_path).resolve())
         return image
     return None
 
 
-def gen_fn(model_str, prompt):
+def gen_fn(model_str, prompt, nprompt="", height=None, width=None, steps=None, cfg=None):
     if model_str == 'NA':
         return None
     try:
         loop = asyncio.new_event_loop()
-        result = loop.run_until_complete(infer(model_str, prompt, timeout))
+        result = loop.run_until_complete(infer(model_str, prompt, nprompt,
+                                               height, width, steps, cfg, inference_timeout))
     except (Exception, asyncio.CancelledError) as e:
         print(e)
         print(f"Task aborted: {model_str}")
@@ -93,7 +101,14 @@ CSS="""
93
  with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=CSS) as demo:
94
  with gr.Tab('The Dream'):
95
  with gr.Column(scale=2):
96
- txt_input = gr.Textbox(label='Your prompt:', lines=4)
 
 
 
 
 
 
 
97
  with gr.Row():
98
  gen_button = gr.Button(f'Generate up to {int(num_models)} images in up to 3 minutes total', scale=2)
99
  stop_button = gr.Button('Stop', variant='secondary', interactive=False, scale=1)
@@ -114,7 +129,8 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=CSS) as demo:
                               preview=True, object_fit="cover", columns=2, rows=2)
 
         for m, o in zip(current_models, output):
-            gen_event = gr.on(triggers=[gen_button.click, txt_input.submit], fn=gen_fn, inputs=[m, txt_input], outputs=[o])
+            gen_event = gr.on(triggers=[gen_button.click, txt_input.submit], fn=gen_fn,
+                              inputs=[m, txt_input, neg_input, height, width, steps, cfg], outputs=[o])
             o.change(add_gallery, [o, m, gallery], [gallery])
         stop_button.click(lambda: gr.update(interactive=False), None, stop_button, cancels=[gen_event])
 
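`gr.on` returns an event-dependency object, and keeping a reference to it (`gen_event`) is what makes the Stop button work: `cancels=[gen_event]` tells Gradio which in-flight jobs to abort. The same wiring in stripped-down form (a two-component sketch, not the app's actual layout):

```python
import gradio as gr
import time

def slow_echo(text):
    time.sleep(10)  # stand-in for a long model call
    return text

with gr.Blocks() as demo:
    txt = gr.Textbox()
    out = gr.Textbox()
    go = gr.Button("Go")
    stop = gr.Button("Stop")
    # gr.on binds one handler to several triggers and returns the
    # event dependency that cancels= needs.
    ev = gr.on(triggers=[go.click, txt.submit], fn=slow_echo,
               inputs=[txt], outputs=[out])
    stop.click(lambda: None, None, None, cancels=[ev])
```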
@@ -127,12 +143,19 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=CSS) as demo:
     with gr.Tab('Single model'):
         with gr.Column(scale=2):
             model_choice2 = gr.Dropdown(models, label='Choose model', value=models[0])
-            txt_input2 = gr.Textbox(label='Your prompt:', lines=4)
+            with gr.Group():
+                txt_input2 = gr.Textbox(label='Your prompt:', lines=4)
+                neg_input2 = gr.Textbox(label='Negative prompt:', lines=1)
+                with gr.Accordion("Advanced", open=False, visible=True):
+                    width2 = gr.Number(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=None)
+                    height2 = gr.Number(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=None)
+                    steps2 = gr.Number(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=None)
+                    cfg2 = gr.Number(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=None)
             num_images = gr.Slider(1, max_images, value=max_images, step=1, label='Number of images')
             with gr.Row():
                 gen_button2 = gr.Button('Generate', scale=2)
                 stop_button2 = gr.Button('Stop', variant='secondary', interactive=False, scale=1)
-            gen_button2.click(lambda: gr.update(interactive = True), None, stop_button2)
+            gen_button2.click(lambda: gr.update(interactive=True), None, stop_button2)
 
         with gr.Column(scale=1):
             with gr.Group():
@@ -150,8 +173,9 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=CSS) as demo:
             img_i = gr.Number(i, visible = False)
             num_images.change(lambda i, n: gr.update(visible = (i < n)), [img_i, num_images], o)
             gen_event2 = gr.on(triggers=[gen_button2.click, txt_input2.submit],
-                               fn=lambda i, n, m, t: gen_fn(m, t) if (i < n) else None,
-                               inputs=[img_i, num_images, model_choice2, txt_input2], outputs=[o])
+                               fn=lambda i, n, m, t1, t2, n1, n2, n3, n4: gen_fn(m, t1, t2, n1, n2, n3, n4) if (i < n) else None,
+                               inputs=[img_i, num_images, model_choice2, txt_input2, neg_input2,
+                                       height2, width2, steps2, cfg2], outputs=[o])
             o.change(add_gallery, [o, model_choice2, gallery2], [gallery2])
             stop_button2.click(lambda: gr.update(interactive=False), None, stop_button2, cancels=[gen_event2])
 
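One behavioral change worth noting: `infer` used to return a PIL image, and it now saves the client's result to `image.png` and returns the absolute path, which `gr.Image` accepts just as well. Since every concurrent task writes the same filename, the `RLock` serializes the saves but a later task can still overwrite the file before Gradio has cached the earlier result; a per-task temporary file, as in this hypothetical variant, would avoid that:

```python
import os
import tempfile
from pathlib import Path

def save_unique(pil_image):
    # Hypothetical alternative to the shared "image.png": a unique
    # file per result, so parallel tasks cannot clobber each other.
    fd, path = tempfile.mkstemp(suffix=".png")
    os.close(fd)  # PIL reopens the path itself
    pil_image.save(path)
    return str(Path(path).resolve())
```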
externalmod.py CHANGED
@@ -33,6 +33,9 @@ if TYPE_CHECKING:
     from gradio.interface import Interface
 
 
+server_timeout = 600
+
+
 @document()
 def load(
     name: str,
@@ -115,7 +118,7 @@ def from_model(model_name: str, hf_token: str | None, alias: str | None, **kwarg
 
     headers["X-Wait-For-Model"] = "true"
     client = huggingface_hub.InferenceClient(
-        model=model_name, headers=headers, token=hf_token, timeout=120,
+        model=model_name, headers=headers, token=hf_token, timeout=server_timeout,
     )
 
     # For tasks that are not yet supported by the InferenceClient
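
Because `X-Wait-For-Model` is set, a cold model can hold the HTTP request open while it loads, so the client timeout rises from 120 s to the module-level `server_timeout = 600`. Note that app.py's `inference_timeout = 300` is shorter, so the app-side cancellation fires first.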
@@ -365,10 +368,10 @@ def from_model(model_name: str, hf_token: str | None, alias: str | None, **kwarg
     else:
         raise ValueError(f"Unsupported pipeline type: {p}")
 
-    def query_huggingface_inference_endpoints(*data):
+    def query_huggingface_inference_endpoints(*data, **kwargs):
         if preprocess is not None:
             data = preprocess(*data)
-        data = fn(*data)  # type: ignore
+        data = fn(*data, **kwargs)  # type: ignore
         if postprocess is not None:
             data = postprocess(data)  # type: ignore
         return data
 
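The added `**kwargs` is the other half of the app.py change: it lets `gen_fn`'s height/width/steps/cfg flow through the wrapper into the underlying `InferenceClient` call. The closure shape in isolation (a minimal sketch; `make_query` and the usage lines are illustrative, not the module's API):

```python
def make_query(fn, preprocess=None, postprocess=None):
    # Positional data passes through the optional pre/post hooks;
    # keyword arguments go straight to the wrapped client call.
    def query(*data, **kwargs):
        if preprocess is not None:
            data = preprocess(*data)
        result = fn(*data, **kwargs)
        if postprocess is not None:
            result = postprocess(result)
        return result
    return query

# e.g. query = make_query(client.text_to_image)
#      query("a cat", negative_prompt="blurry", guidance_scale=7.0)
```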