Yntec committed on
Commit dd1315b
• 1 Parent(s): 1507364

Upload 2 files

Files changed (2):
  1. app.py +161 -221
  2. externalmod.py +28 -27
app.py CHANGED
@@ -1,221 +1,161 @@
-import gradio as gr
-import os
-import sys
-from pathlib import Path
-from all_models import models
-from externalmod import gr_Interface_load
-from prompt_extend import extend_prompt
-from random import randint
-import asyncio
-from threading import RLock
-lock = RLock()
-HF_TOKEN = os.environ.get("HF_TOKEN") if os.environ.get("HF_TOKEN") else None # If private or gated models aren't used, ENV setting is unnecessary.
-
-inference_timeout = 600
-MAX_SEED = 2**32-1
-current_model = models[0]
-text_gen1 = extend_prompt
-#text_gen1=gr.Interface.load("spaces/phenomenon1981/MagicPrompt-Stable-Diffusion")
-#text_gen1=gr.Interface.load("spaces/Yntec/prompt-extend")
-#text_gen1=gr.Interface.load("spaces/daspartho/prompt-extend")
-#text_gen1=gr.Interface.load("spaces/Omnibus/MagicPrompt-Stable-Diffusion_link")
-
-models2 = [gr_Interface_load(f"models/{m}", live=False, preprocess=True, postprocess=False, hf_token=HF_TOKEN) for m in models]
-
-def text_it1(inputs, text_gen1=text_gen1):
-    go_t1 = text_gen1(inputs)
-    return(go_t1)
-
-def set_model(current_model):
-    current_model = models[current_model]
-    return gr.update(label=(f"{current_model}"))
-
-def send_it1(inputs, model_choice, neg_input, height, width, steps, cfg, seed):
-    output1 = gen_fn(model_choice, inputs, neg_input, height, width, steps, cfg, seed)
-    return (output1)
-
-# https://huggingface.co/docs/api-inference/detailed_parameters
-# https://huggingface.co/docs/huggingface_hub/package_reference/inference_client
-async def infer(model_index, prompt, nprompt="", height=None, width=None, steps=None, cfg=None, seed=-1, timeout=inference_timeout):
-    from pathlib import Path
-    kwargs = {}
-    if height is not None and height >= 256: kwargs["height"] = height
-    if width is not None and width >= 256: kwargs["width"] = width
-    if steps is not None and steps >= 1: kwargs["num_inference_steps"] = steps
-    if cfg is not None and cfg > 0: cfg = kwargs["guidance_scale"] = cfg
-    noise = ""
-    if seed >= 0: kwargs["seed"] = seed
-    else:
-        rand = randint(1, 500)
-        for i in range(rand):
-            noise += " "
-    task = asyncio.create_task(asyncio.to_thread(models2[model_index].fn,
-                               prompt=f'{prompt} {noise}', negative_prompt=nprompt, **kwargs, token=HF_TOKEN))
-    await asyncio.sleep(0)
-    try:
-        result = await asyncio.wait_for(task, timeout=timeout)
-    except (Exception, asyncio.TimeoutError) as e:
-        print(e)
-        print(f"Task timed out: {models2[model_index]}")
-        if not task.done(): task.cancel()
-        result = None
-    if task.done() and result is not None:
-        with lock:
-            png_path = "image.png"
-            result.save(png_path)
-            image = str(Path(png_path).resolve())
-        return image
-    return None
-
-def gen_fn(model_index, prompt, nprompt="", height=None, width=None, steps=None, cfg=None, seed=-1):
-    try:
-        loop = asyncio.new_event_loop()
-        result = loop.run_until_complete(infer(model_index, prompt, nprompt,
-                                               height, width, steps, cfg, seed, inference_timeout))
-    except (Exception, asyncio.CancelledError) as e:
-        print(e)
-        print(f"Task aborted: {models2[model_index]}")
-        result = None
-    finally:
-        loop.close()
-    return result
-
-css="""
-.gradio-container {background-image: linear-gradient(#254150, #1e2f40, #182634) !important;
-color: #ffaa66 !important; font-family: 'IBM Plex Sans', sans-serif !important;}
-h1 {font-size: 6em; color: #ffc99f; margin-top: 30px; margin-bottom: 30px;
-text-shadow: 3px 3px 0 rgba(0, 0, 0, 1) !important;}
-h3 {color: #ffc99f; !important;}
-h4 {display: inline-block; color: #ffffff !important;}
-.wrapper img {font-size: 98% !important; white-space: nowrap !important; text-align: center !important;
-display: inline-block !important; color: #ffffff !important;}
-.wrapper {color: #ffffff !important;}
-.text-gray-500 {color: #ffc99f !important;}
-.gr-box {background-image: linear-gradient(#182634, #1e2f40, #254150) !important;
-border-top-color: #000000 !important; border-right-color: #ffffff !important;
-border-bottom-color: #ffffff !important; border-left-color: #000000 !important;}
-.gr-input {color: #ffc99f; !important; background-color: #254150 !important;}
-:root {--neutral-100: #000000 !important;}
-.gr-button {color: #ffffff !important; text-shadow: 1px 1px 0 rgba(0, 0, 0, 1) !important;
-background-image: linear-gradient(#76635a, #d2a489) !important; border-radius: 24px !important;
-border: solid 1px !important; border-top-color: #ffc99f !important; border-right-color: #000000 !important;
-border-bottom-color: #000000 !important; border-left-color: #ffc99f !important; padding: 6px 30px;}
-.gr-button:active {color: #ffc99f !important; font-size: 98% !important;
-text-shadow: 0px 0px 0 rgba(0, 0, 0, 1) !important; background-image: linear-gradient(#d2a489, #76635a) !important;
-border-top-color: #000000 !important; border-right-color: #ffffff !important;
-border-bottom-color: #ffffff !important; border-left-color: #000000 !important;}
-.gr-button:hover {filter: brightness(130%);}
-"""
-
-with gr.Blocks(theme='John6666/YntecDark', fill_width=True, css=css) as myface:
-    gr.HTML(f"""
-<div style="text-align: center; max-width: 1200px; margin: 0 auto;">
-<div>
-<style>
-h1 {{
-font-size: 6em;
-color: #ffc99f;
-margin-top: 30px;
-margin-bottom: 30px;
-text-shadow: 3px 3px 0 rgba(0, 0, 0, 1) !important;
-}}
-h3 {{
-color: #ffc99f; !important;
-}}
-h4 {{
-display: inline-block;
-color: #ffffff !important;
-}}
-.wrapper img {{
-font-size: 98% !important;
-white-space: nowrap !important;
-text-align: center !important;
-display: inline-block !important;
-color: #ffffff !important;
-}}
-.wrapper {{
-color: #ffffff !important;
-}}
-.gradio-container {{
-background-image: linear-gradient(#254150, #1e2f40, #182634) !important;
-color: #ffaa66 !important;
-font-family: 'IBM Plex Sans', sans-serif !important;
-}}
-.text-gray-500 {{
-color: #ffc99f !important;
-}}
-.gr-box {{
-background-image: linear-gradient(#182634, #1e2f40, #254150) !important;
-border-top-color: #000000 !important;
-border-right-color: #ffffff !important;
-border-bottom-color: #ffffff !important;
-border-left-color: #000000 !important;
-}}
-.gr-input {{
-color: #ffc99f; !important;
-background-color: #254150 !important;
-}}
-:root {{
---neutral-100: #000000 !important;
-}}
-</style>
-<body>
-<div class="center"><h1>Blitz Diffusion</h1>
-</div>
-</body>
-</div>
-<p style="margin-bottom: 1px; color: #ffaa66;">
-<h3>{int(len(models))} Stable Diffusion models, but why? For your enjoyment!</h3></p>
-<br><div class="wrapper">9.3 <img src="https://huggingface.co/Yntec/DucHaitenLofi/resolve/main/NEW.webp" alt="NEW!" style="width:32px;height:16px;">This has become a legacy backup copy of old <u><a href="https://huggingface.co/spaces/Yntec/ToyWorld">ToyWorld</a></u>'s UI! Newer models added daily over there! 25 new models since last update!</div>
-<p style="margin-bottom: 1px; font-size: 98%">
-<br><h4>If a model is already loaded each new image takes less than <b>10</b> seconds to generate!</h4></p>
-<p style="margin-bottom: 1px; color: #ffffff;">
-<br><div class="wrapper">Generate 6 images from 1 prompt at the <u><a href="https://huggingface.co/spaces/Yntec/PrintingPress">PrintingPress</a></u>, and use 6 different models at <u><a href="https://huggingface.co/spaces/Yntec/diffusion80xx">Huggingface Diffusion!</a></u>!
-</p></p>
-</div>
-""", elem_classes="gr-box")
-    with gr.Row():
-        with gr.Column(scale=100):
-            #Model selection dropdown
-            model_name1 = gr.Dropdown(label="Select Model", choices=[m for m in models], type="index",
-                                      value=current_model, interactive=True, elem_classes=["gr-box", "gr-input"])
-    with gr.Row():
-        with gr.Column(scale=100):
-            with gr.Group():
-                magic1 = gr.Textbox(label="Your Prompt", lines=4, elem_classes=["gr-box", "gr-input"]) #Positive
-                with gr.Accordion("Advanced", open=False, visible=True):
-                    neg_input = gr.Textbox(label='Negative prompt', lines=1, elem_classes=["gr-box", "gr-input"])
-                    with gr.Row():
-                        width = gr.Slider(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0, elem_classes=["gr-box", "gr-input"])
-                        height = gr.Slider(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0, elem_classes=["gr-box", "gr-input"])
-                    with gr.Row():
-                        steps = gr.Slider(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0, elem_classes=["gr-box", "gr-input"])
-                        cfg = gr.Slider(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0, elem_classes=["gr-box", "gr-input"])
-                        seed = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1, elem_classes=["gr-box", "gr-input"])
-                run = gr.Button("Generate Image", elem_classes="gr-button")
-
-    with gr.Row():
-        with gr.Column():
-            output1 = gr.Image(label=(f"{current_model}"), show_download_button=True,
-                               interactive=False, show_share_button=False, format=".png", elem_classes="gr-box")
-
-    with gr.Row():
-        with gr.Column(scale=50):
-            input_text=gr.Textbox(label="Use this box to extend an idea automagically, by typing some words and clicking Extend Idea", lines=2, elem_classes=["gr-box", "gr-input"])
-            see_prompts=gr.Button("Extend Idea -> overwrite the contents of the `Your Prompt´ box above", elem_classes="gr-button")
-            use_short=gr.Button("Copy the contents of this box to the `Your Prompt´ box above", elem_classes="gr-button")
-    def short_prompt(inputs):
-        return (inputs)
-
-    model_name1.change(set_model, inputs=model_name1, outputs=[output1])
-    gr.on(
-        triggers=[run.click, magic1.submit],
-        fn=send_it1,
-        inputs=[magic1, model_name1, neg_input, height, width, steps, cfg, seed],
-        outputs=[output1],
-    )
-    use_short.click(short_prompt, inputs=[input_text], outputs=magic1)
-    see_prompts.click(text_it1, inputs=[input_text], outputs=magic1)
-
-myface.queue(default_concurrency_limit=200, max_size=200)
-myface.launch(show_api=False, max_threads=400)
 
+import gradio as gr
+import os
+import sys
+from pathlib import Path
+from all_models import models
+from externalmod import gr_Interface_load
+from prompt_extend import extend_prompt
+from random import randint
+import asyncio
+from threading import RLock
+lock = RLock()
+HF_TOKEN = os.environ.get("HF_TOKEN") if os.environ.get("HF_TOKEN") else None # If private or gated models aren't used, ENV setting is unnecessary.
+
+inference_timeout = 300
+MAX_SEED = 2**32-1
+current_model = models[0]
+text_gen1 = extend_prompt
+
+models2 = [gr_Interface_load(f"models/{m}", live=False, preprocess=True, postprocess=False, hf_token=HF_TOKEN) for m in models]
+
+def text_it1(inputs, text_gen1=text_gen1):
+    go_t1 = text_gen1(inputs)
+    return(go_t1)
+
+def set_model(current_model):
+    current_model = models[current_model]
+    return gr.update(label=(f"{current_model}"))
+
+def send_it1(inputs, model_choice, neg_input, height, width, steps, cfg, seed):
+    output1 = gen_fn(model_choice, inputs, neg_input, height, width, steps, cfg, seed)
+    return (output1)
+
+# https://huggingface.co/docs/api-inference/detailed_parameters
+# https://huggingface.co/docs/huggingface_hub/package_reference/inference_client
+async def infer(model_index, prompt, nprompt="", height=None, width=None, steps=None, cfg=None, seed=-1, timeout=inference_timeout):
+    from pathlib import Path
+    kwargs = {}
+    if height is not None and height >= 256: kwargs["height"] = height
+    if width is not None and width >= 256: kwargs["width"] = width
+    if steps is not None and steps >= 1: kwargs["num_inference_steps"] = steps
+    if cfg is not None and cfg > 0: cfg = kwargs["guidance_scale"] = cfg
+    noise = ""
+    if seed >= 0: kwargs["seed"] = seed
+    else:
+        rand = randint(1, 500)
+        for i in range(rand):
+            noise += " "
+    task = asyncio.create_task(asyncio.to_thread(models2[model_index].fn,
+                               prompt=f'{prompt} {noise}', negative_prompt=nprompt, **kwargs, token=HF_TOKEN))
+    await asyncio.sleep(0)
+    try:
+        result = await asyncio.wait_for(task, timeout=timeout)
+    except asyncio.TimeoutError as e:
+        print(e)
+        print(f"Task timed out: {models2[model_index]}")
+        if not task.done(): task.cancel()
+        result = None
+        raise Exception(f"Task timed out: {models2[model_index]}")
+    except Exception as e:
+        print(e)
+        if not task.done(): task.cancel()
+        result = None
+        raise Exception(e)
+    if task.done() and result is not None and not isinstance(result, tuple):
+        with lock:
+            png_path = "image.png"
+            result.save(png_path)
+            image = str(Path(png_path).resolve())
+        return image
+    return None
+
+def gen_fn(model_index, prompt, nprompt="", height=None, width=None, steps=None, cfg=None, seed=-1):
+    try:
+        loop = asyncio.new_event_loop()
+        result = loop.run_until_complete(infer(model_index, prompt, nprompt,
+                                               height, width, steps, cfg, seed, inference_timeout))
+    except (Exception, asyncio.CancelledError) as e:
+        print(e)
+        print(f"Task aborted: {models2[model_index]}")
+        result = None
+        raise gr.Error(f"Task aborted: {models2[model_index]}, Error: {e}")
+    finally:
+        loop.close()
+    return result
+
+css="""
+.gradio-container {background-image: linear-gradient(#254150, #1e2f40, #182634) !important;
+color: #ffaa66 !important; font-family: 'IBM Plex Sans', sans-serif !important;}
+h1 {font-size: 6em; color: #ffc99f; margin-top: 30px; margin-bottom: 30px;
+text-shadow: 3px 3px 0 rgba(0, 0, 0, 1) !important;}
+h3 {color: #ffc99f; !important;}
+h4 {display: inline-block; color: #ffffff !important;}
+.wrapper img {font-size: 98% !important; white-space: nowrap !important; text-align: center !important;
+display: inline-block !important; color: #ffffff !important;}
+.wrapper {color: #ffffff !important;}
+.gr-box {background-image: linear-gradient(#182634, #1e2f40, #254150) !important;
+border-top-color: #000000 !important; border-right-color: #ffffff !important;
+border-bottom-color: #ffffff !important; border-left-color: #000000 !important;}
+"""
+
+with gr.Blocks(theme='John6666/YntecDark', fill_width=True, css=css) as myface:
+    gr.HTML(f"""
+<div style="text-align: center; max-width: 1200px; margin: 0 auto;">
+<div class="center"><h1>Blitz Diffusion</h1></div>
+<p style="margin-bottom: 1px; color: #ffaa66;">
+<h3>{int(len(models))} Stable Diffusion models, but why? For your enjoyment!</h3></p>
+<br><div class="wrapper">9.3 <img src="https://huggingface.co/Yntec/DucHaitenLofi/resolve/main/NEW.webp" alt="NEW!" style="width:32px;height:16px;">This has become a legacy backup copy of old <u><a href="https://huggingface.co/spaces/Yntec/ToyWorld">ToyWorld</a></u>'s UI! Newer models added daily over there! 25 new models since last update!</div>
+<p style="margin-bottom: 1px; font-size: 98%">
+<br><h4>If a model is already loaded each new image takes less than <b>10</b> seconds to generate!</h4></p>
+<p style="margin-bottom: 1px; color: #ffffff;">
+<br><div class="wrapper">Generate 6 images from 1 prompt at the <u><a href="https://huggingface.co/spaces/Yntec/PrintingPress">PrintingPress</a></u>, and use 6 different models at <u><a href="https://huggingface.co/spaces/Yntec/diffusion80xx">Huggingface Diffusion!</a></u>!
+</p></p></div>
+""", elem_classes="gr-box")
+    with gr.Row():
+        with gr.Column(scale=100):
+            # Model selection dropdown
+            model_name1 = gr.Dropdown(label="Select Model", choices=[m for m in models], type="index",
+                                      value=current_model, interactive=True, elem_classes=["gr-box", "gr-input"])
+    with gr.Row():
+        with gr.Column(scale=100):
+            with gr.Group():
+                magic1 = gr.Textbox(label="Your Prompt", lines=4, elem_classes=["gr-box", "gr-input"]) #Positive
+                with gr.Accordion("Advanced", open=False, visible=True):
+                    neg_input = gr.Textbox(label='Negative prompt', lines=1, elem_classes=["gr-box", "gr-input"])
+                    with gr.Row():
+                        width = gr.Slider(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0, elem_classes=["gr-box", "gr-input"])
+                        height = gr.Slider(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0, elem_classes=["gr-box", "gr-input"])
+                    with gr.Row():
+                        steps = gr.Slider(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0, elem_classes=["gr-box", "gr-input"])
+                        cfg = gr.Slider(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0, elem_classes=["gr-box", "gr-input"])
+                        seed = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1, elem_classes=["gr-box", "gr-input"])
+                run = gr.Button("Generate Image", variant="primary", elem_classes="gr-button")
+
+    with gr.Row():
+        with gr.Column():
+            output1 = gr.Image(label=(f"{current_model}"), show_download_button=True,
+                               interactive=False, show_share_button=False, format=".png", elem_classes="gr-box")
+
+    with gr.Row():
+        with gr.Column(scale=50):
+            input_text=gr.Textbox(label="Use this box to extend an idea automagically, by typing some words and clicking Extend Idea", lines=2, elem_classes=["gr-box", "gr-input"])
+            see_prompts=gr.Button("Extend Idea -> overwrite the contents of the `Your Prompt´ box above", variant="primary", elem_classes="gr-button")
+            use_short=gr.Button("Copy the contents of this box to the `Your Prompt´ box above", variant="primary", elem_classes="gr-button")
+    def short_prompt(inputs):
+        return (inputs)
+
+    model_name1.change(set_model, inputs=model_name1, outputs=[output1])
+    gr.on(
+        triggers=[run.click, magic1.submit],
+        fn=send_it1,
+        inputs=[magic1, model_name1, neg_input, height, width, steps, cfg, seed],
+        outputs=[output1],
+        concurrency_limit=None,
+        queue=False,
+    )
+    use_short.click(short_prompt, inputs=[input_text], outputs=magic1, queue=False)
+    see_prompts.click(text_it1, inputs=[input_text], outputs=magic1, queue=False)
+
+myface.queue(default_concurrency_limit=200, max_size=200)
+myface.launch(show_api=False, max_threads=400)
+# https://github.com/gradio-app/gradio/issues/6339
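
The substantive app.py change is in error handling: the old `infer` swallowed failures and returned `None`, while the new version re-raises on timeout or model error and `gen_fn` converts the failure into a `gr.Error` so it surfaces in the UI. A minimal sketch of that pattern, with a hypothetical `model_fn` standing in for `models2[i].fn`:

import asyncio
import gradio as gr

def model_fn(prompt: str):
    # Hypothetical stand-in for models2[i].fn (a blocking inference call).
    raise RuntimeError("model is cold-starting")

async def infer(prompt: str, timeout: float = 300):
    # Run the blocking call in a worker thread so it can be awaited with a timeout.
    task = asyncio.create_task(asyncio.to_thread(model_fn, prompt))
    try:
        return await asyncio.wait_for(task, timeout=timeout)
    except asyncio.TimeoutError:
        if not task.done():
            task.cancel()
        raise Exception("Task timed out")  # propagate instead of returning None

def gen_fn(prompt: str):
    # Gradio calls this from a worker thread, so a fresh event loop is created per call.
    loop = asyncio.new_event_loop()
    try:
        return loop.run_until_complete(infer(prompt))
    except Exception as e:
        raise gr.Error(f"Task aborted: {e}")  # rendered as an error message in the UI
    finally:
        loop.close()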
externalmod.py CHANGED
@@ -9,7 +9,7 @@ import re
 import tempfile
 import warnings
 from pathlib import Path
-from typing import TYPE_CHECKING, Callable
+from typing import TYPE_CHECKING, Callable, Literal
 
 import httpx
 import huggingface_hub

@@ -33,6 +33,7 @@ if TYPE_CHECKING:
     from gradio.interface import Interface
 
 
+HF_TOKEN = os.environ.get("HF_TOKEN") if os.environ.get("HF_TOKEN") else None # If private or gated models aren't used, ENV setting is unnecessary.
 server_timeout = 600
 

@@ -40,7 +41,7 @@ server_timeout = 600
 def load(
     name: str,
     src: str | None = None,
-    hf_token: str | None = None,
+    hf_token: str | Literal[False] | None = None,
     alias: str | None = None,
     **kwargs,
 ) -> Blocks:

@@ -51,7 +52,7 @@ def load(
     Parameters:
         name: the name of the model (e.g. "gpt2" or "facebook/bart-base") or space (e.g. "flax-community/spanish-gpt2"), can include the `src` as prefix (e.g. "models/facebook/bart-base")
         src: the source of the model: `models` or `spaces` (or leave empty if source is provided as a prefix in `name`)
-        hf_token: optional access token for loading private Hugging Face Hub models or spaces. Find your token here: https://huggingface.co/settings/tokens. Warning: only provide this if you are loading a trusted private Space as it can be read by the Space you are loading.
+        hf_token: optional access token for loading private Hugging Face Hub models or spaces. Will default to the locally saved token if not provided. Pass `token=False` if you don't want to send your token to the server. Find your token here: https://huggingface.co/settings/tokens. Warning: only provide a token if you are loading a trusted private Space as it can be read by the Space you are loading.
         alias: optional string used as the name of the loaded model instead of the default name (only applies if loading a Space running Gradio 2.x)
     Returns:
         a Gradio Blocks object for the given model

@@ -68,7 +69,7 @@ def load(
 def load_blocks_from_repo(
     name: str,
     src: str | None = None,
-    hf_token: str | None = None,
+    hf_token: str | Literal[False] | None = None,
     alias: str | None = None,
     **kwargs,
 ) -> Blocks:

@@ -92,7 +93,7 @@ def load_blocks_from_repo(
     if src.lower() not in factory_methods:
         raise ValueError(f"parameter: src must be one of {factory_methods.keys()}")
 
-    if hf_token is not None:
+    if hf_token is not None and hf_token is not False:
         if Context.hf_token is not None and Context.hf_token != hf_token:
             warnings.warn(
                 """You are loading a model/Space with a different access token than the one you used to load a previous model/Space. This is not recommended, as it may cause unexpected behavior."""

@@ -103,12 +104,16 @@ def load_blocks_from_repo(
     return blocks
 
 
-def from_model(model_name: str, hf_token: str | None, alias: str | None, **kwargs):
+def from_model(
+    model_name: str, hf_token: str | Literal[False] | None, alias: str | None, **kwargs
+):
     model_url = f"https://huggingface.co/{model_name}"
     api_url = f"https://api-inference.huggingface.co/models/{model_name}"
     print(f"Fetching model from: {model_url}")
 
-    headers = {"Authorization": f"Bearer {hf_token}"} if hf_token is not None else {}
+    headers = (
+        {} if hf_token in [False, None] else {"Authorization": f"Bearer {hf_token}"}
+    )
     response = httpx.request("GET", api_url, headers=headers)
     if response.status_code != 200:
         raise ModelNotFoundError(

@@ -371,7 +376,11 @@ def from_model(model_name: str, hf_token: str | None, alias: str | None, **kwarg
     def query_huggingface_inference_endpoints(*data, **kwargs):
         if preprocess is not None:
             data = preprocess(*data)
-        data = fn(*data, **kwargs)  # type: ignore
+        try:
+            data = fn(*data, **kwargs) # type: ignore
+        except huggingface_hub.utils.HfHubHTTPError as e:
+            if "429" in str(e):
+                raise TooManyRequestsError() from e
         if postprocess is not None:
             data = postprocess(data)  # type: ignore
         return data

@@ -383,7 +392,7 @@ def from_model(model_name: str, hf_token: str | None, alias: str | None, **kwarg
         "inputs": inputs,
         "outputs": outputs,
         "title": model_name,
-        # "examples": examples,
+        #"examples": examples,
     }
 
     kwargs = dict(interface_info, **kwargs)

@@ -394,19 +403,12 @@ def from_model(model_name: str, hf_token: str | None, alias: str | None, **kwarg
 def from_spaces(
     space_name: str, hf_token: str | None, alias: str | None, **kwargs
 ) -> Blocks:
-    client = Client(
-        space_name,
-        hf_token=hf_token,
-        download_files=False,
-        _skip_components=False,
-    )
-
     space_url = f"https://huggingface.co/spaces/{space_name}"
 
     print(f"Fetching Space from: {space_url}")
 
     headers = {}
-    if hf_token is not None:
+    if hf_token not in [False, None]:
         headers["Authorization"] = f"Bearer {hf_token}"
 
     iframe_url = (

@@ -443,8 +445,7 @@ def from_spaces(
         "Blocks or Interface locally. You may find this Guide helpful: "
         "https://gradio.app/using_blocks_like_functions/"
     )
-    if client.app_version < version.Version("4.0.0b14"):
-        return from_spaces_blocks(space=space_name, hf_token=hf_token)
+    return from_spaces_blocks(space=space_name, hf_token=hf_token)
 
 
 def from_spaces_blocks(space: str, hf_token: str | None) -> Blocks:

@@ -489,7 +490,7 @@ def from_spaces_interface(
     config = external_utils.streamline_spaces_interface(config)
     api_url = f"{iframe_url}/api/predict/"
     headers = {"Content-Type": "application/json"}
-    if hf_token is not None:
+    if hf_token not in [False, None]:
         headers["Authorization"] = f"Bearer {hf_token}"
 
     # The function should call the API with preprocessed data

@@ -529,7 +530,7 @@ def gr_Interface_load(
     src: str | None = None,
     hf_token: str | None = None,
     alias: str | None = None,
-    **kwargs,
+    **kwargs, # ignore
 ) -> Blocks:
     try:
         return load_blocks_from_repo(name, src, hf_token, alias)

@@ -543,8 +544,8 @@ def list_uniq(l):
 
 
 def get_status(model_name: str):
-    from huggingface_hub import InferenceClient
-    client = InferenceClient(timeout=10)
+    from huggingface_hub import AsyncInferenceClient
+    client = AsyncInferenceClient(token=HF_TOKEN, timeout=10)
     return client.get_model_status(model_name)

@@ -563,22 +564,22 @@ def is_loadable(model_name: str, force_gpu: bool = False):
 
 
 def find_model_list(author: str="", tags: list[str]=[], not_tag="", sort: str="last_modified", limit: int=30, force_gpu=False, check_status=False):
     from huggingface_hub import HfApi
-    api = HfApi()
+    api = HfApi(token=HF_TOKEN)
     default_tags = ["diffusers"]
     if not sort: sort = "last_modified"
     limit = limit * 20 if check_status and force_gpu else limit * 5
     models = []
     try:
-        model_infos = api.list_models(author=author, task="text-to-image",
+        model_infos = api.list_models(author=author, #task="text-to-image",
                                       tags=list_uniq(default_tags + tags), cardData=True, sort=sort, limit=limit)
     except Exception as e:
         print(f"Error: Failed to list models.")
         print(e)
         return models
     for model in model_infos:
-        if not model.private and not model.gated:
+        if not model.private and not model.gated or HF_TOKEN is not None:
             loadable = is_loadable(model.id, force_gpu) if check_status else True
             if not_tag and not_tag in model.tags or not loadable: continue
             models.append(model.id)
             if len(models) == limit: break
-    return models
+    return models
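
The externalmod.py changes mostly thread an `hf_token: str | Literal[False] | None` convention through the loaders: `None` falls back to any locally saved token, while `False` means no token is ever sent, so every header check becomes `hf_token not in [False, None]`. A small sketch of that rule (the `auth_headers` helper is illustrative, not part of the module):

from typing import Literal

def auth_headers(hf_token: str | Literal[False] | None) -> dict[str, str]:
    # Both False and None mean "send no Authorization header" at this point;
    # upstream, None may still be resolved to a locally saved token.
    return {} if hf_token in [False, None] else {"Authorization": f"Bearer {hf_token}"}

assert auth_headers(None) == {}
assert auth_headers(False) == {}
assert auth_headers("hf_xxx") == {"Authorization": "Bearer hf_xxx"}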