Update main.py

main.py CHANGED
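In summary, this commit is mostly cleanup: it strips stray blank lines throughout main.py, adds `import tempfile`, fixes the indentation of the `Image.open(io.BytesIO(image_bytes))` line and adds the previously missing `return image` in `generate()`, rewrites `generate_image()` to save the result to a temporary file, display it, and delete the file, and closes the commented-out example list with `])` instead of `)`.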
@@ -13,15 +13,10 @@ else:
     print(gpu_info)
     is_gpu = True
     print(is_gpu)
-
 from IPython.display import clear_output
-
-
-
 def check_enviroment():
     try:
         import torch
-
         print("Enviroment is already installed.")
     except ImportError:
         print("Enviroment not found. Installing...")
@@ -33,14 +28,9 @@ def check_enviroment():
         os.system("pip install python-dotenv")
         # Clear the output
         clear_output()
-
         print("Enviroment installed successfully.")
-
-
 # Call the function to check and install Packages if necessary
 check_enviroment()
-
-
 from IPython.display import clear_output
 import os
 import gradio as gr
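Note: the `check_enviroment` function above (spelling kept from the source) uses the try/`except ImportError` pattern to install packages on first run via `os.system`. A minimal standalone sketch of the same idea; the switch to `importlib` and `subprocess` is my own substitution, not part of this commit:

```python
import importlib.util
import subprocess
import sys

def ensure_package(module_name, pip_name=None):
    """Install pip_name via pip if module_name cannot be imported."""
    if importlib.util.find_spec(module_name) is None:
        print(f"{module_name} not found. Installing...")
        # sys.executable guarantees we install into the running interpreter
        subprocess.check_call([sys.executable, "-m", "pip", "install", pip_name or module_name])
    else:
        print(f"{module_name} is already installed.")

ensure_package("torch")
ensure_package("dotenv", "python-dotenv")  # import name differs from pip name
```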
@@ -49,16 +39,14 @@ import PIL
 import base64
 import io
 import torch
+import tempfile # Added for temporary file management
+
 # SDXL
 from diffusers import UNet2DConditionModel, DiffusionPipeline, LCMScheduler
 #requests
 import requests
 import random
 from PIL import Image
-
-
-
-
 # Get the current directory
 current_dir = os.getcwd()
 model_path = os.path.join(current_dir)
@@ -67,13 +55,10 @@ cache_path = os.path.join(current_dir, "cache")
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "1024"))
 SECRET_TOKEN = os.getenv("SECRET_TOKEN", "default_secret")
-
 API_TOKEN = os.environ.get("HF_READ_TOKEN")
 headers = {"Authorization": f"Bearer {API_TOKEN}"}
-
 # Uncomment the following line if you are using PyTorch 1.10 or later
 # os.environ["TORCH_USE_CUDA_DSA"] = "1"
-
 if is_gpu:
     # Uncomment the following line if you want to enable CUDA launch blocking
     os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
@@ -85,7 +70,6 @@ current_dir = os.getcwd()
 model_path = os.path.join(current_dir)
 # Set the cache path
 cache_path = os.path.join(current_dir, "cache")
-
 def load_pipeline(use_cuda):
     device = "cuda" if use_cuda and torch.cuda.is_available() else "cpu"
     if device == "cuda":
@@ -99,12 +83,11 @@ def load_pipeline(use_cuda):
     pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
     pipe = pipe.to(device)
     return pipe
-
 if is_sdxl:
     torch_dtype=torch.float16
     variant="fp16"
     unet = UNet2DConditionModel.from_pretrained(
-        "latent-consistency/lcm-sdxl",
+        "latent-consistency/lcm-sdxl",
         torch_dtype=torch_dtype,
         variant=variant,
         cache_dir=cache_path,
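Note: the hunk above cuts off inside the `UNet2DConditionModel.from_pretrained` call (the `-`/`+` pair on the model ID appears to be a whitespace-only change). For reference, the standard diffusers LCM-SDXL recipe that this code appears to follow loads the distilled UNet into the SDXL base pipeline and swaps in `LCMScheduler`. A minimal sketch; the base-model ID and the 4-step/8.0-guidance settings come from the diffusers documentation, not from this diff:

```python
import torch
from diffusers import DiffusionPipeline, UNet2DConditionModel, LCMScheduler

# Distilled LCM UNet, then the SDXL base pipeline built around it
unet = UNet2DConditionModel.from_pretrained(
    "latent-consistency/lcm-sdxl", torch_dtype=torch.float16, variant="fp16"
)
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    unet=unet, torch_dtype=torch.float16, variant="fp16",
).to("cuda")
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

# LCM needs very few steps; guidance around 8.0 per the diffusers example
image = pipe("a photo of an astronaut", num_inference_steps=4, guidance_scale=8.0).images[0]
```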
@@ -135,12 +118,9 @@ if is_ssd:
     # load and fuse
     pipe.load_lora_weights("latent-consistency/lcm-lora-ssd-1b")
     pipe.fuse_lora()
-
 if is_sdxl_turbo:
     use_cuda=is_gpu
     pipe = load_pipeline(use_cuda)
-
-
 def generate(
     prompt: str,
     negative_prompt: str = "",
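Note: the `load_lora_weights` / `fuse_lora` pair above is the standard diffusers LCM-LoRA flow: fusing merges the adapter into the base weights so inference pays no LoRA overhead. A hedged sketch against SSD-1B; the scheduler swap is implied by LCM but not visible in this hunk, and the guidance value follows the LCM-LoRA docs:

```python
import torch
from diffusers import DiffusionPipeline, LCMScheduler

pipe = DiffusionPipeline.from_pretrained(
    "segmind/SSD-1B", torch_dtype=torch.float16, variant="fp16"
).to("cuda")
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

# Load the LCM-LoRA adapter and fuse it into the base weights
pipe.load_lora_weights("latent-consistency/lcm-lora-ssd-1b")
pipe.fuse_lora()

image = pipe("a cinematic robot portrait", num_inference_steps=4, guidance_scale=1.0).images[0]
```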
@@ -155,10 +135,7 @@ def generate(
         raise gr.Error(
             f"Invalid secret token. Please fork the original space if you want to use it for yourself."
         )
-
     generator = torch.Generator().manual_seed(seed)
-
-
     if not use_request:
         image = pipe(
             prompt=prompt,
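Note: `torch.Generator().manual_seed(seed)` is what makes generation reproducible here; the same seed handed to the same pipeline yields the same image. A tiny self-contained demonstration of the mechanism:

```python
import torch

def sample(seed: int) -> torch.Tensor:
    # A fresh generator seeded identically always produces identical noise,
    # which is exactly what diffusers consumes via the `generator=` argument.
    g = torch.Generator().manual_seed(seed)
    return torch.randn(4, generator=g)

assert torch.equal(sample(42), sample(42))      # deterministic
assert not torch.equal(sample(42), sample(43))  # the seed changes the draw
```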
@@ -170,7 +147,6 @@ def generate(
             generator=generator,
             output_type="pil",
         ).images[0]
-
     else:
         API_URL = "https://api-inference.huggingface.co/models/segmind/SSD-1B"
         payload = {
@@ -180,19 +156,19 @@ def generate(
             "cfg_scale": guidance_scale,
             "seed": seed if seed is not None else random.randint(-1, 2147483647)
         }
-
         image_bytes = requests.post(API_URL, headers=headers, json=payload).content
-    image = Image.open(io.BytesIO(image_bytes))
-
-
-
+        image = Image.open(io.BytesIO(image_bytes))
+    return image
 clear_output()
-
 from IPython.display import display
 
-
+# MODIFIED FUNCTION
 def generate_image(prompt="A beautiful and sexy girl",secret_token="default_secret"):
-
+    """
+    Generates an image, displays it, and immediately deletes the temporary file
+    to prevent storing images on disk.
+    """
+    # Generate the image in-memory using the prompt
     generated_image = generate(
         prompt=prompt,
         negative_prompt="",
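Note: the `use_request` branch above calls the hosted Inference API instead of the local pipeline, and this hunk fixes its indentation and adds the missing `return image`. A standalone sketch of that request path with minimal error handling added; `inputs` is the documented payload field, while the extra keys in the diff (`cfg_scale`, `seed`) may be model-specific:

```python
import io
import os
import requests
from PIL import Image

API_URL = "https://api-inference.huggingface.co/models/segmind/SSD-1B"
headers = {"Authorization": f"Bearer {os.environ.get('HF_READ_TOKEN', '')}"}

def query_image(prompt: str) -> Image.Image:
    resp = requests.post(API_URL, headers=headers, json={"inputs": prompt}, timeout=120)
    resp.raise_for_status()  # surface 4xx/5xx instead of feeding an error body to PIL
    return Image.open(io.BytesIO(resp.content))

# query_image("a watercolor lighthouse").save("out.png")
```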
@@ -203,9 +179,27 @@ def generate_image(prompt="A beautiful and sexy girl",secret_token="default_secret"):
         num_inference_steps=4,
         secret_token=secret_token
     )
-
-
-
+
+    # Create a temporary file to save the image.
+    # 'delete=False' allows us to manage its deletion manually.
+    temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
+    temp_filepath = temp_file.name
+
+    try:
+        # Save the generated image to the temporary file
+        generated_image.save(temp_filepath)
+
+        # Display the image (this displays the in-memory object, not the file)
+        print("Displaying image...")
+        display(generated_image)
+        print("Image displayed.")
+
+    finally:
+        # This block ensures the file is always closed and deleted,
+        # even if errors occur.
+        temp_file.close()
+        os.remove(temp_filepath)
+        print(f"Temporary image file '{temp_filepath}' has been deleted.")
 
 if not run_api:
     secret_token = gr.Text(
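Note: this hunk is the substantive change of the commit. `generate_image` now writes the result to a `NamedTemporaryFile(delete=False)`, displays the in-memory copy, and removes the file in `finally` so nothing persists on disk. The pattern in isolation, runnable without the rest of the app:

```python
import os
import tempfile
from PIL import Image

def show_and_discard(image: Image.Image) -> None:
    # delete=False lets the file survive close() so we control its lifetime
    tmp = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
    path = tmp.name
    try:
        image.save(path)
        # ... hand `path` to whatever needs a real file on disk ...
    finally:
        tmp.close()
        os.remove(path)  # runs even if save() or the consumer raises

show_and_discard(Image.new("RGB", (64, 64), "navy"))
```

One caveat worth knowing: on Windows a `NamedTemporaryFile` that is still open generally cannot be reopened by name, so saving into it can fail there; the committed code shares that caveat.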
@@ -228,7 +222,6 @@ if not run_api:
         visible=True,
     )
     seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
-
     width = gr.Slider(
         label="Width",
         minimum=256,
@@ -266,11 +259,8 @@ if not run_api:
         title="Image Generator",
         description="Generate images based on prompts.",
     )
-
     #iface.launch()
     iface.queue(max_size=32).launch(server_name="0.0.0.0", server_port=7860) # Docker
-
-
 if run_api:
     with gr.Blocks() as demo:
         gr.HTML(
@@ -302,7 +292,6 @@ if run_api:
             visible=True,
         )
         seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
-
         width = gr.Slider(
             label="Width",
             minimum=256,
@@ -323,7 +312,6 @@ if run_api:
         num_inference_steps = gr.Slider(
             label="Number of inference steps", minimum=1, maximum=8, step=1, value=4
         )
-
         inputs = [
             prompt,
             negative_prompt,
@@ -340,16 +328,12 @@ if run_api:
             outputs=result,
             api_name="run",
         )
-
         # demo.queue(max_size=32).launch()
         # Launch the Gradio app with multiple workers and debug mode enabled
-        # demo.queue(max_size=32).launch(debug=True)
+        # demo.queue(max_size=32).launch(debug=True)
+        # For Standard
        demo.queue(max_size=32).launch(server_name="0.0.0.0", server_port=7860) # Docker
-
-
 '''
-
-
 import gradio as gr
 import subprocess
 def run_command(command):
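Note: because the Blocks path registers the click handler with `api_name="run"`, the launched app exposes a programmatic endpoint. A hedged sketch of probing it with `gradio_client`; the positional arguments for a real call must line up with the `inputs` list wired up above (prompt, negative_prompt, secret token, seed, width, height, and so on), so inspect the live schema first:

```python
from gradio_client import Client

# Assumes the Space is running locally on the Docker port used above
client = Client("http://localhost:7860/")

# Prints the signature of every named endpoint, including /run,
# so you know exactly which positional arguments predict() expects.
client.view_api()
```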
@@ -369,7 +353,7 @@ iface = gr.Interface(
         ["ls"],
         ["pwd"],
         ["echo 'Hello, Gradio!'"],
-        ["python --version"]
-    )
+        ["python --version"]
+    ])
 iface.launch(server_name="0.0.0.0", server_port=7860)
 '''
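Note: the block this hunk edits sits inside a `'''` string, so it never runs; the change only fixes the examples list to close with `])`. If it were ever enabled, piping a Gradio textbox straight into a shell is an injection risk. A safer sketch of the hypothetical `run_command` helper, splitting the command instead of using `shell=True`:

```python
import shlex
import subprocess

def run_command(command: str) -> str:
    # shlex.split + shell=False avoids handing raw user input to a shell
    result = subprocess.run(shlex.split(command), capture_output=True, text=True)
    return result.stdout or result.stderr

print(run_command("python --version"))
```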