Update app.py
app.py CHANGED
@@ -4,10 +4,16 @@ import random
 from diffusers import DiffusionPipeline
 import torch
 import time
+import psutil
+
+# Get the number of physical CPU cores (excluding hyperthreads)
+NUM_CPU_CORES = psutil.cpu_count(logical=False)
+
+# Cap the number of threads to the available physical cores
+MAX_THREADS = min(8, NUM_CPU_CORES)
 
 # Device and hardware configuration
 DEVICE = "cpu"
-NUM_CPU_CORES = 2
 
 # Model Options (optimized for CPU and memory constraints)
 MODEL_OPTIONS = {
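This hunk replaces the hard-coded NUM_CPU_CORES = 2 with detection via psutil. One caveat: psutil.cpu_count(logical=False) can return None on some virtualized or containerized hosts, and min(8, None) then raises a TypeError. Below is a minimal, more defensive sketch, assuming psutil is installed; the helper name detect_max_threads is illustrative and not part of the commit.

# Sketch only: defensive CPU-core detection with a fallback.
# psutil.cpu_count(logical=False) may return None (e.g. in some VMs),
# so fall back to os.cpu_count() and finally to a single thread.
import os
import psutil

def detect_max_threads(cap: int = 8) -> int:
    physical = psutil.cpu_count(logical=False) or os.cpu_count() or 1
    return max(1, min(cap, physical))

MAX_THREADS = detect_max_threads()
print(f"Capping PyTorch at {MAX_THREADS} CPU threads")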
@@ -34,12 +40,13 @@ def load_pipeline(model_id):
 def generate_image(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, num_images, model_choice):
     if not prompt:
         raise gr.Error("Будь ласка, введіть опис для зображення.")
-
+
+    torch.set_num_threads(MAX_THREADS)  # Set the maximum number of threads
 
     pipe = load_pipeline(MODEL_OPTIONS[model_choice])
 
     # Adjust memory usage based on available RAM
-    torch.cuda.empty_cache()
+    torch.cuda.empty_cache()
 
     generator = torch.Generator(device=DEVICE)
     if not randomize_seed:
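For context on the two calls touched in this hunk: torch.set_num_threads() caps intra-op parallelism so the pipeline does not oversubscribe the small CPU allocation, while torch.cuda.empty_cache() only does real work when a CUDA device has been initialized. A minimal sketch of the same housekeeping with an explicit guard follows; it is illustrative, not the app's exact code.

# Sketch only: thread and memory housekeeping before CPU inference.
import torch

def configure_inference(max_threads: int) -> None:
    # Limit intra-op parallelism to the chosen thread budget.
    torch.set_num_threads(max_threads)
    # empty_cache() is a no-op when CUDA is unused; the guard makes that explicit.
    if torch.cuda.is_available():
        torch.cuda.empty_cache()

configure_inference(2)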
@@ -54,14 +61,20 @@ def generate_image(prompt, negative_prompt, seed, randomize_seed, width, height,
         guidance_scale=guidance_scale,
         num_inference_steps=num_inference_steps,
         num_images_per_prompt=num_images,
-        generator=generator
-
+        generator=generator,
+    ).images
 
     end_time = time.time()
     generation_time = end_time - start_time
 
     return images, f"Час генерації: {generation_time:.2f} секунд"
 
+# ... (Gradio interface remains the same)
+
+generation_time = end_time - start_time
+
+return images, f"Час генерації: {generation_time:.2f} секунд"
+
 run_button = gr.Button("Згенерувати")
 gallery = gr.Gallery(label="Згенеровані зображення")
 status_text = gr.Textbox(label="Статус")
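The trailing ).images added here matters because a diffusers pipeline call returns an output object, not a list; its .images attribute holds the generated PIL images. A short sketch of that call pattern with a seeded CPU generator is below; the model id is a placeholder, not the one used by this Space. Note also that the two lines repeated after the "# ... (Gradio interface remains the same)" marker sit at module level, where a bare return is a SyntaxError, so they look like a paste artifact rather than intended code.

# Sketch only: how the seeded generator and .images fit together.
# "model-id-here" is a placeholder, not the model used by this Space.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("model-id-here")
generator = torch.Generator(device="cpu").manual_seed(42)

result = pipe(
    prompt="a lighthouse at sunset",
    num_inference_steps=4,
    generator=generator,
)
images = result.images  # list of PIL.Image objects
images[0].save("out.png")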