Update app.py
app.py (CHANGED)
@@ -15,13 +15,20 @@ import gradio as gr
 import os
 import random
 import gc
-
+# Add memory management settings
+import torch.backends.cuda
+torch.backends.cuda.max_split_size_mb = 128  # limit memory split size
 # Memory management settings
 torch.cuda.empty_cache()
 gc.collect()
 os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:128'
 torch.backends.cudnn.benchmark = True
 torch.backends.cuda.matmul.allow_tf32 = True
+def clear_memory():
+    if torch.cuda.is_available():
+        torch.cuda.empty_cache()
+        torch.cuda.synchronize()
+    gc.collect()
 
 # Constant definitions
 MAX_SEED = 2**32 - 1
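Note on the allocator settings in this hunk: PyTorch's caching allocator reads PYTORCH_CUDA_ALLOC_CONF when CUDA is first used, so the environment variable has to be in place before the first CUDA allocation for max_split_size_mb to take effect. A minimal, self-contained sketch of the same setup (not part of this commit, shown only for orientation):

import os
import gc

# Set the allocator config before torch touches CUDA so max_split_size_mb is honored.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128"

import torch

def clear_memory():
    """Release cached CUDA blocks and force Python garbage collection."""
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        torch.cuda.synchronize()
    gc.collect()

if torch.cuda.is_available():
    torch.backends.cudnn.benchmark = True         # pick fastest conv algorithms for fixed input sizes
    torch.backends.cuda.matmul.allow_tf32 = True  # allow TF32 matmuls on Ampere and newer GPUs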
@@ -63,7 +70,10 @@ def get_fashion_pipe():
         torch_dtype=torch.float16,
         use_auth_token=HF_TOKEN
     )
-
+    try:
+        fashion_pipe.enable_xformers_memory_efficient_attention()  # modified part
+    except Exception as e:
+        print(f"Warning: Could not enable memory efficient attention: {e}")
     fashion_pipe.enable_sequential_cpu_offload()
     return fashion_pipe
 
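For context on the try/except added above: enable_xformers_memory_efficient_attention() only succeeds when the optional xformers package is installed and the pipeline's attention implementation supports it, so guarding it keeps the Space from crashing when it is missing. A rough sketch of the same pattern on a generic diffusers pipeline; the model id below is a placeholder for illustration, not the checkpoint this app loads:

import torch
from diffusers import DiffusionPipeline

# Placeholder model id; the app loads its own checkpoint with HF_TOKEN.
pipe = DiffusionPipeline.from_pretrained(
    "some-org/some-model",
    torch_dtype=torch.float16,
)

try:
    # Optional: requires the xformers package and a compatible attention backend.
    pipe.enable_xformers_memory_efficient_attention()
except Exception as e:
    print(f"Warning: Could not enable memory efficient attention: {e}")

# Keep only the layers currently executing on the GPU; slower, but much lower peak VRAM.
pipe.enable_sequential_cpu_offload()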
@@ -129,51 +139,44 @@ def contains_korean(text):
 
 @spaces.GPU()
 def generate_fashion(prompt, mode, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
-    # ... (earlier body of generate_fashion; most removed lines not captured in this view)
-        generator=generator,
-        joint_attention_kwargs={"scale": lora_scale},
-    ).images[0]
-
-    progress(100, "Completed!")
-    return image, seed
-
+    clear_memory()  # clean up memory
+
+    try:
+        if contains_korean(prompt):
+            translator = get_translator()
+            translated = translator(prompt)[0]['translation_text']
+            actual_prompt = translated
+        else:
+            actual_prompt = prompt
+
+        pipe = get_fashion_pipe()
+
+        # Resize to limit memory usage
+        width = min(width, 768)    # maximum size limit
+        height = min(height, 768)  # maximum size limit
+
+        if randomize_seed:
+            seed = random.randint(0, MAX_SEED)
+        generator = torch.Generator(device="cuda").manual_seed(seed)
+
+        progress(0, "Starting fashion generation...")
+
+        image = pipe(
+            prompt=f"{actual_prompt} {trigger_word}",
+            num_inference_steps=min(steps, 30),  # limit the number of steps
+            guidance_scale=cfg_scale,
+            width=width,
+            height=height,
+            generator=generator,
+            joint_attention_kwargs={"scale": lora_scale},
+        ).images[0]
+
+        clear_memory()  # clean up memory
+        return image, seed
+
+    except Exception as e:
+        clear_memory()  # clean up memory even on error
+        raise e
 
 def leffa_predict(src_image_path, ref_image_path, control_type):
     torch.cuda.empty_cache()
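The rewritten generate_fashion clears the CUDA cache on entry, after a successful run, and again in the except branch before re-raising. An equivalent, slightly terser way to express the same guarantee, shown only as a sketch and assuming the clear_memory() helper defined earlier in this commit:

def run_with_cleanup(pipe, prompt, **pipe_kwargs):
    # Hypothetical wrapper illustrating the cleanup pattern; not part of the commit.
    clear_memory()
    try:
        return pipe(prompt=prompt, **pipe_kwargs).images[0]
    finally:
        # Runs on both the success and the error path, so the cache is always released.
        clear_memory()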