Spaces: Runtime error
gokaygokay committed
Commit • 91d3bd5
1 Parent(s): 05f54e2
Update app.py
app.py CHANGED
@@ -52,61 +52,25 @@ def download_models():
 
 download_models()
 
-class LazyRealESRGAN:
-    def __init__(self, device, scale):
-        self.device = device
-        self.scale = scale
-        self.model = None
-
-    def load_model(self):
-        if self.model is None:
-            self.model = RealESRGAN(self.device, scale=self.scale)
-            self.model.load_weights(f'models/upscalers/RealESRGAN_x{self.scale}.pth', download=False)
-
-    def predict(self, img):
-        self.load_model()
-        return self.model.predict(img)
-
-lazy_realesrgan_x2 = LazyRealESRGAN(device, scale=2)
-lazy_realesrgan_x4 = LazyRealESRGAN(device, scale=4)
-
-def resize_and_upscale(input_image, resolution):
-    scale = 2 if resolution <= 2048 else 4
-    input_image = input_image.convert("RGB")
-    W, H = input_image.size
-    k = float(resolution) / min(H, W)
-    H = int(round(H * k / 64.0)) * 64
-    W = int(round(W * k / 64.0)) * 64
-    img = input_image.resize((W, H), resample=Image.LANCZOS)
-    if scale == 2:
-        img = lazy_realesrgan_x2.predict(img)
-    else:
-        img = lazy_realesrgan_x4.predict(img)
-    return img
-
-def create_hdr_effect(original_image, hdr):
-    if hdr == 0:
-        return original_image
-    cv_original = cv2.cvtColor(np.array(original_image), cv2.COLOR_RGB2BGR)
-    factors = [1.0 - 0.9 * hdr, 1.0 - 0.7 * hdr, 1.0 - 0.45 * hdr,
-               1.0 - 0.25 * hdr, 1.0, 1.0 + 0.2 * hdr,
-               1.0 + 0.4 * hdr, 1.0 + 0.6 * hdr, 1.0 + 0.8 * hdr]
-    images = [cv2.convertScaleAbs(cv_original, alpha=factor) for factor in factors]
-    merge_mertens = cv2.createMergeMertens()
-    hdr_image = merge_mertens.process(images)
-    hdr_image_8bit = np.clip(hdr_image * 255, 0, 255).astype('uint8')
-    return Image.fromarray(cv2.cvtColor(hdr_image_8bit, cv2.COLOR_BGR2RGB))
-
 import time
 
+def timer_func(func):
+    def wrapper(*args, **kwargs):
+        start_time = time.time()
+        result = func(*args, **kwargs)
+        end_time = time.time()
+        print(f"{func.__name__} took {end_time - start_time:.2f} seconds")
+        return result
+    return wrapper
+
 class LazyLoadPipeline:
     def __init__(self):
         self.pipe = None
 
+    @timer_func
     def load(self):
         if self.pipe is None:
             print("Starting to load the pipeline...")
-            start_time = time.time()
             try:
                 self.pipe = self.setup_pipeline()
                 if ENABLE_CPU_OFFLOAD:
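The main change in this hunk: per-function timing now comes from a reusable `timer_func` decorator instead of an inline `start_time` in `load()`. A minimal, self-contained sketch of how the decorator behaves on its own (the `slow_square` function is illustrative, not from app.py):

```python
import time

def timer_func(func):
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        end_time = time.time()
        print(f"{func.__name__} took {end_time - start_time:.2f} seconds")
        return result
    return wrapper

@timer_func
def slow_square(x):
    time.sleep(0.5)  # stand-in for real work, e.g. loading model weights
    return x * x

slow_square(4)  # prints roughly: slow_square took 0.50 seconds
```

Since `wrapper` closes over `func`, the printed name is always the decorated function's own; adding `functools.wraps(func)` would also preserve `__name__` and the docstring on the wrapper itself, though nothing here depends on that.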
@@ -118,18 +82,24 @@ class LazyLoadPipeline:
             if USE_TORCH_COMPILE:
                 print("Compiling the model...")
                 self.pipe.unet = torch.compile(self.pipe.unet, mode="reduce-overhead", fullgraph=True)
-            print(f"Pipeline loaded successfully in {time.time() - start_time:.2f} seconds")
         except Exception as e:
             print(f"Error loading pipeline: {str(e)}")
             raise
 
+    @timer_func
     def setup_pipeline(self):
         print("Setting up the pipeline...")
+        start_time = time.time()
         controlnet = ControlNetModel.from_single_file(
             "models/ControlNet/control_v11f1e_sd15_tile.pth", torch_dtype=torch.float16
         )
+        print(f"ControlNet loaded in {time.time() - start_time:.2f} seconds")
+
+        start_time = time.time()
         safety_checker = StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-safety-checker")
+        print(f"Safety checker loaded in {time.time() - start_time:.2f} seconds")
 
+        start_time = time.time()
         model_path = "models/models/Stable-diffusion/juggernaut_reborn.safetensors"
         pipe = StableDiffusionControlNetImg2ImgPipeline.from_single_file(
             model_path,
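This hunk switches `setup_pipeline` to per-stage timing: reset `start_time`, run the stage, print the elapsed time. As an alternative sketch (not part of this commit), the same measure-and-print pattern could be factored into a small context manager so each stage becomes one `with` block:

```python
import time
from contextlib import contextmanager

@contextmanager
def timed(label):
    # Print how long the enclosed block took, mirroring the diff's
    # start_time/print pairs.
    start = time.time()
    yield
    print(f"{label} loaded in {time.time() - start:.2f} seconds")

# Hypothetical usage inside setup_pipeline:
# with timed("ControlNet"):
#     controlnet = ControlNetModel.from_single_file(
#         "models/ControlNet/control_v11f1e_sd15_tile.pth", torch_dtype=torch.float16
#     )
```

Like the inline version, this prints nothing if the block raises; wrapping the `yield` in try/finally would report timings for failed stages too.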
@@ -138,21 +108,32 @@ class LazyLoadPipeline:
             use_safetensors=True,
             safety_checker=safety_checker
         )
+        print(f"Main pipeline loaded in {time.time() - start_time:.2f} seconds")
 
+        start_time = time.time()
         vae = AutoencoderKL.from_single_file(
             "models/VAE/vae-ft-mse-840000-ema-pruned.safetensors",
             torch_dtype=torch.float16
         )
         pipe.vae = vae
+        print(f"VAE loaded in {time.time() - start_time:.2f} seconds")
 
         print("Loading textual inversions and LoRA weights...")
+        start_time = time.time()
         pipe.load_textual_inversion("models/embeddings/verybadimagenegative_v1.3.pt")
         pipe.load_textual_inversion("models/embeddings/JuggernautNegative-neg.pt")
+        print(f"Textual inversions loaded in {time.time() - start_time:.2f} seconds")
+
+        start_time = time.time()
         pipe.load_lora_weights("models/Lora/SDXLrender_v2.0.safetensors")
         pipe.fuse_lora(lora_scale=0.5)
         pipe.load_lora_weights("models/Lora/more_details.safetensors")
+        print(f"LoRA weights loaded in {time.time() - start_time:.2f} seconds")
+
+        start_time = time.time()
         pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
         pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.3, b2=1.4)
+        print(f"Scheduler and FreeU set up in {time.time() - start_time:.2f} seconds")
 
         return pipe
 
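Among the newly timed stages, `pipe.fuse_lora(lora_scale=0.5)` bakes the first LoRA into the base weights at half strength. Conceptually (a toy NumPy illustration, ignoring diffusers' per-layer alpha/rank scaling), fusing adds the scaled low-rank product to each affected weight matrix:

```python
import numpy as np

rng = np.random.default_rng(0)
W = rng.normal(size=(8, 8))   # frozen base weight matrix
A = rng.normal(size=(2, 8))   # LoRA down-projection (rank 2)
B = rng.normal(size=(8, 2))   # LoRA up-projection
W_fused = W + 0.5 * (B @ A)   # roughly what fuse_lora(lora_scale=0.5) bakes in
```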
@@ -160,9 +141,57 @@ class LazyLoadPipeline:
         self.load()
         return self.pipe(*args, **kwargs)
 
+class LazyRealESRGAN:
+    def __init__(self, device, scale):
+        self.device = device
+        self.scale = scale
+        self.model = None
+
+    def load_model(self):
+        if self.model is None:
+            self.model = RealESRGAN(self.device, scale=self.scale)
+            self.model.load_weights(f'models/upscalers/RealESRGAN_x{self.scale}.pth', download=False)
+
+    def predict(self, img):
+        self.load_model()
+        return self.model.predict(img)
+
+lazy_realesrgan_x2 = LazyRealESRGAN(device, scale=2)
+lazy_realesrgan_x4 = LazyRealESRGAN(device, scale=4)
+
+@timer_func
+def resize_and_upscale(input_image, resolution):
+    scale = 2 if resolution <= 2048 else 4
+    input_image = input_image.convert("RGB")
+    W, H = input_image.size
+    k = float(resolution) / min(H, W)
+    H = int(round(H * k / 64.0)) * 64
+    W = int(round(W * k / 64.0)) * 64
+    img = input_image.resize((W, H), resample=Image.LANCZOS)
+    if scale == 2:
+        img = lazy_realesrgan_x2.predict(img)
+    else:
+        img = lazy_realesrgan_x4.predict(img)
+    return img
+
+@timer_func
+def create_hdr_effect(original_image, hdr):
+    if hdr == 0:
+        return original_image
+    cv_original = cv2.cvtColor(np.array(original_image), cv2.COLOR_RGB2BGR)
+    factors = [1.0 - 0.9 * hdr, 1.0 - 0.7 * hdr, 1.0 - 0.45 * hdr,
+               1.0 - 0.25 * hdr, 1.0, 1.0 + 0.2 * hdr,
+               1.0 + 0.4 * hdr, 1.0 + 0.6 * hdr, 1.0 + 0.8 * hdr]
+    images = [cv2.convertScaleAbs(cv_original, alpha=factor) for factor in factors]
+    merge_mertens = cv2.createMergeMertens()
+    hdr_image = merge_mertens.process(images)
+    hdr_image_8bit = np.clip(hdr_image * 255, 0, 255).astype('uint8')
+    return Image.fromarray(cv2.cvtColor(hdr_image_8bit, cv2.COLOR_BGR2RGB))
+
 lazy_pipe = LazyLoadPipeline()
 
 @spaces.GPU
+@timer_func
 def gradio_process_image(input_image, resolution, num_inference_steps, strength, hdr, guidance_scale):
     print("Starting image processing...")
     torch.cuda.empty_cache()
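The large `+` block in this final hunk is a move, not new logic: `LazyRealESRGAN`, `resize_and_upscale`, and `create_hdr_effect` are relocated below `LazyLoadPipeline`, with the two image functions picking up `@timer_func`. A worked example of the geometry in `resize_and_upscale`, which scales the short side to `resolution` and snaps both sides to multiples of 64 (the `target_size` helper below is illustrative, extracted from the function):

```python
def target_size(w, h, resolution):
    # Same arithmetic as resize_and_upscale, without the model calls.
    k = float(resolution) / min(h, w)
    H = int(round(h * k / 64.0)) * 64
    W = int(round(w * k / 64.0)) * 64
    return W, H

print(target_size(1920, 1080, 1024))  # (1792, 1024)
# With resolution=1024 <= 2048, the x2 upscaler then yields 3584x2048.
```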
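Likewise, `create_hdr_effect` fakes an exposure bracket: nine copies of the image are brightness-scaled around 1.0 via `cv2.convertScaleAbs` and merged with OpenCV's Mertens exposure fusion. The spread of the factors grows with the `hdr` slider; for `hdr=0.5`:

```python
hdr = 0.5
factors = [1.0 - 0.9 * hdr, 1.0 - 0.7 * hdr, 1.0 - 0.45 * hdr,
           1.0 - 0.25 * hdr, 1.0, 1.0 + 0.2 * hdr,
           1.0 + 0.4 * hdr, 1.0 + 0.6 * hdr, 1.0 + 0.8 * hdr]
print([round(f, 3) for f in factors])
# [0.55, 0.65, 0.775, 0.875, 1.0, 1.1, 1.2, 1.3, 1.4]
```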
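Finally, both `LazyRealESRGAN` and `LazyLoadPipeline` use the same load-on-first-use pattern: construction is cheap, and the heavy weights are materialized only when the first call arrives, which keeps Space startup fast. A generic sketch of the pattern (the `LazyModel` class is illustrative, not from app.py):

```python
class LazyModel:
    """Defer an expensive load until the model is first called."""

    def __init__(self, loader):
        self._loader = loader   # zero-argument callable that builds the model
        self._model = None

    def __call__(self, *args, **kwargs):
        if self._model is None:           # first call pays the loading cost
            self._model = self._loader()
        return self._model(*args, **kwargs)
```

On ZeroGPU hardware this pairs naturally with `@spaces.GPU`, which grants GPU time per decorated call rather than at import.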