gokaygokay committed
Commit 4da2d90
1 Parent(s): 443a04d

Create app.py

Files changed (1)
  1. app.py +245 -0
app.py ADDED
@@ -0,0 +1,245 @@
+ import os
+ import requests
+ import torch
+ import gradio as gr
+ from diffusers import StableDiffusionControlNetImg2ImgPipeline, ControlNetModel, DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler
+ from diffusers.models import AutoencoderKL
+ from PIL import Image
+ from RealESRGAN import RealESRGAN
+ import cv2
+ import numpy as np
+ import spaces
+
+ # Constants
+ SD15_WEIGHTS = "weights"
+ CONTROLNET_CACHE = "controlnet-cache"
+ SCHEDULERS = {
+     "DDIM": DDIMScheduler,
+     "DPMSolverMultistep": DPMSolverMultistepScheduler,
+     "K_EULER_ANCESTRAL": EulerAncestralDiscreteScheduler,
+     "K_EULER": EulerDiscreteScheduler,
+ }
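+ # Note: SCHEDULERS maps scheduler names to diffusers classes for reference; only DDIM is wired up below.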
+
+ # Function to download files
+ def download_file(url, folder_path, filename):
+     if not os.path.exists(folder_path):
+         os.makedirs(folder_path)
+     file_path = os.path.join(folder_path, filename)
+
+     if os.path.isfile(file_path):
+         print(f"File already exists: {file_path}")
+     else:
+         response = requests.get(url, stream=True)
+         if response.status_code == 200:
+             with open(file_path, 'wb') as file:
+                 for chunk in response.iter_content(chunk_size=1024):
+                     file.write(chunk)
+             print(f"File successfully downloaded and saved: {file_path}")
+         else:
+             print(f"Error downloading the file. Status code: {response.status_code}")
+
+ # Download necessary models and files
+
+ # MODEL
+ download_file(
+     "https://huggingface.co/dantea1118/juggernaut_reborn/resolve/main/juggernaut_reborn.safetensors?download=true",
+     "models/models/Stable-diffusion",
+     "juggernaut_reborn.safetensors"
+ )
+
+ # UPSCALER
+
+ download_file(
+     "https://huggingface.co/ai-forever/Real-ESRGAN/resolve/main/RealESRGAN_x2.pth?download=true",
+     "models/upscalers/",
+     "RealESRGAN_x2.pth"
+ )
+
+ download_file(
+     "https://huggingface.co/ai-forever/Real-ESRGAN/resolve/main/RealESRGAN_x4.pth?download=true",
+     "models/upscalers/",
+     "RealESRGAN_x4.pth"
+ )
+
+ # NEGATIVE
+ download_file(
+     "https://huggingface.co/philz1337x/embeddings/resolve/main/verybadimagenegative_v1.3.pt?download=true",
+     "models/embeddings",
+     "verybadimagenegative_v1.3.pt"
+ )
+ download_file(
+     "https://huggingface.co/datasets/AddictiveFuture/sd-negative-embeddings/resolve/main/JuggernautNegative-neg.pt?download=true",
+     "models/embeddings",
+     "JuggernautNegative-neg.pt"
+ )
+
+ # LORA
+
+ download_file(
+     "https://huggingface.co/philz1337x/loras/resolve/main/SDXLrender_v2.0.safetensors?download=true",
+     "models/Lora",
+     "SDXLrender_v2.0.safetensors"
+ )
+ download_file(
+     "https://huggingface.co/philz1337x/loras/resolve/main/more_details.safetensors?download=true",
+     "models/Lora",
+     "more_details.safetensors"
+ )
+
+ # CONTROLNET
+
+ download_file(
+     "https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11f1e_sd15_tile.pth?download=true",
+     "models/ControlNet",
+     "control_v11f1e_sd15_tile.pth"
+ )
+
+ # VAE
+
+ download_file(
+     "https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.safetensors?download=true",
+     "models/VAE",
+     "vae-ft-mse-840000-ema-pruned.safetensors"
+ )
+
+ # Set up the device
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ # Load ControlNet model
+ controlnet = ControlNetModel.from_pretrained(
+     "lllyasviel/control_v11f1e_sd15_tile", torch_dtype=torch.float16
+ )
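+ # The tile ControlNet conditions generation on the upscaled input itself, preserving structure while adding detail.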
+
+ # Load the Stable Diffusion pipeline with Juggernaut Reborn model
+ model_path = "models/models/Stable-diffusion/juggernaut_reborn.safetensors"
+ pipe = StableDiffusionControlNetImg2ImgPipeline.from_single_file(
+     model_path,
+     controlnet=controlnet,
+     torch_dtype=torch.float16,
+     use_safetensors=True
+ )
+
+ # Load and set VAE
+ vae = AutoencoderKL.from_single_file(
+     "models/VAE/vae-ft-mse-840000-ema-pruned.safetensors",
+     torch_dtype=torch.float16
+ )
+ pipe.vae = vae
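+ # The ft-MSE VAE replaces the checkpoint's built-in VAE for cleaner decoding of fine detail.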
+
+ # Load embeddings and LoRA models
+ pipe.load_textual_inversion("models/embeddings/verybadimagenegative_v1.3.pt")
+ pipe.load_textual_inversion("models/embeddings/JuggernautNegative-neg.pt")
+ pipe.load_lora_weights("models/Lora/SDXLrender_v2.0.safetensors")
+ pipe.fuse_lora(lora_scale=0.5)  # bake the render LoRA into the UNet at half strength
+ pipe.load_lora_weights("models/Lora/more_details.safetensors")  # left unfused, so it is applied at inference time
+
+ # Set up the scheduler
+ pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
+
+ # Move the pipeline to the device and enable memory efficient attention
+ pipe = pipe.to(device)
+ pipe.enable_xformers_memory_efficient_attention()
+
+ # Enable FreeU: b1/b2 amplify the UNet backbone features, s1/s2 scale the skip connections, sharpening detail at no extra cost
+ pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.3, b2=1.4)
+
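+ # Resize so the short side matches the target working width, snap dimensions to multiples of 64, then upscale 2x or 4x with Real-ESRGAN.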
+ def resize_and_upscale(input_image, resolution):
+     scale = 2
+     if resolution == 2048:
+         init_w = 1024
+     elif resolution == 2560:
+         init_w = 1280
+     elif resolution == 3072:
+         init_w = 1536
+     else:
+         init_w = 1024
+         scale = 4
+
+     input_image = input_image.convert("RGB")
+     W, H = input_image.size
+     k = float(init_w) / min(H, W)
+     H *= k
+     W *= k
+     H = int(round(H / 64.0)) * 64
+     W = int(round(W / 64.0)) * 64
+     img = input_image.resize((W, H), resample=Image.LANCZOS)
+     model = RealESRGAN(device, scale=scale)
+     model.load_weights(f'models/upscalers/RealESRGAN_x{scale}.pth', download=False)
+     img = model.predict(img)
+     return img
+
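+ # Build nine exposure multipliers, from underexposed to overexposed, scaled by hdr_intensity.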
+ def calculate_brightness_factors(hdr_intensity):
+     factors = [1.0] * 9
+     if hdr_intensity > 0:
+         factors = [1.0 - 0.9 * hdr_intensity, 1.0 - 0.7 * hdr_intensity, 1.0 - 0.45 * hdr_intensity,
+                    1.0 - 0.25 * hdr_intensity, 1.0, 1.0 + 0.2 * hdr_intensity,
+                    1.0 + 0.4 * hdr_intensity, 1.0 + 0.6 * hdr_intensity, 1.0 + 0.8 * hdr_intensity]
+     return factors
+
+ def pil_to_cv(pil_image):
+     return cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)
+
+ # Scale the V (brightness) channel in HSV space by the given factor
+ def adjust_brightness(cv_image, factor):
+     hsv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)
+     h, s, v = cv2.split(hsv_image)
+     v = np.clip(v * factor, 0, 255).astype('uint8')
+     adjusted_hsv = cv2.merge([h, s, v])
+     return cv2.cvtColor(adjusted_hsv, cv2.COLOR_HSV2BGR)
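+ # Simulate an exposure bracket from a single frame and fuse it with OpenCV's Mertens exposure fusion.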
+
+ def create_hdr_effect(original_image, hdr):
+     cv_original = pil_to_cv(original_image)
+     brightness_factors = calculate_brightness_factors(hdr)
+     images = [adjust_brightness(cv_original, factor) for factor in brightness_factors]
+
+     merge_mertens = cv2.createMergeMertens()
+     hdr_image = merge_mertens.process(images)
+     hdr_image_8bit = np.clip(hdr_image * 255, 0, 255).astype('uint8')
+     hdr_image_pil = Image.fromarray(cv2.cvtColor(hdr_image_8bit, cv2.COLOR_BGR2RGB))
+
+     return hdr_image_pil
+
+ def process_image(input_image, prompt, negative_prompt, resolution=2048, num_inference_steps=50, guidance_scale=3, strength=0.35, hdr=0):
+     condition_image = resize_and_upscale(input_image, resolution)
+     condition_image = create_hdr_effect(condition_image, hdr)
+
+     result = pipe(
+         prompt=prompt,
+         negative_prompt=negative_prompt,
+         image=condition_image,
+         control_image=condition_image,
+         width=condition_image.size[0],
+         height=condition_image.size[1],
+         strength=strength,
+         num_inference_steps=num_inference_steps,
+         guidance_scale=guidance_scale,
+         generator=torch.manual_seed(0),  # fixed seed for reproducible outputs
+     ).images[0]
+
+     return result
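+ # Quick local smoke test (hypothetical file names, commented out; the Space drives this via Gradio instead):
+ # out = process_image(Image.open("input.jpg"), "masterpiece, best quality, highres", "blurry, lowres")
+ # out.save("output.jpg")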
+
+ # @spaces.GPU requests a Hugging Face ZeroGPU worker for the duration of each call
+ @spaces.GPU
+ def gradio_process_image(input_image, resolution, num_inference_steps, strength, hdr, guidance_scale):
+     prompt = "masterpiece, best quality, highres"
+     negative_prompt = "low quality, normal quality, ugly, blurry, blur, lowres, bad anatomy, bad hands, cropped, worst quality, verybadimagenegative_v1.3, JuggernautNegative-neg"
+     result = process_image(input_image, prompt, negative_prompt, resolution, num_inference_steps, guidance_scale, strength, hdr)
+     return result
+
+ # Simple options (Gradio 4.x components; the legacy gr.inputs API is no longer available)
+ simple_options = [
+     gr.Image(type="pil", label="Input Image"),
+     gr.Slider(minimum=2048, maximum=3072, step=512, value=2048, label="Resolution"),
+     gr.Slider(minimum=10, maximum=100, step=10, value=20, label="Inference Steps"),
+     gr.Slider(minimum=0.0, maximum=1.0, step=0.05, value=0.35, label="Strength"),
+     gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=0, label="HDR"),
+     gr.Slider(minimum=1, maximum=10, step=0.1, value=3, label="Guidance Scale")
+ ]
+
+ # Create the Gradio interface
+ iface = gr.Interface(
+     fn=gradio_process_image,
+     inputs=simple_options,
+     outputs=gr.Image(type="pil", label="Output Image"),
+     title="Image Processing with Stable Diffusion",
+     description="Upload an image and adjust the settings to process it using Stable Diffusion."
+ )
+
+ iface.launch()