# Tile-Upscaler / app.py
import spaces
import os
import requests
import torch
from diffusers import StableDiffusionControlNetImg2ImgPipeline, ControlNetModel, DDIMScheduler
from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
from diffusers.models import AutoencoderKL
from PIL import Image
from RealESRGAN import RealESRGAN
import cv2
import numpy as np
from diffusers.models.attention_processor import AttnProcessor2_0
import gradio as gr
# Feature flags (currently unused in this script)
USE_TORCH_COMPILE = False
ENABLE_CPU_OFFLOAD = False
# Function to download files
def download_file(url, folder_path, filename):
if not os.path.exists(folder_path):
os.makedirs(folder_path)
file_path = os.path.join(folder_path, filename)
if os.path.isfile(file_path):
print(f"File already exists: {file_path}")
else:
response = requests.get(url, stream=True)
if response.status_code == 200:
with open(file_path, 'wb') as file:
for chunk in response.iter_content(chunk_size=1024):
file.write(chunk)
print(f"File successfully downloaded and saved: {file_path}")
else:
print(f"Error downloading the file. Status code: {response.status_code}")
# Download necessary models and files
# MODEL
download_file(
"https://huggingface.co/dantea1118/juggernaut_reborn/resolve/main/juggernaut_reborn.safetensors?download=true",
"models/models/Stable-diffusion",
"juggernaut_reborn.safetensors"
)
# UPSCALER
download_file(
"https://huggingface.co/ai-forever/Real-ESRGAN/resolve/main/RealESRGAN_x2.pth?download=true",
"models/upscalers/",
"RealESRGAN_x2.pth"
)
download_file(
"https://huggingface.co/ai-forever/Real-ESRGAN/resolve/main/RealESRGAN_x4.pth?download=true",
"models/upscalers/",
"RealESRGAN_x4.pth"
)
# NEGATIVE
download_file(
"https://huggingface.co/philz1337x/embeddings/resolve/main/verybadimagenegative_v1.3.pt?download=true",
"models/embeddings",
"verybadimagenegative_v1.3.pt"
)
download_file(
"https://huggingface.co/datasets/AddictiveFuture/sd-negative-embeddings/resolve/main/JuggernautNegative-neg.pt?download=true",
"models/embeddings",
"JuggernautNegative-neg.pt"
)
# LORA
download_file(
"https://huggingface.co/philz1337x/loras/resolve/main/SDXLrender_v2.0.safetensors?download=true",
"models/Lora",
"SDXLrender_v2.0.safetensors"
)
download_file(
"https://huggingface.co/philz1337x/loras/resolve/main/more_details.safetensors?download=true",
"models/Lora",
"more_details.safetensors"
)
# CONTROLNET
download_file(
"https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/main/control_v11f1e_sd15_tile.pth?download=true",
"models/ControlNet",
"control_v11f1e_sd15_tile.pth"
)
# VAE
download_file(
"https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.safetensors?download=true",
"models/VAE",
"vae-ft-mse-840000-ema-pruned.safetensors"
)
# Set up the device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Load ControlNet model
controlnet = ControlNetModel.from_single_file(
"models/ControlNet/control_v11f1e_sd15_tile.pth", torch_dtype=torch.float16
)
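# The tile ControlNet conditions generation on the upscaled input image itself, so
# the img2img pass stays anchored to the original content while detail is added.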
safety_checker = StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-safety-checker")
# Load the Stable Diffusion pipeline with Juggernaut Reborn model
model_path = "models/models/Stable-diffusion/juggernaut_reborn.safetensors"
pipe = StableDiffusionControlNetImg2ImgPipeline.from_single_file(
model_path,
controlnet=controlnet,
torch_dtype=torch.float16,
use_safetensors=True,
safety_checker=safety_checker
)
# Load and set VAE
vae = AutoencoderKL.from_single_file(
"models/VAE/vae-ft-mse-840000-ema-pruned.safetensors",
torch_dtype=torch.float16
)
pipe.vae = vae
# Load embeddings and LoRA models
pipe.load_textual_inversion("models/embeddings/verybadimagenegative_v1.3.pt")
pipe.load_textual_inversion("models/embeddings/JuggernautNegative-neg.pt")
pipe.load_lora_weights("models/Lora/SDXLrender_v2.0.safetensors")
pipe.fuse_lora(lora_scale=0.5)
pipe.load_lora_weights("models/Lora/more_details.safetensors")
# Set up the scheduler
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
# The pipeline is moved to the device and switched to SDPA attention inside the
# Gradio handler below (gradio_process_image).
# Enable FreeU
pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.3, b2=1.4)
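# FreeU re-weights UNet features at inference time: b1/b2 amplify backbone features
# (more detail and contrast) while s1/s2 damp skip-connection features to curb
# artifacts; these values are in the range commonly recommended for SD 1.5 models.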
class LazyRealESRGAN:
def __init__(self, device, scale):
self.device = device
self.scale = scale
self.model = None
def load_model(self):
if self.model is None:
self.model = RealESRGAN(self.device, scale=self.scale)
self.model.load_weights(f'models/upscalers/RealESRGAN_x{self.scale}.pth', download=False)
def predict(self, img):
self.load_model()
return self.model.predict(img)
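# Lazy wrappers defer reading the Real-ESRGAN weights from disk until the first
# predict() call, so app startup does not pay the load cost for both scales.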
# Initialize the lazy models
lazy_realesrgan_x2 = LazyRealESRGAN(device, scale=2)
lazy_realesrgan_x4 = LazyRealESRGAN(device, scale=4)
def resize_and_upscale(input_image, resolution):
scale = 2
if resolution == 2048:
init_w = 1024
elif resolution == 2560:
init_w = 1280
elif resolution == 3072:
init_w = 1536
else:
init_w = 1024
scale = 4
input_image = input_image.convert("RGB")
W, H = input_image.size
k = float(init_w) / min(H, W)
H *= k
W *= k
H = int(round(H / 64.0)) * 64
W = int(round(W / 64.0)) * 64
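    # Snap dimensions to multiples of 64 so the latents divide cleanly through the UNet.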
img = input_image.resize((W, H), resample=Image.LANCZOS)
    # Upscale via the lazily loaded model; instantiating RealESRGAN here as well
    # would load the weights a second time and upscale the image twice.
    if scale == 2:
        img = lazy_realesrgan_x2.predict(img)
    else:
        img = lazy_realesrgan_x4.predict(img)
return img
def calculate_brightness_factors(hdr_intensity):
factors = [1.0] * 9
if hdr_intensity > 0:
factors = [1.0 - 0.9 * hdr_intensity, 1.0 - 0.7 * hdr_intensity, 1.0 - 0.45 * hdr_intensity,
1.0 - 0.25 * hdr_intensity, 1.0, 1.0 + 0.2 * hdr_intensity,
1.0 + 0.4 * hdr_intensity, 1.0 + 0.6 * hdr_intensity, 1.0 + 0.8 * hdr_intensity]
return factors
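# The nine factors form a symmetric exposure ladder around 1.0 (four darker copies,
# the original, four brighter); hdr_intensity controls how wide the ladder spreads.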
def pil_to_cv(pil_image):
return cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)
def adjust_brightness(cv_image, factor):
hsv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(hsv_image)
v = np.clip(v * factor, 0, 255).astype('uint8')
adjusted_hsv = cv2.merge([h, s, v])
return cv2.cvtColor(adjusted_hsv, cv2.COLOR_HSV2BGR)
def create_hdr_effect(original_image, hdr):
    if hdr == 0:
        # Nothing to fuse; skip the (identical-image) Mertens merge entirely.
        return original_image
    cv_original = pil_to_cv(original_image)
brightness_factors = calculate_brightness_factors(hdr)
images = [adjust_brightness(cv_original, factor) for factor in brightness_factors]
merge_mertens = cv2.createMergeMertens()
hdr_image = merge_mertens.process(images)
hdr_image_8bit = np.clip(hdr_image * 255, 0, 255).astype('uint8')
hdr_image_pil = Image.fromarray(cv2.cvtColor(hdr_image_8bit, cv2.COLOR_BGR2RGB))
return hdr_image_pil
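# Mertens exposure fusion weights each bracketed copy by contrast, saturation, and
# well-exposedness, yielding an HDR-style look without a true radiance map.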
def process_image(input_image, prompt, negative_prompt, resolution=2048, num_inference_steps=50, guidance_scale=3, strength=0.35, hdr=0):
condition_image = resize_and_upscale(input_image, resolution)
condition_image = create_hdr_effect(condition_image, hdr)
result = pipe(
prompt=prompt,
negative_prompt=negative_prompt,
image=condition_image,
control_image=condition_image,
width=condition_image.size[0],
height=condition_image.size[1],
strength=strength,
num_inference_steps=num_inference_steps,
guidance_scale=guidance_scale,
generator=torch.manual_seed(0),
).images[0]
return result
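# Example of calling the pipeline directly (outside Gradio); "input.png" and
# "upscaled.png" are hypothetical paths:
#   from PIL import Image
#   out = process_image(Image.open("input.png"),
#                       "masterpiece, best quality, highres",
#                       "low quality, blurry", resolution=2048)
#   out.save("upscaled.png")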
@spaces.GPU
def gradio_process_image(input_image, resolution, num_inference_steps, strength, hdr, guidance_scale):
    global pipe  # without this, `pipe = pipe.to(device)` raises UnboundLocalError
    pipe = pipe.to(device)
    pipe.unet.set_attn_processor(AttnProcessor2_0())
prompt = "masterpiece, best quality, highres"
negative_prompt = "low quality, normal quality, ugly, blurry, blur, lowres, bad anatomy, bad hands, cropped, worst quality, verybadimagenegative_v1.3, JuggernautNegative-neg"
result = process_image(input_image, prompt, negative_prompt, resolution, num_inference_steps, guidance_scale, strength, hdr)
return result
# Gradio input controls
simple_options = [
gr.Image(type="pil", label="Input Image"),
gr.Slider(minimum=2048, maximum=3072, step=512, value=2048, label="Resolution"),
gr.Slider(minimum=10, maximum=100, step=10, value=20, label="Inference Steps"),
gr.Slider(minimum=0.0, maximum=1.0, step=0.05, value=0.35, label="Strength"),
gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=0, label="HDR"),
gr.Slider(minimum=1, maximum=10, step=0.1, value=3, label="Guidance Scale")
]
# Create the Gradio interface
iface = gr.Interface(
fn=gradio_process_image,
inputs=simple_options,
outputs=gr.Image(type="pil", label="Output Image"),
title="Image Processing with Stable Diffusion",
description="Upload an image and adjust the settings to process it using Stable Diffusion."
)
iface.launch()