import os
import io
import asyncio
import socket
import requests
import base64
import uvicorn
import sys
import logging
from fastapi import FastAPI, File, UploadFile, Form
from fastapi.responses import FileResponse, StreamingResponse
from fastapi.middleware.cors import CORSMiddleware
from PIL import Image
import torch
from diffusers import (
DiffusionPipeline,
AutoencoderKL,
StableDiffusionControlNetPipeline,
ControlNetModel,
StableDiffusionLatentUpscalePipeline,
StableDiffusionImg2ImgPipeline,
StableDiffusionControlNetImg2ImgPipeline,
DPMSolverMultistepScheduler,
EulerDiscreteScheduler
)
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from transformers import AutoFeatureExtractor
import random
import time
import tempfile
logger = logging.getLogger(__name__)
# Set the logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
logger.setLevel(logging.DEBUG)
file_handler = logging.FileHandler('inference.log')
stream_handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# Apply the formatter to both handlers (log file and command line)
file_handler.setFormatter(formatter)
stream_handler.setFormatter(formatter)
# Add the file handler and stream handler to the logger
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
app = FastAPI()
BASE_MODEL = "SG161222/Realistic_Vision_V5.1_noVAE"
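# Model stack: Realistic Vision v5.1 (shipped without a baked-in VAE) decoded with the
# ft-MSE VAE, conditioned by the QR Code Monster ControlNet, filtered by the CompVis safety checker.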
# Initialize both pipelines
vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16)
controlnet = ControlNetModel.from_pretrained("monster-labs/control_v1p_sd15_qrcode_monster", torch_dtype=torch.float16)
safety_model_id = "CompVis/stable-diffusion-safety-checker"
main_pipe = StableDiffusionControlNetPipeline.from_pretrained(
BASE_MODEL,
controlnet=controlnet,
vae=vae,
    feature_extractor=AutoFeatureExtractor.from_pretrained(safety_model_id),
    safety_checker=StableDiffusionSafetyChecker.from_pretrained(safety_model_id),
torch_dtype=torch.float16,
).to("cuda")
image_pipe = StableDiffusionControlNetImg2ImgPipeline(**main_pipe.components)
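# The img2img pipeline reuses main_pipe's components (UNet, VAE, text encoder, ControlNet),
# so the refinement pass does not allocate a second copy of the weights on the GPU.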
# Sampler map
SAMPLER_MAP = {
"DPM++ Karras SDE": lambda config: DPMSolverMultistepScheduler.from_config(config, use_karras=True, algorithm_type="sde-dpmsolver++"),
"Euler": lambda config: EulerDiscreteScheduler.from_config(config),
}
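# Example: SAMPLER_MAP["Euler"](main_pipe.scheduler.config) builds a scheduler from the
# current pipeline config; inference() swaps it into main_pipe before each run.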
def center_crop_resize(img, output_size=(512, 512)):
width, height = img.size
# Calculate dimensions to crop to the center
new_dimension = min(width, height)
left = (width - new_dimension)/2
top = (height - new_dimension)/2
right = (width + new_dimension)/2
bottom = (height + new_dimension)/2
# Crop and resize
img = img.crop((left, top, right, bottom))
img = img.resize(output_size)
return img
def common_upscale(samples, width, height, upscale_method, crop=False):
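    # Optionally center-crop the latent tensor to the target aspect ratio, then resize it
    # with torch.nn.functional.interpolate using the requested interpolation mode.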
if crop == "center":
old_width = samples.shape[3]
old_height = samples.shape[2]
old_aspect = old_width / old_height
new_aspect = width / height
x = 0
y = 0
if old_aspect > new_aspect:
x = round((old_width - old_width * (new_aspect / old_aspect)) / 2)
elif old_aspect < new_aspect:
y = round((old_height - old_height * (old_aspect / new_aspect)) / 2)
s = samples[:,:,y:old_height-y,x:old_width-x]
else:
s = samples
return torch.nn.functional.interpolate(s, size=(height, width), mode=upscale_method)
def upscale(samples, upscale_method, scale_by):
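    # `samples` is the output of a pipeline run with output_type="latent", so samples["images"]
    # holds a latent tensor of shape (batch, channels, height, width) rather than PIL images.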
#s = samples.copy()
width = round(samples["images"].shape[3] * scale_by)
height = round(samples["images"].shape[2] * scale_by)
s = common_upscale(samples["images"], width, height, upscale_method, "disabled")
return (s)
def convert_to_pil(base64_image):
    # Decode a base64-encoded image string (optionally a data URL) into a PIL image
    image_data = base64.b64decode(base64_image.split(",")[-1])
    return Image.open(io.BytesIO(image_data))

def convert_to_base64(pil_image):
    # Encode a PIL image as a base64 PNG data URL
    buffer = io.BytesIO()
    pil_image.save(buffer, format="PNG")
    return "data:image/png;base64," + base64.b64encode(buffer.getvalue()).decode("utf-8")
def inference(
control_image: Image.Image,
prompt: str,
negative_prompt = "sexual content, racism, humans, faces",
guidance_scale: float = 8.0,
controlnet_conditioning_scale: float = 1,
control_guidance_start: float = 1,
control_guidance_end: float = 1,
upscaler_strength: float = 0.5,
seed: int = -1,
sampler = "DPM++ Karras SDE",
#profile: gr.OAuthProfile | None = None,
):
try:
# Log input types and values
logger.debug("Input Types: control_image=%s, prompt=%s, negative_prompt=%s, guidance_scale=%s, controlnet_conditioning_scale=%s, control_guidance_start=%s, control_guidance_end=%s, upscaler_strength=%s, seed=%s, sampler=%s",
type(control_image), type(prompt), type(negative_prompt), type(guidance_scale), type(controlnet_conditioning_scale),
type(control_guidance_start), type(control_guidance_end), type(upscaler_strength), type(seed), type(sampler))
logger.debug("Input Values: control_image=%s, prompt=%s, negative_prompt=%s, guidance_scale=%s, controlnet_conditioning_scale=%s, control_guidance_start=%s, control_guidance_end=%s, upscaler_strength=%s, seed=%s, sampler=%s",
control_image, prompt, negative_prompt, guidance_scale, controlnet_conditioning_scale,
control_guidance_start, control_guidance_end, upscaler_strength, seed, sampler)
start_time = time.time()
start_time_struct = time.localtime(start_time)
start_time_formatted = time.strftime("%H:%M:%S", start_time_struct)
logger.info(f"Inference started at {start_time_formatted}")
        # Prepare the control image at both working resolutions
control_image_small = center_crop_resize(control_image)
control_image_large = center_crop_resize(control_image, (1024, 1024))
main_pipe.scheduler = SAMPLER_MAP[sampler](main_pipe.scheduler.config)
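        # A seed of -1 means "pick a random 32-bit seed"; any other value makes the run reproducible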
my_seed = random.randint(0, 2**32 - 1) if seed == -1 else seed
generator = torch.Generator(device="cuda").manual_seed(my_seed)
out = main_pipe(
prompt=prompt,
negative_prompt=negative_prompt,
image=control_image_small,
guidance_scale=float(guidance_scale),
controlnet_conditioning_scale=float(controlnet_conditioning_scale),
generator=generator,
control_guidance_start=float(control_guidance_start),
control_guidance_end=float(control_guidance_end),
num_inference_steps=15,
output_type="latent"
)
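        # Second stage: upscale the 512px latents 2x and refine them at 1024px with the
        # ControlNet img2img pipeline, guided by the larger control image.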
upscaled_latents = upscale(out, "nearest-exact", 2)
out_image = image_pipe(
prompt=prompt,
negative_prompt=negative_prompt,
control_image=control_image_large,
image=upscaled_latents,
guidance_scale=float(guidance_scale),
generator=generator,
num_inference_steps=20,
strength=upscaler_strength,
control_guidance_start=float(control_guidance_start),
control_guidance_end=float(control_guidance_end),
controlnet_conditioning_scale=float(controlnet_conditioning_scale)
)
end_time = time.time()
end_time_struct = time.localtime(end_time)
end_time_formatted = time.strftime("%H:%M:%S", end_time_struct)
print(f"Inference ended at {end_time_formatted}, taking {end_time-start_time}s")
logger.debug("Output Types: generated_image=%s", type(None))
logger.debug("Content of out_image: %s", out_image)
logger.debug("Structure of out_image: %s", dir(out_image))
return out_image["images"][0]
except Exception as e:
# Handle exceptions and log error message
logger.error("Error occurred during inference: %s", str(e))
return str(e)
def generate_image_from_parameters(prompt, guidance_scale, controlnet_scale, controlnet_end, upscaler_strength, seed, sampler_type, image):
try:
# Save the uploaded image to a temporary file
temp_image_path = f"/tmp/{int(time.time())}_{image.filename}"
with open(temp_image_path, "wb") as temp_image:
temp_image.write(image.file.read())
        # Note: the uploaded image is saved above, but the bundled control image is what gets used here
        control_image_path = "scrollwhite.png"
        control_image = Image.open(control_image_path)
        # Call the existing inference function with the provided parameters
        generated_image = inference(control_image, prompt, "", guidance_scale, controlnet_scale, 0, controlnet_end, upscaler_strength, seed, sampler_type)
        if generated_image is None:
            return "Failed to generate image"
# Save the generated image as binary data
output_image_io = io.BytesIO()
generated_image.save(output_image_io, format="PNG")
output_image_io.seek(0)
output_image_binary = output_image_io.read()
# Return the generated image binary data
logger.debug("Output Values: generated_image=<binary data>")
return output_image_binary
    except Exception as e:
        # Handle exceptions and return an error message if something goes wrong
        logger.error("Error occurred during image generation: %s", str(e))
        return str(e)
app.add_middleware(
CORSMiddleware,
allow_origins=["*"], # You can replace ["*"] with specific origins if needed
allow_credentials=True,
allow_methods=["*"], # Allow all methods
allow_headers=["*"], # Allow all headers
)
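# Endpoint: accepts the generation parameters as multipart form fields plus the control
# image upload, and streams the generated PNG back to the client.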
@app.post("/generate_image")
async def generate_image(
prompt: str = Form(...),
guidance_scale: float = Form(...),
controlnet_scale: float = Form(...),
controlnet_end: float = Form(...),
upscaler_strength: float = Form(...),
seed: int = Form(...),
sampler_type: str = Form(...),
image: UploadFile = File(...)
):
try:
# Save the uploaded image to a temporary file
temp_image_path = f"/tmp/{int(time.time())}_{image.filename}"
with open(temp_image_path, "wb") as temp_image:
temp_image.write(image.file.read())
# Open the uploaded image using PIL
control_image = Image.open(temp_image_path)
# Call existing inference function with the provided parameters
generated_image = inference(control_image, prompt, "", guidance_scale, controlnet_scale, 0, controlnet_end, upscaler_strength, seed, sampler_type)
if generated_image is None:
return "Failed to generate image"
# Save the generated image as binary data
output_image_io = io.BytesIO()
generated_image.save(output_image_io, format="PNG")
output_image_io.seek(0)
# Return the image as a streaming response
return StreamingResponse(content=output_image_io, media_type="image/png")
except Exception as e:
logger.error("Error occurred during image generation: %s", str(e))
return "Failed to generate image"
async def start_fastapi():
# Get internal IP address
internal_ip = socket.gethostbyname(socket.gethostname())
# Get public IP address using a public API (this may not work if you are behind a router/NAT)
try:
        public_ip = requests.get("http://api.ipify.org", timeout=5).text
except requests.RequestException:
public_ip = "Not Available"
print(f"Internal URL: http://{internal_ip}:7860")
print(f"Public URL: http://{public_ip}:7860")
    # Run FastAPI using uvicorn
config = uvicorn.Config(app="app:app", host="0.0.0.0", port=7860, reload=True)
server = uvicorn.Server(config)
await server.serve()
# Call the asynchronous function using asyncio.run()
if __name__ == "__main__":
asyncio.run(start_fastapi())