# canny_coyo1m / app.py
import os

import cv2
import gradio as gr
import jax
import jax.numpy as jnp
import numpy as np
import requests
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from PIL import Image, ImageOps

def load_image(image):
    """Load an image from a URL, a local path, or an existing PIL image, as RGB."""
    if isinstance(image, str):
        if image.startswith("http://") or image.startswith("https://"):
            image = Image.open(requests.get(image, stream=True).raw)
        elif os.path.isfile(image):
            image = Image.open(image)
    elif isinstance(image, Image.Image):
        image = image
    image = ImageOps.exif_transpose(image)
    image = image.convert("RGB")
    return image

def image_grid(imgs, rows, cols):
    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
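
# Illustrative layout: with 8 samples, image_grid(images, rows=2, cols=4) pastes
# them left to right, top to bottom into a single 2 x 4 contact sheet.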

def create_key(seed=0):
    return jax.random.PRNGKey(seed)


rng = create_key(0)
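# JAX threads randomness through explicit PRNG keys rather than global state: the
# key above is split inside `infer` into one sub-key per device, e.g.
# jax.random.split(rng, 8) yields 8 independent keys on an 8-device TPU.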

def canny_filter(image):
    # Gradio passes the uploaded image as an RGB numpy array.
    gray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    blurred_image = cv2.GaussianBlur(gray_image, (5, 5), 0)
    edges_image = cv2.Canny(blurred_image, 50, 150)
    # Stack the single-channel edge map into three channels and return a PIL
    # image, the format expected by the pipeline's prepare_image_inputs.
    edges_image = np.stack([edges_image] * 3, axis=-1)
    return Image.fromarray(edges_image)
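
# Standalone usage sketch (the URL below is only a hypothetical example input):
#   control = canny_filter(np.array(load_image("https://example.com/dog.png")))
#   control.save("canny_control.png")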

def infer(prompts, negative_prompts, image):
    # load the Canny ControlNet and Stable Diffusion v1-5
    controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
        "jax-diffusers-event/canny-coyo1m", dtype=jnp.float32
    )
    pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.float32
    )
    params["controlnet"] = controlnet_params

    # one sample per device, each driven by its own sub-key of the module-level rng
    num_samples = jax.device_count()
    rngs = jax.random.split(rng, num_samples)

    canny_image = canny_filter(image)

    prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
    negative_prompt_ids = pipe.prepare_text_inputs([negative_prompts] * num_samples)
    processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

    # replicate the parameters to every device and shard the inputs across them
    p_params = replicate(params)
    prompt_ids = shard(prompt_ids)
    negative_prompt_ids = shard(negative_prompt_ids)
    processed_image = shard(processed_image)
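    # `replicate` adds a leading device axis to the parameter pytree and `shard`
    # reshapes each input batch from (num_devices * per_device, ...) to
    # (num_devices, per_device, ...), the layout the pmapped pipeline expects
    # when called with jit=True.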

    output = pipe(
        prompt_ids=prompt_ids,
        image=processed_image,
        params=p_params,
        prng_seed=rngs,
        num_inference_steps=50,
        neg_prompt_ids=negative_prompt_ids,
        jit=True,
    ).images

    # merge the (devices, per-device batch) leading axes, convert to PIL, and lay
    # the samples out in a grid (2 x 4 when running on 8 devices)
    output_images = pipe.numpy_to_pil(np.asarray(output.reshape((num_samples,) + output.shape[-3:])))
    output_images = image_grid(output_images, max(num_samples // 4, 1), min(num_samples, 4))
    # a single-element list so the Gallery output component can display the grid
    return [output_images]

gr.Interface(infer, inputs=["text", "text", "image"], outputs="gallery").launch()