"""
Adapted from https://huggingface.co/spaces/stabilityai/stable-diffusion
"""
from tensorflow import keras
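# Run inference in mixed precision (float16 compute, float32 variables) to
# reduce latency and GPU memory use.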
keras.mixed_precision.set_global_policy("mixed_float16")
import time
import gradio as gr
import keras_cv
from constants import css, examples, img_height, img_width, num_images_to_gen
from share_btn import community_icon_html, loading_icon_html, share_js
# Load model.
# `load_weights()` expects a local file path, so the fine-tuned diffusion-model
# checkpoint is downloaded first. The Google Drive share link is rewritten here
# into a direct-download URL using the same file ID (adjust if the file is
# hosted elsewhere); the local filename is arbitrary. The checkpoint this Space
# was adapted from is also available at:
# https://huggingface.co/sayakpaul/kerascv_sd_pokemon_finetuned/resolve/main/ckpt_epochs_577_res_256_mp_False.h5
weights_path = keras.utils.get_file(
    fname="pokemon_finetuned_diffusion_model.h5",
    origin="https://drive.google.com/uc?export=download&id=1Z3xc3xS-j7T_sCKhapNgF07GjXGIGN_s&confirm=t",
)
pokemon_model = keras_cv.models.StableDiffusion(
    img_width=img_width, img_height=img_height
)
pokemon_model.diffusion_model.load_weights(weights_path)
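# Compile the sub-models with XLA for faster repeated inference; the first call
# pays a one-time compilation cost.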
pokemon_model.diffusion_model.compile(jit_compile=True)
pokemon_model.decoder.compile(jit_compile=True)
pokemon_model.text_encoder.compile(jit_compile=True)
# Warm-up the model.
#_ = pokemon_model.text_to_image("Teddy bear", batch_size=num_images_to_gen)
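# With warm-up disabled, the first user request also triggers XLA compilation
# and will be noticeably slower than subsequent ones.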
def generate_image_fn(prompt: str, unconditional_guidance_scale: int) -> list:
    start_time = time.time()
    # `images` is an `np.ndarray`, so we convert it to a list of ndarrays,
    # where each ndarray represents one generated image.
    # Reference: https://gradio.app/docs/#gallery
    images = pokemon_model.text_to_image(
        prompt,
        batch_size=num_images_to_gen,
        unconditional_guidance_scale=unconditional_guidance_scale,
    )
    end_time = time.time()
    print(f"Time taken: {end_time - start_time} seconds.")
    return [image for image in images]
description = "This Space demonstrates a fine-tuned Stable Diffusion model. You can use it to generate custom Pokémon. To get started, either enter a prompt or pick one of the examples below. For details on the fine-tuning procedure, refer to [this repository](https://github.com/sayakpaul/stable-diffusion-keras-ft/)."
article = "This Space runs its predictions on a T4 GPU. We use mixed precision to reduce inference latency, and XLA compilation to get the most performance out of TensorFlow."
gr.Interface(
    generate_image_fn,
    inputs=[
        gr.Textbox(
            label="Enter your prompt",
            max_lines=1,
            placeholder="cute Sundar Pichai creature",
        ),
        gr.Slider(
            label="Unconditional guidance scale",
            value=40,
            minimum=8,
            maximum=50,
            step=1,
        ),
    ],
    outputs=gr.Gallery().style(grid=[2], height="auto"),
    title="Generate custom Pokémon",
    description=description,
    article=article,
    examples=[["cute Sundar Pichai creature", 40], ["Hello kitty", 40]],
    allow_flagging=False,
).launch(enable_queue=True)