import random
import gradio as gr
import numpy as np
import torch
import spaces
from diffusers import FluxPipeline
from PIL import Image
from diffusers.utils import export_to_gif
from transformers import pipeline
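# The model renders a single 1024x256 strip containing four consecutive 256x256 frames.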
HEIGHT = 256
WIDTH = 1024
MAX_SEED = np.iinfo(np.int32).max
device = "cuda" if torch.cuda.is_available() else "cpu"
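# FLUX.1-dev text-to-image pipeline, loaded in bfloat16.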
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    torch_dtype=torch.bfloat16
).to(device)
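# Korean-to-English translator used to preprocess Korean prompts before generation.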
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
def split_image(input_image, num_splits=4):
    """Crop a horizontal strip into `num_splits` consecutive 256x256 frames."""
    output_images = []
    for i in range(num_splits):
        left = i * 256
        right = (i + 1) * 256
        box = (left, 0, right, 256)
        output_images.append(input_image.crop(box))
    return output_images
def translate_to_english(text):
    return translator(text)[0]['translation_text']
@spaces.GPU()
def predict(prompt, seed=42, randomize_seed=False, guidance_scale=5.0, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
    # Translate the prompt to English if it contains Hangul characters.
    if any('\u3131' <= char <= '\u318E' or '\uAC00' <= char <= '\uD7A3' for char in prompt):
        prompt = translate_to_english(prompt)

    prompt_template = f"""
    A side by side 4 frame image showing consecutive stills from a looped gif moving from left to right. The gif is of {prompt}.
    """

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    image = pipe(
        prompt=prompt_template,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        num_images_per_prompt=1,
        generator=torch.Generator("cpu").manual_seed(seed),
        height=HEIGHT,
        width=WIDTH
    ).images[0]

    # Split the 1024x256 strip into 4 frames and assemble them into a looping GIF.
    return export_to_gif(split_image(image, 4), "flux.gif", fps=4), image, seed
css = """
footer { visibility: hidden;}
"""
# Example prompts are in Korean and exercise the ko-en translation path.
examples = [
    "๊ณ ์–‘์ด๊ฐ€ ๊ณต์ค‘์—์„œ ๋ฐœ์„ ํ”๋“œ๋Š” ๋ชจ์Šต",  # a cat waving its paws in the air
    "ํŒฌ๋”๊ฐ€ ์—‰๋ฉ์ด๋ฅผ ์ขŒ์šฐ๋กœ ํ”๋“œ๋Š” ๋ชจ์Šต",  # a panda shaking its hips from side to side
    "๊ฝƒ์ด ํ”ผ์–ด๋‚˜๋Š” ๊ณผ์ •",  # a flower blooming
]
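# UI labels are in Korean: "ํ”„๋กฌํ”„ํŠธ" = prompt, "์ œ์ถœ" = submit, "์Šคํ‹ธ ์ด๋ฏธ์ง€" = still images,
# "๊ณ ๊ธ‰ ์„ค์ •" = advanced settings, "์‹œ๋“œ (๋ฌด์ž‘์œ„ํ™”)" = (randomize) seed,
# "๊ฐ€์ด๋˜์Šค ์Šค์ผ€์ผ" = guidance scale, "์ถ”๋ก  ๋‹จ๊ณ„ ์ˆ˜" = number of inference steps.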
with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css) as demo:
    with gr.Column(elem_id="col-container"):
        with gr.Row():
            prompt = gr.Text(label="ํ”„๋กฌํ”„ํŠธ", show_label=False, max_lines=1, placeholder="ํ”„๋กฌํ”„ํŠธ๋ฅผ ์ž…๋ ฅํ•˜์„ธ์š”")
            submit = gr.Button("์ œ์ถœ", scale=0)

        output = gr.Image(label="GIF", show_label=False)
        output_stills = gr.Image(label="์Šคํ‹ธ ์ด๋ฏธ์ง€", show_label=False, elem_id="stills")

        with gr.Accordion("๊ณ ๊ธ‰ ์„ค์ •", open=False):
            seed = gr.Slider(
                label="์‹œ๋“œ",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )
            randomize_seed = gr.Checkbox(label="์‹œ๋“œ ๋ฌด์ž‘์œ„ํ™”", value=True)

            with gr.Row():
                guidance_scale = gr.Slider(
                    label="๊ฐ€์ด๋˜์Šค ์Šค์ผ€์ผ",
                    minimum=1,
                    maximum=15,
                    step=0.1,
                    value=3.5,
                )
                num_inference_steps = gr.Slider(
                    label="์ถ”๋ก  ๋‹จ๊ณ„ ์ˆ˜",
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=28,
                )

        gr.Examples(
            examples=examples,
            fn=predict,
            inputs=[prompt],
            outputs=[output, output_stills, seed],
            cache_examples="lazy"
        )

    gr.on(
        triggers=[submit.click, prompt.submit],
        fn=predict,
        inputs=[prompt, seed, randomize_seed, guidance_scale, num_inference_steps],
        outputs=[output, output_stills, seed]
    )

demo.launch()