## Created by ruslanmv.com
## Happy coding!
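## Gradio app: a short prompt idea is expanded with the "daspartho/prompt-extend"
## text-generation model, then rendered with SDXL-Turbo on CPU or GPU.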
import gradio as gr
import torch
from diffusers import DiffusionPipeline
from transformers import pipeline

# Prompt-extension model: turns a short idea into a detailed image prompt
pipe = pipeline("text-generation", model="daspartho/prompt-extend")
def extend_prompt(prompt):
    return pipe(prompt + ",", num_return_sequences=1)[0]["generated_text"]

def text_it(inputs):
    return extend_prompt(inputs)
def load_pipeline(use_cuda):
    device = "cuda" if use_cuda and torch.cuda.is_available() else "cpu"
    if device == "cuda":
        torch.cuda.empty_cache()
        pipe = DiffusionPipeline.from_pretrained(
            "stabilityai/sdxl-turbo",
            torch_dtype=torch.float16,
            variant="fp16",
            use_safetensors=True,
        )
        pipe.enable_xformers_memory_efficient_attention()  # requires xformers
        pipe = pipe.to(device)
        torch.cuda.empty_cache()
    else:
        pipe = DiffusionPipeline.from_pretrained(
            "stabilityai/sdxl-turbo", use_safetensors=True
        )
        pipe = pipe.to(device)
    return pipe
def genie(prompt="sexy woman", steps=2, seed=398231747038484200, use_cuda=False):
    # Local name avoids shadowing the global text-generation `pipe`
    image_pipe = load_pipeline(use_cuda)
    # A seed of 0 means "no fixed seed"; otherwise seed a generator for reproducibility
    generator = None if seed == 0 else torch.Generator().manual_seed(seed)
    extended_prompt = extend_prompt(prompt)
    int_image = image_pipe(
        prompt=extended_prompt,
        generator=generator,
        num_inference_steps=steps,
        guidance_scale=0.0,
    ).images[0]
    return int_image, extended_prompt
with gr.Blocks() as myface:
    gr.HTML()
    with gr.Row():
        with gr.Row():
            input_text = gr.Textbox(label="Prompt idea.", lines=1)
            steps_slider = gr.Slider(
                minimum=1, maximum=5, value=2, step=1, label="Number of Iterations"
            )
            seed_slider = gr.Slider(
                minimum=0, maximum=999999999999999999, step=1, randomize=True, label="Seed"
            )
            cuda_checkbox = gr.Checkbox(label="cuda", info="Do you have cuda?")
        with gr.Row():
            generate_button = gr.Button("Generate")
    with gr.Row():
        output_image = gr.Image()
        output_text = gr.Textbox(label="Generated Text", lines=2)

    generate_button.click(
        genie,
        inputs=[input_text, steps_slider, seed_slider, cuda_checkbox],
        outputs=[output_image, output_text],
        concurrency_limit=10,
    )
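# Note: the Blocks layout above only defines the components and the click handler;
# the Interface below is what actually gets launched.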
# Define the example
example = [["sexy woman", 2, 398231747038484200, False]]

iface = gr.Interface(
    fn=genie,
    inputs=[input_text, steps_slider, seed_slider, cuda_checkbox],
    outputs=[output_image, output_text],
    title="Stable Diffusion Turbo CPU or GPU",
    description="Type your idea, and let's create a detailed image.",
    examples=example,
)
iface.launch(inline=True, show_api=False, max_threads=200)
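# To run locally (assuming gradio, torch, transformers, diffusers and, for the GPU
# path, xformers are installed): `python app.py`, then open the printed local URL.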