# Text-To-Gif / app.py
import gradio as gr
import numpy as np
import random
from diffusers import DiffusionPipeline
import torch
device = "cuda" if torch.cuda.is_available() else "cpu"
# List of models
models = {
    "sdxl-turbo": "stabilityai/sdxl-turbo",
    "MistoLine": "TheMistoAI/MistoLine",
    "UnfilteredAI/NSFW": "UnfilteredAI/NSFW-gen-v2",
    "runwayml/SD": "runwayml/stable-diffusion-v1-5",
}
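# Note (assumption): not every repo listed above is guaranteed to ship fp16 weights or a
# standard text-to-image pipeline, so loading may fail for some entries and could need
# per-model handling.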
# Cache to store loaded pipelines
pipelines = {}
# Function to load a model
def load_model(model_name):
    if model_name in pipelines:
        return pipelines[model_name]
    if model_name not in models:
        raise ValueError(f"Model {model_name} is not available.")
    model_path = models[model_name]
    if torch.cuda.is_available():
        # Note: max_memory_allocated only queries peak GPU memory; it has no side effect here.
        torch.cuda.max_memory_allocated(device=device)
        pipe = DiffusionPipeline.from_pretrained(model_path, torch_dtype=torch.float16, variant="fp16")
        pipe.enable_xformers_memory_efficient_attention()
    else:
        pipe = DiffusionPipeline.from_pretrained(model_path)
    pipe = pipe.to(device)
    # Disable NSFW filters if the pipeline supports it
    if hasattr(pipe, 'safety_checker'):
        pipe.safety_checker = None
    pipelines[model_name] = pipe
    return pipe
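# Example usage (illustrative only; the Gradio app below calls load_model for us):
#   pipe = load_model("sdxl-turbo")
#   image = pipe("a cat wearing a spacesuit", num_inference_steps=2, guidance_scale=0.0).images[0]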
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024
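# MAX_SEED keeps seeds within the 32-bit signed integer range used by the seed slider
# below and passed to torch.Generator.manual_seed.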
def infer(model_name, prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
    pipe = load_model(model_name)
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    # A seeded generator keeps results reproducible for a given seed value
    generator = torch.Generator().manual_seed(seed)
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=generator,
    ).images[0]
    return image
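# Example call (illustrative only; the values mirror the UI defaults below):
#   img = infer("sdxl-turbo", "An astronaut riding a green horse", "", 0, True, 512, 512, 0.0, 2)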
examples = [
    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
    "An astronaut riding a green horse",
    "A delicious ceviche cheesecake slice",
]
css="""
#col-container {
margin: 0 auto;
max-width: 520px;
}
"""
if torch.cuda.is_available():
    power_device = "GPU"
else:
    power_device = "CPU"
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown(f"""
        # Text-to-Image Gradio Template
        Currently running on {power_device}.
        """)
        with gr.Row():
            model_name = gr.Dropdown(
                label="Select Model",
                choices=list(models.keys()),
                value="sdxl-turbo",
                show_label=True,
            )
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            run_button = gr.Button("Run", scale=0)
        result = gr.Image(label="Result", show_label=False)
        with gr.Accordion("Advanced Settings", open=False):
            negative_prompt = gr.Text(
                label="Negative prompt",
                max_lines=1,
                placeholder="Enter a negative prompt",
                visible=False,
            )
            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
            with gr.Row():
                width = gr.Slider(
                    label="Width",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=512,
                )
                height = gr.Slider(
                    label="Height",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=512,
                )
            with gr.Row():
                guidance_scale = gr.Slider(
                    label="Guidance scale",
                    minimum=0.0,
                    maximum=10.0,
                    step=0.1,
                    value=0.0,
                )
                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=12,
                    step=1,
                    value=2,
                )
        gr.Examples(
            examples=examples,
            inputs=[prompt],
        )
    run_button.click(
        fn=infer,
        inputs=[model_name, prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
        outputs=[result],
    )
demo.queue().launch()
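# Assumption: launch() with defaults serves the app locally or on the Space; passing
# share=True to launch() would create a temporary public link when running outside HF Spaces.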