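"""Gradio Space: generate Khabib Nurmagomedov-style sketches with Stable Diffusion XL
and the ritwikraha/khabib_sketch_LoRA adapter."""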
import torch
import spaces
from PIL import Image
from diffusers import DiffusionPipeline, AutoencoderKL
import gradio as gr
# Load models (outside of the app function for efficiency)
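# madebyollin/sdxl-vae-fp16-fix is a patched SDXL VAE that stays numerically stable in
# float16 (the stock SDXL VAE can produce NaNs / black images at fp16).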
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    vae=vae,
    torch_dtype=torch.float16,
    variant="fp16",
)
pipe.load_lora_weights("ritwikraha/khabib_sketch_LoRA") # Assuming correct model ID
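# Note: the LoRA adapter is kept un-fused here; pipe.fuse_lora() could optionally be
# called to merge it into the base weights for slightly faster inference, at the cost
# of not being able to toggle it off without unfuse_lora().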
# Move models to CUDA if available (outside of the app function for efficiency)
if torch.cuda.is_available():
    pipe.to("cuda")
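# On Hugging Face ZeroGPU Spaces, the @spaces.GPU decorator requests a GPU for the
# duration of each call to the decorated function.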
@spaces.GPU
def inference(
    prompt,
    negative_prompt="ugly face, multiple bodies, bad anatomy, disfigured, extra fingers",
    guidance_scale=3,
    num_inference_steps=50,
):
    """Generate an image with the Stable Diffusion XL model and the Khabib LoRA weights.

    Args:
        prompt (str): Prompt for image generation, entered by the user.
        negative_prompt (str, optional): Negative prompt to steer the model away from
            unwanted features. Defaults to "ugly face, multiple bodies, bad anatomy,
            disfigured, extra fingers".
        guidance_scale (float, optional): Strength of the guidance from the prompt.
            Defaults to 3.
        num_inference_steps (int, optional): Number of denoising steps. Defaults to 50.

    Returns:
        PIL.Image.Image: Generated image.
    """
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
    ).images[0]
    return image.convert("RGB")  # Ensure RGB format for compatibility
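# Example of calling the generation function directly (hypothetical prompt; on Spaces
# the function is normally invoked through the Gradio UI below):
#   image = inference("a pencil sketch of Khabib Nurmagomedov")
#   image.save("khabib_sketch.png")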
# Create the Gradio interface
interface = gr.Interface(
    fn=inference,
    inputs=[
        gr.Textbox(label="Prompt"),  # Prompt from user
        gr.Textbox(label="Negative prompt (optional)"),
        gr.Slider(minimum=1, maximum=10, value=3, label="Guidance scale"),
        gr.Slider(minimum=10, maximum=100, value=50, step=1, label="Inference steps"),
    ],
    outputs=gr.Image(type="pil"),
    title="Stable Diffusion XL with Khabib LoRA",
    description="Generate sketches using the Stable Diffusion XL model fine-tuned on Khabib Nurmagomedov sketches.",
)
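# Optionally, interface.queue() could be called before launch() so that concurrent
# requests are queued rather than run in parallel (default queue settings assumed).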
# Launch the Space
interface.launch()