import gradio as gr
import torch
from diffusers import StableDiffusionImg2ImgPipeline
from PIL import Image

# Use CPU and full precision (float16 is only supported on GPUs)
device = "cpu"
dtype = torch.float32

# Load the Ghibli-Diffusion img2img pipeline on the CPU
pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
    "nitrosocke/Ghibli-Diffusion",
    torch_dtype=dtype
).to(device)

# xformers memory-efficient attention is GPU-only, so it stays disabled here
print("⚠️ Running on CPU: xformers disabled, inference will be slow.")

def process_image(input_img):
    if input_img is None:
        return None
    # Stable Diffusion expects RGB input; 512x512 matches the model's training resolution
    input_img = input_img.convert("RGB").resize((512, 512))
    result = pipe(
        prompt="ghibli style, studio ghibli, anime art",
        image=input_img,
        strength=0.5,       # Lower strength runs fewer denoising steps, speeding up CPU inference
        guidance_scale=7.5  # Standard classifier-free guidance scale
    ).images[0]
    return result

# Gradio UI
demo = gr.Interface(
    fn=process_image,
    inputs=gr.Image(type="pil"),
    outputs=gr.Image(type="pil"),
    title="🎨 Ghibli Style Transfer (CPU Optimized)",
    description="Upload an image to transform it into Studio Ghibli style artwork"
)

demo.launch()