import gradio as gr
import torch
import modin.pandas as pd
import numpy as np
from diffusers import DiffusionPipeline

# Use the GPU when available, otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

if torch.cuda.is_available():
    print("cuda")
    torch.cuda.max_memory_allocated(device=device)  # query peak allocated memory on this device
    torch.cuda.empty_cache()
    # Load SDXL Turbo in fp16 with memory-efficient attention for GPU inference.
    pipe = DiffusionPipeline.from_pretrained("hf-models/sdxl-turbo", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
    pipe.enable_xformers_memory_efficient_attention()
    pipe = pipe.to(device)
    torch.cuda.empty_cache()
else:
    print("cpu")
    # Load SDXL Turbo in full precision for CPU inference.
    pipe = DiffusionPipeline.from_pretrained("hf-models/sdxl-turbo", use_safetensors=True)
    pipe = pipe.to(device)

def genie(prompt, steps, seed):
    # seed == 0 means "no fixed generator", so the pipeline picks a random seed.
    generator = None if seed == 0 else torch.manual_seed(seed)
    # SDXL Turbo is distilled for few-step sampling and runs with guidance disabled.
    int_image = pipe(prompt=prompt, generator=generator, num_inference_steps=steps, guidance_scale=0.0).images[0]
    return int_image

gr.Interface(
    fn=genie,
    inputs=[
        gr.Textbox(label='What you want the AI to generate. 77 Token Limit.'),
        gr.Slider(1, maximum=5, value=2, step=1, label='Number of Iterations'),
        gr.Slider(minimum=0, step=1, maximum=999999999999999999, randomize=True, label='Seed (0 = random)'),
    ],
    outputs='image',
    title="Stable Diffusion Turbo",
    description="SDXL Turbo.\n\nWARNING: This model is capable of producing NSFW (Softcore) images.",
    article="Hosted on gitee-ai",
).launch(debug=True, max_threads=80)