import gradio as gr
import numpy as np
import random
from diffusers import DiffusionPipeline
import torch
# Set up the device and model
device = "cuda" if torch.cuda.is_available() else "cpu"
model_repo_id = "stabilityai/sdxl-turbo"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
pipe = pipe.to(device)
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024
# Main inference function
def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator().manual_seed(seed)
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=generator,
    ).images[0]
    return image, seed
# Gradio interface (also exposed as an API)
iface = gr.Interface(
    fn=infer,
    inputs=[
        gr.Textbox(label="Prompt"),
        gr.Textbox(label="Negative Prompt", value=""),
        gr.Slider(0, MAX_SEED, step=1, value=0, label="Seed"),
        gr.Checkbox(label="Randomize Seed", value=True),
        gr.Slider(256, MAX_IMAGE_SIZE, step=32, value=1024, label="Width"),
        gr.Slider(256, MAX_IMAGE_SIZE, step=32, value=1024, label="Height"),
        gr.Slider(0.0, 10.0, step=0.1, value=0.0, label="Guidance Scale"),
        gr.Slider(1, 50, step=1, value=2, label="Inference Steps"),
    ],
    outputs=[
        gr.Image(label="Generated Image"),
        gr.Number(label="Used Seed"),
    ],
    title="Text-to-Image API with SDXL-Turbo",
    description="Enter a prompt to generate an image. Works with the API too!",
)
if __name__ == "__main__":
    iface.launch()
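
# Because gr.Interface exposes the function over Gradio's API, the app can also be
# queried programmatically. The sketch below is a hedged example, not part of the app:
# it assumes a hypothetical Space name ("your-username/sdxl-turbo-demo") and uses the
# gradio_client package. Run it from a separate script, not inside this file.
#
#   from gradio_client import Client
#
#   client = Client("your-username/sdxl-turbo-demo")  # hypothetical Space name
#   image_path, used_seed = client.predict(
#       "a watercolor painting of a fox",  # prompt
#       "",                                # negative prompt
#       0,                                 # seed (ignored when randomize_seed is True)
#       True,                              # randomize_seed
#       1024,                              # width
#       1024,                              # height
#       0.0,                               # guidance_scale (SDXL-Turbo is tuned for 0.0)
#       2,                                 # num_inference_steps
#       api_name="/predict",
#   )
#   print(image_path, used_seed)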