import base64
from io import BytesIO

import torch
from diffusers import StableDiffusionXLPipeline


class InferenceHandler:
    def __init__(self):
        # Prefer the GPU when available; SDXL inference is impractically slow on CPU.
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        model_name = "colt12/maxcushion"
        # If your model is private, pass a Hugging Face access token.
        self.pipe = StableDiffusionXLPipeline.from_pretrained(
            model_name,
            torch_dtype=torch.float16,
            use_safetensors=True,
            # Uncomment the line below and replace with your token if needed
            # token="your_huggingface_token",
        ).to(self.device)

    def __call__(self, inputs):
        prompt = inputs.get("prompt", "")
        if not prompt:
            raise ValueError("A prompt must be provided")
        negative_prompt = inputs.get("negative_prompt", "")
        # Run the diffusion pipeline; 30 steps and guidance 7.5 are common SDXL defaults.
        image = self.pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            num_inference_steps=30,
            guidance_scale=7.5,
        ).images[0]
        # Serialize the PIL image to a base64-encoded PNG so the response is JSON-safe.
        buffered = BytesIO()
        image.save(buffered, format="PNG")
        image_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
        return {"image_base64": image_base64}


handler = InferenceHandler()
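

# Minimal local smoke test: a sketch, assuming a CUDA GPU with enough VRAM
# and access to the "colt12/maxcushion" weights. The prompt below is
# illustrative only; substitute your own. It decodes the base64 payload
# returned by the handler back into a PNG file on disk.
if __name__ == "__main__":
    result = handler({"prompt": "a product photo of a cushion, studio lighting"})
    with open("output.png", "wb") as f:
        f.write(base64.b64decode(result["image_base64"]))
    print("Wrote output.png")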