# Prediction interface for Cog ⚙️
# https://github.com/replicate/cog/blob/main/docs/python.md
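#
# Cog predictor for the NebulRedmond SDXL checkpoint: setup() loads the
# pipeline and safety checker once; predict() runs text-to-image inference.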
from cog import BasePredictor, Input, Path
import os
import time
import torch
import numpy as np
from typing import List
from transformers import CLIPImageProcessor
from diffusers import (
StableDiffusionXLPipeline,
DPMSolverMultistepScheduler,
DDIMScheduler,
HeunDiscreteScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
PNDMScheduler
)
from diffusers.pipelines.stable_diffusion.safety_checker import (
StableDiffusionSafetyChecker,
)

class KarrasDPM:
    """DPMSolverMultistep with Karras sigmas, exposed via a scheduler-style from_config."""

    @staticmethod
    def from_config(config):
        return DPMSolverMultistepScheduler.from_config(config, use_karras_sigmas=True)
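
# User-facing scheduler names mapped to the classes (or shims) whose
# from_config(...) builds a scheduler from the pipeline's existing config.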
SCHEDULERS = {
"DDIM": DDIMScheduler,
"DPMSolverMultistep": DPMSolverMultistepScheduler,
"HeunDiscrete": HeunDiscreteScheduler,
"KarrasDPM": KarrasDPM,
"K_EULER_ANCESTRAL": EulerAncestralDiscreteScheduler,
"K_EULER": EulerDiscreteScheduler,
"PNDM": PNDMScheduler,
}
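
# Local caches for model weights and the safety checker / feature extractor.
# These directories are expected to be populated ahead of time (e.g. by a
# separate download step) so setup() can load everything from disk.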
MODEL_NAME = "artificialguybr/NebulRedmond"
MODEL_CACHE = "model-cache"
SAFETY_CACHE = "safety-cache"
FEATURE_EXTRACTOR = "feature-extractor"

class Predictor(BasePredictor):
def setup(self) -> None:
"""Load the model into memory to make running multiple predictions efficient"""
start = time.time()
print("Loading safety checker...")
self.safety_checker = StableDiffusionSafetyChecker.from_pretrained(
SAFETY_CACHE, torch_dtype=torch.float16
).to("cuda")
self.feature_extractor = CLIPImageProcessor.from_pretrained(FEATURE_EXTRACTOR)
print("Loading txt2img model")
self.pipe = StableDiffusionXLPipeline.from_pretrained(
MODEL_NAME,
torch_dtype=torch.float16,
use_safetensors=True,
cache_dir=MODEL_CACHE
).to('cuda')
print("setup took: ", time.time() - start)
def run_safety_checker(self, image):
safety_checker_input = self.feature_extractor(image, return_tensors="pt").to(
"cuda"
)
np_image = [np.array(val) for val in image]
image, has_nsfw_concept = self.safety_checker(
images=np_image,
clip_input=safety_checker_input.pixel_values.to(torch.float16),
)
        return image, has_nsfw_concept

    @torch.inference_mode()
def predict(
self,
prompt: str = Input(
description="Input prompt",
default="An astronaut riding a rainbow unicorn",
),
negative_prompt: str = Input(
description="Input Negative Prompt",
default="",
),
width: int = Input(
description="Width of output image",
default=1024,
),
height: int = Input(
description="Height of output image",
default=1024,
),
num_outputs: int = Input(
description="Number of images to output.",
ge=1,
le=4,
default=1,
),
scheduler: str = Input(
description="scheduler",
choices=SCHEDULERS.keys(),
default="K_EULER",
),
num_inference_steps: int = Input(
description="Number of denoising steps", ge=1, le=100, default=40
),
guidance_scale: float = Input(
description="Scale for classifier-free guidance", ge=1, le=20, default=7.5
),
seed: int = Input(
description="Random seed. Leave blank to randomize the seed", default=None
),
apply_watermark: bool = Input(
description="Applies a watermark to enable determining if an image is generated in downstream applications. If you have other provisions for generating or deploying images safely, you can use this to disable watermarking.",
default=True,
),
disable_safety_checker: bool = Input(
description="Disable safety checker for generated images. This feature is only available through the API. See [https://replicate.com/docs/how-does-replicate-work#safety](https://replicate.com/docs/how-does-replicate-work#safety)",
default=False
)
) -> List[Path]:
"""Run a single prediction on the model."""
        if seed is None:
            # No seed supplied: draw a random 24-bit seed from the OS entropy pool
            seed = int.from_bytes(os.urandom(3), "big")
        print(f"Using seed: {seed}")
generator = torch.Generator("cuda").manual_seed(seed)
pipe = self.pipe
pipe.scheduler = SCHEDULERS[scheduler].from_config(pipe.scheduler.config)
        # Temporarily disable the watermark for this prediction (restored after inference)
if not apply_watermark:
watermark_cache = pipe.watermark
pipe.watermark = None
        sdxl_kwargs = {"width": width, "height": height}
common_args = {
"prompt": [prompt] * num_outputs,
"negative_prompt": [negative_prompt] * num_outputs,
"guidance_scale": guidance_scale,
"generator": generator,
"num_inference_steps": num_inference_steps,
}
output = pipe(**common_args, **sdxl_kwargs)
if not apply_watermark:
pipe.watermark = watermark_cache
if not disable_safety_checker:
_, has_nsfw_content = self.run_safety_checker(output.images)
output_paths = []
for i, image in enumerate(output.images):
if not disable_safety_checker:
if has_nsfw_content[i]:
print(f"NSFW content detected in image {i}")
continue
output_path = f"/tmp/out-{i}.png"
image.save(output_path)
output_paths.append(Path(output_path))
        if len(output_paths) == 0:
            raise Exception(
                "NSFW content detected in all outputs. Try running it again, or try a different prompt."
            )
        return output_paths
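
# A minimal local smoke test, assuming the Cog CLI is installed and the weight
# caches above have been populated (see https://github.com/replicate/cog):
#
#   cog predict -i prompt="An astronaut riding a rainbow unicorn" \
#       -i scheduler=KarrasDPM -i num_inference_steps=30 -i seed=42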