Spaces: Build error
Update app.py
app.py CHANGED
@@ -11,10 +11,10 @@ model_id = "CompVis/stable-diffusion-v1-4"
 device = "cuda"
 
 #If you are running this code locally, you need to either do a 'huggingface-cli login` or paste your User Access Token from here https://huggingface.co/settings/tokens into the use_auth_token field below.
-pipe = StableDiffusionPipeline.from_pretrained(model_id, use_auth_token=
+pipe = StableDiffusionPipeline.from_pretrained(model_id, use_auth_token="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", revision="fp16", torch_dtype=torch.float16)
 pipe = pipe.to(device)
 #When running locally, you won`t have access to this, so you can remove this part
-word_list_dataset = load_dataset("stabilityai/word-list", data_files="list.txt", use_auth_token=
+word_list_dataset = load_dataset("stabilityai/word-list", data_files="list.txt", use_auth_token="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")
 word_list = word_list_dataset["train"]['text']
 
 def infer(prompt, samples, steps, scale, seed):
@@ -26,15 +26,15 @@ def infer(prompt, samples, steps, scale, seed):
     generator = torch.Generator(device=device).manual_seed(seed)
 
     #If you are running locally with CPU, you can remove the `with autocast("cuda")`
-    with autocast("cuda"):
-        images_list = pipe(
-            [prompt] * samples,
-            num_inference_steps=steps,
-            guidance_scale=scale,
-            generator=generator,
-        )
+    #with autocast("cuda"):
+    #images_list = pipe(
+    #[prompt] * samples,
+    #num_inference_steps=steps,
+    #guidance_scale=scale,
+    #generator=generator,
+    #)
     images = []
-    safe_image = Image.open(r"unsafe.png")
+    #safe_image = Image.open(r"unsafe.png")
     for i, image in enumerate(images_list["sample"]):
         if(images_list["nsfw_content_detected"][i]):
             images.append(safe_image)
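Note that after this commit the loop still references images_list and safe_image, whose assignments are now commented out, so infer() would fail with a NameError when called, which is consistent with the Space's "Build error" status. The comment at line 13 suggests supplying the token via `huggingface-cli login` or the use_auth_token field rather than hardcoding it; a minimal sketch of that approach, assuming the token is exposed to the Space as an environment variable named HF_TOKEN (a hypothetical name, not something this commit defines), might look like:

    import os
    import torch
    from diffusers import StableDiffusionPipeline

    model_id = "CompVis/stable-diffusion-v1-4"
    device = "cuda"

    # Hypothetical: read the access token from a Space secret / environment
    # variable instead of committing it to app.py.
    token = os.environ.get("HF_TOKEN")  # assumed variable name

    pipe = StableDiffusionPipeline.from_pretrained(
        model_id,
        use_auth_token=token,
        revision="fp16",
        torch_dtype=torch.float16,
    )
    pipe = pipe.to(device)

This keeps the committed file free of credentials while behaving the same as the hardcoded version at runtime.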