#!/usr/bin/env python3
# !pip install diffusers transformers accelerate
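# Debug script: run inpainting with a segment-anything ControlNet on top of
# Stable Diffusion v1.5 and upload the result to a Hub dataset repo.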
import os
from pathlib import Path

import numpy as np
import PIL.Image
import torch
from diffusers import ControlNetModel, DDIMScheduler, StableDiffusionControlNetInpaintPipeline
from diffusers.utils import load_image
from huggingface_hub import HfApi

api = HfApi()
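
# example image and mask from the diffusers test assets, resized to the
# 512x512 resolution Stable Diffusion v1.5 was trained at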
init_image = load_image(
    "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy.png"
)
init_image = init_image.resize((512, 512))

generator = torch.Generator(device="cpu").manual_seed(33)

mask_image = load_image(
    "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy_mask.png"
)
mask_image = mask_image.resize((512, 512))


def make_inpaint_condition(image, image_mask):
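    """Build the conditioning image for an inpaint ControlNet.

    Pixel values are scaled to [0, 1] and masked pixels are set to -1.0 so the
    ControlNet can tell which regions to fill; returns a (1, 3, H, W) tensor.
    """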
    image = np.array(image.convert("RGB")).astype(np.float32) / 255.0
    image_mask = np.array(image_mask.convert("L")).astype(np.float32) / 255.0

    assert image.shape[0:2] == image_mask.shape[0:2], "image and image_mask must have the same image size"
    image[image_mask > 0.5] = -1.0  # set as masked pixel
    image = np.expand_dims(image, 0).transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return image


control_image = make_inpaint_condition(init_image, mask_image)

# NOTE: override the downloaded assets (and the inpaint condition computed
# above) with local test files; seg.png is a segmentation map, the control
# input expected by the segment-anything ControlNet loaded below
mask_image = PIL.Image.open("/home/patrick/images/mask.png").convert("RGB")
init_image = PIL.Image.open("/home/patrick/images/init.png").convert("RGB")
control_image = PIL.Image.open("/home/patrick/images/seg.png").convert("RGB")

controlnet = ControlNetModel.from_pretrained(
    "mfidabel/controlnet-segment-anything", torch_dtype=torch.float16
)
pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
)

# use DDIM for fast sampling at low step counts
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)

# reduce GPU memory usage by offloading idle submodules to the CPU
pipe.enable_model_cpu_offload()

# generate, save, and upload one image per step count under test
for t in [2]:
    image = pipe(
        "a bench in front of a beautiful lake and white mountain",
        num_inference_steps=t,
        generator=generator,
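        # eta=1.0 makes DDIM sampling stochastic (DDPM-like noise at each step)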
        eta=1.0,
        image=init_image,
        mask_image=mask_image,
        control_image=control_image,
    ).images[0]

    file_name = f"aa_{t}"
    path = os.path.join(Path.home(), "images", f"{file_name}.png")
    image.save(path)

    # push the result to a Hub dataset repo so it can be viewed in the browser
    api.upload_file(
        path_or_fileobj=path,
        path_in_repo=os.path.basename(path),
        repo_id="patrickvonplaten/images",
        repo_type="dataset",
    )
    print(f"https://huggingface.co/datasets/patrickvonplaten/images/blob/main/{file_name}.png")