Upload 6 files
- cldm_v21.yaml +85 -0
- environment.yaml +35 -0
- gradio_seg2image.py +97 -0
- prompt.json +0 -0
- tool_add_control_sd21.py +50 -0
- tutorial_train_sd21.py +35 -0
cldm_v21.yaml
ADDED
@@ -0,0 +1,85 @@
model:
  target: cldm.cldm.ControlLDM
  params:
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    control_key: "hint"
    image_size: 64
    channels: 4
    cond_stage_trainable: false
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.18215
    use_ema: False
    only_mid_control: False

    control_stage_config:
      target: cldm.cldm.ControlNet
      params:
        use_checkpoint: True
        image_size: 32 # unused
        in_channels: 4
        hint_channels: 3
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_head_channels: 64 # need to fix for flash-attn
        use_spatial_transformer: True
        use_linear_in_transformer: True
        transformer_depth: 1
        context_dim: 1024
        legacy: False

    unet_config:
      target: cldm.cldm.ControlledUnetModel
      params:
        use_checkpoint: True
        image_size: 32 # unused
        in_channels: 4
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_head_channels: 64 # need to fix for flash-attn
        use_spatial_transformer: True
        use_linear_in_transformer: True
        transformer_depth: 1
        context_dim: 1024
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          #attn_type: "vanilla-xformers"
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
      params:
        freeze: True
        layer: "penultimate"
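Note on the batch keys: first_stage_key ("jpg"), cond_stage_key ("txt"), and control_key ("hint") name the entries of the training batch dictionary, so any dataset used with this config must return the target image, the caption, and the control hint under exactly those keys (see the MyDataset sketch after tutorial_train_sd21.py below).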
environment.yaml
ADDED
@@ -0,0 +1,35 @@
name: control
channels:
  - pytorch
  - defaults
dependencies:
  - python=3.8.5
  - pip=20.3
  - cudatoolkit=11.3
  - pytorch=1.12.1
  - torchvision=0.13.1
  - numpy=1.23.1
  - pip:
    - gradio==3.16.2
    - albumentations==1.3.0
    - opencv-contrib-python==4.3.0.36
    - imageio==2.9.0
    - imageio-ffmpeg==0.4.2
    - pytorch-lightning==1.5.0
    - omegaconf==2.1.1
    - test-tube>=0.7.5
    - streamlit==1.12.1
    - einops==0.3.0
    - transformers==4.19.2
    - webdataset==0.2.5
    - kornia==0.6
    - open_clip_torch==2.0.2
    - invisible-watermark>=0.1.5
    - streamlit-drawable-canvas==0.8.0
    - torchmetrics==0.6.0
    - timm==0.6.12
    - addict==2.4.0
    - yapf==0.32.0
    - prettytable==3.6.0
    - safetensors==0.2.7
    - basicsr==1.4.2
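Assuming a standard conda setup, this environment is created with "conda env create -f environment.yaml" and activated with "conda activate control".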
gradio_seg2image.py
ADDED
@@ -0,0 +1,97 @@
from share import *
import config

import cv2
import einops
import gradio as gr
import numpy as np
import torch
import random

from pytorch_lightning import seed_everything
from annotator.util import resize_image, HWC3
from annotator.uniformer import UniformerDetector
from cldm.model import create_model, load_state_dict
from cldm.ddim_hacked import DDIMSampler


apply_uniformer = UniformerDetector()

model = create_model('./models/cldm_v21.yaml').cpu()
model.load_state_dict(load_state_dict('./lightning_logs/version_1/checkpoints/epoch=123-step=1476467.ckpt', location='cuda'))
model = model.cuda()
ddim_sampler = DDIMSampler(model)


def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, guess_mode, strength, scale, seed, eta):
    with torch.no_grad():
        input_image = HWC3(input_image)
        detected_map = apply_uniformer(resize_image(input_image, detect_resolution))
        img = resize_image(input_image, image_resolution)
        H, W, C = img.shape

        # Resize the segmentation map (not the input image, which would discard
        # the detection result just computed above) to the output resolution.
        detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_NEAREST)

        control = torch.from_numpy(detected_map.copy()).float().cuda() / 255.0
        control = torch.stack([control for _ in range(num_samples)], dim=0)
        control = einops.rearrange(control, 'b h w c -> b c h w').clone()

        if seed == -1:
            seed = random.randint(0, 65535)
        seed_everything(seed)

        if config.save_memory:
            model.low_vram_shift(is_diffusing=False)

        cond = {"c_concat": [control], "c_crossattn": [model.get_learned_conditioning([prompt + ', ' + a_prompt] * num_samples)]}
        un_cond = {"c_concat": None if guess_mode else [control], "c_crossattn": [model.get_learned_conditioning([n_prompt] * num_samples)]}
        shape = (4, H // 8, W // 8)

        if config.save_memory:
            model.low_vram_shift(is_diffusing=True)

        model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else ([strength] * 13)  # Magic number. IDK why. Perhaps because 0.825**12 < 0.01 but 0.826**12 > 0.01
        samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
                                                     shape, cond, verbose=False, eta=eta,
                                                     unconditional_guidance_scale=scale,
                                                     unconditional_conditioning=un_cond)

        if config.save_memory:
            model.low_vram_shift(is_diffusing=False)

        x_samples = model.decode_first_stage(samples)
        x_samples = (einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().clip(0, 255).astype(np.uint8)

        results = [x_samples[i] for i in range(num_samples)]
    return [detected_map] + results


block = gr.Blocks().queue()
with block:
    with gr.Row():
        gr.Markdown("## Control Stable Diffusion with Segmentation Maps")
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(source='upload', type="numpy")
            prompt = gr.Textbox(label="Prompt")
            run_button = gr.Button(label="Run")
            with gr.Accordion("Advanced options", open=False):
                num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=1, step=1)
                image_resolution = gr.Slider(label="Image Resolution", minimum=256, maximum=768, value=512, step=64)
                strength = gr.Slider(label="Control Strength", minimum=0.0, maximum=2.0, value=1.0, step=0.01)
                guess_mode = gr.Checkbox(label='Guess Mode', value=False)
                detect_resolution = gr.Slider(label="Segmentation Resolution", minimum=64, maximum=1024, value=512, step=1)
                ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1)
                scale = gr.Slider(label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
                seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, randomize=True)
                eta = gr.Number(label="eta (DDIM)", value=0.0)
                a_prompt = gr.Textbox(label="Added Prompt", value='best quality, extremely detailed')
                n_prompt = gr.Textbox(label="Negative Prompt",
                                      value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality')
        with gr.Column():
            result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2, height='auto')
    ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, guess_mode, strength, scale, seed, eta]
    run_button.click(fn=process, inputs=ips, outputs=[result_gallery])


block.launch(server_name='0.0.0.0')
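The script imports a config module and branches on config.save_memory, but that module is not part of this upload. A minimal sketch of what it presumably contains (an assumption, matching only the flag used above):

# config.py -- assumed contents; only save_memory is referenced by the script.
# Set to True to enable the low_vram_shift() calls that swap model parts
# between CPU and GPU around the sampling loop.
save_memory = False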
prompt.json
ADDED
The diff for this file is too large to render.
See raw diff
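If prompt.json follows the format of the ControlNet fill50k tutorial (an assumption; the file itself is not rendered here), each line is a self-contained JSON object pairing a control image, a target image, and a caption, e.g.:

{"source": "source/0.png", "target": "target/0.png", "prompt": "pale golden rod circle with old lace background"}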
tool_add_control_sd21.py
ADDED
@@ -0,0 +1,50 @@
import sys
import os

assert len(sys.argv) == 3, 'Args are wrong.'

input_path = sys.argv[1]
output_path = sys.argv[2]

assert os.path.exists(input_path), 'Input model does not exist.'
assert not os.path.exists(output_path), 'Output filename already exists.'
assert os.path.exists(os.path.dirname(output_path)), 'Output path is not valid.'

import torch
from share import *
from cldm.model import create_model


def get_node_name(name, parent_name):
    if len(name) <= len(parent_name):
        return False, ''
    p = name[:len(parent_name)]
    if p != parent_name:
        return False, ''
    return True, name[len(parent_name):]


model = create_model(config_path='./models/cldm_v21.yaml')

pretrained_weights = torch.load(input_path)
if 'state_dict' in pretrained_weights:
    pretrained_weights = pretrained_weights['state_dict']

scratch_dict = model.state_dict()

target_dict = {}
for k in scratch_dict.keys():
    # Initialize each 'control_*' branch from the matching
    # 'model.diffusion_*' weights of the pretrained SD 2.1 U-Net;
    # copy all other weights over directly.
    is_control, name = get_node_name(k, 'control_')
    if is_control:
        copy_k = 'model.diffusion_' + name
    else:
        copy_k = k
    if copy_k in pretrained_weights:
        target_dict[k] = pretrained_weights[copy_k].clone()
    else:
        target_dict[k] = scratch_dict[k].clone()
        print(f'These weights are newly added: {k}')

model.load_state_dict(target_dict, strict=True)
torch.save(model.state_dict(), output_path)
print('Done.')
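A typical invocation (the SD 2.1 checkpoint name is an example, not part of this upload) is "python tool_add_control_sd21.py ./models/v2-1_512-ema-pruned.ckpt ./models/control_sd21_ini.ckpt"; the resulting control_sd21_ini.ckpt is exactly the resume_path that tutorial_train_sd21.py below expects.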
tutorial_train_sd21.py
ADDED
@@ -0,0 +1,35 @@
from share import *

import pytorch_lightning as pl
from torch.utils.data import DataLoader
from tutorial_dataset import MyDataset
from cldm.logger import ImageLogger
from cldm.model import create_model, load_state_dict


# Configs
resume_path = './models/control_sd21_ini.ckpt'
batch_size = 4
logger_freq = 300
learning_rate = 1e-5
sd_locked = True
only_mid_control = False


# First use cpu to load models. Pytorch Lightning will automatically move it to GPUs.
model = create_model('./models/cldm_v21.yaml').cpu()
model.load_state_dict(load_state_dict(resume_path, location='cpu'))
model.learning_rate = learning_rate
model.sd_locked = sd_locked
model.only_mid_control = only_mid_control


# Misc
dataset = MyDataset()
dataloader = DataLoader(dataset, num_workers=0, batch_size=batch_size, shuffle=True)
logger = ImageLogger(batch_frequency=logger_freq)
trainer = pl.Trainer(gpus=1, precision=32, callbacks=[logger])


# Train!
trainer.fit(model, dataloader)
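tutorial_dataset.MyDataset is imported but not included in this upload. A minimal sketch of what it needs to provide, modeled on the ControlNet fill50k tutorial (paths and normalization here are assumptions), returning the "jpg"/"txt"/"hint" keys that cldm_v21.yaml declares:

# tutorial_dataset.py -- assumed sketch, not part of this upload.
import json
import cv2
import numpy as np
from torch.utils.data import Dataset

class MyDataset(Dataset):
    def __init__(self):
        self.data = []
        # One JSON object per line, as in prompt.json above.
        with open('./training/fill50k/prompt.json', 'rt') as f:
            for line in f:
                self.data.append(json.loads(line))

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        item = self.data[idx]
        source = cv2.imread('./training/fill50k/' + item['source'])
        target = cv2.imread('./training/fill50k/' + item['target'])
        # OpenCV loads BGR; convert to RGB.
        source = cv2.cvtColor(source, cv2.COLOR_BGR2RGB)
        target = cv2.cvtColor(target, cv2.COLOR_BGR2RGB)
        # Hint ("hint") normalized to [0, 1]; target ("jpg") to [-1, 1].
        source = source.astype(np.float32) / 255.0
        target = (target.astype(np.float32) / 127.5) - 1.0
        return dict(jpg=target, txt=item['prompt'], hint=source)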