import torch
import models
import utils
from models import pipelines, sam
from utils import parse, guidance, attn, latents, vis
from shared import (
    model_dict,
    sam_model_dict,
    DEFAULT_SO_NEGATIVE_PROMPT,
    DEFAULT_OVERALL_NEGATIVE_PROMPT,
)

verbose = False

vae, tokenizer, text_encoder, unet, dtype = (
    model_dict.vae,
    model_dict.tokenizer,
    model_dict.text_encoder,
    model_dict.unet,
    model_dict.dtype,
)

model_dict.update(sam_model_dict)
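# model_dict now also carries the SAM components, which sam.sam_refine_box uses below.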


# Hyperparams
height = 512  # default height of Stable Diffusion
width = 512  # default width of Stable Diffusion
H, W = height // 8, width // 8  # size of the latent
guidance_scale = 7.5  # Scale for classifier-free guidance

# Batch sizes other than 1 are not supported
overall_batch_size = 1

# semantic guidance kwargs (single object)
guidance_attn_keys = pipelines.DEFAULT_GUIDANCE_ATTN_KEYS

# discourage masks with confidence below this threshold
discourage_mask_below_confidence = 0.85

# discourage masks whose IoU (with the coarse binarized attention mask) is below this threshold
discourage_mask_below_coarse_iou = 0.25

# This controls the foreground variations
fg_blending_ratio = 0.1

run_ind = None


def generate_single_object_with_box(
    prompt,
    box,
    phrase,
    word,
    input_latents,
    input_embeddings,
    semantic_guidance_kwargs,
    obj_attn_key,
    saved_cross_attn_keys,
    sam_refine_kwargs,
    num_inference_steps,
    gligen_scheduled_sampling_beta=0.3,
    verbose=False,
    visualize=False,
    **kwargs,
):
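    """Generate a single object inside `box` and refine its mask with SAM.

    The object is generated with GLIGEN conditioning and semantic guidance
    restricted to the given box/phrase, and the generated image is passed
    through `sam.sam_refine_box` to obtain a refined object mask.

    Returns:
        latents_all: latents saved at every denoising step.
        mask_selected_tensor: the SAM-refined object mask as a tensor.
        saved_attns: saved cross-attention maps for the requested keys.
        single_object_pil_image_box_ann: the generated image annotated with the box.
    """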
    bboxes, phrases, words = [box], [phrase], [word]

    if verbose:
        print(f"Getting token map (prompt: {prompt})")

    object_positions, word_token_indices = guidance.get_phrase_indices(
        tokenizer=tokenizer,
        prompt=prompt,
        phrases=phrases,
        words=words,
        return_word_token_indices=True,
        # Since the single-object prompt is composed from the background prompt and the object name, the phrase is always found, so no suffix is needed
        add_suffix_if_not_found=False,
        verbose=verbose,
    )
    # phrases only has one item, so we select the first item in word_token_indices
    word_token_index = word_token_indices[0]

    if verbose:
        print("word_token_index:", word_token_index)

    # `offload_guidance_cross_attn_to_cpu` will greatly slow down generation
    (
        latents,
        single_object_images,
        saved_attns,
        single_object_pil_images_box_ann,
        latents_all,
    ) = pipelines.generate_gligen(
        model_dict,
        input_latents,
        input_embeddings,
        num_inference_steps,
        bboxes,
        phrases,
        gligen_scheduled_sampling_beta=gligen_scheduled_sampling_beta,
        guidance_scale=guidance_scale,
        return_saved_cross_attn=True,
        semantic_guidance=True,
        semantic_guidance_bboxes=bboxes,
        semantic_guidance_object_positions=object_positions,
        semantic_guidance_kwargs=semantic_guidance_kwargs,
        saved_cross_attn_keys=[obj_attn_key, *saved_cross_attn_keys],
        return_cond_ca_only=True,
        return_token_ca_only=word_token_index,
        offload_cross_attn_to_cpu=False,
        return_box_vis=True,
        save_all_latents=True,
        dynamic_num_inference_steps=True,
        **kwargs,
    )
    # `saved_cross_attn_keys` kwargs may have duplicates

    utils.free_memory()

    single_object_pil_image_box_ann = single_object_pil_images_box_ann[0]

    if visualize:
        print("Single object image")
        vis.display(single_object_pil_image_box_ann)

    mask_selected, conf_score_selected = sam.sam_refine_box(
        sam_input_image=single_object_images[0],
        box=box,
        model_dict=model_dict,
        verbose=verbose,
        **sam_refine_kwargs,
    )

    mask_selected_tensor = torch.tensor(mask_selected)

    if verbose:
        vis.visualize(mask_selected, "Mask (selected) after resize")
        # This is only for visualizations
        masked_latents = latents_all * mask_selected_tensor[None, None, None, ...]
        vis.visualize_masked_latents(
            latents_all, masked_latents, timestep_T=False, timestep_0=True
        )

    return (
        latents_all,
        mask_selected_tensor,
        saved_attns,
        single_object_pil_image_box_ann,
    )


def get_masked_latents_all_list(
    so_prompt_phrase_word_box_list,
    input_latents_list,
    so_input_embeddings,
    verbose=False,
    **kwargs,
):
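    """Run single-object generation for every (prompt, phrase, word, box) entry.

    The stacked single-object conditional embeddings are split per object and
    paired with the shared unconditional embedding before each call to
    `generate_single_object_with_box`. The per-object latents, masks, saved
    cross-attentions, and annotated images are returned as parallel lists.
    """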
    latents_all_list, mask_tensor_list, saved_attns_list, so_img_list = [], [], [], []

    if not so_prompt_phrase_word_box_list:
        return latents_all_list, mask_tensor_list, saved_attns_list, so_img_list

    so_uncond_embeddings, so_cond_embeddings = so_input_embeddings

    for idx, ((prompt, phrase, word, box), input_latents) in enumerate(
        zip(so_prompt_phrase_word_box_list, input_latents_list)
    ):
        so_current_cond_embeddings = so_cond_embeddings[idx : idx + 1]
        so_current_text_embeddings = torch.cat(
            [so_uncond_embeddings, so_current_cond_embeddings], dim=0
        )
        so_current_input_embeddings = (
            so_current_text_embeddings,
            so_uncond_embeddings,
            so_current_cond_embeddings,
        )

        latents_all, mask_tensor, saved_attns, so_img = generate_single_object_with_box(
            prompt,
            box,
            phrase,
            word,
            input_latents,
            input_embeddings=so_current_input_embeddings,
            verbose=verbose,
            **kwargs,
        )
        latents_all_list.append(latents_all)
        mask_tensor_list.append(mask_tensor)
        saved_attns_list.append(saved_attns)
        so_img_list.append(so_img)

    return latents_all_list, mask_tensor_list, saved_attns_list, so_img_list


# Note: the supervision, especially the box coordinates, needs to correspond between the single-object and overall generations.


def run(
    spec,
    bg_seed=1,
    overall_prompt_override="",
    fg_seed_start=20,
    frozen_step_ratio=0.4,
    num_inference_steps=20,
    loss_scale=20,
    loss_threshold=5.0,
    max_iter=[2] * 5 + [1] * 10,
    max_index_step=15,
    overall_loss_scale=20,
    overall_loss_threshold=5.0,
    overall_max_iter=[4] * 5 + [3] * 5 + [2] * 5 + [2] * 5 + [1] * 10,
    overall_max_index_step=30,
    so_gligen_scheduled_sampling_beta=0.4,
    overall_gligen_scheduled_sampling_beta=0.4,
    ref_ca_loss_weight=0.5,
    so_center_box=False,
    fg_blending_ratio=0.1,
    scheduler_key="dpm_scheduler",
    so_negative_prompt=DEFAULT_SO_NEGATIVE_PROMPT,
    overall_negative_prompt=DEFAULT_OVERALL_NEGATIVE_PROMPT,
    so_horizontal_center_only=True,
    align_with_overall_bboxes=False,
    horizontal_shift_only=True,
    use_fast_schedule=True,
    # Transfer the cross-attention from single object generation (with ref_ca_saved_attns)
    # Use reference cross attention to guide the cross attention in the overall generation
    use_ref_ca=True,
    use_autocast=False,
):
    """
    so_center_box: using centered box in single object generation
    so_horizontal_center_only: move to the center horizontally only

    align_with_overall_bboxes: Align the center of the mask, latents, and cross-attention with the center of the box in overall bboxes
    horizontal_shift_only: only shift horizontally for the alignment of mask, latents, and cross-attention
    """

    frozen_step_ratio = min(max(frozen_step_ratio, 0.0), 1.0)
    frozen_steps = int(num_inference_steps * frozen_step_ratio)

    print(
        "generation:",
        spec,
        bg_seed,
        fg_seed_start,
        frozen_step_ratio,
        so_gligen_scheduled_sampling_beta,
        overall_gligen_scheduled_sampling_beta,
        overall_max_index_step,
    )

    (
        so_prompt_phrase_word_box_list,
        overall_prompt,
        overall_phrases_words_bboxes,
    ) = parse.convert_spec(spec, height, width, verbose=verbose)

    if overall_prompt_override and overall_prompt_override.strip():
        overall_prompt = overall_prompt_override.strip()

    overall_phrases, overall_words, overall_bboxes = (
        [item[0] for item in overall_phrases_words_bboxes],
        [item[1] for item in overall_phrases_words_bboxes],
        [item[2] for item in overall_phrases_words_bboxes],
    )

    # The single-object (so) box is centered, but the overall boxes are not (they must be placed at their intended positions).
    if so_center_box:
        so_prompt_phrase_word_box_list = [
            (
                prompt,
                phrase,
                word,
                utils.get_centered_box(
                    bbox, horizontal_center_only=so_horizontal_center_only
                ),
            )
            for prompt, phrase, word, bbox in so_prompt_phrase_word_box_list
        ]
        if verbose:
            print(
                f"centered so_prompt_phrase_word_box_list: {so_prompt_phrase_word_box_list}"
            )
    so_boxes = [item[-1] for item in so_prompt_phrase_word_box_list]

    # The negative prompt defaults come from the function parameters; extend them
    # with any extra negative prompt provided in the spec.
    if "extra_neg_prompt" in spec and spec["extra_neg_prompt"]:
        so_negative_prompt = spec["extra_neg_prompt"] + ", " + so_negative_prompt
        overall_negative_prompt = (
            spec["extra_neg_prompt"] + ", " + overall_negative_prompt
        )

    semantic_guidance_kwargs = dict(
        loss_scale=loss_scale,
        loss_threshold=loss_threshold,
        max_iter=max_iter,
        max_index_step=max_index_step,
        use_ratio_based_loss=False,
        guidance_attn_keys=guidance_attn_keys,
        verbose=True,
    )

    sam_refine_kwargs = dict(
        discourage_mask_below_confidence=discourage_mask_below_confidence,
        discourage_mask_below_coarse_iou=discourage_mask_below_coarse_iou,
        height=height,
        width=width,
        H=H,
        W=W,
    )

    if verbose:
        vis.visualize_bboxes(
            bboxes=[item[-1] for item in so_prompt_phrase_word_box_list], H=H, W=W
        )

    # Note that so and overall use different negative prompts

    with torch.autocast("cuda", enabled=use_autocast):
        so_prompts = [item[0] for item in so_prompt_phrase_word_box_list]
        if so_prompts:
            so_input_embeddings = models.encode_prompts(
                prompts=so_prompts,
                tokenizer=tokenizer,
                text_encoder=text_encoder,
                negative_prompt=so_negative_prompt,
                one_uncond_input_only=True,
            )
        else:
            so_input_embeddings = []

        input_latents_list, latents_bg = latents.get_input_latents_list(
            model_dict,
            bg_seed=bg_seed,
            fg_seed_start=fg_seed_start,
            so_boxes=so_boxes,
            fg_blending_ratio=fg_blending_ratio,
            height=height,
            width=width,
            verbose=False,
        )

        if use_fast_schedule:
            # Single-object results are consumed up to `frozen_steps` (frozen composed
            # latents) and, with `use_ref_ca`, up to `overall_max_index_step` (reference
            # cross-attention guidance); later single-object steps can be sped up.
            fast_after_steps = max(frozen_steps, overall_max_index_step) if use_ref_ca else frozen_steps
        else:
            fast_after_steps = None

        if use_ref_ca or frozen_steps > 0:
            (
                latents_all_list,
                mask_tensor_list,
                saved_attns_list,
                so_img_list,
            ) = get_masked_latents_all_list(
                so_prompt_phrase_word_box_list,
                input_latents_list,
                gligen_scheduled_sampling_beta=so_gligen_scheduled_sampling_beta,
                semantic_guidance_kwargs=semantic_guidance_kwargs,
                obj_attn_key=("down", 2, 1, 0),
                saved_cross_attn_keys=guidance_attn_keys if use_ref_ca else [],
                sam_refine_kwargs=sam_refine_kwargs,
                so_input_embeddings=so_input_embeddings,
                num_inference_steps=num_inference_steps,
                scheduler_key=scheduler_key,
                verbose=verbose,
                fast_after_steps=fast_after_steps,
                fast_rate=2,
            )
        else:
            # No per-box guidance
            (latents_all_list, mask_tensor_list, saved_attns_list, so_img_list) = [], [], [], []

        (
            composed_latents,
            foreground_indices,
            offset_list,
        ) = latents.compose_latents_with_alignment(
            model_dict,
            latents_all_list,
            mask_tensor_list,
            num_inference_steps,
            overall_batch_size,
            height,
            width,
            latents_bg=latents_bg,
            align_with_overall_bboxes=align_with_overall_bboxes,
            overall_bboxes=overall_bboxes,
            horizontal_shift_only=horizontal_shift_only,
            use_fast_schedule=use_fast_schedule,
            fast_after_steps=fast_after_steps,
        )
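        # Nonzero entries of `foreground_indices` mark latent positions covered by a
        # composited single-object latent; those positions are frozen for the first
        # `frozen_steps` of the overall generation below.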

        # NOTE: need to ensure overall embeddings are generated after the update of overall prompt
        (
            overall_object_positions,
            overall_word_token_indices,
            overall_prompt
        ) = guidance.get_phrase_indices(
            tokenizer=tokenizer,
            prompt=overall_prompt,
            phrases=overall_phrases,
            words=overall_words,
            verbose=verbose,
            return_word_token_indices=True,
            add_suffix_if_not_found=True
        )

        overall_input_embeddings = models.encode_prompts(
            prompts=[overall_prompt],
            tokenizer=tokenizer,
            negative_prompt=overall_negative_prompt,
            text_encoder=text_encoder,
        )

        if use_ref_ca:
            # ref_ca_saved_attns has the same hierarchy as bboxes
            ref_ca_saved_attns = []

            flattened_box_idx = 0
            for bboxes in overall_bboxes:
                # bboxes: correspond to a phrase
                ref_ca_current_phrase_saved_attns = []
                for bbox in bboxes:
                    # each individual bbox
                    saved_attns = saved_attns_list[flattened_box_idx]
                    if align_with_overall_bboxes:
                        offset = offset_list[flattened_box_idx]
                        saved_attns = attn.shift_saved_attns(
                            saved_attns,
                            offset,
                            guidance_attn_keys=guidance_attn_keys,
                            horizontal_shift_only=horizontal_shift_only,
                        )
                    ref_ca_current_phrase_saved_attns.append(saved_attns)
                    flattened_box_idx += 1
                ref_ca_saved_attns.append(ref_ca_current_phrase_saved_attns)

        overall_bboxes_flattened, overall_phrases_flattened = [], []
        for overall_bboxes_item, overall_phrase in zip(overall_bboxes, overall_phrases):
            for overall_bbox in overall_bboxes_item:
                overall_bboxes_flattened.append(overall_bbox)
                overall_phrases_flattened.append(overall_phrase)

        # These kwargs are currently not shared with the single-object ones.
        overall_semantic_guidance_kwargs = dict(
            loss_scale=overall_loss_scale,
            loss_threshold=overall_loss_threshold,
            max_iter=overall_max_iter,
            max_index_step=overall_max_index_step,
            # ref_ca comes from the attention map of the word token of the phrase in single object generation, so we apply it only to the word token of the phrase in overall generation.
            ref_ca_word_token_only=True,
            # If a word is not provided, we use the last token.
            ref_ca_last_token_only=True,
            ref_ca_saved_attns=ref_ca_saved_attns if use_ref_ca else None,
            word_token_indices=overall_word_token_indices,
            guidance_attn_keys=guidance_attn_keys,
            ref_ca_loss_weight=ref_ca_loss_weight,
            use_ratio_based_loss=False,
            verbose=True,
        )

        # Generate with composed latents

        # Foreground should be frozen
        frozen_mask = foreground_indices != 0

        _, images = pipelines.generate_gligen(
            model_dict,
            composed_latents,
            overall_input_embeddings,
            num_inference_steps,
            overall_bboxes_flattened,
            overall_phrases_flattened,
            guidance_scale=guidance_scale,
            gligen_scheduled_sampling_beta=overall_gligen_scheduled_sampling_beta,
            semantic_guidance=True,
            semantic_guidance_bboxes=overall_bboxes,
            semantic_guidance_object_positions=overall_object_positions,
            semantic_guidance_kwargs=overall_semantic_guidance_kwargs,
            frozen_steps=frozen_steps,
            frozen_mask=frozen_mask,
            scheduler_key=scheduler_key,
        )

        print(
            f"Generation with spatial guidance from input latents and first {frozen_steps} steps frozen (directly from the composed latents input)"
        )
        print("Generation from composed latents (with semantic guidance)")

    utils.free_memory()

    return images[0], so_img_list
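

# Minimal usage sketch (illustrative only; the structure of `spec` is defined by
# `parse.convert_spec` and the upstream layout stage, not here):
#
#     image, so_img_list = run(spec, bg_seed=1, frozen_step_ratio=0.4)
#     # `image` is the final composed image; `so_img_list` contains the
#     # box-annotated single-object generations.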