{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"editable": true,
"slideshow": {
"slide_type": ""
},
"tags": []
},
"source": [
"## StoryDiffusion: Consistent Self-Attention for Long-Range Image and Video Generation \n",
"[]()\n",
"[[Paper]()]   [[Project Page]()]   <br>"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Import Packages"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/home/tjut_lixiang/anaconda3/envs/storydiffusion/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
" from .autonotebook import tqdm as notebook_tqdm\n"
]
}
],
"source": [
"# %load_ext autoreload\n",
"# %autoreload 2\n",
"import gradio as gr\n",
"import numpy as np\n",
"import torch\n",
"import requests\n",
"import random\n",
"import os\n",
"import sys\n",
"import pickle\n",
"from PIL import Imagex\n",
"from tqdm.auto import tqdm\n",
"from datetime import datetime\n",
"from utils.gradio_utils import is_torch2_available\n",
"if is_torch2_available():\n",
" from utils.gradio_utils import \\\n",
" AttnProcessor2_0 as AttnProcessor\n",
"else:\n",
" from utils.gradio_utils import AttnProcessor\n",
"\n",
"import diffusers\n",
"from diffusers import StableDiffusionXLPipeline\n",
"from diffusers import DDIMScheduler\n",
"import torch.nn.functional as F\n",
"from utils.gradio_utils import cal_attn_mask_xl\n",
"import copy\n",
"import os\n",
"from diffusers.utils import load_image\n",
"from utils.utils import get_comic\n",
"from utils.style_template import styles"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Set Config "
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"## Global\n",
"STYLE_NAMES = list(styles.keys())\n",
"DEFAULT_STYLE_NAME = \"(No style)\"\n",
"MAX_SEED = np.iinfo(np.int32).max\n",
"global models_dict\n",
"use_va = False\n",
"models_dict = {\n",
" \"Juggernaut\":\"RunDiffusion/Juggernaut-XL-v8\",\n",
" \"RealVision\":\"SG161222/RealVisXL_V4.0\" ,\n",
" \"SDXL\":\"stabilityai/stable-diffusion-xl-base-1.0\" ,\n",
" \"Unstable\": \"stablediffusionapi/sdxl-unstable-diffusers-y\"\n",
"}"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"torch.cuda.is_available()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"def setup_seed(seed):\n",
" torch.manual_seed(seed)\n",
" torch.cuda.manual_seed_all(seed)\n",
" np.random.seed(seed)\n",
" random.seed(seed)\n",
" torch.backends.cudnn.deterministic = True\n",
"\n",
" \n",
"#################################################\n",
"########Consistent Self-Attention################\n",
"#################################################\n",
"class SpatialAttnProcessor2_0(torch.nn.Module):\n",
" r\"\"\"\n",
" Attention processor for IP-Adapater for PyTorch 2.0.\n",
" Args:\n",
" hidden_size (`int`):\n",
" The hidden size of the attention layer.\n",
" cross_attention_dim (`int`):\n",
" The number of channels in the `encoder_hidden_states`.\n",
" text_context_len (`int`, defaults to 77):\n",
" The context length of the text features.\n",
" scale (`float`, defaults to 1.0):\n",
" the weight scale of image prompt.\n",
" \"\"\"\n",
"\n",
" def __init__(self, hidden_size = None, cross_attention_dim=None,id_length = 4,device = \"cuda:0\",dtype = torch.float16):\n",
" super().__init__()\n",
" if not hasattr(F, \"scaled_dot_product_attention\"):\n",
" raise ImportError(\"AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.\")\n",
" self.device = device\n",
" self.dtype = dtype\n",
" self.hidden_size = hidden_size\n",
" self.cross_attention_dim = cross_attention_dim\n",
" self.total_length = id_length + 1\n",
" self.id_length = id_length\n",
" self.id_bank = {}\n",
"\n",
" def __call__(\n",
" self,\n",
" attn,\n",
" hidden_states,\n",
" encoder_hidden_states=None,\n",
" attention_mask=None,\n",
" temb=None):\n",
" global total_count,attn_count,cur_step,mask1024,mask4096\n",
" global sa32, sa64\n",
" global write\n",
" global height,width\n",
" if write:\n",
" # print(f\"white:{cur_step}\")\n",
" self.id_bank[cur_step] = [hidden_states[:self.id_length], hidden_states[self.id_length:]]\n",
" else:\n",
" encoder_hidden_states = torch.cat((self.id_bank[cur_step][0].to(self.device),hidden_states[:1],self.id_bank[cur_step][1].to(self.device),hidden_states[1:]))\n",
" # skip in early step\n",
" if cur_step <5:\n",
" hidden_states = self.__call2__(attn, hidden_states,encoder_hidden_states,attention_mask,temb)\n",
" else: # 256 1024 4096\n",
" random_number = random.random()\n",
" if cur_step <20:\n",
" rand_num = 0.3\n",
" else:\n",
" rand_num = 0.1\n",
" if random_number > rand_num:\n",
" if not write:\n",
" if hidden_states.shape[1] == (height//32) * (width//32):\n",
" attention_mask = mask1024[mask1024.shape[0] // self.total_length * self.id_length:]\n",
" else:\n",
" attention_mask = mask4096[mask4096.shape[0] // self.total_length * self.id_length:]\n",
" else:\n",
" if hidden_states.shape[1] == (height//32) * (width//32):\n",
" attention_mask = mask1024[:mask1024.shape[0] // self.total_length * self.id_length,:mask1024.shape[0] // self.total_length * self.id_length]\n",
" else:\n",
" attention_mask = mask4096[:mask4096.shape[0] // self.total_length * self.id_length,:mask4096.shape[0] // self.total_length * self.id_length]\n",
" hidden_states = self.__call1__(attn, hidden_states,encoder_hidden_states,attention_mask,temb)\n",
" else:\n",
" hidden_states = self.__call2__(attn, hidden_states,None,attention_mask,temb)\n",
" attn_count +=1\n",
" if attn_count == total_count:\n",
" attn_count = 0\n",
" cur_step += 1\n",
" mask1024,mask4096 = cal_attn_mask_xl(self.total_length,self.id_length,sa32,sa64,height,width, device=self.device, dtype= self.dtype)\n",
"\n",
" return hidden_states\n",
" def __call1__(\n",
" self,\n",
" attn,\n",
" hidden_states,\n",
" encoder_hidden_states=None,\n",
" attention_mask=None,\n",
" temb=None,\n",
" ):\n",
" residual = hidden_states\n",
" if attn.spatial_norm is not None:\n",
" hidden_states = attn.spatial_norm(hidden_states, temb)\n",
" input_ndim = hidden_states.ndim\n",
"\n",
" if input_ndim == 4:\n",
" total_batch_size, channel, height, width = hidden_states.shape\n",
" hidden_states = hidden_states.view(total_batch_size, channel, height * width).transpose(1, 2)\n",
" total_batch_size,nums_token,channel = hidden_states.shape\n",
" img_nums = total_batch_size//2\n",
" hidden_states = hidden_states.view(-1,img_nums,nums_token,channel).reshape(-1,img_nums * nums_token,channel)\n",
"\n",
" batch_size, sequence_length, _ = hidden_states.shape\n",
"\n",
" if attn.group_norm is not None:\n",
" hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)\n",
"\n",
" query = attn.to_q(hidden_states)\n",
"\n",
" if encoder_hidden_states is None:\n",
" encoder_hidden_states = hidden_states # B, N, C\n",
" else:\n",
" encoder_hidden_states = encoder_hidden_states.view(-1,self.id_length+1,nums_token,channel).reshape(-1,(self.id_length+1) * nums_token,channel)\n",
"\n",
" key = attn.to_k(encoder_hidden_states)\n",
" value = attn.to_v(encoder_hidden_states)\n",
"\n",
"\n",
" inner_dim = key.shape[-1]\n",
" head_dim = inner_dim // attn.heads\n",
"\n",
" query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)\n",
"\n",
" key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)\n",
" value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)\n",
" hidden_states = F.scaled_dot_product_attention(\n",
" query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False\n",
" )\n",
"\n",
" hidden_states = hidden_states.transpose(1, 2).reshape(total_batch_size, -1, attn.heads * head_dim)\n",
" hidden_states = hidden_states.to(query.dtype)\n",
"\n",
"\n",
"\n",
" # linear proj\n",
" hidden_states = attn.to_out[0](hidden_states)\n",
" # dropout\n",
" hidden_states = attn.to_out[1](hidden_states)\n",
"\n",
"\n",
" if input_ndim == 4:\n",
" hidden_states = hidden_states.transpose(-1, -2).reshape(total_batch_size, channel, height, width)\n",
" if attn.residual_connection:\n",
" hidden_states = hidden_states + residual\n",
" hidden_states = hidden_states / attn.rescale_output_factor\n",
" # print(hidden_states.shape)\n",
" return hidden_states\n",
" def __call2__(\n",
" self,\n",
" attn,\n",
" hidden_states,\n",
" encoder_hidden_states=None,\n",
" attention_mask=None,\n",
" temb=None):\n",
" residual = hidden_states\n",
"\n",
" if attn.spatial_norm is not None:\n",
" hidden_states = attn.spatial_norm(hidden_states, temb)\n",
"\n",
" input_ndim = hidden_states.ndim\n",
"\n",
" if input_ndim == 4:\n",
" batch_size, channel, height, width = hidden_states.shape\n",
" hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)\n",
"\n",
" batch_size, sequence_length, channel = (\n",
" hidden_states.shape\n",
" )\n",
" # print(hidden_states.shape)\n",
" if attention_mask is not None:\n",
" attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)\n",
" # scaled_dot_product_attention expects attention_mask shape to be\n",
" # (batch, heads, source_length, target_length)\n",
" attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])\n",
"\n",
" if attn.group_norm is not None:\n",
" hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)\n",
"\n",
" query = attn.to_q(hidden_states)\n",
"\n",
" if encoder_hidden_states is None:\n",
" encoder_hidden_states = hidden_states # B, N, C\n",
" else:\n",
" encoder_hidden_states = encoder_hidden_states.view(-1,self.id_length+1,sequence_length,channel).reshape(-1,(self.id_length+1) * sequence_length,channel)\n",
"\n",
" key = attn.to_k(encoder_hidden_states)\n",
" value = attn.to_v(encoder_hidden_states)\n",
"\n",
" inner_dim = key.shape[-1]\n",
" head_dim = inner_dim // attn.heads\n",
"\n",
" query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)\n",
"\n",
" key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)\n",
" value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)\n",
"\n",
" hidden_states = F.scaled_dot_product_attention(\n",
" query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False\n",
" )\n",
"\n",
" hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)\n",
" hidden_states = hidden_states.to(query.dtype)\n",
"\n",
" # linear proj\n",
" hidden_states = attn.to_out[0](hidden_states)\n",
" # dropout\n",
" hidden_states = attn.to_out[1](hidden_states)\n",
"\n",
" if input_ndim == 4:\n",
" hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)\n",
"\n",
" if attn.residual_connection:\n",
" hidden_states = hidden_states + residual\n",
"\n",
" hidden_states = hidden_states / attn.rescale_output_factor\n",
"\n",
" return hidden_states\n",
"\n",
"def set_attention_processor(unet,id_length):\n",
" attn_procs = {}\n",
" for name in unet.attn_processors.keys():\n",
" cross_attention_dim = None if name.endswith(\"attn1.processor\") else unet.config.cross_attention_dim\n",
" if name.startswith(\"mid_block\"):\n",
" hidden_size = unet.config.block_out_channels[-1]\n",
" elif name.startswith(\"up_blocks\"):\n",
" block_id = int(name[len(\"up_blocks.\")])\n",
" hidden_size = list(reversed(unet.config.block_out_channels))[block_id]\n",
" elif name.startswith(\"down_blocks\"):\n",
" block_id = int(name[len(\"down_blocks.\")])\n",
" hidden_size = unet.config.block_out_channels[block_id]\n",
" if cross_attention_dim is None:\n",
" if name.startswith(\"up_blocks\") :\n",
" attn_procs[name] = SpatialAttnProcessor2_0(id_length = id_length)\n",
" else: \n",
" attn_procs[name] = AttnProcessor()\n",
" else:\n",
" attn_procs[name] = AttnProcessor()\n",
"\n",
" unet.set_attn_processor(attn_procs)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Load Pipeline"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/home/tjut_lixiang/anaconda3/envs/storydiffusion/lib/python3.10/site-packages/huggingface_hub/file_download.py:1150: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.\n",
" warnings.warn(\n",
"Loading pipeline components...: 100%|██████████| 7/7 [00:49<00:00, 7.13s/it]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"successsfully load consistent self-attention\n",
"number of the processor : 36\n"
]
}
],
"source": [
"global attn_count, total_count, id_length, total_length,cur_step, cur_model_type\n",
"global write\n",
"global sa32, sa64\n",
"global height,width\n",
"attn_count = 0\n",
"total_count = 0\n",
"cur_step = 0\n",
"id_length = 4\n",
"total_length = 5\n",
"cur_model_type = \"\"\n",
"device=\"cuda:0\"\n",
"global attn_procs,unet\n",
"attn_procs = {}\n",
"###\n",
"write = False\n",
"### strength of consistent self-attention: the larger, the stronger\n",
"sa32 = 0.5\n",
"sa64 = 0.5\n",
"### Res. of the Generated Comics. Please Note: SDXL models may do worse in a low-resolution! \n",
"height = 768\n",
"width = 768\n",
"###\n",
"global pipe\n",
"global sd_model_path\n",
"sd_model_path = models_dict[\"RealVision\"] #\"SG161222/RealVisXL_V4.0\"\n",
"### LOAD Stable Diffusion Pipeline\n",
"pipe = StableDiffusionXLPipeline.from_pretrained(sd_model_path, torch_dtype=torch.float16, use_safetensors=False)\n",
"pipe = pipe.to(device)\n",
"pipe.enable_freeu(s1=0.6, s2=0.4, b1=1.1, b2=1.2)\n",
"pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)\n",
"pipe.scheduler.set_timesteps(50)\n",
"unet = pipe.unet\n",
"\n",
"### Insert PairedAttention\n",
"for name in unet.attn_processors.keys():\n",
" cross_attention_dim = None if name.endswith(\"attn1.processor\") else unet.config.cross_attention_dim\n",
" if name.startswith(\"mid_block\"):\n",
" hidden_size = unet.config.block_out_channels[-1]\n",
" elif name.startswith(\"up_blocks\"):\n",
" block_id = int(name[len(\"up_blocks.\")])\n",
" hidden_size = list(reversed(unet.config.block_out_channels))[block_id]\n",
" elif name.startswith(\"down_blocks\"):\n",
" block_id = int(name[len(\"down_blocks.\")])\n",
" hidden_size = unet.config.block_out_channels[block_id]\n",
" if cross_attention_dim is None and (name.startswith(\"up_blocks\") ) :\n",
" attn_procs[name] = SpatialAttnProcessor2_0(id_length = id_length)\n",
" total_count +=1\n",
" else:\n",
" attn_procs[name] = AttnProcessor()\n",
"print(\"successsfully load consistent self-attention\")\n",
"print(f\"number of the processor : {total_count}\")\n",
"unet.set_attn_processor(copy.deepcopy(attn_procs))\n",
"global mask1024,mask4096\n",
"mask1024, mask4096 = cal_attn_mask_xl(total_length,id_length,sa32,sa64,height,width,device=device,dtype= torch.float16)"
]
},
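{
"cell_type": "markdown",
"metadata": {},
"source": [
"If you hit a CUDA out-of-memory error during generation (consistent self-attention attends across all `id_length` identity images at once, so peak memory is higher than a plain SDXL run), diffusers offers standard memory savers. A minimal sketch; whether you need these depends on your GPU:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"### Optional memory savers (standard diffusers APIs); uncomment as needed.\n",
"# Decode the VAE output one image at a time instead of as a full batch.\n",
"pipe.enable_vae_slicing()\n",
"# Avoid pipe.enable_attention_slicing() here: it may replace the custom\n",
"# consistent self-attention processors registered above.\n",
"# Last resort: offload idle submodules to CPU (requires the accelerate package).\n",
"# pipe.enable_model_cpu_offload()"
]
},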
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create the text description for the comics\n",
"Tips: Existing text2image diffusion models may not always generate images that accurately match text descriptions. Our training-free approach can improve the consistency of characters, but it does not enhance the control over the text. Therefore, in some cases, you may need to carefully craft your prompts."
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"guidance_scale = 5.0\n",
"seed = 2047\n",
"sa32 = 0.5\n",
"sa64 = 0.5\n",
"id_length = 4\n",
"num_steps = 50\n",
"general_prompt = \"a man with a black suit\"\n",
"negative_prompt = \"naked, deformed, bad anatomy, disfigured, poorly drawn face, mutation, extra limb, ugly, disgusting, poorly drawn hands, missing limb, floating limbs, disconnected limbs, blurry, watermarks, oversaturated, distorted hands, amputation\"\n",
"prompt_array = [\"wake up in the bed\",\n",
" \"have breakfast\",\n",
" \"is on the road, go to the company\",\n",
" \"work in the company\",\n",
" \"running in the playground\",\n",
" \"reading book in the home\"\n",
" ]"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
" 30%|███ | 15/50 [00:12<00:30, 1.16it/s]\n"
]
},
{
"ename": "OutOfMemoryError",
"evalue": "CUDA out of memory. Tried to allocate 3.16 GiB (GPU 0; 23.70 GiB total capacity; 17.71 GiB already allocated; 1.04 GiB free; 21.02 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mOutOfMemoryError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[7], line 20\u001b[0m\n\u001b[1;32m 18\u001b[0m attn_count \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m0\u001b[39m\n\u001b[1;32m 19\u001b[0m id_prompts, negative_prompt \u001b[38;5;241m=\u001b[39m apply_style(style_name, id_prompts, negative_prompt)\n\u001b[0;32m---> 20\u001b[0m id_images \u001b[38;5;241m=\u001b[39m \u001b[43mpipe\u001b[49m\u001b[43m(\u001b[49m\u001b[43mid_prompts\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mnum_inference_steps\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m \u001b[49m\u001b[43mnum_steps\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mguidance_scale\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mguidance_scale\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mheight\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m \u001b[49m\u001b[43mheight\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mwidth\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m \u001b[49m\u001b[43mwidth\u001b[49m\u001b[43m,\u001b[49m\u001b[43mnegative_prompt\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m \u001b[49m\u001b[43mnegative_prompt\u001b[49m\u001b[43m,\u001b[49m\u001b[43mgenerator\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m \u001b[49m\u001b[43mgenerator\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241m.\u001b[39mimages\n\u001b[1;32m 22\u001b[0m write \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mFalse\u001b[39;00m\n\u001b[1;32m 23\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m id_image \u001b[38;5;129;01min\u001b[39;00m id_images:\n",
"File \u001b[0;32m~/anaconda3/envs/storydiffusion/lib/python3.10/site-packages/torch/utils/_contextlib.py:115\u001b[0m, in \u001b[0;36mcontext_decorator.<locals>.decorate_context\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 112\u001b[0m \u001b[38;5;129m@functools\u001b[39m\u001b[38;5;241m.\u001b[39mwraps(func)\n\u001b[1;32m 113\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21mdecorate_context\u001b[39m(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs):\n\u001b[1;32m 114\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m ctx_factory():\n\u001b[0;32m--> 115\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/anaconda3/envs/storydiffusion/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py:1216\u001b[0m, in \u001b[0;36mStableDiffusionXLPipeline.__call__\u001b[0;34m(self, prompt, prompt_2, height, width, num_inference_steps, timesteps, denoising_end, guidance_scale, negative_prompt, negative_prompt_2, num_images_per_prompt, eta, generator, latents, prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ip_adapter_image, output_type, return_dict, cross_attention_kwargs, guidance_rescale, original_size, crops_coords_top_left, target_size, negative_original_size, negative_crops_coords_top_left, negative_target_size, clip_skip, callback_on_step_end, callback_on_step_end_tensor_inputs, **kwargs)\u001b[0m\n\u001b[1;32m 1214\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m ip_adapter_image \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 1215\u001b[0m added_cond_kwargs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mimage_embeds\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m image_embeds\n\u001b[0;32m-> 1216\u001b[0m noise_pred \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43munet\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1217\u001b[0m \u001b[43m \u001b[49m\u001b[43mlatent_model_input\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1218\u001b[0m \u001b[43m \u001b[49m\u001b[43mt\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1219\u001b[0m \u001b[43m \u001b[49m\u001b[43mencoder_hidden_states\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mprompt_embeds\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1220\u001b[0m \u001b[43m \u001b[49m\u001b[43mtimestep_cond\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtimestep_cond\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1221\u001b[0m \u001b[43m \u001b[49m\u001b[43mcross_attention_kwargs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcross_attention_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1222\u001b[0m \u001b[43m \u001b[49m\u001b[43madded_cond_kwargs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43madded_cond_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1223\u001b[0m \u001b[43m \u001b[49m\u001b[43mreturn_dict\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m 1224\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m[\u001b[38;5;241m0\u001b[39m]\n\u001b[1;32m 1226\u001b[0m \u001b[38;5;66;03m# perform guidance\u001b[39;00m\n\u001b[1;32m 1227\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdo_classifier_free_guidance:\n",
"File \u001b[0;32m~/anaconda3/envs/storydiffusion/lib/python3.10/site-packages/torch/nn/modules/module.py:1501\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1496\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1497\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1498\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1499\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1500\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1501\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1502\u001b[0m \u001b[38;5;66;03m# Do not call functions when jit is used\u001b[39;00m\n\u001b[1;32m 1503\u001b[0m full_backward_hooks, non_full_backward_hooks \u001b[38;5;241m=\u001b[39m [], []\n",
"File \u001b[0;32m~/anaconda3/envs/storydiffusion/lib/python3.10/site-packages/diffusers/models/unet_2d_condition.py:1177\u001b[0m, in \u001b[0;36mUNet2DConditionModel.forward\u001b[0;34m(self, sample, timestep, encoder_hidden_states, class_labels, timestep_cond, attention_mask, cross_attention_kwargs, added_cond_kwargs, down_block_additional_residuals, mid_block_additional_residual, down_intrablock_additional_residuals, encoder_attention_mask, return_dict)\u001b[0m\n\u001b[1;32m 1174\u001b[0m upsample_size \u001b[38;5;241m=\u001b[39m down_block_res_samples[\u001b[38;5;241m-\u001b[39m\u001b[38;5;241m1\u001b[39m]\u001b[38;5;241m.\u001b[39mshape[\u001b[38;5;241m2\u001b[39m:]\n\u001b[1;32m 1176\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mhasattr\u001b[39m(upsample_block, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mhas_cross_attention\u001b[39m\u001b[38;5;124m\"\u001b[39m) \u001b[38;5;129;01mand\u001b[39;00m upsample_block\u001b[38;5;241m.\u001b[39mhas_cross_attention:\n\u001b[0;32m-> 1177\u001b[0m sample \u001b[38;5;241m=\u001b[39m \u001b[43mupsample_block\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1178\u001b[0m \u001b[43m \u001b[49m\u001b[43mhidden_states\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43msample\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1179\u001b[0m \u001b[43m \u001b[49m\u001b[43mtemb\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43memb\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1180\u001b[0m \u001b[43m \u001b[49m\u001b[43mres_hidden_states_tuple\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mres_samples\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1181\u001b[0m \u001b[43m \u001b[49m\u001b[43mencoder_hidden_states\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mencoder_hidden_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1182\u001b[0m \u001b[43m \u001b[49m\u001b[43mcross_attention_kwargs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcross_attention_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1183\u001b[0m \u001b[43m \u001b[49m\u001b[43mupsample_size\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mupsample_size\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1184\u001b[0m \u001b[43m \u001b[49m\u001b[43mattention_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mattention_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1185\u001b[0m \u001b[43m \u001b[49m\u001b[43mencoder_attention_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mencoder_attention_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1186\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1187\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 1188\u001b[0m sample \u001b[38;5;241m=\u001b[39m upsample_block(\n\u001b[1;32m 1189\u001b[0m hidden_states\u001b[38;5;241m=\u001b[39msample,\n\u001b[1;32m 1190\u001b[0m temb\u001b[38;5;241m=\u001b[39memb,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 1193\u001b[0m scale\u001b[38;5;241m=\u001b[39mlora_scale,\n\u001b[1;32m 1194\u001b[0m )\n",
"File \u001b[0;32m~/anaconda3/envs/storydiffusion/lib/python3.10/site-packages/torch/nn/modules/module.py:1501\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1496\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1497\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1498\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1499\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1500\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1501\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1502\u001b[0m \u001b[38;5;66;03m# Do not call functions when jit is used\u001b[39;00m\n\u001b[1;32m 1503\u001b[0m full_backward_hooks, non_full_backward_hooks \u001b[38;5;241m=\u001b[39m [], []\n",
"File \u001b[0;32m~/anaconda3/envs/storydiffusion/lib/python3.10/site-packages/diffusers/models/unet_2d_blocks.py:2354\u001b[0m, in \u001b[0;36mCrossAttnUpBlock2D.forward\u001b[0;34m(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, cross_attention_kwargs, upsample_size, attention_mask, encoder_attention_mask)\u001b[0m\n\u001b[1;32m 2352\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 2353\u001b[0m hidden_states \u001b[38;5;241m=\u001b[39m resnet(hidden_states, temb, scale\u001b[38;5;241m=\u001b[39mlora_scale)\n\u001b[0;32m-> 2354\u001b[0m hidden_states \u001b[38;5;241m=\u001b[39m \u001b[43mattn\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2355\u001b[0m \u001b[43m \u001b[49m\u001b[43mhidden_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2356\u001b[0m \u001b[43m \u001b[49m\u001b[43mencoder_hidden_states\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mencoder_hidden_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2357\u001b[0m \u001b[43m \u001b[49m\u001b[43mcross_attention_kwargs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcross_attention_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2358\u001b[0m \u001b[43m \u001b[49m\u001b[43mattention_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mattention_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2359\u001b[0m \u001b[43m \u001b[49m\u001b[43mencoder_attention_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mencoder_attention_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2360\u001b[0m \u001b[43m \u001b[49m\u001b[43mreturn_dict\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m 2361\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m[\u001b[38;5;241m0\u001b[39m]\n\u001b[1;32m 2363\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mupsamplers \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 2364\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m upsampler \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mupsamplers:\n",
"File \u001b[0;32m~/anaconda3/envs/storydiffusion/lib/python3.10/site-packages/torch/nn/modules/module.py:1501\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1496\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1497\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1498\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1499\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1500\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1501\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1502\u001b[0m \u001b[38;5;66;03m# Do not call functions when jit is used\u001b[39;00m\n\u001b[1;32m 1503\u001b[0m full_backward_hooks, non_full_backward_hooks \u001b[38;5;241m=\u001b[39m [], []\n",
"File \u001b[0;32m~/anaconda3/envs/storydiffusion/lib/python3.10/site-packages/diffusers/models/transformer_2d.py:392\u001b[0m, in \u001b[0;36mTransformer2DModel.forward\u001b[0;34m(self, hidden_states, encoder_hidden_states, timestep, added_cond_kwargs, class_labels, cross_attention_kwargs, attention_mask, encoder_attention_mask, return_dict)\u001b[0m\n\u001b[1;32m 380\u001b[0m hidden_states \u001b[38;5;241m=\u001b[39m torch\u001b[38;5;241m.\u001b[39mutils\u001b[38;5;241m.\u001b[39mcheckpoint\u001b[38;5;241m.\u001b[39mcheckpoint(\n\u001b[1;32m 381\u001b[0m create_custom_forward(block),\n\u001b[1;32m 382\u001b[0m hidden_states,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 389\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mckpt_kwargs,\n\u001b[1;32m 390\u001b[0m )\n\u001b[1;32m 391\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m--> 392\u001b[0m hidden_states \u001b[38;5;241m=\u001b[39m \u001b[43mblock\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 393\u001b[0m \u001b[43m \u001b[49m\u001b[43mhidden_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 394\u001b[0m \u001b[43m \u001b[49m\u001b[43mattention_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mattention_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 395\u001b[0m \u001b[43m \u001b[49m\u001b[43mencoder_hidden_states\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mencoder_hidden_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 396\u001b[0m \u001b[43m \u001b[49m\u001b[43mencoder_attention_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mencoder_attention_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 397\u001b[0m \u001b[43m \u001b[49m\u001b[43mtimestep\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtimestep\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 398\u001b[0m \u001b[43m \u001b[49m\u001b[43mcross_attention_kwargs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcross_attention_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 399\u001b[0m \u001b[43m \u001b[49m\u001b[43mclass_labels\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mclass_labels\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 400\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 402\u001b[0m \u001b[38;5;66;03m# 3. Output\u001b[39;00m\n\u001b[1;32m 403\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mis_input_continuous:\n",
"File \u001b[0;32m~/anaconda3/envs/storydiffusion/lib/python3.10/site-packages/torch/nn/modules/module.py:1501\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1496\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1497\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1498\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1499\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1500\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1501\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1502\u001b[0m \u001b[38;5;66;03m# Do not call functions when jit is used\u001b[39;00m\n\u001b[1;32m 1503\u001b[0m full_backward_hooks, non_full_backward_hooks \u001b[38;5;241m=\u001b[39m [], []\n",
"File \u001b[0;32m~/anaconda3/envs/storydiffusion/lib/python3.10/site-packages/diffusers/models/attention.py:329\u001b[0m, in \u001b[0;36mBasicTransformerBlock.forward\u001b[0;34m(self, hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, timestep, cross_attention_kwargs, class_labels, added_cond_kwargs)\u001b[0m\n\u001b[1;32m 326\u001b[0m cross_attention_kwargs \u001b[38;5;241m=\u001b[39m cross_attention_kwargs\u001b[38;5;241m.\u001b[39mcopy() \u001b[38;5;28;01mif\u001b[39;00m cross_attention_kwargs \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;28;01melse\u001b[39;00m {}\n\u001b[1;32m 327\u001b[0m gligen_kwargs \u001b[38;5;241m=\u001b[39m cross_attention_kwargs\u001b[38;5;241m.\u001b[39mpop(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mgligen\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;28;01mNone\u001b[39;00m)\n\u001b[0;32m--> 329\u001b[0m attn_output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mattn1\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 330\u001b[0m \u001b[43m \u001b[49m\u001b[43mnorm_hidden_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 331\u001b[0m \u001b[43m \u001b[49m\u001b[43mencoder_hidden_states\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mencoder_hidden_states\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mif\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43monly_cross_attention\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01melse\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m 332\u001b[0m \u001b[43m \u001b[49m\u001b[43mattention_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mattention_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 333\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mcross_attention_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 334\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 335\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39muse_ada_layer_norm_zero:\n\u001b[1;32m 336\u001b[0m attn_output \u001b[38;5;241m=\u001b[39m gate_msa\u001b[38;5;241m.\u001b[39munsqueeze(\u001b[38;5;241m1\u001b[39m) \u001b[38;5;241m*\u001b[39m attn_output\n",
"File \u001b[0;32m~/anaconda3/envs/storydiffusion/lib/python3.10/site-packages/torch/nn/modules/module.py:1501\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1496\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1497\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1498\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1499\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1500\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1501\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1502\u001b[0m \u001b[38;5;66;03m# Do not call functions when jit is used\u001b[39;00m\n\u001b[1;32m 1503\u001b[0m full_backward_hooks, non_full_backward_hooks \u001b[38;5;241m=\u001b[39m [], []\n",
"File \u001b[0;32m~/anaconda3/envs/storydiffusion/lib/python3.10/site-packages/diffusers/models/attention_processor.py:527\u001b[0m, in \u001b[0;36mAttention.forward\u001b[0;34m(self, hidden_states, encoder_hidden_states, attention_mask, **cross_attention_kwargs)\u001b[0m\n\u001b[1;32m 508\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124mr\u001b[39m\u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 509\u001b[0m \u001b[38;5;124;03mThe forward method of the `Attention` class.\u001b[39;00m\n\u001b[1;32m 510\u001b[0m \n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 522\u001b[0m \u001b[38;5;124;03m `torch.Tensor`: The output of the attention layer.\u001b[39;00m\n\u001b[1;32m 523\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 524\u001b[0m \u001b[38;5;66;03m# The `Attention` class can call different attention processors / attention functions\u001b[39;00m\n\u001b[1;32m 525\u001b[0m \u001b[38;5;66;03m# here we simply pass along all tensors to the selected processor class\u001b[39;00m\n\u001b[1;32m 526\u001b[0m \u001b[38;5;66;03m# For standard processors that are defined here, `**cross_attention_kwargs` is empty\u001b[39;00m\n\u001b[0;32m--> 527\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mprocessor\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 528\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 529\u001b[0m \u001b[43m \u001b[49m\u001b[43mhidden_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 530\u001b[0m \u001b[43m \u001b[49m\u001b[43mencoder_hidden_states\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mencoder_hidden_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 531\u001b[0m \u001b[43m \u001b[49m\u001b[43mattention_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mattention_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 532\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mcross_attention_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 533\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n",
"Cell \u001b[0;32mIn[4], line 74\u001b[0m, in \u001b[0;36mSpatialAttnProcessor2_0.__call__\u001b[0;34m(self, attn, hidden_states, encoder_hidden_states, attention_mask, temb)\u001b[0m\n\u001b[1;32m 72\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 73\u001b[0m attention_mask \u001b[38;5;241m=\u001b[39m mask4096[:mask4096\u001b[38;5;241m.\u001b[39mshape[\u001b[38;5;241m0\u001b[39m] \u001b[38;5;241m/\u001b[39m\u001b[38;5;241m/\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtotal_length \u001b[38;5;241m*\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mid_length,:mask4096\u001b[38;5;241m.\u001b[39mshape[\u001b[38;5;241m0\u001b[39m] \u001b[38;5;241m/\u001b[39m\u001b[38;5;241m/\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtotal_length \u001b[38;5;241m*\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mid_length]\n\u001b[0;32m---> 74\u001b[0m hidden_states \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m__call1__\u001b[49m\u001b[43m(\u001b[49m\u001b[43mattn\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mhidden_states\u001b[49m\u001b[43m,\u001b[49m\u001b[43mencoder_hidden_states\u001b[49m\u001b[43m,\u001b[49m\u001b[43mattention_mask\u001b[49m\u001b[43m,\u001b[49m\u001b[43mtemb\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 75\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 76\u001b[0m hidden_states \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m__call2__(attn, hidden_states,\u001b[38;5;28;01mNone\u001b[39;00m,attention_mask,temb)\n",
"Cell \u001b[0;32mIn[4], line 127\u001b[0m, in \u001b[0;36mSpatialAttnProcessor2_0.__call1__\u001b[0;34m(self, attn, hidden_states, encoder_hidden_states, attention_mask, temb)\u001b[0m\n\u001b[1;32m 125\u001b[0m key \u001b[38;5;241m=\u001b[39m key\u001b[38;5;241m.\u001b[39mview(batch_size, \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m1\u001b[39m, attn\u001b[38;5;241m.\u001b[39mheads, head_dim)\u001b[38;5;241m.\u001b[39mtranspose(\u001b[38;5;241m1\u001b[39m, \u001b[38;5;241m2\u001b[39m)\n\u001b[1;32m 126\u001b[0m value \u001b[38;5;241m=\u001b[39m value\u001b[38;5;241m.\u001b[39mview(batch_size, \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m1\u001b[39m, attn\u001b[38;5;241m.\u001b[39mheads, head_dim)\u001b[38;5;241m.\u001b[39mtranspose(\u001b[38;5;241m1\u001b[39m, \u001b[38;5;241m2\u001b[39m)\n\u001b[0;32m--> 127\u001b[0m hidden_states \u001b[38;5;241m=\u001b[39m \u001b[43mF\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mscaled_dot_product_attention\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 128\u001b[0m \u001b[43m \u001b[49m\u001b[43mquery\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mkey\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mvalue\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mattn_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mattention_mask\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdropout_p\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m0.0\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mis_causal\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\n\u001b[1;32m 129\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 131\u001b[0m hidden_states \u001b[38;5;241m=\u001b[39m hidden_states\u001b[38;5;241m.\u001b[39mtranspose(\u001b[38;5;241m1\u001b[39m, \u001b[38;5;241m2\u001b[39m)\u001b[38;5;241m.\u001b[39mreshape(total_batch_size, \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m1\u001b[39m, attn\u001b[38;5;241m.\u001b[39mheads \u001b[38;5;241m*\u001b[39m head_dim)\n\u001b[1;32m 132\u001b[0m hidden_states \u001b[38;5;241m=\u001b[39m hidden_states\u001b[38;5;241m.\u001b[39mto(query\u001b[38;5;241m.\u001b[39mdtype)\n",
"\u001b[0;31mOutOfMemoryError\u001b[0m: CUDA out of memory. Tried to allocate 3.16 GiB (GPU 0; 23.70 GiB total capacity; 17.71 GiB already allocated; 1.04 GiB free; 21.02 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF"
]
}
],
"source": [
"##########################################################################################\n",
"def apply_style_positive(style_name: str, positive: str):\n",
" p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])\n",
" return p.replace(\"{prompt}\", positive) \n",
"def apply_style(style_name: str, positives: list, negative: str = \"\"):\n",
" p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])\n",
" return [p.replace(\"{prompt}\", positive) for positive in positives], n + ' ' + negative\n",
"### Set the generated Style\n",
"style_name = \"Comic book\"\n",
"setup_seed(seed)\n",
"generator = torch.Generator(device=\"cuda:0\").manual_seed(seed)\n",
"prompts = [general_prompt+\",\"+prompt for prompt in prompt_array]\n",
"id_prompts = prompts[:id_length]\n",
"real_prompts = prompts[id_length:]\n",
"torch.cuda.empty_cache()\n",
"write = True\n",
"cur_step = 0\n",
"attn_count = 0\n",
"id_prompts, negative_prompt = apply_style(style_name, id_prompts, negative_prompt)\n",
"id_images = pipe(id_prompts, num_inference_steps = num_steps, guidance_scale=guidance_scale, height = height, width = width,negative_prompt = negative_prompt,generator = generator).images\n",
"\n",
"write = False\n",
"for id_image in id_images:\n",
" display(id_image)\n",
"real_images = []\n",
"for real_prompt in real_prompts:\n",
" cur_step = 0\n",
" real_prompt = apply_style_positive(style_name, real_prompt)\n",
" real_images.append(pipe(real_prompt, num_inference_steps=num_steps, guidance_scale=guidance_scale, height = height, width = width,negative_prompt = negative_prompt,generator = generator).images[0])\n",
"for real_image in real_images:\n",
" display(real_image) "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Continued Creation\n",
"From now on, you can create endless stories about this character without worrying about memory constraints."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"new_prompt_array = [\"siting on the sofa\",\n",
" \"on the bed, at night \"]\n",
"new_prompts = [general_prompt+\",\"+prompt for prompt in new_prompt_array]\n",
"new_images = []\n",
"for new_prompt in new_prompts :\n",
" cur_step = 0\n",
" new_prompt = apply_style_positive(style_name, new_prompt)\n",
" new_images.append(pipe(new_prompt, num_inference_steps=num_steps, guidance_scale=guidance_scale, height = height, width = width,negative_prompt = negative_prompt,generator = generator).images[0])\n",
"for new_image in new_images:\n",
" display(new_image) "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Make pictures into comics"
]
},
{
"cell_type": "code",
"execution_count": 222,
"metadata": {},
"outputs": [],
"source": [
"###\n",
"total_images = id_images + real_images + new_images\n",
"from PIL import Image,ImageOps,ImageDraw, ImageFont\n",
"#### LOAD Fonts, can also replace with any Fonts you have!\n",
"font = ImageFont.truetype(\"./fonts/Inkfree.ttf\", 30)\n"
]
},
{
"cell_type": "code",
"execution_count": 223,
"metadata": {},
"outputs": [],
"source": [
"# import importlib\n",
"# import utils.utils\n",
"# importlib.reload(utils)\n",
"from utils.utils import get_row_image\n",
"from utils.utils import get_row_image\n",
"from utils.utils import get_comic_4panel"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"comics = get_comic_4panel(total_images, captions = prompt_array+ new_prompts,font = font )\n",
"for comic in comics:\n",
" display(comic)"
]
},
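{
"cell_type": "markdown",
"metadata": {},
"source": [
"Optionally, save the comic pages to disk. A minimal sketch; the `results` output directory is an assumption, not part of the original repo layout:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical output directory; adjust to taste.\n",
"save_dir = os.path.join(\"results\", datetime.now().strftime(\"%Y%m%d-%H%M%S\"))\n",
"os.makedirs(save_dir, exist_ok=True)\n",
"for idx, comic in enumerate(comics):\n",
" comic.save(os.path.join(save_dir, f\"comic_{idx}.png\"))"
]
}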
],
"metadata": {
"fileId": "51613593-0d85-430e-8fce-c85e580fc483",
"kernelspec": {
"display_name": "storydiffusion",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.16"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
|