4bacd0b
epoch 1/10
Traceback (most recent call last):
File "/workspace/kohya_ss/./sdxl_train_network.py", line 176, in <module>
trainer.train(args)
File "/workspace/kohya_ss/train_network.py", line 773, in train
noise_pred = self.call_unet(
File "/workspace/kohya_ss/./sdxl_train_network.py", line 156, in call_unet
noise_pred = unet(noisy_latents, timesteps, text_embedding, vector_embedding)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/accelerate/utils/operations.py", line 521, in forward
return model_forward(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/accelerate/utils/operations.py", line 509, in __call__
return convert_to_fp32(self.model_forward(*args, **kwargs))
File "/usr/local/lib/python3.10/dist-packages/torch/amp/autocast_mode.py", line 14, in decorate_autocast
return func(*args, **kwargs)
File "/workspace/kohya_ss/library/sdxl_original_unet.py", line 1088, in forward
h = call_module(module, h, emb, context)
File "/workspace/kohya_ss/library/sdxl_original_unet.py", line 1071, in call_module
x = layer(x, emb)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/workspace/kohya_ss/library/sdxl_original_unet.py", line 328, in forward
x = self.forward_body(x, emb)
File "/workspace/kohya_ss/library/sdxl_original_unet.py", line 309, in forward_body
h = self.in_layers(x)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/container.py", line 217, in forward
input = module(input)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/workspace/kohya_ss/library/sdxl_original_unet.py", line 272, in forward
return super().forward(x)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/normalization.py", line 273, in forward
return F.group_norm(
File "/usr/local/lib/python3.10/dist-packages/torch/nn/functional.py", line 2530, in group_norm
return torch.group_norm(input, num_groups, weight, bias, eps, torch.backends.cudnn.enabled)
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 40.00 MiB (GPU 0; 19.71 GiB total capacity; 17.84 GiB already allocated; 6.62 MiB free; 18.10 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
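
Note the numbers in the message: 18.10 GiB is reserved by PyTorch but only 17.84 GiB is actually allocated, so the failed 40.00 MiB request points at allocator fragmentation as much as at raw capacity. The message's own suggestion is to set max_split_size_mb via PYTORCH_CUDA_ALLOC_CONF. A minimal sketch of doing that from Python, assuming it runs before anything initializes CUDA (the 128 MiB value is an illustrative starting point, not a tuned setting):

import os

# Must be set before the first CUDA allocation. Blocks larger than this
# many MiB are never split by the caching allocator, which curbs
# fragmentation at some cost in block reuse.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128"

import torch  # import (and any CUDA work) only after the variable is set

The same variable can instead be exported in the shell that launches the trainer, which avoids editing the script at all.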
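
If the allocator tuning is not enough, the trace itself shows where activation memory goes: sdxl_original_unet.py line 328 takes the non-checkpointed branch and calls self.forward_body directly, so each ResnetBlock2D keeps its activations alive for the backward pass. Gradient checkpointing recomputes them instead; a minimal sketch of that pattern (block, x, and emb are illustrative names here, not the script's own):

import torch
import torch.utils.checkpoint

def run_resnet_block(block, x, emb, use_checkpointing=True):
    # Recompute this block's activations during backward instead of
    # storing them: extra compute in exchange for lower peak memory.
    if use_checkpointing and torch.is_grad_enabled():
        return torch.utils.checkpoint.checkpoint(block, x, emb, use_reentrant=False)
    return block(x, emb)

sd-scripts already wires this path up behind its --gradient_checkpointing option; enabling it, or lowering --train_batch_size, is the usual way out of an OOM this close to the card's 19.71 GiB limit.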