Commit 00c7cc7 · verified · 1 Parent(s): 25e2914
diffusers-benchmarking-bot committed: Upload folder using huggingface_hub

Files changed (1):
  1. main/matryoshka.py (+4 -4)

main/matryoshka.py CHANGED
@@ -868,7 +868,7 @@ class CrossAttnDownBlock2D(nn.Module):
         blocks = list(zip(self.resnets, self.attentions))
 
         for i, (resnet, attn) in enumerate(blocks):
-            if self.training and self.gradient_checkpointing:
+            if torch.is_grad_enabled() and self.gradient_checkpointing:
 
                 def create_custom_forward(module, return_dict=None):
                     def custom_forward(*inputs):
@@ -1029,7 +1029,7 @@ class UNetMidBlock2DCrossAttn(nn.Module):
 
         hidden_states = self.resnets[0](hidden_states, temb)
         for attn, resnet in zip(self.attentions, self.resnets[1:]):
-            if self.training and self.gradient_checkpointing:
+            if torch.is_grad_enabled() and self.gradient_checkpointing:
 
                 def create_custom_forward(module, return_dict=None):
                     def custom_forward(*inputs):
@@ -1191,7 +1191,7 @@ class CrossAttnUpBlock2D(nn.Module):
 
             hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
 
-            if self.training and self.gradient_checkpointing:
+            if torch.is_grad_enabled() and self.gradient_checkpointing:
 
                 def create_custom_forward(module, return_dict=None):
                     def custom_forward(*inputs):
@@ -1364,7 +1364,7 @@ class MatryoshkaTransformer2DModel(LegacyModelMixin, LegacyConfigMixin):
 
         # Blocks
         for block in self.transformer_blocks:
-            if self.training and self.gradient_checkpointing:
+            if torch.is_grad_enabled() and self.gradient_checkpointing:
 
                 def create_custom_forward(module, return_dict=None):
                     def custom_forward(*inputs):
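
For context, below is a minimal, self-contained sketch of the gradient-checkpointing pattern these four hunks gate. The class name BlockSketch and its linear layers are illustrative assumptions, not part of main/matryoshka.py; only the guard condition and the create_custom_forward wrapper mirror the diff. The sketch shows why torch.is_grad_enabled() is used instead of self.training: checkpointing is skipped whenever autograd is not recording (e.g. under torch.no_grad()), even if the module is still in train mode.

import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint


class BlockSketch(nn.Module):
    # Hypothetical stand-in for the down/mid/up blocks touched by the diff.
    def __init__(self):
        super().__init__()
        self.layers = nn.ModuleList([nn.Linear(16, 16) for _ in range(3)])
        self.gradient_checkpointing = True

    def forward(self, hidden_states):
        for layer in self.layers:
            # Same guard as the "+" lines above: only checkpoint when autograd
            # is actually recording, not merely when the module is in train mode.
            if torch.is_grad_enabled() and self.gradient_checkpointing:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs)

                    return custom_forward

                # Recompute this layer's activations during backward
                # instead of keeping them in memory.
                hidden_states = checkpoint(
                    create_custom_forward(layer), hidden_states, use_reentrant=False
                )
            else:
                hidden_states = layer(hidden_states)
        return hidden_states


x = torch.randn(2, 16, requires_grad=True)
out = BlockSketch()(x)  # gradients enabled: takes the checkpointed branch
out.sum().backward()

with torch.no_grad():
    BlockSketch()(x)  # no autograd recording: falls through to the plain branch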