radames committed
Commit cf3ff1a
Parent: 55e36ea

Skip channels_last memory format on MPS

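Every hunk below applies the same change: the channels_last memory-format optimization for the UNet is skipped when the pipeline runs on Apple's MPS backend. A minimal sketch of the pattern, assuming the SimianLuo/LCM_Dreamshaper_v7 checkpoint and the device-selection logic shown here (both are illustrative assumptions, not part of this commit):

import torch
from diffusers import DiffusionPipeline

# Illustrative device selection; the real pipelines receive `device` and
# `torch_dtype` from the app that constructs them.
device = torch.device(
    "cuda" if torch.cuda.is_available()
    else "mps" if torch.backends.mps.is_available()
    else "cpu"
)
torch_dtype = torch.float16 if device.type != "cpu" else torch.float32

# Hypothetical checkpoint, used only to make the sketch self-contained.
pipe = DiffusionPipeline.from_pretrained("SimianLuo/LCM_Dreamshaper_v7")
pipe.to(device=device, dtype=torch_dtype)

# The commit's guard: channels_last is applied on CUDA/CPU but not on MPS.
if device.type != "mps":
    pipe.unet.to(memory_format=torch.channels_last)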
pipelines/controlnet.py CHANGED
@@ -149,7 +149,8 @@ class Pipeline:
         self.canny_torch = SobelOperator(device=device)
         self.pipe.set_progress_bar_config(disable=True)
         self.pipe.to(device=device, dtype=torch_dtype)
-        self.pipe.unet.to(memory_format=torch.channels_last)
+        if device.type != "mps":
+            self.pipe.unet.to(memory_format=torch.channels_last)
 
         # check if computer has less than 64GB of RAM using sys or os
         if psutil.virtual_memory().total < 64 * 1024**3:
pipelines/controlnetLoraSD15.py CHANGED
@@ -171,6 +171,8 @@ class Pipeline:
         pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
         pipe.set_progress_bar_config(disable=True)
         pipe.to(device=device, dtype=torch_dtype).to(device)
+        if device.type != "mps":
+            pipe.unet.to(memory_format=torch.channels_last)
 
         if psutil.virtual_memory().total < 64 * 1024**3:
             pipe.enable_attention_slicing()
pipelines/controlnetLoraSDXL.py CHANGED
@@ -179,6 +179,8 @@ class Pipeline:
         self.pipe.scheduler = LCMScheduler.from_config(self.pipe.scheduler.config)
         self.pipe.set_progress_bar_config(disable=True)
         self.pipe.to(device=device, dtype=torch_dtype).to(device)
+        if device.type != "mps":
+            self.pipe.unet.to(memory_format=torch.channels_last)
 
         if psutil.virtual_memory().total < 64 * 1024**3:
             self.pipe.enable_attention_slicing()
pipelines/img2img.py CHANGED
@@ -83,7 +83,8 @@ class Pipeline:
 
         self.pipe.set_progress_bar_config(disable=True)
         self.pipe.to(device=device, dtype=torch_dtype)
-        self.pipe.unet.to(memory_format=torch.channels_last)
+        if device.type != "mps":
+            self.pipe.unet.to(memory_format=torch.channels_last)
 
         # check if computer has less than 64GB of RAM using sys or os
         if psutil.virtual_memory().total < 64 * 1024**3:
pipelines/txt2img.py CHANGED
@@ -69,7 +69,8 @@ class Pipeline:
 
         self.pipe.set_progress_bar_config(disable=True)
         self.pipe.to(device=device, dtype=torch_dtype)
-        self.pipe.unet.to(memory_format=torch.channels_last)
+        if device.type != "mps":
+            self.pipe.unet.to(memory_format=torch.channels_last)
 
         # check if computer has less than 64GB of RAM using sys or os
         if psutil.virtual_memory().total < 64 * 1024**3:
pipelines/txt2imgLora.py CHANGED
@@ -70,7 +70,8 @@ class Pipeline:
         self.pipe.scheduler = LCMScheduler.from_config(self.pipe.scheduler.config)
         self.pipe.set_progress_bar_config(disable=True)
         self.pipe.to(device=device, dtype=torch_dtype)
-        self.pipe.unet.to(memory_format=torch.channels_last)
+        if device.type != "mps":
+            self.pipe.unet.to(memory_format=torch.channels_last)
 
         # check if computer has less than 64GB of RAM using sys or os
         if psutil.virtual_memory().total < 64 * 1024**3:
pipelines/txt2imgLoraSDXL.py CHANGED
@@ -89,6 +89,8 @@ class Pipeline:
         self.pipe.scheduler = LCMScheduler.from_config(self.pipe.scheduler.config)
         self.pipe.set_progress_bar_config(disable=True)
         self.pipe.to(device=device, dtype=torch_dtype).to(device)
+        if device.type != "mps":
+            self.pipe.unet.to(memory_format=torch.channels_last)
 
         if psutil.virtual_memory().total < 64 * 1024**3:
             self.pipe.enable_attention_slicing()
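
Since the same two-line guard is now duplicated across all seven pipelines, a shared helper could keep it in one place. This is a hypothetical refactor sketch, not something the commit introduces:

import torch

def maybe_channels_last(pipe, device: torch.device) -> None:
    # Apply the channels_last memory format to the UNet everywhere except
    # on MPS, mirroring the guard added in this commit.
    if device.type != "mps":
        pipe.unet.to(memory_format=torch.channels_last)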