nseq committed
Commit c4546f2 · verified · 1 parent: 7ebf1b7

Update memory_management.py

Files changed (1): memory_management.py (+4 -4)
memory_management.py CHANGED
@@ -13,10 +13,6 @@ from pathlib import Path
 from enum import Enum
 from backend import stream, utils
 from backend.args import args
-gpu_sync = None
-if args.gpu_device_id is not None:
-    gpu_sync = GPUSync(args.gpu_device_id)
-
 
 cpu = torch.device('cpu')
 
@@ -143,6 +139,10 @@ class GPUSync:
         if Path("/tmp/sd_forge_gpu_sync.lock").exists():
            os.remove("/tmp/sd_forge_gpu_sync.lock")
 
+gpu_sync = None
+if args.gpu_device_id is not None:
+    gpu_sync = GPUSync(args.gpu_device_id)
+
 class VRAMState(Enum):
     DISABLED = 0  # No vram present: no need to move models to vram
     NO_VRAM = 1  # Very low vram: enable all the options to save vram
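
Net effect of the diff: the module-level gpu_sync initialization is moved from just after the imports to just below the GPUSync class, so the class is already defined when the guard runs. A minimal, self-contained sketch of the ordering issue, assuming a simplified GPUSync and a hard-coded device id standing in for args.gpu_device_id:

# Sketch only: GPUSync's body and the device id are placeholders,
# not the real backend implementation.

class GPUSync:
    """Stand-in for the real GPU synchronization helper."""
    def __init__(self, device_id: int):
        self.device_id = device_id

# Module-level init must come after the class definition; placing it above
# the class (as in the old layout) raises NameError at import time, because
# the GPUSync(...) call executes before the class statement does.
gpu_device_id = 0  # stands in for args.gpu_device_id
gpu_sync = None
if gpu_device_id is not None:
    gpu_sync = GPUSync(gpu_device_id)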