Upload model_management.py
Browse files- comfy/model_management.py +10 -7
comfy/model_management.py
CHANGED
|
@@ -177,14 +177,17 @@ def get_torch_device():
|
|
| 177 |
if cpu_state == CPUState.CPU:
|
| 178 |
return torch.device("cpu")
|
| 179 |
else:
|
| 180 |
-
if
|
| 181 |
-
|
| 182 |
-
|
| 183 |
-
|
| 184 |
-
|
| 185 |
-
|
|
|
|
|
|
|
|
|
|
| 186 |
else:
|
| 187 |
-
return torch.device(
|
| 188 |
|
| 189 |
def get_total_memory(dev=None, torch_total_too=False):
|
| 190 |
global directml_enabled
|
|
|
|
def get_torch_device():
    """Return the torch.device that models should be placed on.

    Resolution order:
      1. Forced-CPU state (``cpu_state``) always wins.
      2. Vendor accelerator backends — Intel XPU, Ascend NPU, Cambricon MLU.
      3. CUDA, only when ``torch.cuda.is_available()``.
      4. CPU fallback when no supported accelerator is present
         (prevents a crash on CUDA-less installs).
    """
    if cpu_state == CPUState.CPU:
        return torch.device("cpu")
    else:
        # NOTE(review): the vendor probes must NOT be nested inside
        # torch.cuda.is_available() — XPU/NPU/MLU systems usually report
        # CUDA as unavailable, which would wrongly push them onto CPU.
        if is_intel_xpu():
            return torch.device("xpu", torch.xpu.current_device())
        elif is_ascend_npu():
            return torch.device("npu", torch.npu.current_device())
        elif is_mlu():
            return torch.device("mlu", torch.mlu.current_device())
        elif torch.cuda.is_available():
            return torch.device(torch.cuda.current_device())
        else:
            # Fall back to CPU if no accelerator backend is usable.
            return torch.device("cpu")
|
| 191 |
|
| 192 |
def get_total_memory(dev=None, torch_total_too=False):
|
| 193 |
global directml_enabled
|