Spaces:
Running
on
Zero
Running
on
Zero
Yardenfren
committed on
Update inf.py
Browse files
inf.py
CHANGED
@@ -20,7 +20,7 @@ class InferencePipeline:
|
|
20 |
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
21 |
if self.device.type == 'cpu':
|
22 |
self.pipe = StableDiffusionXLPipeline.from_pretrained(
|
23 |
-
self.base_model_id, use_auth_token=self.hf_token
|
24 |
else:
|
25 |
self.pipe = StableDiffusionXLPipeline.from_pretrained(
|
26 |
self.base_model_id,
|
@@ -111,6 +111,8 @@ class InferencePipeline:
|
|
111 |
if not torch.cuda.is_available():
|
112 |
raise gr.Error('CUDA is not available.')
|
113 |
|
|
|
|
|
114 |
self.load_pipe(content_lora_model_id, style_lora_model_id, content_alpha, style_alpha)
|
115 |
|
116 |
generator = torch.Generator().manual_seed(seed)
|
|
|
20 |
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
21 |
if self.device.type == 'cpu':
|
22 |
self.pipe = StableDiffusionXLPipeline.from_pretrained(
|
23 |
+
self.base_model_id, use_auth_token=self.hf_token)
|
24 |
else:
|
25 |
self.pipe = StableDiffusionXLPipeline.from_pretrained(
|
26 |
self.base_model_id,
|
|
|
111 |
if not torch.cuda.is_available():
|
112 |
raise gr.Error('CUDA is not available.')
|
113 |
|
114 |
+
print(f"running with {self.device}, {self.pipe.device}")
|
115 |
+
|
116 |
self.load_pipe(content_lora_model_id, style_lora_model_id, content_alpha, style_alpha)
|
117 |
|
118 |
generator = torch.Generator().manual_seed(seed)
|