Update app.py
app.py CHANGED
@@ -16,8 +16,8 @@ if torch.cuda.is_available():
 else:
     torch_dtype = torch.float32
 
-pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
-pipe = pipe.to(device)
+# pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
+# pipe = pipe.to(device)
 
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1024
@@ -41,6 +41,9 @@ def infer(
 
     generator = torch.Generator().manual_seed(seed)
 
+    pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
+    pipe = pipe.to(device)
+
     image = pipe(
         prompt=prompt,
         negative_prompt=negative_prompt,
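For context, a minimal sketch of how app.py reads after this commit, assuming the standard diffusers text-to-image Gradio template this Space appears to follow; the model_repo_id value, the float16 branch, infer()'s full parameter list, and the pipe() arguments beyond prompt and negative_prompt are assumptions, since the diff does not show them. The effect of the change is that the pipeline is no longer constructed at import time but inside infer(), on every call.

import numpy as np
import torch
from diffusers import DiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
model_repo_id = "stabilityai/sdxl-turbo"  # assumption: the real repo id is not visible in the diff

if torch.cuda.is_available():
    torch_dtype = torch.float16  # assumption: standard template default for GPU
else:
    torch_dtype = torch.float32

# Previously the pipeline was built once here, at import time:
# pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
# pipe = pipe.to(device)

MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024


# assumption: the parameter list follows the standard template; the diff only
# shows prompt and negative_prompt being passed to the pipeline.
def infer(prompt, negative_prompt, seed, width, height, guidance_scale, num_inference_steps):
    generator = torch.Generator().manual_seed(seed)

    # After this commit the pipeline is loaded lazily, inside the inference function.
    pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
    pipe = pipe.to(device)

    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=generator,
    ).images[0]

    return image, seed

Building the pipeline inside infer() keeps the model out of memory at import time (e.g. while the Space is sleeping or starting up), at the cost of re-initializing the weights on every request.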