rename
- README.md +2 -2
- convert_mvdream_to_diffusers.py +3 -3
- main.py +2 -2
- mvdream/pipeline_mvdream.py +1 -1
README.md
CHANGED
```diff
@@ -26,9 +26,9 @@ detailed usage:
 ```python
 import torch
 import kiui
-from mvdream.pipeline_mvdream import
+from mvdream.pipeline_mvdream import MVDreamPipeline
 
-pipe =
+pipe = MVDreamPipeline.from_pretrained('./weights', torch_dtype=torch.float16)
 pipe = pipe.to("cuda")
 
 prompt = "a photo of an astronaut riding a horse on mars"
```
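For context, the updated README snippet composes into the following runnable sketch; the negative prompt is borrowed from the test block in convert_mvdream_to_diffusers.py, and how the returned views are saved is not part of this commit.

```python
import torch
from mvdream.pipeline_mvdream import MVDreamPipeline

# Load the converted local weights with the renamed pipeline class.
pipe = MVDreamPipeline.from_pretrained("./weights", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

prompt = "a photo of an astronaut riding a horse on mars"
# Negative prompt taken from the pipeline test in convert_mvdream_to_diffusers.py;
# saving or inspecting the returned views is omitted since the diff does not show it.
images = pipe(prompt=prompt, negative_prompt="painting, bad quality, flat")
```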
convert_mvdream_to_diffusers.py
CHANGED
```diff
@@ -16,7 +16,7 @@ from typing import Any
 from accelerate import init_empty_weights
 from accelerate.utils import set_module_tensor_to_device
 from mvdream.models import MultiViewUNetModel
-from mvdream.pipeline_mvdream import
+from mvdream.pipeline_mvdream import MVDreamPipeline
 from transformers import CLIPTokenizer, CLIPTextModel
 
 logger = logging.get_logger(__name__)
@@ -460,7 +460,7 @@ def convert_from_original_mvdream_ckpt(checkpoint_path, original_config_file, de
             f"Unknown context_dim: {original_config.model.params.unet_config.params.context_dim}"
         )
 
-    pipe =
+    pipe = MVDreamPipeline(
         vae=vae,
         unet=unet,
         tokenizer=tokenizer,
@@ -547,7 +547,7 @@ if __name__ == "__main__":
         image.save(f"image_{i}.png")  # type: ignore
 
     print(f"Testing entire pipeline...")
-    loaded_pipe:
+    loaded_pipe: MVDreamPipeline = MVDreamPipeline.from_pretrained(args.dump_path, safe_serialization=args.to_safetensors)  # type: ignore
     images = loaded_pipe(
         prompt="Head of Hatsune Miku",
         negative_prompt="painting, bad quality, flat",
```
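A minimal sketch of the round-trip test the script's __main__ block performs after the rename; "./dump" stands in for args.dump_path, which is not spelled out in the hunk above.

```python
from mvdream.pipeline_mvdream import MVDreamPipeline

# Reload the converted pipeline under the new class name and run the same
# test prompt as the script; "./dump" is a placeholder for args.dump_path.
loaded_pipe: MVDreamPipeline = MVDreamPipeline.from_pretrained("./dump")  # type: ignore
images = loaded_pipe(
    prompt="Head of Hatsune Miku",
    negative_prompt="painting, bad quality, flat",
)
```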
main.py
CHANGED
```diff
@@ -2,9 +2,9 @@ import torch
 import kiui
 import numpy as np
 import argparse
-from mvdream.pipeline_mvdream import
+from mvdream.pipeline_mvdream import MVDreamPipeline
 
-pipe =
+pipe = MVDreamPipeline.from_pretrained(
     "./weights", # local weights
     # "ashawkey/mvdream-sd2.1-diffusers",
     torch_dtype=torch.float16
```
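main.py keeps a commented-out Hub repo id as an alternative to the local weights; assuming the hosted weights mirror the converted ones, loading from the Hub is a one-line swap:

```python
import torch
from mvdream.pipeline_mvdream import MVDreamPipeline

# Use the Hub repo id from the comment in main.py instead of "./weights".
pipe = MVDreamPipeline.from_pretrained(
    "ashawkey/mvdream-sd2.1-diffusers",
    torch_dtype=torch.float16,
)
pipe = pipe.to("cuda")
```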
mvdream/pipeline_mvdream.py
CHANGED
```diff
@@ -74,7 +74,7 @@ def get_camera(
     return torch.tensor(np.stack(cameras, 0)).float()
 
 
-class
+class MVDreamPipeline(DiffusionPipeline):
     def __init__(
         self,
         vae: AutoencoderKL,
```
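Because the renamed class still derives from DiffusionPipeline, the generic diffusers pipeline utilities remain available; a small sketch (the save path is only illustrative):

```python
from diffusers import DiffusionPipeline
from mvdream.pipeline_mvdream import MVDreamPipeline

pipe = MVDreamPipeline.from_pretrained("./weights")
assert isinstance(pipe, DiffusionPipeline)

# Inherited helpers such as save_pretrained still work after the rename;
# "./weights-copy" is an illustrative output directory, not from the commit.
pipe.save_pretrained("./weights-copy")
```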