Spaces: Running on Zero
Commit 27cacbd · 1 Parent(s): 8231326
root committed: support safetensor
Browse files:
- P3-SAM/demo/assets/Beetle_Car.glb +0 -0
- P3-SAM/demo/assets/Beetle_Car.png +0 -0
- P3-SAM/demo/assets/Female_Warrior.glb +0 -0
- P3-SAM/demo/assets/Female_Warrior.png +0 -0
- P3-SAM/demo/assets/Suspended_Island.glb +0 -0
- P3-SAM/demo/assets/Suspended_Island.png +0 -0
- P3-SAM/demo/auto_mask_no_postprocess.py +0 -0
- P3-SAM/model.py +8 -3
- XPart/data/Coffee_Machine.glb +0 -0
- XPart/data/Coffee_Machine.png +0 -0
- XPart/data/Computer_Desk.glb +0 -0
- XPart/data/Computer_Desk.png +0 -0
- XPart/data/Gundam.glb +0 -0
- XPart/data/Gundam.png +0 -0
- XPart/data/Koi_Fish.png +0 -0
- XPart/data/Motorcycle.glb +0 -0
- XPart/data/Motorcycle.png +0 -0
- XPart/partgen/bbox_estimator/auto_mask_api.py +17 -21
- XPart/partgen/partformer_pipeline.py +34 -32
- XPart/partgen/utils/misc.py +2 -2
- app.py +1 -2
- app_test.py +0 -0
- requirements.txt +1 -0
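At a glance, this commit swaps pickle checkpoints (torch.load) for safetensors files (safetensors.torch.load_file) across P3-SAM and XPart, and points the Hugging Face downloads at per-component .safetensors files. For context, a one-off conversion of this kind is sketched below; it is not part of the commit, the helper name and paths are illustrative, and it assumes the pickle checkpoint keeps its weights under a "state_dict" key, as the P3-SAM loader in the diff expects for .pt/.ckpt files.

import torch
from safetensors.torch import save_file

def convert_to_safetensors(src_path: str, dst_path: str) -> None:
    # Load the pickle checkpoint and pull out the flat weight dict.
    ckpt = torch.load(src_path, map_location="cpu")
    state_dict = ckpt.get("state_dict", ckpt)
    # safetensors stores plain tensors only; clone to drop shared storage.
    state_dict = {k: v.detach().clone().contiguous() for k, v in state_dict.items()}
    save_file(state_dict, dst_path)

# e.g. convert_to_safetensors("p3sam.ckpt", "p3sam/p3sam.safetensors")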
P3-SAM/demo/assets/Beetle_Car.glb
CHANGED
File without changes

P3-SAM/demo/assets/Beetle_Car.png
CHANGED
Git LFS Details

P3-SAM/demo/assets/Female_Warrior.glb
CHANGED
File without changes

P3-SAM/demo/assets/Female_Warrior.png
CHANGED
Git LFS Details

P3-SAM/demo/assets/Suspended_Island.glb
CHANGED
File without changes

P3-SAM/demo/assets/Suspended_Island.png
CHANGED
Git LFS Details

P3-SAM/demo/auto_mask_no_postprocess.py
CHANGED
File without changes
P3-SAM/model.py
CHANGED

@@ -113,14 +113,19 @@ def load_state_dict(self,
                     ignore_seg_s2_mlp=False,
                     ignore_iou_mlp=False):
     if ckpt_path is not None:
-        …
+        if ckpt_path.endswith('.pt') or ckpt_path.endswith('.ckpt'):
+            state_dict = torch.load(ckpt_path, map_location="cpu")["state_dict"]
+        elif ckpt_path.endswith('.safetensors'):
+            from safetensors.torch import load_file
+            state_dict = load_file(ckpt_path)
     elif state_dict is None:
         # download from huggingface
         print(f'trying to download model from huggingface...')
         from huggingface_hub import hf_hub_download
-        ckpt_path = hf_hub_download(repo_id="tencent/Hunyuan3D-Part", filename="p3sam.…
+        ckpt_path = hf_hub_download(repo_id="tencent/Hunyuan3D-Part", filename="p3sam/p3sam.safetensors", local_dir='weights')
         print(f'download model from huggingface to: {ckpt_path}')
-        …
+        from safetensors.torch import load_file
+        state_dict = load_file(ckpt_path)
 
     local_state_dict = self.state_dict()
     seen_keys = {k: False for k in local_state_dict.keys()}
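For reference, the extension dispatch introduced above is shown as a standalone sketch below; the helper name is illustrative rather than the repo's API, and the "state_dict" key for pickle checkpoints is taken from the hunk itself.

import torch
from safetensors.torch import load_file

def load_weights(ckpt_path: str) -> dict:
    """Return a flat tensor dict from either a pickle or a safetensors checkpoint."""
    if ckpt_path.endswith((".pt", ".ckpt")):
        # Pickle checkpoints in this repo wrap their weights under "state_dict".
        return torch.load(ckpt_path, map_location="cpu")["state_dict"]
    if ckpt_path.endswith(".safetensors"):
        # safetensors loads tensors without executing pickle code.
        return load_file(ckpt_path)
    raise ValueError(f"unsupported checkpoint format: {ckpt_path}")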
XPart/data/Coffee_Machine.glb
CHANGED
File without changes

XPart/data/Coffee_Machine.png
CHANGED
Git LFS Details

XPart/data/Computer_Desk.glb
CHANGED
File without changes

XPart/data/Computer_Desk.png
CHANGED
Git LFS Details

XPart/data/Gundam.glb
CHANGED
File without changes

XPart/data/Gundam.png
CHANGED
Git LFS Details

XPart/data/Koi_Fish.png
CHANGED
Git LFS Details

XPart/data/Motorcycle.glb
CHANGED
File without changes

XPart/data/Motorcycle.png
CHANGED
Git LFS Details
XPart/partgen/bbox_estimator/auto_mask_api.py
CHANGED

@@ -33,24 +33,22 @@ class YSAM(nn.Module):
         super().__init__()
         build_P3SAM(self)
 
-    def load_state_dict(
-        …
-        ignore_iou_mlp=ignore_iou_mlp,
-    )
+    def load_state_dict(self,
+                        ckpt_path=None,
+                        state_dict=None,
+                        strict=True,
+                        assign=False,
+                        ignore_seg_mlp=False,
+                        ignore_seg_s2_mlp=False,
+                        ignore_iou_mlp=False):
+        load_state_dict(self,
+                        ckpt_path=ckpt_path,
+                        state_dict=state_dict,
+                        strict=strict,
+                        assign=assign,
+                        ignore_seg_mlp=ignore_seg_mlp,
+                        ignore_seg_s2_mlp=ignore_seg_s2_mlp,
+                        ignore_iou_mlp=ignore_iou_mlp)
 
     def forward(self, feats, points, point_prompt, iter=1):
         """

@@ -1362,9 +1360,7 @@ class AutoMask:
         post_process: bool, 是否后处理 (whether to post-process)
         """
         self.model = YSAM()
-        self.model.load_state_dict(
-            state_dict=torch.load(ckpt_path, weights_only=False, map_location="cpu")["state_dict"]
-        )
+        self.model.load_state_dict(ckpt_path)
         self.model.eval()
         # self.model_parallel = torch.nn.DataParallel(self.model)
         self.model_parallel = self.model
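The override in the first hunk exists so callers can hand YSAM a checkpoint path instead of a pre-loaded state dict, which is exactly what AutoMask now does in the second hunk. A minimal self-contained illustration of that pattern follows; the class and layer names are illustrative, not the repo's, and the "state_dict" key assumption matches the P3-SAM loader above.

import torch
import torch.nn as nn
from safetensors.torch import load_file

class PathAwareModule(nn.Module):
    """Toy module whose load_state_dict also accepts a checkpoint path."""

    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(4, 2)

    def load_state_dict(self, ckpt_path=None, state_dict=None, strict=True):
        # Note: deliberately shadows nn.Module's usual signature, as YSAM does.
        if ckpt_path is not None:
            if ckpt_path.endswith((".pt", ".ckpt")):
                state_dict = torch.load(ckpt_path, map_location="cpu")["state_dict"]
            elif ckpt_path.endswith(".safetensors"):
                state_dict = load_file(ckpt_path)
        return super().load_state_dict(state_dict, strict=strict)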
XPart/partgen/partformer_pipeline.py
CHANGED

@@ -8,6 +8,7 @@ from tqdm import tqdm
 import copy
 from typing import List, Optional, Union
 import os
+from safetensors.torch import load_file
 from .utils.mesh_utils import (
     SampleMesh,
     load_surface_points,

@@ -21,7 +22,9 @@ from .utils.misc import (
     get_config_from_file,
     smart_load_model,
 )
+from easydict import EasyDict
 
+import json
 from diffusers.utils.torch_utils import randn_tensor
 from pathlib import Path
 

@@ -190,7 +193,7 @@ class PartFormerPipeline(TokenAllocMixin):
                     ckpt[model_name][new_key] = value
         else:
             # ckpt = torch.load(ckpt_path, map_location="cpu", weights_only=True)
-            ckpt = torch.load(ckpt_path, …
+            ckpt = torch.load(ckpt_path, map_location="cpu")
         # load model
         model = instantiate_from_config(config["model"])
         # model.load_state_dict(ckpt["model"])

@@ -222,43 +225,42 @@ class PartFormerPipeline(TokenAllocMixin):
     @classmethod
     def from_pretrained(
         cls,
-        …
+        model_path="tencent/Hunyuan3D-Part",
         dtype=torch.float32,
-        ignore_keys=(),
         device="cuda",
         **kwargs,
     ):
-        …
-        str(
-            Path(__file__).parent.parent
-            / "config"
-            / "partformer_full_pipeline_512_with_sonata.yaml"
-        )
-        )
-        ckpt_path = smart_load_model(
-            model_path="tencent/Hunyuan3D-Part",
-        )
-        …
-        # model.load_state_dict(ckpt["model"])
-        init_from_ckpt(model, ckpt, prefix="model", ignore_keys=ignore_keys)
-        vae = instantiate_from_config(config["shapevae"])
-        # vae.load_state_dict(ckpt["shapevae"], strict=False)
-        init_from_ckpt(vae, ckpt, prefix="shapevae", ignore_keys=ignore_keys)
-        if config.get("conditioner", None) is not None:
-            conditioner = instantiate_from_config(config["conditioner"])
-            init_from_ckpt(
-                conditioner, ckpt, prefix="conditioner", ignore_keys=ignore_keys
-            )
-        else:
-            conditioner = vae
-        scheduler = instantiate_from_config(config["scheduler"])
-        config["bbox_predictor"]["params"]["ckpt_path"] = os.path.join(
-            ckpt_path, "p3sam.pt"
-        )
-        …
+        model_dir = smart_load_model(
+            model_path=model_path,
+        )
+        model_ckpt = load_file(os.path.join(model_dir, "model/model.safetensors"))
+        conditioner_ckpt = load_file(
+            os.path.join(model_dir, "conditioner/conditioner.safetensors")
+        )
+        shapevae_ckpt = load_file(
+            os.path.join(model_dir, "shapevae/shapevae.safetensors")
+        )
+        p3sam_path = os.path.join(model_dir, "p3sam/p3sam.safetensors")
+        with open(os.path.join(model_dir, "model/config.json"), "r") as f:
+            model_config = EasyDict(json.load(f))
+        with open(os.path.join(model_dir, "conditioner/config.json"), "r") as f:
+            conditioner_config = EasyDict(json.load(f))
+        with open(os.path.join(model_dir, "shapevae/config.json"), "r") as f:
+            shapevae_config = EasyDict(json.load(f))
+        with open(os.path.join(model_dir, "scheduler/config.json"), "r") as f:
+            scheduler_config = EasyDict(json.load(f))
+        with open(os.path.join(model_dir, "p3sam/config.json"), "r") as f:
+            bbox_predictor_config = EasyDict(json.load(f))
+        bbox_predictor_config["params"]["ckpt_path"] = p3sam_path
+        # load model
+        model = instantiate_from_config(model_config)
+        model.load_state_dict(model_ckpt)
+        vae = instantiate_from_config(shapevae_config)
+        vae.load_state_dict(shapevae_ckpt)
+        conditioner = instantiate_from_config(conditioner_config)
+        conditioner.load_state_dict(conditioner_ckpt)
+        scheduler = instantiate_from_config(scheduler_config)
+        bbox_predictor = instantiate_from_config(bbox_predictor_config)
         model_kwargs = dict(
             vae=vae,
             model=model,
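A usage sketch of the reworked entry point follows, together with the per-component layout it reads; the layout is inferred from the paths in the hunk, while the import path and the pass-through of verbose via **kwargs are assumptions.

# <model_dir>/                            (resolved by smart_load_model)
#   model/model.safetensors               model/config.json
#   conditioner/conditioner.safetensors   conditioner/config.json
#   shapevae/shapevae.safetensors         shapevae/config.json
#   scheduler/config.json
#   p3sam/p3sam.safetensors               p3sam/config.json
import torch
from partgen.partformer_pipeline import PartFormerPipeline  # import path assumed

pipeline = PartFormerPipeline.from_pretrained(
    model_path="tencent/Hunyuan3D-Part",  # HF repo id, cached under HY3DGEN_MODELS
    dtype=torch.float32,
    device="cuda",
    verbose=True,  # assumed to be forwarded via **kwargs, as in app.py below
)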
XPart/partgen/utils/misc.py
CHANGED

@@ -150,7 +150,7 @@ def smart_load_model(
     original_model_path = model_path
     # try local path
     base_dir = os.environ.get("HY3DGEN_MODELS", "~/.cache/xpart")
-    …
+    model_path = os.path.expanduser(os.path.join(base_dir, model_path))
     logger.info(f"Try to load model from local path: {model_path}")
     if not os.path.exists(model_path):
         logger.info("Model path not exists, try to download from huggingface")

@@ -161,7 +161,7 @@ def smart_load_model(
         path = snapshot_download(
             repo_id=original_model_path,
             # allow_patterns=[f"{subfolder}/*"],  # 关键修改:模式匹配子文件夹 (key change: pattern-match the subfolder)
-            local_dir=…
+            local_dir=model_path,
         )
         model_path = path  # os.path.join(path, subfolder)  # 保持路径拼接逻辑不变 (keep the path-joining logic unchanged)
     except ImportError:
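In effect, smart_load_model now resolves a repo id to a fixed cache directory and downloads into it only when it is missing. A simplified sketch of that flow is below; the environment variable, default cache path, and snapshot_download call come from the hunk, while the helper name and early-return structure are illustrative.

import os
from huggingface_hub import snapshot_download

def resolve_model_dir(model_path: str = "tencent/Hunyuan3D-Part") -> str:
    # Cache root comes from HY3DGEN_MODELS, defaulting to ~/.cache/xpart.
    base_dir = os.environ.get("HY3DGEN_MODELS", "~/.cache/xpart")
    local_path = os.path.expanduser(os.path.join(base_dir, model_path))
    if os.path.exists(local_path):
        return local_path  # reuse an earlier download
    # Download the whole repo snapshot straight into the cache location.
    return snapshot_download(repo_id=model_path, local_dir=local_path)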
app.py
CHANGED

@@ -25,9 +25,8 @@ def _load_pipeline():
         config, "ckpt_path"
     ), "ckpt or ckpt_path must be specified in config"
     pipeline = PartFormerPipeline.from_pretrained(
-        …
+        model_path="tencent/Hunyuan3D-Part",
         verbose=True,
-        ignore_keys=config.get("ignore_keys", []),
     )
 
     device = "cuda"
app_test.py
CHANGED
File without changes
requirements.txt
CHANGED

@@ -41,6 +41,7 @@ fpsample
 scikit-learn
 addict
 scikit-image
+easydict
 
 # sonata
 spconv-cu126