diff --git a/.gitattributes b/.gitattributes
index cf9d3edf9ef0d30f7025244ab3c0382318bcb218..6c32ac01d69769a685cf53c3d51a80113f29fe8e 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1,138 +1,143 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/demo.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/052.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/073.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/075.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/1008.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/101.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/1022.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/1029.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/1111.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/1123.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/1128.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/1135.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/1146.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/1148.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/1154.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/1180.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/1196.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/1204.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/1234.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/1310.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/1316.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/1354.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/1429.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/1493.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/1582.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/1583.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/1596.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/1601.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/1603.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/1626.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/1627.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/167.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/1670.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/1679.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/1687.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/1698.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/1715.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/1735.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/1738.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/1744.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/1758.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/1772.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/1773.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/1778.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/1898.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/191.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/195.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/197.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/198.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/202.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/203.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/218.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/219.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/379.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/380.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/419.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/888.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_images/895.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_mv_images/1/back.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_mv_images/1/front.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_mv_images/1/left.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_mv_images/10/back.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_mv_images/10/front.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_mv_images/10/left.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_mv_images/11/back.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_mv_images/11/front.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_mv_images/11/left.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_mv_images/12/back.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_mv_images/12/front.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_mv_images/12/left.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_mv_images/13/back.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_mv_images/13/front.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_mv_images/13/left.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_mv_images/14/back.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_mv_images/14/front.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_mv_images/14/left.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_mv_images/2/back.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_mv_images/2/front.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_mv_images/2/left.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_mv_images/3/back.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_mv_images/3/front.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_mv_images/3/left.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_mv_images/4/back.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_mv_images/4/front.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_mv_images/4/left.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_mv_images/5/back.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_mv_images/5/front.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_mv_images/5/left.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_mv_images/6/back.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_mv_images/6/front.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_mv_images/6/left.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_mv_images/7/back.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_mv_images/7/front.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_mv_images/7/left.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_mv_images/8/back.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_mv_images/8/front.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/example_mv_images/8/left.png filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/images/arch.jpg filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/images/e2e-1.gif filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/images/e2e-2.gif filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/images/system.jpg filter=lfs diff=lfs merge=lfs -text
-S3D-2/assets/images/teaser.jpg filter=lfs diff=lfs merge=lfs -text
-S3D-2/custom_rasterizer-0.1-cp310-cp310-linux_x86_64.whl filter=lfs diff=lfs merge=lfs -text
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+assets/images/arch.jpg filter=lfs diff=lfs merge=lfs -text
+assets/images/e2e-1.gif filter=lfs diff=lfs merge=lfs -text
+assets/images/e2e-2.gif filter=lfs diff=lfs merge=lfs -text
+assets/images/system.jpg filter=lfs diff=lfs merge=lfs -text
+assets/images/teaser.jpg filter=lfs diff=lfs merge=lfs -text
+gradio_cache/0/textured_mesh.glb filter=lfs diff=lfs merge=lfs -text
+gradio_cache/3/textured_mesh.glb filter=lfs diff=lfs merge=lfs -text
+gradio_cache/4/textured_mesh.glb filter=lfs diff=lfs merge=lfs -text
+gradio_cache/5/textured_mesh.glb filter=lfs diff=lfs merge=lfs -text
+*.whl filter=lfs diff=lfs merge=lfs -text
+gradio_cache/1/textured_mesh.glb filter=lfs diff=lfs merge=lfs -text
+assets/demo.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/052.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/073.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/075.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/1008.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/101.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/1022.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/1029.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/1111.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/1123.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/1128.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/1135.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/1146.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/1148.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/1154.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/1180.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/1196.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/1204.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/1234.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/1310.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/1316.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/1354.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/1429.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/1493.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/1582.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/1583.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/1596.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/1601.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/1603.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/1626.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/1627.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/167.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/1670.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/1679.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/1687.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/1698.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/1715.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/1735.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/1738.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/1744.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/1758.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/1772.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/1773.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/1778.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/1898.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/191.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/195.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/197.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/198.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/202.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/203.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/218.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/219.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/379.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/380.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/419.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/888.png filter=lfs diff=lfs merge=lfs -text
+assets/example_images/895.png filter=lfs diff=lfs merge=lfs -text
+assets/example_mv_images/1/back.png filter=lfs diff=lfs merge=lfs -text
+assets/example_mv_images/1/front.png filter=lfs diff=lfs merge=lfs -text
+assets/example_mv_images/1/left.png filter=lfs diff=lfs merge=lfs -text
+assets/example_mv_images/10/back.png filter=lfs diff=lfs merge=lfs -text
+assets/example_mv_images/10/front.png filter=lfs diff=lfs merge=lfs -text
+assets/example_mv_images/10/left.png filter=lfs diff=lfs merge=lfs -text
+assets/example_mv_images/11/back.png filter=lfs diff=lfs merge=lfs -text
+assets/example_mv_images/11/front.png filter=lfs diff=lfs merge=lfs -text
+assets/example_mv_images/11/left.png filter=lfs diff=lfs merge=lfs -text
+assets/example_mv_images/12/back.png filter=lfs diff=lfs merge=lfs -text
+assets/example_mv_images/12/front.png filter=lfs diff=lfs merge=lfs -text
+assets/example_mv_images/12/left.png filter=lfs diff=lfs merge=lfs -text
+assets/example_mv_images/13/back.png filter=lfs diff=lfs merge=lfs -text
+assets/example_mv_images/13/front.png filter=lfs diff=lfs merge=lfs -text
+assets/example_mv_images/13/left.png filter=lfs diff=lfs merge=lfs -text
+assets/example_mv_images/14/back.png filter=lfs diff=lfs merge=lfs -text
+assets/example_mv_images/14/front.png filter=lfs diff=lfs merge=lfs -text
+assets/example_mv_images/14/left.png filter=lfs diff=lfs merge=lfs -text
+assets/example_mv_images/2/back.png filter=lfs diff=lfs merge=lfs -text
+assets/example_mv_images/2/front.png filter=lfs diff=lfs merge=lfs -text
+assets/example_mv_images/2/left.png filter=lfs diff=lfs merge=lfs -text
+assets/example_mv_images/3/back.png filter=lfs diff=lfs merge=lfs -text
+assets/example_mv_images/3/front.png filter=lfs diff=lfs merge=lfs -text
+assets/example_mv_images/3/left.png filter=lfs diff=lfs merge=lfs -text
+assets/example_mv_images/4/back.png filter=lfs diff=lfs merge=lfs -text
+assets/example_mv_images/4/front.png filter=lfs diff=lfs merge=lfs -text
+assets/example_mv_images/4/left.png filter=lfs diff=lfs merge=lfs -text
+assets/example_mv_images/5/back.png filter=lfs diff=lfs merge=lfs -text
+assets/example_mv_images/5/front.png filter=lfs diff=lfs merge=lfs -text
+assets/example_mv_images/5/left.png filter=lfs diff=lfs merge=lfs -text
+assets/example_mv_images/6/back.png filter=lfs diff=lfs merge=lfs -text
+assets/example_mv_images/6/front.png filter=lfs diff=lfs merge=lfs -text
+assets/example_mv_images/6/left.png filter=lfs diff=lfs merge=lfs -text
+assets/example_mv_images/7/back.png filter=lfs diff=lfs merge=lfs -text
+assets/example_mv_images/7/front.png filter=lfs diff=lfs merge=lfs -text
+assets/example_mv_images/7/left.png filter=lfs diff=lfs merge=lfs -text
+assets/example_mv_images/8/back.png filter=lfs diff=lfs merge=lfs -text
+assets/example_mv_images/8/front.png filter=lfs diff=lfs merge=lfs -text
+assets/example_mv_images/8/left.png filter=lfs diff=lfs merge=lfs -text
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..df950ef4916ab0a05d7ef072f547d2e7309e1e2b
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,168 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+.DS_Store
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# UV
+# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+#uv.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
+.pdm.toml
+.pdm-python
+.pdm-build/
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+gradio_cache/
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..8b455fe02a9469ebfe72cc73d79298c97db1aa62
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,80 @@
+TENCENT HUNYUAN 3D 2.0 COMMUNITY LICENSE AGREEMENT
+Tencent Hunyuan 3D 2.0 Release Date: January 21, 2025
+THIS LICENSE AGREEMENT DOES NOT APPLY IN THE EUROPEAN UNION, UNITED KINGDOM AND SOUTH KOREA AND IS EXPRESSLY LIMITED TO THE TERRITORY, AS DEFINED BELOW.
+By clicking to agree or by using, reproducing, modifying, distributing, performing or displaying any portion or element of the Tencent Hunyuan 3D 2.0 Works, including via any Hosted Service, You will be deemed to have recognized and accepted the content of this Agreement, which is effective immediately.
+1. DEFINITIONS.
+a. “Acceptable Use Policy” shall mean the policy made available by Tencent as set forth in the Exhibit A.
+b. “Agreement” shall mean the terms and conditions for use, reproduction, distribution, modification, performance and displaying of Tencent Hunyuan 3D 2.0 Works or any portion or element thereof set forth herein.
+c. “Documentation” shall mean the specifications, manuals and documentation for Tencent Hunyuan 3D 2.0 made publicly available by Tencent.
+d. “Hosted Service” shall mean a hosted service offered via an application programming interface (API), web access, or any other electronic or remote means.
+e. “Licensee,” “You” or “Your” shall mean a natural person or legal entity exercising the rights granted by this Agreement and/or using the Tencent Hunyuan 3D 2.0 Works for any purpose and in any field of use.
+f. “Materials” shall mean, collectively, Tencent’s proprietary Tencent Hunyuan 3D 2.0 and Documentation (and any portion thereof) as made available by Tencent under this Agreement.
+g. “Model Derivatives” shall mean all: (i) modifications to Tencent Hunyuan 3D 2.0 or any Model Derivative of Tencent Hunyuan 3D 2.0; (ii) works based on Tencent Hunyuan 3D 2.0 or any Model Derivative of Tencent Hunyuan 3D 2.0; or (iii) any other machine learning model which is created by transfer of patterns of the weights, parameters, operations, or Output of Tencent Hunyuan 3D 2.0 or any Model Derivative of Tencent Hunyuan 3D 2.0, to that model in order to cause that model to perform similarly to Tencent Hunyuan 3D 2.0 or a Model Derivative of Tencent Hunyuan 3D 2.0, including distillation methods, methods that use intermediate data representations, or methods based on the generation of synthetic data Outputs by Tencent Hunyuan 3D 2.0 or a Model Derivative of Tencent Hunyuan 3D 2.0 for training that model. For clarity, Outputs by themselves are not deemed Model Derivatives.
+h. “Output” shall mean the information and/or content output of Tencent Hunyuan 3D 2.0 or a Model Derivative that results from operating or otherwise using Tencent Hunyuan 3D 2.0 or a Model Derivative, including via a Hosted Service.
+i. “Tencent,” “We” or “Us” shall mean THL A29 Limited.
+j. “Tencent Hunyuan 3D 2.0” shall mean the 3D generation models and their software and algorithms, including trained model weights, parameters (including optimizer states), machine-learning model code, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing made publicly available by Us at https://github.com/Tencent/Hunyuan3D-2.
+k. “Tencent Hunyuan 3D 2.0 Works” shall mean: (i) the Materials; (ii) Model Derivatives; and (iii) all derivative works thereof.
+l. “Territory” shall mean the worldwide territory, excluding the territory of the European Union, United Kingdom and South Korea.
+m. “Third Party” or “Third Parties” shall mean individuals or legal entities that are not under common control with Us or You.
+n. “including” shall mean including but not limited to.
+2. GRANT OF RIGHTS.
+We grant You, for the Territory only, a non-exclusive, non-transferable and royalty-free limited license under Tencent’s intellectual property or other rights owned by Us embodied in or utilized by the Materials to use, reproduce, distribute, create derivative works of (including Model Derivatives), and make modifications to the Materials, only in accordance with the terms of this Agreement and the Acceptable Use Policy, and You must not violate (or encourage or permit anyone else to violate) any term of this Agreement or the Acceptable Use Policy.
+3. DISTRIBUTION.
+You may, subject to Your compliance with this Agreement, distribute or make available to Third Parties the Tencent Hunyuan 3D 2.0 Works, exclusively in the Territory, provided that You meet all of the following conditions:
+a. You must provide all such Third Party recipients of the Tencent Hunyuan 3D 2.0 Works or products or services using them a copy of this Agreement;
+b. You must cause any modified files to carry prominent notices stating that You changed the files;
+c. You are encouraged to: (i) publish at least one technology introduction blogpost or one public statement expressing Your experience of using the Tencent Hunyuan 3D 2.0 Works; and (ii) mark the products or services developed by using the Tencent Hunyuan 3D 2.0 Works to indicate that the product/service is “Powered by Tencent Hunyuan”; and
+d. All distributions to Third Parties (other than through a Hosted Service) must be accompanied by a “Notice” text file that contains the following notice: “Tencent Hunyuan 3D 2.0 is licensed under the Tencent Hunyuan 3D 2.0 Community License Agreement, Copyright © 2025 Tencent. All Rights Reserved. The trademark rights of “Tencent Hunyuan” are owned by Tencent or its affiliate.”
+You may add Your own copyright statement to Your modifications and, except as set forth in this Section and in Section 5, may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Model Derivatives as a whole, provided Your use, reproduction, modification, distribution, performance and display of the work otherwise complies with the terms and conditions of this Agreement (including as regards the Territory). If You receive Tencent Hunyuan 3D 2.0 Works from a Licensee as part of an integrated end user product, then this Section 3 of this Agreement will not apply to You.
+4. ADDITIONAL COMMERCIAL TERMS.
+If, on the Tencent Hunyuan 3D 2.0 version release date, the monthly active users of all products or services made available by or for Licensee is greater than 1 million monthly active users in the preceding calendar month, You must request a license from Tencent, which Tencent may grant to You in its sole discretion, and You are not authorized to exercise any of the rights under this Agreement unless or until Tencent otherwise expressly grants You such rights.
+Subject to Tencent's written approval, you may request a license for the use of Tencent Hunyuan 3D 2.0 by submitting the following information to hunyuan3d@tencent.com:
+a. Your company’s name and associated business sector that plans to use Tencent Hunyuan 3D 2.0.
+b. Your intended use case and the purpose of using Tencent Hunyuan 3D 2.0.
+5. RULES OF USE.
+a. Your use of the Tencent Hunyuan 3D 2.0 Works must comply with applicable laws and regulations (including trade compliance laws and regulations) and adhere to the Acceptable Use Policy for the Tencent Hunyuan 3D 2.0 Works, which is hereby incorporated by reference into this Agreement. You must include the use restrictions referenced in these Sections 5(a) and 5(b) as an enforceable provision in any agreement (e.g., license agreement, terms of use, etc.) governing the use and/or distribution of Tencent Hunyuan 3D 2.0 Works and You must provide notice to subsequent users to whom You distribute that Tencent Hunyuan 3D 2.0 Works are subject to the use restrictions in these Sections 5(a) and 5(b).
+b. You must not use the Tencent Hunyuan 3D 2.0 Works or any Output or results of the Tencent Hunyuan 3D 2.0 Works to improve any other AI model (other than Tencent Hunyuan 3D 2.0 or Model Derivatives thereof).
+c. You must not use, reproduce, modify, distribute, or display the Tencent Hunyuan 3D 2.0 Works, Output or results of the Tencent Hunyuan 3D 2.0 Works outside the Territory. Any such use outside the Territory is unlicensed and unauthorized under this Agreement.
+6. INTELLECTUAL PROPERTY.
+a. Subject to Tencent’s ownership of Tencent Hunyuan 3D 2.0 Works made by or for Tencent and intellectual property rights therein, conditioned upon Your compliance with the terms and conditions of this Agreement, as between You and Tencent, You will be the owner of any derivative works and modifications of the Materials and any Model Derivatives that are made by or for You.
+b. No trademark licenses are granted under this Agreement, and in connection with the Tencent Hunyuan 3D 2.0 Works, Licensee may not use any name or mark owned by or associated with Tencent or any of its affiliates, except as required for reasonable and customary use in describing and distributing the Tencent Hunyuan 3D 2.0 Works. Tencent hereby grants You a license to use “Tencent Hunyuan” (the “Mark”) in the Territory solely as required to comply with the provisions of Section 3(c), provided that You comply with any applicable laws related to trademark protection. All goodwill arising out of Your use of the Mark will inure to the benefit of Tencent.
+c. If You commence a lawsuit or other proceedings (including a cross-claim or counterclaim in a lawsuit) against Us or any person or entity alleging that the Materials or any Output, or any portion of any of the foregoing, infringe any intellectual property or other right owned or licensable by You, then all licenses granted to You under this Agreement shall terminate as of the date such lawsuit or other proceeding is filed. You will defend, indemnify and hold harmless Us from and against any claim by any Third Party arising out of or related to Your or the Third Party’s use or distribution of the Tencent Hunyuan 3D 2.0 Works.
+d. Tencent claims no rights in Outputs You generate. You and Your users are solely responsible for Outputs and their subsequent uses.
+7. DISCLAIMERS OF WARRANTY AND LIMITATIONS OF LIABILITY.
+a. We are not obligated to support, update, provide training for, or develop any further version of the Tencent Hunyuan 3D 2.0 Works or to grant any license thereto.
+b. UNLESS AND ONLY TO THE EXTENT REQUIRED BY APPLICABLE LAW, THE TENCENT HUNYUAN 3D 2.0 WORKS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED “AS IS” WITHOUT ANY EXPRESS OR IMPLIED WARRANTIES OF ANY KIND INCLUDING ANY WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, COURSE OF DEALING, USAGE OF TRADE, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING, REPRODUCING, MODIFYING, PERFORMING, DISPLAYING OR DISTRIBUTING ANY OF THE TENCENT HUNYUAN 3D 2.0 WORKS OR OUTPUTS AND ASSUME ANY AND ALL RISKS ASSOCIATED WITH YOUR OR A THIRD PARTY’S USE OR DISTRIBUTION OF ANY OF THE TENCENT HUNYUAN 3D 2.0 WORKS OR OUTPUTS AND YOUR EXERCISE OF RIGHTS AND PERMISSIONS UNDER THIS AGREEMENT.
+c. TO THE FULLEST EXTENT PERMITTED BY APPLICABLE LAW, IN NO EVENT SHALL TENCENT OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, FOR ANY DAMAGES, INCLUDING ANY DIRECT, INDIRECT, SPECIAL, INCIDENTAL, EXEMPLARY, CONSEQUENTIAL OR PUNITIVE DAMAGES, OR LOST PROFITS OF ANY KIND ARISING FROM THIS AGREEMENT OR RELATED TO ANY OF THE TENCENT HUNYUAN 3D 2.0 WORKS OR OUTPUTS, EVEN IF TENCENT OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING.
+8. SURVIVAL AND TERMINATION.
+a. The term of this Agreement shall commence upon Your acceptance of this Agreement or access to the Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein.
+b. We may terminate this Agreement if You breach any of the terms or conditions of this Agreement. Upon termination of this Agreement, You must promptly delete and cease use of the Tencent Hunyuan 3D 2.0 Works. Sections 6(a), 6(c), 7 and 9 shall survive the termination of this Agreement.
+9. GOVERNING LAW AND JURISDICTION.
+a. This Agreement and any dispute arising out of or relating to it will be governed by the laws of the Hong Kong Special Administrative Region of the People’s Republic of China, without regard to conflict of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement.
+b. Exclusive jurisdiction and venue for any dispute arising out of or relating to this Agreement will be a court of competent jurisdiction in the Hong Kong Special Administrative Region of the People’s Republic of China, and Tencent and Licensee consent to the exclusive jurisdiction of such court with respect to any such dispute.
+
+EXHIBIT A
+ACCEPTABLE USE POLICY
+
+Tencent reserves the right to update this Acceptable Use Policy from time to time.
+Last modified: November 5, 2024
+
+Tencent endeavors to promote safe and fair use of its tools and features, including Tencent Hunyuan 3D 2.0. You agree not to use Tencent Hunyuan 3D 2.0 or Model Derivatives:
+1. Outside the Territory;
+2. In any way that violates any applicable national, federal, state, local, international or any other law or regulation;
+3. To harm Yourself or others;
+4. To repurpose or distribute output from Tencent Hunyuan 3D 2.0 or any Model Derivatives to harm Yourself or others;
+5. To override or circumvent the safety guardrails and safeguards We have put in place;
+6. For the purpose of exploiting, harming or attempting to exploit or harm minors in any way;
+7. To generate or disseminate verifiably false information and/or content with the purpose of harming others or influencing elections;
+8. To generate or facilitate false online engagement, including fake reviews and other means of fake online engagement;
+9. To intentionally defame, disparage or otherwise harass others;
+10. To generate and/or disseminate malware (including ransomware) or any other content to be used for the purpose of harming electronic systems;
+11. To generate or disseminate personal identifiable information with the purpose of harming others;
+12. To generate or disseminate information (including images, code, posts, articles), and place the information in any public context (including – through the use of bot generated tweets), without expressly and conspicuously identifying that the information and/or content is machine generated;
+13. To impersonate another individual without consent, authorization, or legal right;
+14. To make high-stakes automated decisions in domains that affect an individual’s safety, rights or wellbeing (e.g., law enforcement, migration, medicine/health, management of critical infrastructure, safety components of products, essential services, credit, employment, housing, education, social scoring, or insurance);
+15. In a manner that violates or disrespects the social ethics and moral standards of other countries or regions;
+16. To perform, facilitate, threaten, incite, plan, promote or encourage violent extremism or terrorism;
+17. For any use intended to discriminate against or harm individuals or groups based on protected characteristics or categories, online or offline social behavior or known or predicted personal or personality characteristics;
+18. To intentionally exploit any of the vulnerabilities of a specific group of persons based on their age, social, physical or mental characteristics, in order to materially distort the behavior of a person pertaining to that group in a manner that causes or is likely to cause that person or another person physical or psychological harm;
+19. For military purposes;
+20. To engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or other professional practices.
diff --git a/NOTICE b/NOTICE
new file mode 100644
index 0000000000000000000000000000000000000000..7908fba8eaf827287afdae1096c54e10f38ee5ff
--- /dev/null
+++ b/NOTICE
@@ -0,0 +1,214 @@
+Usage and Legal Notices:
+
+Tencent is pleased to support the open source community by making Hunyuan 3D 2.0 available.
+
+Copyright (C) 2025 THL A29 Limited, a Tencent company. All rights reserved. The below software and/or models in this distribution may have been modified by THL A29 Limited ("Tencent Modifications"). All Tencent Modifications are Copyright (C) THL A29 Limited.
+
+Hunyuan 3D 2.0 is licensed under the TENCENT HUNYUAN 3D 2.0 COMMUNITY LICENSE AGREEMENT except for the third-party components listed below, which are licensed under different terms. Hunyuan 3D 2.0 does not impose any additional limitations beyond what is outlined in the respective licenses of these third-party components. Users must comply with all terms and conditions of original licenses of these third-party components and must ensure that the usage of the third party components adheres to all relevant laws and regulations.
+
+For avoidance of doubts, Hunyuan 3D 2.0 means inference-enabling code, parameters, and weights of this Model only, which are made publicly available by Tencent in accordance with TENCENT HUNYUAN 3D 2.0 COMMUNITY LICENSE AGREEMENT.
+
+
+Other dependencies and licenses:
+
+
+Open Source Model Licensed under the MIT and CreativeML Open RAIL++-M License:
+--------------------------------------------------------------------
+1. Stable Diffusion
+Copyright (c) 2022 Stability AI
+
+
+Terms of the MIT and CreativeML Open RAIL++-M License:
+--------------------------------------------------------------------
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+CreativeML Open RAIL++-M License
+dated November 24, 2022
+
+Section I: PREAMBLE
+
+Multimodal generative models are being widely adopted and used, and have the potential to transform the way artists, among other individuals, conceive and benefit from AI or ML technologies as a tool for content creation.
+
+Notwithstanding the current and potential benefits that these artifacts can bring to society at large, there are also concerns about potential misuses of them, either due to their technical limitations or ethical considerations.
+
+In short, this license strives for both the open and responsible downstream use of the accompanying model. When it comes to the open character, we took inspiration from open source permissive licenses regarding the grant of IP rights. Referring to the downstream responsible use, we added use-based restrictions not permitting the use of the Model in very specific scenarios, in order for the licensor to be able to enforce the license in case potential misuses of the Model may occur. At the same time, we strive to promote open and responsible research on generative models for art and content generation.
+
+Even though downstream derivative versions of the model could be released under different licensing terms, the latter will always have to include - at minimum - the same use-based restrictions as the ones in the original license (this license). We believe in the intersection between open and responsible AI development; thus, this License aims to strike a balance between both in order to enable responsible open-science in the field of AI.
+
+This License governs the use of the model (and its derivatives) and is informed by the model card associated with the model.
+
+NOW THEREFORE, You and Licensor agree as follows:
+
+1. Definitions
+
+- "License" means the terms and conditions for use, reproduction, and Distribution as defined in this document.
+- "Data" means a collection of information and/or content extracted from the dataset used with the Model, including to train, pretrain, or otherwise evaluate the Model. The Data is not licensed under this License.
+- "Output" means the results of operating a Model as embodied in informational content resulting therefrom.
+- "Model" means any accompanying machine-learning based assemblies (including checkpoints), consisting of learnt weights, parameters (including optimizer states), corresponding to the model architecture as embodied in the Complementary Material, that have been trained or tuned, in whole or in part on the Data, using the Complementary Material.
+- "Derivatives of the Model" means all modifications to the Model, works based on the Model, or any other model which is created or initialized by transfer of patterns of the weights, parameters, activations or output of the Model, to the other model, in order to cause the other model to perform similarly to the Model, including - but not limited to - distillation methods entailing the use of intermediate data representations or methods based on the generation of synthetic data by the Model for training the other model.
+- "Complementary Material" means the accompanying source code and scripts used to define, run, load, benchmark or evaluate the Model, and used to prepare data for training or evaluation, if any. This includes any accompanying documentation, tutorials, examples, etc, if any.
+- "Distribution" means any transmission, reproduction, publication or other sharing of the Model or Derivatives of the Model to a third party, including providing the Model as a hosted service made available by electronic or other remote means - e.g. API-based or web access.
+- "Licensor" means the copyright owner or entity authorized by the copyright owner that is granting the License, including the persons or entities that may have rights in the Model and/or distributing the Model.
+- "You" (or "Your") means an individual or Legal Entity exercising permissions granted by this License and/or making use of the Model for whichever purpose and in any field of use, including usage of the Model in an end-use application - e.g. chatbot, translator, image generator.
+- "Third Parties" means individuals or legal entities that are not under common control with Licensor or You.
+- "Contribution" means any work of authorship, including the original version of the Model and any modifications or additions to that Model or Derivatives of the Model thereof, that is intentionally submitted to Licensor for inclusion in the Model by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Model, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
+- "Contributor" means Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Model.
+
+Section II: INTELLECTUAL PROPERTY RIGHTS
+
+Both copyright and patent grants apply to the Model, Derivatives of the Model and Complementary Material. The Model and Derivatives of the Model are subject to additional terms as described in Section III.
+
+2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare, publicly display, publicly perform, sublicense, and distribute the Complementary Material, the Model, and Derivatives of the Model.
+3. Grant of Patent License. Subject to the terms and conditions of this License and where and as applicable, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this paragraph) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Model and the Complementary Material, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Model to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Model and/or Complementary Material or a Contribution incorporated within the Model and/or Complementary Material constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for the Model and/or Work shall terminate as of the date such litigation is asserted or filed.
+
+Section III: CONDITIONS OF USAGE, DISTRIBUTION AND REDISTRIBUTION
+
+4. Distribution and Redistribution. You may host for Third Party remote access purposes (e.g. software-as-a-service), reproduce and distribute copies of the Model or Derivatives of the Model thereof in any medium, with or without modifications, provided that You meet the following conditions:
+Use-based restrictions as referenced in paragraph 5 MUST be included as an enforceable provision by You in any type of legal agreement (e.g. a license) governing the use and/or distribution of the Model or Derivatives of the Model, and You shall give notice to subsequent users You Distribute to, that the Model or Derivatives of the Model are subject to paragraph 5. This provision does not apply to the use of Complementary Material.
+You must give any Third Party recipients of the Model or Derivatives of the Model a copy of this License;
+You must cause any modified files to carry prominent notices stating that You changed the files;
+You must retain all copyright, patent, trademark, and attribution notices excluding those notices that do not pertain to any part of the Model, Derivatives of the Model.
+You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions - respecting paragraph 4.a. - for use, reproduction, or Distribution of Your modifications, or for any such Derivatives of the Model as a whole, provided Your use, reproduction, and Distribution of the Model otherwise complies with the conditions stated in this License.
+5. Use-based restrictions. The restrictions set forth in Attachment A are considered Use-based restrictions. Therefore You cannot use the Model and the Derivatives of the Model for the specified restricted uses. You may use the Model subject to this License, including only for lawful purposes and in accordance with the License. Use may include creating any content with, finetuning, updating, running, training, evaluating and/or reparametrizing the Model. You shall require all of Your users who use the Model or a Derivative of the Model to comply with the terms of this paragraph (paragraph 5).
+6. The Output You Generate. Except as set forth herein, Licensor claims no rights in the Output You generate using the Model. You are accountable for the Output you generate and its subsequent uses. No use of the output can contravene any provision as stated in the License.
+
+Section IV: OTHER PROVISIONS
+
+7. Updates and Runtime Restrictions. To the maximum extent permitted by law, Licensor reserves the right to restrict (remotely or otherwise) usage of the Model in violation of this License.
+8. Trademarks and related. Nothing in this License permits You to make use of Licensors’ trademarks, trade names, logos or to otherwise suggest endorsement or misrepresent the relationship between the parties; and any rights not expressly granted herein are reserved by the Licensors.
+9. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Model and the Complementary Material (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Model, Derivatives of the Model, and the Complementary Material and assume any risks associated with Your exercise of permissions under this License.
+10. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Model and the Complementary Material (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
+11. Accepting Warranty or Additional Liability. While redistributing the Model, Derivatives of the Model and the Complementary Material thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
+12. If any provision of this License is held to be invalid, illegal or unenforceable, the remaining provisions shall be unaffected thereby and remain valid as if such provision had not been set forth herein.
+
+END OF TERMS AND CONDITIONS
+
+
+
+
+Attachment A
+
+Use Restrictions
+
+You agree not to use the Model or Derivatives of the Model:
+
+- In any way that violates any applicable national, federal, state, local or international law or regulation;
+- For the purpose of exploiting, harming or attempting to exploit or harm minors in any way;
+- To generate or disseminate verifiably false information and/or content with the purpose of harming others;
+- To generate or disseminate personal identifiable information that can be used to harm an individual;
+- To defame, disparage or otherwise harass others;
+- For fully automated decision making that adversely impacts an individual’s legal rights or otherwise creates or modifies a binding, enforceable obligation;
+- For any use intended to or which has the effect of discriminating against or harming individuals or groups based on online or offline social behavior or known or predicted personal or personality characteristics;
+- To exploit any of the vulnerabilities of a specific group of persons based on their age, social, physical or mental characteristics, in order to materially distort the behavior of a person pertaining to that group in a manner that causes or is likely to cause that person or another person physical or psychological harm;
+- For any use intended to or which has the effect of discriminating against individuals or groups based on legally protected characteristics or categories;
+- To provide medical advice and medical results interpretation;
+- To generate or disseminate information for the purpose to be used for administration of justice, law enforcement, immigration or asylum processes, such as predicting an individual will commit fraud/crime commitment (e.g. by text profiling, drawing causal relationships between assertions made in documents, indiscriminate and arbitrarily-targeted use).
+
+
+
+Open Source Model Licensed under the TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT and Other Licenses of the Third-Party Components therein:
+--------------------------------------------------------------------
+1. HunyuanDiT
+Copyright (C) 2024 THL A29 Limited, a Tencent company. All rights reserved.
+
+
+Terms of the TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT:
+--------------------------------------------------------------------
+TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT
+Tencent Hunyuan Release Date: 2024/5/14
+By clicking to agree or by using, reproducing, modifying, distributing, performing or displaying any portion or element of the Tencent Hunyuan Works, including via any Hosted Service, You will be deemed to have recognized and accepted the content of this Agreement, which is effective immediately.
+1. DEFINITIONS.
+a. “Acceptable Use Policy” shall mean the policy made available by Tencent as set forth in the Exhibit A.
+b. “Agreement” shall mean the terms and conditions for use, reproduction, distribution, modification, performance and displaying of the Hunyuan Works or any portion or element thereof set forth herein.
+c. “Documentation” shall mean the specifications, manuals and documentation for Tencent Hunyuan made publicly available by Tencent.
+d. “Hosted Service” shall mean a hosted service offered via an application programming interface (API), web access, or any other electronic or remote means.
+e. “Licensee,” “You” or “Your” shall mean a natural person or legal entity exercising the rights granted by this Agreement and/or using the Tencent Hunyuan Works for any purpose and in any field of use.
+f. “Materials” shall mean, collectively, Tencent’s proprietary Tencent Hunyuan and Documentation (and any portion thereof) as made available by Tencent under this Agreement.
+g. “Model Derivatives” shall mean all: (i) modifications to Tencent Hunyuan or any Model Derivative of Tencent Hunyuan; (ii) works based on Tencent Hunyuan or any Model Derivative of Tencent Hunyuan; or (iii) any other machine learning model which is created by transfer of patterns of the weights, parameters, operations, or Output of Tencent Hunyuan or any Model Derivative of Tencent Hunyuan, to that model in order to cause that model to perform similarly to Tencent Hunyuan or a Model Derivative of Tencent Hunyuan, including distillation methods, methods that use intermediate data representations, or methods based on the generation of synthetic data Outputs by Tencent Hunyuan or a Model Derivative of Tencent Hunyuan for training that model. For clarity, Outputs by themselves are not deemed Model Derivatives.
+h. “Output” shall mean the information and/or content output of Tencent Hunyuan or a Model Derivative that results from operating or otherwise using Tencent Hunyuan or a Model Derivative, including via a Hosted Service.
+i. “Tencent,” “We” or “Us” shall mean THL A29 Limited.
+j. “Tencent Hunyuan” shall mean the large language models, image/video/audio/3D generation models, and multimodal large language models and their software and algorithms, including trained model weights, parameters (including optimizer states), machine-learning model code, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing made publicly available by Us at https://huggingface.co/Tencent-Hunyuan/HunyuanDiT and https://github.com/Tencent/HunyuanDiT .
+k. “Tencent Hunyuan Works” shall mean: (i) the Materials; (ii) Model Derivatives; and (iii) all derivative works thereof.
+l. “Third Party” or “Third Parties” shall mean individuals or legal entities that are not under common control with Us or You.
+m. “including” shall mean including but not limited to.
+2. GRANT OF RIGHTS.
+We grant You a non-exclusive, worldwide, non-transferable and royalty-free limited license under Tencent’s intellectual property or other rights owned by Us embodied in or utilized by the Materials to use, reproduce, distribute, create derivative works of (including Model Derivatives), and make modifications to the Materials, only in accordance with the terms of this Agreement and the Acceptable Use Policy, and You must not violate (or encourage or permit anyone else to violate) any term of this Agreement or the Acceptable Use Policy.
+3. DISTRIBUTION.
+You may, subject to Your compliance with this Agreement, distribute or make available to Third Parties the Tencent Hunyuan Works, provided that You meet all of the following conditions:
+a. You must provide all such Third Party recipients of the Tencent Hunyuan Works or products or services using them a copy of this Agreement;
+b. You must cause any modified files to carry prominent notices stating that You changed the files;
+c. You are encouraged to: (i) publish at least one technology introduction blogpost or one public statement expressing Your experience of using the Tencent Hunyuan Works; and (ii) mark the products or services developed by using the Tencent Hunyuan Works to indicate that the product/service is “Powered by Tencent Hunyuan”; and
+d. All distributions to Third Parties (other than through a Hosted Service) must be accompanied by a “Notice” text file that contains the following notice: “Tencent Hunyuan is licensed under the Tencent Hunyuan Community License Agreement, Copyright © 2024 Tencent. All Rights Reserved. The trademark rights of “Tencent Hunyuan” are owned by Tencent or its affiliate.”
+You may add Your own copyright statement to Your modifications and, except as set forth in this Section and in Section 5, may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Model Derivatives as a whole, provided Your use, reproduction, modification, distribution, performance and display of the work otherwise complies with the terms and conditions of this Agreement. If You receive Tencent Hunyuan Works from a Licensee as part of an integrated end user product, then this Section 3 of this Agreement will not apply to You.
+4. ADDITIONAL COMMERCIAL TERMS.
+If, on the Tencent Hunyuan version release date, the monthly active users of all products or services made available by or for Licensee is greater than 100 million monthly active users in the preceding calendar month, You must request a license from Tencent, which Tencent may grant to You in its sole discretion, and You are not authorized to exercise any of the rights under this Agreement unless or until Tencent otherwise expressly grants You such rights.
+5. RULES OF USE.
+a. Your use of the Tencent Hunyuan Works must comply with applicable laws and regulations (including trade compliance laws and regulations) and adhere to the Acceptable Use Policy for the Tencent Hunyuan Works, which is hereby incorporated by reference into this Agreement. You must include the use restrictions referenced in these Sections 5(a) and 5(b) as an enforceable provision in any agreement (e.g., license agreement, terms of use, etc.) governing the use and/or distribution of Tencent Hunyuan Works and You must provide notice to subsequent users to whom You distribute that Tencent Hunyuan Works are subject to the use restrictions in these Sections 5(a) and 5(b).
+b. You must not use the Tencent Hunyuan Works or any Output or results of the Tencent Hunyuan Works to improve any other large language model (other than Tencent Hunyuan or Model Derivatives thereof).
+6. INTELLECTUAL PROPERTY.
+a. Subject to Tencent’s ownership of Tencent Hunyuan Works made by or for Tencent and intellectual property rights therein, conditioned upon Your compliance with the terms and conditions of this Agreement, as between You and Tencent, You will be the owner of any derivative works and modifications of the Materials and any Model Derivatives that are made by or for You.
+b. No trademark licenses are granted under this Agreement, and in connection with the Tencent Hunyuan Works, Licensee may not use any name or mark owned by or associated with Tencent or any of its affiliates, except as required for reasonable and customary use in describing and distributing the Tencent Hunyuan Works. Tencent hereby grants You a license to use “Tencent Hunyuan” (the “Mark”) solely as required to comply with the provisions of Section 3(c), provided that You comply with any applicable laws related to trademark protection. All goodwill arising out of Your use of the Mark will inure to the benefit of Tencent.
+c. If You commence a lawsuit or other proceedings (including a cross-claim or counterclaim in a lawsuit) against Us or any person or entity alleging that the Materials or any Output, or any portion of any of the foregoing, infringe any intellectual property or other right owned or licensable by You, then all licenses granted to You under this Agreement shall terminate as of the date such lawsuit or other proceeding is filed. You will defend, indemnify and hold harmless Us from and against any claim by any Third Party arising out of or related to Your or the Third Party’s use or distribution of the Tencent Hunyuan Works.
+d. Tencent claims no rights in Outputs You generate. You and Your users are solely responsible for Outputs and their subsequent uses.
+7. DISCLAIMERS OF WARRANTY AND LIMITATIONS OF LIABILITY.
+a. We are not obligated to support, update, provide training for, or develop any further version of the Tencent Hunyuan Works or to grant any license thereto.
+b. UNLESS AND ONLY TO THE EXTENT REQUIRED BY APPLICABLE LAW, THE TENCENT HUNYUAN WORKS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED “AS IS” WITHOUT ANY EXPRESS OR IMPLIED WARRANTIES OF ANY KIND INCLUDING ANY WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, COURSE OF DEALING, USAGE OF TRADE, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING, REPRODUCING, MODIFYING, PERFORMING, DISPLAYING OR DISTRIBUTING ANY OF THE TENCENT HUNYUAN WORKS OR OUTPUTS AND ASSUME ANY AND ALL RISKS ASSOCIATED WITH YOUR OR A THIRD PARTY’S USE OR DISTRIBUTION OF ANY OF THE TENCENT HUNYUAN WORKS OR OUTPUTS AND YOUR EXERCISE OF RIGHTS AND PERMISSIONS UNDER THIS AGREEMENT.
+c. TO THE FULLEST EXTENT PERMITTED BY APPLICABLE LAW, IN NO EVENT SHALL TENCENT OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, FOR ANY DAMAGES, INCLUDING ANY DIRECT, INDIRECT, SPECIAL, INCIDENTAL, EXEMPLARY, CONSEQUENTIAL OR PUNITIVE DAMAGES, OR LOST PROFITS OF ANY KIND ARISING FROM THIS AGREEMENT OR RELATED TO ANY OF THE TENCENT HUNYUAN WORKS OR OUTPUTS, EVEN IF TENCENT OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING.
+8. SURVIVAL AND TERMINATION.
+a. The term of this Agreement shall commence upon Your acceptance of this Agreement or access to the Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein.
+b. We may terminate this Agreement if You breach any of the terms or conditions of this Agreement. Upon termination of this Agreement, You must promptly delete and cease use of the Tencent Hunyuan Works. Sections 6(a), 6(c), 7 and 9 shall survive the termination of this Agreement.
+9. GOVERNING LAW AND JURISDICTION.
+a. This Agreement and any dispute arising out of or relating to it will be governed by the laws of the Hong Kong Special Administrative Region of the People’s Republic of China, without regard to conflict of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement.
+b. Exclusive jurisdiction and venue for any dispute arising out of or relating to this Agreement will be a court of competent jurisdiction in the Hong Kong Special Administrative Region of the People’s Republic of China, and Tencent and Licensee consent to the exclusive jurisdiction of such court with respect to any such dispute.
+
+
+EXHIBIT A
+ACCEPTABLE USE POLICY
+
+Tencent reserves the right to update this Acceptable Use Policy from time to time.
+Last modified: 2024/5/14
+
+Tencent endeavors to promote safe and fair use of its tools and features, including Tencent Hunyuan. You agree not to use Tencent Hunyuan or Model Derivatives:
+1. In any way that violates any applicable national, federal, state, local, international or any other law or regulation;
+2. To harm Yourself or others;
+3. To repurpose or distribute output from Tencent Hunyuan or any Model Derivatives to harm Yourself or others;
+4. To override or circumvent the safety guardrails and safeguards We have put in place;
+5. For the purpose of exploiting, harming or attempting to exploit or harm minors in any way;
+6. To generate or disseminate verifiably false information and/or content with the purpose of harming others or influencing elections;
+7. To generate or facilitate false online engagement, including fake reviews and other means of fake online engagement;
+8. To intentionally defame, disparage or otherwise harass others;
+9. To generate and/or disseminate malware (including ransomware) or any other content to be used for the purpose of harming electronic systems;
+10. To generate or disseminate personal identifiable information with the purpose of harming others;
+11. To generate or disseminate information (including images, code, posts, articles), and place the information in any public context (including –through the use of bot generated tweets), without expressly and conspicuously identifying that the information and/or content is machine generated;
+12. To impersonate another individual without consent, authorization, or legal right;
+13. To make high-stakes automated decisions in domains that affect an individual’s safety, rights or wellbeing (e.g., law enforcement, migration, medicine/health, management of critical infrastructure, safety components of products, essential services, credit, employment, housing, education, social scoring, or insurance);
+14. In a manner that violates or disrespects the social ethics and moral standards of other countries or regions;
+15. To perform, facilitate, threaten, incite, plan, promote or encourage violent extremism or terrorism;
+16. For any use intended to discriminate against or harm individuals or groups based on protected characteristics or categories, online or offline social behavior or known or predicted personal or personality characteristics;
+17. To intentionally exploit any of the vulnerabilities of a specific group of persons based on their age, social, physical or mental characteristics, in order to materially distort the behavior of a person pertaining to that group in a manner that causes or is likely to cause that person or another person physical or psychological harm;
+18. For military purposes;
+19. To engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or other professional practices.
+
+For the license of other third party components, please refer to the following URL:
+https://huggingface.co/Tencent-Hunyuan/HunyuanDiT/blob/main/Notice
+
+--------------------------------------------------------------------
+
+This Model also incorporates insights from Flux's neural network architectures (https://github.com/black-forest-labs/flux?tab=readme-ov-file). Credits are given to the original authors.
\ No newline at end of file
diff --git a/README.md b/README.md
index 7c00d98c3ac19cd031fd5f7ce2384297f55d3fba..bd30e0fe2fa42ba6fa5705b4e6142fdd4414df63 100644
--- a/README.md
+++ b/README.md
@@ -1,14 +1,229 @@
----
-title: 3d Tuning
-emoji: 🔥
-colorFrom: pink
-colorTo: red
-sdk: gradio
-sdk_version: 5.23.3
-app_file: app.py
-pinned: false
-license: mit
-short_description: image-to-3d texture
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+---
+title: Hunyuan3D-2.0
+emoji: 🌍
+colorFrom: purple
+colorTo: red
+sdk: gradio
+sdk_version: 4.44.1
+app_file: gradio_app.py
+pinned: false
+short_description: Text-to-3D and Image-to-3D Generation
+models:
+- tencent/Hunyuan3D-2
+---
+
+
+[中文阅读](README_zh_cn.md)
+[日本語で読む](README_ja_jp.md)
+
+
+
+
+
+
+
+
+
+
+[//]: # ( )
+
+[//]: # ( )
+
+[//]: # ( )
+
+> Join our **[Wechat](#find-us)** and **[Discord](#find-us)** group to discuss and find help from us.
+
+
+
+“ Living out everyone’s imagination on creating and manipulating 3D assets.”
+
+
+## 🔥 News
+
+- Jan 21, 2025: 💬 Enjoy exciting 3D generation on our website [Hunyuan3D Studio](https://3d.hunyuan.tencent.com)!
+- Jan 21, 2025: 💬 Release inference code and pretrained models
+ of [Hunyuan3D 2.0](https://huggingface.co/tencent/Hunyuan3D-2).
+- Jan 21, 2025: 💬 Release Hunyuan3D 2.0. Please give it a try
+  via [huggingface space](https://huggingface.co/spaces/tencent/Hunyuan3D-2) or
+  our [official site](https://3d.hunyuan.tencent.com)!
+
+## **Abstract**
+
+We present Hunyuan3D 2.0, an advanced large-scale 3D synthesis system for generating high-resolution textured 3D assets.
+This system includes two foundation components: a large-scale shape generation model - Hunyuan3D-DiT, and a large-scale
+texture synthesis model - Hunyuan3D-Paint.
+The shape generative model, built on a scalable flow-based diffusion transformer, aims to create geometry that properly
+aligns with a given condition image, laying a solid foundation for downstream applications.
+The texture synthesis model, benefiting from strong geometric and diffusion priors, produces high-resolution and vibrant
+texture maps for either generated or hand-crafted meshes.
+Furthermore, we build Hunyuan3D-Studio - a versatile, user-friendly production platform that simplifies the re-creation
+process of 3D assets. It allows both professional and amateur users to manipulate or even animate their meshes
+efficiently.
+We systematically evaluate our models, showing that Hunyuan3D 2.0 outperforms previous state-of-the-art models,
+including open-source and closed-source models, in geometry details, condition alignment, texture quality,
+etc.
+
+
+
+
+
+
+
+## ☯️ **Hunyuan3D 2.0**
+
+### Architecture
+
+Hunyuan3D 2.0 features a two-stage generation pipeline, starting with the creation of a bare mesh, followed by the
+synthesis of a texture map for that mesh. This strategy is effective for decoupling the difficulties of shape and
+texture generation and also provides flexibility for texturing either generated or handcrafted meshes.
+
+
+
+
+
+### Performance
+
+We have evaluated Hunyuan3D 2.0 against other open-source as well as closed-source 3D generation methods.
+The numerical results indicate that Hunyuan3D 2.0 surpasses all baselines in the quality of generated textured 3D assets
+and the condition following ability.
+
+| Model | CMMD(⬇) | FID_CLIP(⬇) | FID(⬇) | CLIP-score(⬆) |
+|-------------------------|-----------|-------------|-------------|---------------|
+| Top Open-source Model1 | 3.591 | 54.639 | 289.287 | 0.787 |
+| Top Close-source Model1 | 3.600 | 55.866 | 305.922 | 0.779 |
+| Top Close-source Model2 | 3.368 | 49.744 | 294.628 | 0.806 |
+| Top Close-source Model3 | 3.218 | 51.574 | 295.691 | 0.799 |
+| Hunyuan3D 2.0 | **3.193** | **49.165** | **282.429** | **0.809** |
+
+Generation results of Hunyuan3D 2.0:
+
+
+
+
+
+### Pretrained Models
+
+| Model | Date | Huggingface |
+|----------------------|------------|--------------------------------------------------------|
+| Hunyuan3D-DiT-v2-0 | 2025-01-21 | [Download](https://huggingface.co/tencent/Hunyuan3D-2) |
+| Hunyuan3D-Paint-v2-0 | 2025-01-21 | [Download](https://huggingface.co/tencent/Hunyuan3D-2) |
+
+## 🤗 Get Started with Hunyuan3D 2.0
+
+You may follow the next steps to use Hunyuan3D 2.0 via code or the Gradio App.
+
+### Install Requirements
+
+Please install Pytorch via the [official](https://pytorch.org/) site. Then install the other requirements via
+
+```bash
+pip install -r requirements.txt
+# for texture
+cd hy3dgen/texgen/custom_rasterizer
+python3 setup.py install
+cd hy3dgen/texgen/differentiable_renderer
+bash compile_mesh_painter.sh
+```
+
+### API Usage
+
+We designed a diffusers-like API to use our shape generation model - Hunyuan3D-DiT and texture synthesis model -
+Hunyuan3D-Paint.
+
+You can access **Hunyuan3D-DiT** via:
+
+```python
+from hy3dgen.shapegen import Hunyuan3DDiTFlowMatchingPipeline
+
+pipeline = Hunyuan3DDiTFlowMatchingPipeline.from_pretrained('tencent/Hunyuan3D-2')
+mesh = pipeline(image='assets/demo.png')[0]
+```
+
+The output mesh is a [trimesh object](https://trimesh.org/trimesh.html), which you can save as a glb/obj (or other
+format) file.
+
+For **Hunyuan3D-Paint**, do the following:
+
+```python
+from hy3dgen.texgen import Hunyuan3DPaintPipeline
+from hy3dgen.shapegen import Hunyuan3DDiTFlowMatchingPipeline
+
+# let's generate a mesh first
+pipeline = Hunyuan3DDiTFlowMatchingPipeline.from_pretrained('tencent/Hunyuan3D-2')
+mesh = pipeline(image='assets/demo.png')[0]
+
+pipeline = Hunyuan3DPaintPipeline.from_pretrained('tencent/Hunyuan3D-2')
+mesh = pipeline(mesh, image='assets/demo.png')
+```
+
+Please visit [minimal_demo.py](minimal_demo.py) for more advanced usage, such as **text to 3D** and **texture generation
+for handcrafted mesh**.
+
+### Gradio App
+
+You can also host a [Gradio](https://www.gradio.app/) App on your own computer via:
+
+```bash
+python3 gradio_app.py
+```
+
+Don't forget to visit [Hunyuan3D](https://3d.hunyuan.tencent.com) for quick use, if you don't want to host it yourself.
+
+## 📑 Open-Source Plan
+
+- [x] Inference Code
+- [x] Model Checkpoints
+- [x] Technical Report
+- [ ] ComfyUI
+- [ ] TensorRT Version
+
+## 🔗 BibTeX
+
+If you found this repository helpful, please cite our reports:
+
+```bibtex
+@misc{hunyuan3d22025tencent,
+ title={Hunyuan3D 2.0: Scaling Diffusion Models for High Resolution Textured 3D Assets Generation},
+ author={Tencent Hunyuan3D Team},
+ year={2025},
+}
+
+@misc{yang2024tencent,
+ title={Tencent Hunyuan3D-1.0: A Unified Framework for Text-to-3D and Image-to-3D Generation},
+ year={2024},
+ author={Tencent Hunyuan3D Team},
+ eprint={2411.02293},
+ archivePrefix={arXiv},
+ primaryClass={cs.CV}
+}
+```
+
+## Acknowledgements
+
+We would like to thank the contributors to
+the [DINOv2](https://github.com/facebookresearch/dinov2), [Stable Diffusion](https://github.com/Stability-AI/stablediffusion), [FLUX](https://github.com/black-forest-labs/flux), [diffusers](https://github.com/huggingface/diffusers), [HuggingFace](https://huggingface.co), [CraftsMan3D](https://github.com/wyysf-98/CraftsMan3D),
+and [Michelangelo](https://github.com/NeuralCarver/Michelangelo/tree/main) repositories, for their open research and
+exploration.
+
+## Find Us
+
+| Wechat Group | Xiaohongshu | X | Discord |
+|--------------|-------------|---|---------|
+| | | | |
+
+## Star History
+
+
+
+
+
+
+
+
diff --git a/README_zh_cn.md b/README_zh_cn.md
new file mode 100644
index 0000000000000000000000000000000000000000..597d9b794d0ce3e4a41aff292a4fef07c65f8fa4
--- /dev/null
+++ b/README_zh_cn.md
@@ -0,0 +1,160 @@
+[Read in English](README.md)
+
+
+
+
+
+
+
+
+
+
+“通过 3D 创作与编辑让每个人的想象变成现实。”
+
+
+## 🔥 最新消息
+
+- Jan 21, 2025: 💬 我们发布了 [Hunyuan3D 2.0](https://huggingface.co/spaces/tencent/Hunyuan3D-2). 快来试试吧!
+
+## 概览
+
+混元 3D 2.0 是一款先进的大规模 3D 资产创作系统,它可以用于生成高分辨率的 3D 白膜以及带纹理的 3D
+模型。该系统包含两个基础组件:一个大规模几何生成模型 — 混元 3D-DiT,以及一个大规模纹理合成模型 — 混元 3D-Paint。
+几何生成模型基于可扩展的流式扩散模型构建,旨在生成与给定条件图像精确匹配的几何模型,为下游应用奠定坚实基础。
+纹理合成模型得益于强大的几何和扩散模型先验知识,能够为AI生成的或手工制作的网格模型生成高分辨率且生动逼真的纹理贴图。
+此外,我们打造了混元 3D 功能矩阵,一个功能多样、易于使用的创作平台,简化了 3D 模型的制作以及修改过程。它使专业用户和业余爱好者都能高效地对3D模型进行操作,甚至制作动画。
+我们对该系统进行了系统评估,结果表明混元 3D 2.0 在几何细节、条件匹配、纹理质量等方面均优于以往的最先进的开源以及闭源模型。
+
+
+
+
+
+## ☯️ **Hunyuan3D 2.0**
+
+### 模型架构
+
+混元 3D 2.0 采用了一个两阶段的生成过程,它首先创建一个无纹理的几何模型,然后为该几何模型合成纹理贴图。这种策略有效地将形状生成和纹理生成的难点分离开来,同时也为生成的几何模型或手工制作的几何模型进行纹理处理提供了灵活性。
+
+
+
+
+
+### 性能评估
+
+我们将混元 3D 2.0 与其他开源及闭源的 3D 生成方法进行了评估对比。
+数值结果表明,在生成的带纹理 3D 模型的质量以及对给定条件的遵循能力方面,混元 3D 2.0 超越了所有的基准模型。
+
+| Model | CMMD(⬇) | FID_CLIP(⬇) | FID(⬇) | CLIP-score(⬆) |
+|-------------------------|-----------|-------------|-------------|---------------|
+| Top Open-source Model1 | 3.591 | 54.639 | 289.287 | 0.787 |
+| Top Close-source Model1 | 3.600 | 55.866 | 305.922 | 0.779 |
+| Top Close-source Model2 | 3.368 | 49.744 | 294.628 | 0.806 |
+| Top Close-source Model3 | 3.218 | 51.574 | 295.691 | 0.799 |
+| Hunyuan3D 2.0 | **3.193** | **49.165** | **282.429** | **0.809** |
+
+一些 Hunyuan3D 2.0 的生成结果:
+
+
+
+
+
+### 预训练模型
+
+| 模型名称 | 发布日期 | Huggingface |
+|----------------------|------------|--------------------------------------------------|
+| Hunyuan3D-DiT-v2-0 | 2025-01-21 | [下载](https://huggingface.co/tencent/Hunyuan3D-2) |
+| Hunyuan3D-Paint-v2-0 | 2025-01-21 | [下载](https://huggingface.co/tencent/Hunyuan3D-2) |
+
+## 🤗快速入门 Hunyuan3D 2.0
+
+你可以按照以下步骤,通过代码或 Gradio 来使用混元 3D 2.0。
+
+### 依赖包安装
+
+请通过官方网站安装 PyTorch。然后通过以下方式安装其他所需的依赖项。
+
+```bash
+pip install -r assets/requirements.txt
+```
+
+### API 使用方法
+
+我们设计了一个类似于 diffusers 的 API 来使用我们的几何生成模型 — 混元 3D-DiT 和纹理合成模型 — 混元 3D-Paint。
+你可以通过以下方式使用 混元 3D-DiT:
+
+```python
+from hy3dgen.shapegen import Hunyuan3DDiTFlowMatchingPipeline
+
+pipeline = Hunyuan3DDiTFlowMatchingPipeline.from_pretrained('tencent/Hunyuan3D-2')
+mesh = pipeline(image='assets/demo.png')[0]
+```
+
+输出的网格是一个 Trimesh 对象,你可以将其保存为 glb/obj(或其他格式)文件。
+对于 混元 3D-Paint,请执行以下操作:
+
+```python
+from hy3dgen.texgen import Hunyuan3DPaintPipeline
+from hy3dgen.shapegen import Hunyuan3DDiTFlowMatchingPipeline
+
+# let's generate a mesh first
+pipeline = Hunyuan3DDiTFlowMatchingPipeline.from_pretrained('tencent/Hunyuan3D-2')
+mesh = pipeline(image='assets/demo.png')[0]
+
+pipeline = Hunyuan3DPaintPipeline.from_pretrained('tencent/Hunyuan3D-2')
+mesh = pipeline(mesh, image='assets/demo.png')
+```
+
+请访问 [minimal_demo.py](minimal_demo.py) 以了解更多高级用法,例如 文本转 3D 以及 为手工制作的网格生成纹理。
+
+### Gradio App 使用方法
+
+你也可以通过以下方式在自己的计算机上托管一个Gradio应用程序:
+
+```bash
+pip3 install gradio==3.39.0
+python3 gradio_app.py
+```
+
+如果你不想自己托管,别忘了访问[混元 3D](https://3d.hunyuan.tencent.com)进行快速使用。
+
+## 📑 开源计划
+
+- [x] 推理代码
+- [x] 模型权重
+- [ ] 技术报告
+- [ ] ComfyUI
+- [ ] TensorRT 量化
+
+## 🔗 引用
+
+如果你发现我们的工作有帮助,你可以以下面的方式引用我们的报告:
+
+```bibtex
+@misc{hunyuan3d22025tencent,
+ title={Hunyuan3D 2.0: Scaling Diffusion Models for High Resolution Textured 3D Assets Generation},
+ author={Tencent Hunyuan3D Team},
+ year={2025},
+}
+```
+
+## 致谢
+
+We would like to thank the contributors to
+the [DINOv2](https://github.com/facebookresearch/dinov2), [Stable Diffusion](https://github.com/Stability-AI/stablediffusion), [FLUX](https://github.com/black-forest-labs/flux), [diffusers](https://github.com/huggingface/diffusers)
+and [HuggingFace](https://huggingface.co) repositories, for their open research and exploration.
+
+## Star 历史
+
+
+
+
+
+
+
+
diff --git a/assets/demo.png b/assets/demo.png
new file mode 100644
index 0000000000000000000000000000000000000000..7c069409fe6c9b91e68ddaf13a049bab61a1523d
--- /dev/null
+++ b/assets/demo.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4260b9a45c39fc4045bae81d27f8eb17127cdb201df614193077268d996ce436
+size 151014
diff --git a/assets/env_maps/gradient.jpg b/assets/env_maps/gradient.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..55546c1f260daa7d3c6eef36b70fe5d7e1697df0
Binary files /dev/null and b/assets/env_maps/gradient.jpg differ
diff --git a/assets/env_maps/white.jpg b/assets/env_maps/white.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..f7af1237608dd1d486edb9298c04fbe15ec74185
Binary files /dev/null and b/assets/env_maps/white.jpg differ
diff --git a/assets/example_images/004.png b/assets/example_images/004.png
new file mode 100644
index 0000000000000000000000000000000000000000..95eb0da790153f96b65210330761b28e5172be4c
Binary files /dev/null and b/assets/example_images/004.png differ
diff --git a/assets/example_images/052.png b/assets/example_images/052.png
new file mode 100644
index 0000000000000000000000000000000000000000..0da7e1389746ecae72e850aeaddfd0c1013c78c0
--- /dev/null
+++ b/assets/example_images/052.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:63dfa62dc5a19a1a3bd2df1d1589644f98dea501a4b31cb02a71679d0c74a0e7
+size 151386
diff --git a/assets/example_images/073.png b/assets/example_images/073.png
new file mode 100644
index 0000000000000000000000000000000000000000..deea7de118539c068e793cbbfaa18324d13c06a8
--- /dev/null
+++ b/assets/example_images/073.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:afc0a4ee17882291aacedb6b2a6b1ca0e24912a3dc566efdb4d4c3400bc002e2
+size 107735
diff --git a/assets/example_images/075.png b/assets/example_images/075.png
new file mode 100644
index 0000000000000000000000000000000000000000..5f1aee97d27a9f8f0cfb155c3a64c4afdd5bb912
--- /dev/null
+++ b/assets/example_images/075.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:58b926e26c0026df16378e214fb046e63dd0edceacb0f8db8005d9ed3f86949a
+size 261670
diff --git a/assets/example_images/1008.png b/assets/example_images/1008.png
new file mode 100644
index 0000000000000000000000000000000000000000..6e881a37ea1ef9f45824b28766a8ea77e5d68af8
--- /dev/null
+++ b/assets/example_images/1008.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3dbbff31a72949c8f125e1598fbb4c7d9e6459dfa5e5280a0eb581c0ca608815
+size 127117
diff --git a/assets/example_images/101.png b/assets/example_images/101.png
new file mode 100644
index 0000000000000000000000000000000000000000..17645f93e4020ae40203091a914af0c6309b16d7
--- /dev/null
+++ b/assets/example_images/101.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:60abb50511ed23cf3432497dcae628b2efb56ec7a100b5c18fb4a0ac497617cc
+size 150532
diff --git a/assets/example_images/1022.png b/assets/example_images/1022.png
new file mode 100644
index 0000000000000000000000000000000000000000..da740f7303bac1e364623489de0ab12ef54e464c
--- /dev/null
+++ b/assets/example_images/1022.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df4284dabd513c4826d1cda8ad2dc6687ee4c30229d41c3d631749329175c90c
+size 189310
diff --git a/assets/example_images/1029.png b/assets/example_images/1029.png
new file mode 100644
index 0000000000000000000000000000000000000000..246d6fbf2f7aa5ffea6478ab6a54f3e991e1a138
--- /dev/null
+++ b/assets/example_images/1029.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:66305f50ddd7d90e521d718952c702b9fb71c30efe6970d39d918f6dbf5b6ec3
+size 170947
diff --git a/assets/example_images/1037.png b/assets/example_images/1037.png
new file mode 100644
index 0000000000000000000000000000000000000000..e2ac72e17f6c3339d30454bba3c054e1cfa98e36
Binary files /dev/null and b/assets/example_images/1037.png differ
diff --git a/assets/example_images/1079.png b/assets/example_images/1079.png
new file mode 100644
index 0000000000000000000000000000000000000000..0398f6b84204a1ff65722f609b3ead51c8dd94fb
Binary files /dev/null and b/assets/example_images/1079.png differ
diff --git a/assets/example_images/1111.png b/assets/example_images/1111.png
new file mode 100644
index 0000000000000000000000000000000000000000..548031e68dfe5e683e4a6fe53d5e05a9686e1c80
--- /dev/null
+++ b/assets/example_images/1111.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d9d43eae5cc61c872bc01d480de4e8954da352743d81f9329345d96e052ee2ac
+size 133829
diff --git a/assets/example_images/1123.png b/assets/example_images/1123.png
new file mode 100644
index 0000000000000000000000000000000000000000..81fba3cbba1a3d20c5aee93c87702344ffbab63b
--- /dev/null
+++ b/assets/example_images/1123.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:145a64a2e0afa2ea2c14940a780c74ba7427a3f2ce7ae815c730b68c859565a8
+size 123524
diff --git a/assets/example_images/1128.png b/assets/example_images/1128.png
new file mode 100644
index 0000000000000000000000000000000000000000..76b11499662c02d1414933130c1eae1b6ebc7d4b
--- /dev/null
+++ b/assets/example_images/1128.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09cc96314115f1786047576e7f8c2da3b79b2c45add1c257476f3f0967856e81
+size 142824
diff --git a/assets/example_images/1135.png b/assets/example_images/1135.png
new file mode 100644
index 0000000000000000000000000000000000000000..f0e60a48a3e9b4bdd88e30e04cee03aa0cbd7146
--- /dev/null
+++ b/assets/example_images/1135.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:95e6a4428192731671218e9a63a37c182ebdd2d916f862dbbaed1b6595fd6bf3
+size 598848
diff --git a/assets/example_images/1146.png b/assets/example_images/1146.png
new file mode 100644
index 0000000000000000000000000000000000000000..f6a378ba7a9e348d3fefae95e5aebfc33cf37d74
--- /dev/null
+++ b/assets/example_images/1146.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c37b96e362d925be05925a307d02a6f2af0fe74dccee4a8679d97ad5a872d63e
+size 591263
diff --git a/assets/example_images/1148.png b/assets/example_images/1148.png
new file mode 100644
index 0000000000000000000000000000000000000000..e3296c025a4822cd0d02d596933fce0bbf83ac6c
--- /dev/null
+++ b/assets/example_images/1148.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:24d1ae7ccc5797acb87d17b2965a2b45a459054ffb44c106d41c91d6b358edd0
+size 505267
diff --git a/assets/example_images/1154.png b/assets/example_images/1154.png
new file mode 100644
index 0000000000000000000000000000000000000000..edbb4fbf1cfd6c7b8e2a55690322cc1dd0dae6b2
--- /dev/null
+++ b/assets/example_images/1154.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b74a7f8abc13c7cf13177e1704cfaa5bcc6c7448cbf56a0fecb2b0082abf3c5
+size 806734
diff --git a/assets/example_images/1180.png b/assets/example_images/1180.png
new file mode 100644
index 0000000000000000000000000000000000000000..4f5911df78594cb9ff22524153fc1f8ef727f0c8
--- /dev/null
+++ b/assets/example_images/1180.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:37ffc62ae54489a600f6dc4a22c3cde642426bd9d940e63ac7a0da79d06409ac
+size 650239
diff --git a/assets/example_images/1196.png b/assets/example_images/1196.png
new file mode 100644
index 0000000000000000000000000000000000000000..c21eb3d7abf5372c3d851d518122e77940b854d3
--- /dev/null
+++ b/assets/example_images/1196.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d399c41e5220c951507776dc83dd5a956eb15de8f21cb85df744f2b6b418f5bc
+size 319028
diff --git a/assets/example_images/1204.png b/assets/example_images/1204.png
new file mode 100644
index 0000000000000000000000000000000000000000..9c5e833bdfacb030f8673d07c17089896d812057
--- /dev/null
+++ b/assets/example_images/1204.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0620e8189232a5a553975aa9f544a5c09097154d3f8457e25917876a27af4ff0
+size 469722
diff --git a/assets/example_images/1234.png b/assets/example_images/1234.png
new file mode 100644
index 0000000000000000000000000000000000000000..ad31c55a13f8cd97e029b2ebfddee5e6cf9c9c76
--- /dev/null
+++ b/assets/example_images/1234.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0e6dcb59eb4b16ea7e5937e97b68b2727f2e11bf582b78cdcf7cd58f9d316156
+size 683020
diff --git a/assets/example_images/1310.png b/assets/example_images/1310.png
new file mode 100644
index 0000000000000000000000000000000000000000..973eb570036b2883e6ff6cf340455b2a3273d55b
--- /dev/null
+++ b/assets/example_images/1310.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dc4274d01028c1f0be25839a71200770a9e4973979839a3c1e27a2b7637bebb3
+size 349994
diff --git a/assets/example_images/1316.png b/assets/example_images/1316.png
new file mode 100644
index 0000000000000000000000000000000000000000..21d7a89d36344762896cc9f8e3de48422c6634f9
--- /dev/null
+++ b/assets/example_images/1316.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ca545e6522b5135904843261bbc2e61fbc15a70b0a15537e5d16fb81103fdc84
+size 499825
diff --git a/assets/example_images/1354.png b/assets/example_images/1354.png
new file mode 100644
index 0000000000000000000000000000000000000000..b6f9ec845209644879fe06e4e40d73620d340274
--- /dev/null
+++ b/assets/example_images/1354.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:016b1611cf8aa7f9600e452900a1e5938a081e87dd8de9a9a580d99ee1d5f827
+size 621931
diff --git a/assets/example_images/1429.png b/assets/example_images/1429.png
new file mode 100644
index 0000000000000000000000000000000000000000..5b031fd525a072583a3672708487a1209036d498
--- /dev/null
+++ b/assets/example_images/1429.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5081010b47e91de52be1d73961fbaff2563fd89dc8b04c1c614e80e224233e42
+size 625356
diff --git a/assets/example_images/1493.png b/assets/example_images/1493.png
new file mode 100644
index 0000000000000000000000000000000000000000..a16abbd465f9280bafe4f5cb5a5ccdd62b64f844
--- /dev/null
+++ b/assets/example_images/1493.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d90736d5ada26cba576d2ccc61ede5dc2e5f74942b9589ea8523498f327ec74b
+size 132760
diff --git a/assets/example_images/1582.png b/assets/example_images/1582.png
new file mode 100644
index 0000000000000000000000000000000000000000..6265c0dd25afb321ba77aa35eecd97e4591559bb
--- /dev/null
+++ b/assets/example_images/1582.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:862d84ca0acd15a468d7cc9936b77b426d4e5ecb9e79a4f02573a5ab264e6c65
+size 182184
diff --git a/assets/example_images/1583.png b/assets/example_images/1583.png
new file mode 100644
index 0000000000000000000000000000000000000000..8b06b8c1a445f5284381cd247c1eade986f7f2ff
--- /dev/null
+++ b/assets/example_images/1583.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d6aafda0bfb3b02a2f91909af0c98ca0cd158357608305b6b05bf10ab930e75
+size 137517
diff --git a/assets/example_images/1596.png b/assets/example_images/1596.png
new file mode 100644
index 0000000000000000000000000000000000000000..44e466f1266d0a163d54510598d8976a8ee1e142
--- /dev/null
+++ b/assets/example_images/1596.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6f702dd5c9c970da79d26c3df5ee7c3b0ce9fb5e6a8457537dad61ebc2afee4b
+size 191746
diff --git a/assets/example_images/1601.png b/assets/example_images/1601.png
new file mode 100644
index 0000000000000000000000000000000000000000..41ce97cd1df9ba2752f1c57c3e37bfb9d629e24d
--- /dev/null
+++ b/assets/example_images/1601.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef55cdcd3d3dcc41db2de39cf61c59e07008e1bc0a795c86b413216e223b7267
+size 198760
diff --git a/assets/example_images/1603.png b/assets/example_images/1603.png
new file mode 100644
index 0000000000000000000000000000000000000000..40c8cef2b0c701d3e70cae435f7f19eb94f750bf
--- /dev/null
+++ b/assets/example_images/1603.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dbb890dbea2903a41633fd7ee6796a45ff225171a8d93474d7542f9d880ab35f
+size 197398
diff --git a/assets/example_images/1626.png b/assets/example_images/1626.png
new file mode 100644
index 0000000000000000000000000000000000000000..6d0dc7c897634abd77f555dbfe54c9f3eac37579
--- /dev/null
+++ b/assets/example_images/1626.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b18c2764d2b7bf42b88eec34524b2ec48094a1538d64ee04a0c87bc1637632c1
+size 111076
diff --git a/assets/example_images/1627.png b/assets/example_images/1627.png
new file mode 100644
index 0000000000000000000000000000000000000000..05c4fbb2970296f10d114a3705cfc7691042f5bf
--- /dev/null
+++ b/assets/example_images/1627.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6de52ff9e95ac9ab7a3f0b6016dc8aa204babc6edab0f506d085a0116551d2c4
+size 269408
diff --git a/assets/example_images/1654.png b/assets/example_images/1654.png
new file mode 100644
index 0000000000000000000000000000000000000000..2385031d0c0e6f0df1427a6516ee0b12c9cc14bf
Binary files /dev/null and b/assets/example_images/1654.png differ
diff --git a/assets/example_images/167.png b/assets/example_images/167.png
new file mode 100644
index 0000000000000000000000000000000000000000..3295b8b27209c9f2af1811f4c628a9d55d491ecf
--- /dev/null
+++ b/assets/example_images/167.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d6d6ed939ed8e3f358510291f1534b843d6233cf246739f2e083957e56e7f59e
+size 200695
diff --git a/assets/example_images/1670.png b/assets/example_images/1670.png
new file mode 100644
index 0000000000000000000000000000000000000000..d1e87d4536a846002e64478b07c106102806003e
--- /dev/null
+++ b/assets/example_images/1670.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:55c339531299d19c945d0a2d96960cf0131d0be54b7bd5a457b9e67b1822a3a9
+size 193904
diff --git a/assets/example_images/1679.png b/assets/example_images/1679.png
new file mode 100644
index 0000000000000000000000000000000000000000..9cb8ac5073f6122cb6e5ed1d63e98302282d6890
--- /dev/null
+++ b/assets/example_images/1679.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:06b30b751a472096d223b9a7c128091402461ed7c974f74c13e0f13cec397ee8
+size 132581
diff --git a/assets/example_images/1687.png b/assets/example_images/1687.png
new file mode 100644
index 0000000000000000000000000000000000000000..387d7fd73f80cdc393772fe378e1226dcf389e08
--- /dev/null
+++ b/assets/example_images/1687.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:74804ae8dee62928cd82d62a1a8bbbbaddeb91bc10a0315ec12c4a5d4aafb9c2
+size 157743
diff --git a/assets/example_images/1698.png b/assets/example_images/1698.png
new file mode 100644
index 0000000000000000000000000000000000000000..62db0e11e76fe0e1855bc2819506ad399139d248
--- /dev/null
+++ b/assets/example_images/1698.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b8e0d82a0a253637b84b4b5549730cd0dcea89694de94b5e013ad31ed16a2bd5
+size 236338
diff --git a/assets/example_images/1715.png b/assets/example_images/1715.png
new file mode 100644
index 0000000000000000000000000000000000000000..de50d5eae8d84828e8d121ff6b9b9d2b694c1978
--- /dev/null
+++ b/assets/example_images/1715.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:567697431071472db18b9ad1ba1c3b8e272f1bd425fc9bbd3d703d654ef12c07
+size 255078
diff --git a/assets/example_images/1735.png b/assets/example_images/1735.png
new file mode 100644
index 0000000000000000000000000000000000000000..edd4e89e06c21a4ee900052b10e917d0f530bdc3
--- /dev/null
+++ b/assets/example_images/1735.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4534cb0f79e7022621e45ed7e5d1d90b951ae320fbf7f20aa374c5c470785525
+size 134865
diff --git a/assets/example_images/1738.png b/assets/example_images/1738.png
new file mode 100644
index 0000000000000000000000000000000000000000..68abb9cb131543483d20ee14bbcf351ed7deb5f1
--- /dev/null
+++ b/assets/example_images/1738.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:81672bda2b58e0af1b0df37a8dd10fe7b622f445e5f5159f6c07a907829c4242
+size 233988
diff --git a/assets/example_images/1744.png b/assets/example_images/1744.png
new file mode 100644
index 0000000000000000000000000000000000000000..91c19f14114cdd9cdcf8ea6986496df071d7e285
--- /dev/null
+++ b/assets/example_images/1744.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:643acbe5502443db01663c12c3973c1d635e297497b50baaba88b141ff1e8b8f
+size 127929
diff --git a/assets/example_images/1758.png b/assets/example_images/1758.png
new file mode 100644
index 0000000000000000000000000000000000000000..e780a05b54bb68622cc08b7774ae4a80b68903db
--- /dev/null
+++ b/assets/example_images/1758.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ebc39704f3667d1388a543b50ad5d91ebb342b3949ca51987755e1c1fc9bf6a9
+size 202004
diff --git a/assets/example_images/1772.png b/assets/example_images/1772.png
new file mode 100644
index 0000000000000000000000000000000000000000..0449267b0dab7e99dbc530b7d068eae16659ad24
--- /dev/null
+++ b/assets/example_images/1772.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d37b574bb46bcedf99929e0521fcb6b2df8fd52ba4716e908e2c338541417050
+size 207360
diff --git a/assets/example_images/1773.png b/assets/example_images/1773.png
new file mode 100644
index 0000000000000000000000000000000000000000..6e0441cd37eedb1b67f3af71e23f4c466fcb6397
--- /dev/null
+++ b/assets/example_images/1773.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eee59fb76e264f2b16c7cbcdb458e64bb16da88df819ea1095f986f590194ecf
+size 168548
diff --git a/assets/example_images/1778.png b/assets/example_images/1778.png
new file mode 100644
index 0000000000000000000000000000000000000000..693d864e2d49b4e24e87f7e9cb032c7cf908397c
--- /dev/null
+++ b/assets/example_images/1778.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:48ba2e20060432b18941340eae76aa302ea0b165734751cbc4665c89e725cc8a
+size 157988
diff --git a/assets/example_images/179.png b/assets/example_images/179.png
new file mode 100644
index 0000000000000000000000000000000000000000..a2d416073086d5eebf6609a9e803f4c624712444
Binary files /dev/null and b/assets/example_images/179.png differ
diff --git a/assets/example_images/1898.png b/assets/example_images/1898.png
new file mode 100644
index 0000000000000000000000000000000000000000..10d5be8ad219150f348b176df139313934a1e909
--- /dev/null
+++ b/assets/example_images/1898.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:18755aa01c28748e4ee192c8b7fa5b3ee188b4f7c9a7be0514502af07874f808
+size 121238
diff --git a/assets/example_images/191.png b/assets/example_images/191.png
new file mode 100644
index 0000000000000000000000000000000000000000..025338f544cc333aca2910331a0b544a643c2077
--- /dev/null
+++ b/assets/example_images/191.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:db37cbd26aa7a77bf8c04dec7d7a996fe97b3618801b7ab9acb4d26e43d666e4
+size 142385
diff --git a/assets/example_images/195.png b/assets/example_images/195.png
new file mode 100644
index 0000000000000000000000000000000000000000..a16848166fdf72b01a004fdcb850622058b787d4
--- /dev/null
+++ b/assets/example_images/195.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:19e5c133448aa7881a9d3eb4a6874abd465e0da83dcb91efba9854abd43de5f5
+size 119924
diff --git a/assets/example_images/197.png b/assets/example_images/197.png
new file mode 100644
index 0000000000000000000000000000000000000000..86ca74741b0b30fb851ec8e5e0a1a516db77e24a
--- /dev/null
+++ b/assets/example_images/197.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b334ad0edb6e8c3e1ee71918012a6bd23d5ac1673fe47ae9c38ae64d706f40cd
+size 147455
diff --git a/assets/example_images/198.png b/assets/example_images/198.png
new file mode 100644
index 0000000000000000000000000000000000000000..67a1b16359cb05de2edd6c4e242466f544cc0ff1
--- /dev/null
+++ b/assets/example_images/198.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bd412f4e184ce2f000e5ddf92e660257a4c21c16edcff499149ca6387ab5c7a8
+size 210179
diff --git a/assets/example_images/202.png b/assets/example_images/202.png
new file mode 100644
index 0000000000000000000000000000000000000000..4b54b1dfdb142df59e11b1a8bd1b69e17e714a4b
--- /dev/null
+++ b/assets/example_images/202.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7c012997d8f365b9141c03b6b24c595632a926d0972e0727e527b0cac39fb6a3
+size 224840
diff --git a/assets/example_images/203.png b/assets/example_images/203.png
new file mode 100644
index 0000000000000000000000000000000000000000..524ccaff74f115924d54074a9a67df44229046c6
--- /dev/null
+++ b/assets/example_images/203.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:50c1bf6fb350031480031ecbf15a212eb119e9589f7a66d8dc0d09910c4d8060
+size 143678
diff --git a/assets/example_images/218.png b/assets/example_images/218.png
new file mode 100644
index 0000000000000000000000000000000000000000..a81a5e3b9283a002141e2e1a0839625273b41ab2
--- /dev/null
+++ b/assets/example_images/218.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e098979c09a27da628a39364168f92dc50451714881172fc0709573f884cab56
+size 183290
diff --git a/assets/example_images/219.png b/assets/example_images/219.png
new file mode 100644
index 0000000000000000000000000000000000000000..ac2036ce8202e477f1fdd6bdd9588f95961ec8d3
--- /dev/null
+++ b/assets/example_images/219.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3db54f848d703032f9d3c0c5bb9c8a0fea8bfa03b263d5b5dcf7f9fc738e4be3
+size 164817
diff --git a/assets/example_images/379.png b/assets/example_images/379.png
new file mode 100644
index 0000000000000000000000000000000000000000..54a3337db018a146c5ca77b927a35976538053fc
--- /dev/null
+++ b/assets/example_images/379.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:188914bdcb9b6f956ac888b511f9f1ba8028ad85071c1e2c84c778eb601f2dbf
+size 146849
diff --git a/assets/example_images/380.png b/assets/example_images/380.png
new file mode 100644
index 0000000000000000000000000000000000000000..2a19e273ef4dc657c2fbf02eeab9a4eaa8639713
--- /dev/null
+++ b/assets/example_images/380.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:813f62e8b85e5162b3fccd9e949337405755e75107a22e6b34843f1bd3d9af68
+size 178552
diff --git a/assets/example_images/419.png b/assets/example_images/419.png
new file mode 100644
index 0000000000000000000000000000000000000000..092694a9d45e43a488e2d19ea280adf9a48ec665
--- /dev/null
+++ b/assets/example_images/419.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a690008aeb3417630fae7046766aaf373662ecde847561762d640384cffbe0c5
+size 213693
diff --git a/assets/example_images/583.png b/assets/example_images/583.png
new file mode 100644
index 0000000000000000000000000000000000000000..c303211f005577e5877883e2210db2a2755910b7
Binary files /dev/null and b/assets/example_images/583.png differ
diff --git a/assets/example_images/888.png b/assets/example_images/888.png
new file mode 100644
index 0000000000000000000000000000000000000000..702bc71849ef5e635199a4eb5c1e25f58e39eacd
--- /dev/null
+++ b/assets/example_images/888.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5792247c9b38e5ac2c990e93a4bf9974f9f98cbb4f08cea1b9744305c9bfc639
+size 162911
diff --git a/assets/example_images/895.png b/assets/example_images/895.png
new file mode 100644
index 0000000000000000000000000000000000000000..4648ddef58d389c635bfe084e7edc56a6901b757
--- /dev/null
+++ b/assets/example_images/895.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b23a294b5d47b831a23a1a643b15990804a64f187ccce519ddce9aa73a1f3279
+size 124156
diff --git a/assets/example_images/example_000.png b/assets/example_images/example_000.png
new file mode 100644
index 0000000000000000000000000000000000000000..6222237f2229a1f3f5ba7651a93ebef216aea1ed
Binary files /dev/null and b/assets/example_images/example_000.png differ
diff --git a/assets/example_images/example_002.png b/assets/example_images/example_002.png
new file mode 100644
index 0000000000000000000000000000000000000000..a6fd2a579660fbea42a86bda0a8344b577c12b8c
Binary files /dev/null and b/assets/example_images/example_002.png differ
diff --git a/assets/example_mv_images/1/back.png b/assets/example_mv_images/1/back.png
new file mode 100644
index 0000000000000000000000000000000000000000..63b270e8305fab70f12dfa706a7f36f0e8c34acc
--- /dev/null
+++ b/assets/example_mv_images/1/back.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:20c1c2b3afd1faa5877b0d26f6be88a304faf0703e580f3f7d61b4a78ce794b3
+size 123643
diff --git a/assets/example_mv_images/1/front.png b/assets/example_mv_images/1/front.png
new file mode 100644
index 0000000000000000000000000000000000000000..f02bd7666125f8e279e1c974b02c568e54d6aea0
--- /dev/null
+++ b/assets/example_mv_images/1/front.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d3c6349c9aa12c7bdebefad374f8fbf661600c6d58db87ecc86112e1a7e25453
+size 147606
diff --git a/assets/example_mv_images/1/left.png b/assets/example_mv_images/1/left.png
new file mode 100644
index 0000000000000000000000000000000000000000..2c3b5f1af469d8d7f72db8d42bb15e3a9677a3a5
--- /dev/null
+++ b/assets/example_mv_images/1/left.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e8fc9376631b3244fe048b3d51caf39ff72d1ad6fb951dbbf31fb4958466a229
+size 132775
diff --git a/assets/example_mv_images/10/back.png b/assets/example_mv_images/10/back.png
new file mode 100644
index 0000000000000000000000000000000000000000..1c3efa07959f7a25da4491a06a824494aef29f8b
--- /dev/null
+++ b/assets/example_mv_images/10/back.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6e75eddc86ddb5459cce74edd3d24ce6d563329f4dc3f028f5c25b1e3b0dfdb1
+size 294746
diff --git a/assets/example_mv_images/10/front.png b/assets/example_mv_images/10/front.png
new file mode 100644
index 0000000000000000000000000000000000000000..1d91deb5d3f80e33a96f2d4fce4c46a3f9c992e4
--- /dev/null
+++ b/assets/example_mv_images/10/front.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b0961e39b4c36e7be449ecdf67aa014906ab98ec8a44c7c84e3356d1e4ea3be0
+size 346253
diff --git a/assets/example_mv_images/10/left.png b/assets/example_mv_images/10/left.png
new file mode 100644
index 0000000000000000000000000000000000000000..d988a85dcde585c7647b10492990003c0449c570
--- /dev/null
+++ b/assets/example_mv_images/10/left.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f052338cfa8ce4a47d24067b1eaefe6cd642eb471b5af23d4a0d96ebf016be40
+size 255049
diff --git a/assets/example_mv_images/11/back.png b/assets/example_mv_images/11/back.png
new file mode 100644
index 0000000000000000000000000000000000000000..63bf8d5aa10c108c08e182c57234647d8e9609be
--- /dev/null
+++ b/assets/example_mv_images/11/back.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5b408cf80687a96181c9f0ddb3fdc57b31dfe76d2e53af10fed461b81172bb4f
+size 232984
diff --git a/assets/example_mv_images/11/front.png b/assets/example_mv_images/11/front.png
new file mode 100644
index 0000000000000000000000000000000000000000..446b33c3681afe3882e7e67737f7c3f7bbc23011
--- /dev/null
+++ b/assets/example_mv_images/11/front.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0107c462e08874e70a02c74a4e10c8140312a4cfe30924b8fdcb1449d7374b81
+size 260181
diff --git a/assets/example_mv_images/11/left.png b/assets/example_mv_images/11/left.png
new file mode 100644
index 0000000000000000000000000000000000000000..694bb58324d98af5bf237117a633209bf47bc18d
--- /dev/null
+++ b/assets/example_mv_images/11/left.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:89b87518bf2de240580f0660a1b2e965f2558f5b8a0186b109d005660f762e9c
+size 208419
diff --git a/assets/example_mv_images/12/back.png b/assets/example_mv_images/12/back.png
new file mode 100644
index 0000000000000000000000000000000000000000..bb65c5b54d5e385e08dc33c910eb06492c3b325b
--- /dev/null
+++ b/assets/example_mv_images/12/back.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4ec98ca3dca672f2c280c8a11cd9c3d0f66680dff15f2f8d5ecc336d42671ba7
+size 214938
diff --git a/assets/example_mv_images/12/front.png b/assets/example_mv_images/12/front.png
new file mode 100644
index 0000000000000000000000000000000000000000..8c18be1ae97d9b3658866f757be72b3793763f98
--- /dev/null
+++ b/assets/example_mv_images/12/front.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f3097cf852842d8c3f9c53de0b4f4e4987cb1bc2a48dc63a245c4f2cc5ab1360
+size 151288
diff --git a/assets/example_mv_images/12/left.png b/assets/example_mv_images/12/left.png
new file mode 100644
index 0000000000000000000000000000000000000000..c0faf5662f1ac0e5fef0b95dd8c1d20deb84d09a
--- /dev/null
+++ b/assets/example_mv_images/12/left.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4156f3ac3f394dc9b2876de23b314ac5c9ce487bd0af94a9994787d4e8be3eb7
+size 115565
diff --git a/assets/example_mv_images/13/back.png b/assets/example_mv_images/13/back.png
new file mode 100644
index 0000000000000000000000000000000000000000..af81a71fb68746b1aa7d3018aa01086f57189ac2
--- /dev/null
+++ b/assets/example_mv_images/13/back.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e3c391eb9fdff14b52ec58587cfb75ae57a7193cc6fd89af0b8191cb96937ae6
+size 377343
diff --git a/assets/example_mv_images/13/front.png b/assets/example_mv_images/13/front.png
new file mode 100644
index 0000000000000000000000000000000000000000..e0793507aab77817a1f33f45ebbfc1808f07db46
--- /dev/null
+++ b/assets/example_mv_images/13/front.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ce383517cefceae0030704d6f4433119249c4c30d0294085e7c1b3ac234dda5c
+size 434933
diff --git a/assets/example_mv_images/13/left.png b/assets/example_mv_images/13/left.png
new file mode 100644
index 0000000000000000000000000000000000000000..a05d80e331d577a54e7d3a997bf46d16ccac56c1
--- /dev/null
+++ b/assets/example_mv_images/13/left.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b47defe19173bbde4f1b1dd64fde70611a0efae8b546fa71478492644aff9f10
+size 371768
diff --git a/assets/example_mv_images/14/back.png b/assets/example_mv_images/14/back.png
new file mode 100644
index 0000000000000000000000000000000000000000..f8845ee555cc643845542e934694e62a57ffc6c4
--- /dev/null
+++ b/assets/example_mv_images/14/back.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b388f588dd6ad93d8445d05fc7efad1a7c0bfd263da0ce6c6710e5ac62206934
+size 139087
diff --git a/assets/example_mv_images/14/front.png b/assets/example_mv_images/14/front.png
new file mode 100644
index 0000000000000000000000000000000000000000..5a449ccbd94e38ad05848520c9b9a965b49d64df
--- /dev/null
+++ b/assets/example_mv_images/14/front.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:351e5e3d9f2a34e1b7443f2a4c2dbcc5dff69fe8c9f22353a0f72b12de00f717
+size 127654
diff --git a/assets/example_mv_images/14/left.png b/assets/example_mv_images/14/left.png
new file mode 100644
index 0000000000000000000000000000000000000000..ea041788cb423e4b1bc8bc2a2e24fefbcf8f0238
--- /dev/null
+++ b/assets/example_mv_images/14/left.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4f2076b8dd42c3ae4184c11d8c47de37319eed707317cf8f61771c151b54d2b0
+size 157493
diff --git a/assets/example_mv_images/2/back.png b/assets/example_mv_images/2/back.png
new file mode 100644
index 0000000000000000000000000000000000000000..d4276cef7c87f491fd4a0be323ad54810ad9b57b
--- /dev/null
+++ b/assets/example_mv_images/2/back.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ee6e667fa528599d1176a446d9f006e159ef83821080d75ec169e713ab317181
+size 278681
diff --git a/assets/example_mv_images/2/front.png b/assets/example_mv_images/2/front.png
new file mode 100644
index 0000000000000000000000000000000000000000..5e0c398bb3090669721c4250b6eefa48f5f96609
--- /dev/null
+++ b/assets/example_mv_images/2/front.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b9f9ac3aba8be4ff0624b475d29ffbc46def5a654ee52746c36fa489212815f1
+size 316367
diff --git a/assets/example_mv_images/2/left.png b/assets/example_mv_images/2/left.png
new file mode 100644
index 0000000000000000000000000000000000000000..d7b3a557831e7b5b721c3107afd7a988ef243d32
--- /dev/null
+++ b/assets/example_mv_images/2/left.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6b537a9aa472e0790a4e64992ef318009bfc1e82afc17c97c3f8f7e3945a9fd0
+size 239758
diff --git a/assets/example_mv_images/3/back.png b/assets/example_mv_images/3/back.png
new file mode 100644
index 0000000000000000000000000000000000000000..cc762acc81728dbe64499d1fa5a986bce39b33c6
--- /dev/null
+++ b/assets/example_mv_images/3/back.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:26bbe58e6ee3dcb96de29e50f414009070155f605f0160e672438cd517c84a5a
+size 330924
diff --git a/assets/example_mv_images/3/front.png b/assets/example_mv_images/3/front.png
new file mode 100644
index 0000000000000000000000000000000000000000..0cbe89b067734ffc2a10e9f46403bd8a2240df0d
--- /dev/null
+++ b/assets/example_mv_images/3/front.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cb8d4e1ff7de8c4cdd47857c05506d7752037fea13d8325dd54598087d913baf
+size 353653
diff --git a/assets/example_mv_images/3/left.png b/assets/example_mv_images/3/left.png
new file mode 100644
index 0000000000000000000000000000000000000000..6b0693ad490563d88f747005365528fb78c0d515
--- /dev/null
+++ b/assets/example_mv_images/3/left.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:446ef9887b9096d9786cf5f74af0cf5901772fa62d31d68de7eef2234afcab55
+size 199186
diff --git a/assets/example_mv_images/4/back.png b/assets/example_mv_images/4/back.png
new file mode 100644
index 0000000000000000000000000000000000000000..900f74fb21bad2386662637e294425be907b9da8
--- /dev/null
+++ b/assets/example_mv_images/4/back.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7f68658129288c0af404007856c304ba555d219184b61c73643182214560430
+size 442520
diff --git a/assets/example_mv_images/4/front.png b/assets/example_mv_images/4/front.png
new file mode 100644
index 0000000000000000000000000000000000000000..014ffe3b6ed640477e61e288f27434d1f84ff646
--- /dev/null
+++ b/assets/example_mv_images/4/front.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1199f2ec9ce649810f65294e2579fe7ba8f2d117e5954624c28a57ad28296586
+size 562616
diff --git a/assets/example_mv_images/4/left.png b/assets/example_mv_images/4/left.png
new file mode 100644
index 0000000000000000000000000000000000000000..3cd7d776c4638f42366e0d931ff03cdabd82e999
--- /dev/null
+++ b/assets/example_mv_images/4/left.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d6165b7f753a815137a4ebc55bac741dbe78ae95d76cf73aaed2a46d3d6f6add
+size 421959
diff --git a/assets/example_mv_images/5/back.png b/assets/example_mv_images/5/back.png
new file mode 100644
index 0000000000000000000000000000000000000000..264346a91ba9de918fcc85273d4c2f47b516e6a4
--- /dev/null
+++ b/assets/example_mv_images/5/back.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9510e0ec98ca84deab9d6a079e90a38eec3a87e6cf65b44ad2502e78af8327b8
+size 217220
diff --git a/assets/example_mv_images/5/front.png b/assets/example_mv_images/5/front.png
new file mode 100644
index 0000000000000000000000000000000000000000..583449bc3becb8b7437e29b4579f111c82f9b655
--- /dev/null
+++ b/assets/example_mv_images/5/front.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6a0d69ba6c9327835491e794c5ec26b499ac9171c502afed7bdd63555bbacbc3
+size 261388
diff --git a/assets/example_mv_images/5/left.png b/assets/example_mv_images/5/left.png
new file mode 100644
index 0000000000000000000000000000000000000000..399523a20b1078997ee636d4c1f14180ba62c999
--- /dev/null
+++ b/assets/example_mv_images/5/left.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5a47f2156b60ddc5a46bfe76f3021a259769dcda63671c6c070343bb4e69f744
+size 194230
diff --git a/assets/example_mv_images/6/back.png b/assets/example_mv_images/6/back.png
new file mode 100644
index 0000000000000000000000000000000000000000..8ea23eb4fb74130bf0d6d0168dafa610f255a2d4
--- /dev/null
+++ b/assets/example_mv_images/6/back.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7c14fc0013707170b92a55cae851d079e69ff3141bb2b651fd0056c21b54b73d
+size 226910
diff --git a/assets/example_mv_images/6/front.png b/assets/example_mv_images/6/front.png
new file mode 100644
index 0000000000000000000000000000000000000000..1c446d0bbf6a79848b9134f54553765bf5120de7
--- /dev/null
+++ b/assets/example_mv_images/6/front.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b9d2133718e07c8f97abc116b7c53dd09fa94ac4c19a0f88ea6cbbde8b220096
+size 321267
diff --git a/assets/example_mv_images/6/left.png b/assets/example_mv_images/6/left.png
new file mode 100644
index 0000000000000000000000000000000000000000..697327907ad4d8f7c6cc4205e48b4b8a5c71a542
--- /dev/null
+++ b/assets/example_mv_images/6/left.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:75f983ae663c7bbd1a94aec3c33b2deb6730aec6155fd12323ff22d285d5f702
+size 222957
diff --git a/assets/example_mv_images/7/back.png b/assets/example_mv_images/7/back.png
new file mode 100644
index 0000000000000000000000000000000000000000..f0516d0225c71b07e3cc07a33a17be19445e58ac
--- /dev/null
+++ b/assets/example_mv_images/7/back.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d1f0fa0ecdada99496ff30da7169112330de5bccc34890d3deee88e9fab09dcc
+size 267546
diff --git a/assets/example_mv_images/7/front.png b/assets/example_mv_images/7/front.png
new file mode 100644
index 0000000000000000000000000000000000000000..e7f32e7faec7b0e41d7ec31fc3242dbcb8a31afb
--- /dev/null
+++ b/assets/example_mv_images/7/front.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:75862fe740b8b7e0d6ece0313b6ed150f743c3774d069bbb15bf7829071ac16b
+size 267525
diff --git a/assets/example_mv_images/7/left.png b/assets/example_mv_images/7/left.png
new file mode 100644
index 0000000000000000000000000000000000000000..cd8bc21df2a6bb6f5c2de46c22422bdee843b783
--- /dev/null
+++ b/assets/example_mv_images/7/left.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bbd26b3df85e915d9c097d4109fb30b5931de34bf72274b918f97782bec8c22d
+size 196356
diff --git a/assets/example_mv_images/8/back.png b/assets/example_mv_images/8/back.png
new file mode 100644
index 0000000000000000000000000000000000000000..8652db0d9ac6f152cd479fc7a4094afde0353019
--- /dev/null
+++ b/assets/example_mv_images/8/back.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:58cd81374a9c7b8558841e7f4d6904b8f56f9ac238eaba31ad6c1b50f63f8f8d
+size 364160
diff --git a/assets/example_mv_images/8/front.png b/assets/example_mv_images/8/front.png
new file mode 100644
index 0000000000000000000000000000000000000000..1ae9fbd92f0586134cffb411343ad4a0c7c952f3
--- /dev/null
+++ b/assets/example_mv_images/8/front.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:34dc5d4ddd4f62ab0b7eb2c6811d268318ec8a3b54e8ed6b52bc9072d1ac4c88
+size 230771
diff --git a/assets/example_mv_images/8/left.png b/assets/example_mv_images/8/left.png
new file mode 100644
index 0000000000000000000000000000000000000000..cb053a4d12b17ca847897b7e212cfe55fb790388
--- /dev/null
+++ b/assets/example_mv_images/8/left.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d371643d6169b6c0a5ebdea334f00415130a57b0a5a21b9a24fcb23b7fae3909
+size 463846
diff --git a/assets/example_prompts.txt b/assets/example_prompts.txt
new file mode 100644
index 0000000000000000000000000000000000000000..3930c2f22804559bf4f8796d9c23ea4d0ab25b97
--- /dev/null
+++ b/assets/example_prompts.txt
@@ -0,0 +1,5 @@
+一片绿色的树叶在白色背景上居中展现,清晰的纹理
+一只棕白相间的仓鼠,站在白色背景前。照片采用居中构图方式,卡通风格
+一盆绿色植物生长在红色花盆中,居中,写实
+a pot of green plants grows in a red flower pot.
+a lovely rabbit eating carrots
diff --git a/assets/images/arch.jpg b/assets/images/arch.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..6110b1e5e684ddb022c88f9313d5fc62574afb3c
--- /dev/null
+++ b/assets/images/arch.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ac9e63cbe30dd141832ed1c949d23a7186c5681073fa8d173ee9c1f5f81a18e
+size 903034
diff --git a/assets/images/e2e-1.gif b/assets/images/e2e-1.gif
new file mode 100644
index 0000000000000000000000000000000000000000..3fc3c74a1b83ab88c050d474409313b541b0bb15
--- /dev/null
+++ b/assets/images/e2e-1.gif
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6c582fdb3f6560b70418d1166a8024640a49f162b6969e03dc1a54ad196a837c
+size 8786876
diff --git a/assets/images/e2e-2.gif b/assets/images/e2e-2.gif
new file mode 100644
index 0000000000000000000000000000000000000000..feb04b1edb0837e28654de3fc78337fa2e521b09
--- /dev/null
+++ b/assets/images/e2e-2.gif
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:074ecb05f60c224f79e35e294ce847a7b77511c355fbcd57d3e82aa253eeb320
+size 9021984
diff --git a/assets/images/system.jpg b/assets/images/system.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8a0e7cf8ce0e7607334a47c6e19297dc9c5670bc
--- /dev/null
+++ b/assets/images/system.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7b38a34f393ff023b56c18a0fffec186018a4c5ef8f275250698c27123035474
+size 1569660
diff --git a/assets/images/teaser.jpg b/assets/images/teaser.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..591d23ad42d25199543d521ee6d62eb87c1618eb
--- /dev/null
+++ b/assets/images/teaser.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9deac73e728652dfa5e2b73833401dcf659cce9d15a9150a52d8aac62c91e4c9
+size 1867475
diff --git a/assets/modelviewer-template.html b/assets/modelviewer-template.html
new file mode 100644
index 0000000000000000000000000000000000000000..edf2374634d746e47ce30eb79074eded6c9026a8
--- /dev/null
+++ b/assets/modelviewer-template.html
@@ -0,0 +1,81 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/assets/modelviewer-textured-template.html b/assets/modelviewer-textured-template.html
new file mode 100644
index 0000000000000000000000000000000000000000..bc67d3221cd9e43660388e6fa931069ea26649b2
--- /dev/null
+++ b/assets/modelviewer-textured-template.html
@@ -0,0 +1,136 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/assets/qrcode/discord.png b/assets/qrcode/discord.png
new file mode 100644
index 0000000000000000000000000000000000000000..16eeb159e6bfc8273dfb42b77d2fa32452033de5
Binary files /dev/null and b/assets/qrcode/discord.png differ
diff --git a/assets/qrcode/wechat.png b/assets/qrcode/wechat.png
new file mode 100644
index 0000000000000000000000000000000000000000..4f25092d07ee612ff4ef82d4afd41acfca293f3c
Binary files /dev/null and b/assets/qrcode/wechat.png differ
diff --git a/assets/qrcode/x.png b/assets/qrcode/x.png
new file mode 100644
index 0000000000000000000000000000000000000000..e18742f92bf28484d6ff75f30fe0fd9083c9a506
Binary files /dev/null and b/assets/qrcode/x.png differ
diff --git a/assets/qrcode/xiaohongshu.png b/assets/qrcode/xiaohongshu.png
new file mode 100644
index 0000000000000000000000000000000000000000..97c9a16d0d24005554d2cd395e0887a34010f3ff
Binary files /dev/null and b/assets/qrcode/xiaohongshu.png differ
diff --git a/custom_rasterizer-0.1-cp310-cp310-linux_x86_64.whl b/custom_rasterizer-0.1-cp310-cp310-linux_x86_64.whl
new file mode 100644
index 0000000000000000000000000000000000000000..bde3503a9bc08de93194665f5b5ffcdb0dc285a1
--- /dev/null
+++ b/custom_rasterizer-0.1-cp310-cp310-linux_x86_64.whl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1dc5bea62f7ef924b9f58722b9f7634501b05af2b9507e736c256d6b2b9d90fc
+size 4674364
diff --git a/gradio_app.py b/gradio_app.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ae3bfdc3b9f633317fbdeeb219e44de48ea9276
--- /dev/null
+++ b/gradio_app.py
@@ -0,0 +1,769 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the repsective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+import os
+import random
+import shutil
+import time
+from glob import glob
+from pathlib import Path
+
+import gradio as gr
+import torch
+import trimesh
+import uvicorn
+from fastapi import FastAPI
+from fastapi.staticfiles import StaticFiles
+import uuid
+
+from hy3dgen.shapegen.utils import logger
+
+MAX_SEED = 1e7
+
+if True:
+ import os
+ import spaces
+ import subprocess
+ import sys
+ import shlex
+ print("cd /home/user/app/hy3dgen/texgen/differentiable_renderer/ && bash compile_mesh_painter.sh")
+ os.system("cd /home/user/app/hy3dgen/texgen/differentiable_renderer/ && bash compile_mesh_painter.sh")
+ print('install custom')
+ subprocess.run(shlex.split("pip install custom_rasterizer-0.1-cp310-cp310-linux_x86_64.whl"), check=True)
+
+
+def get_example_img_list():
+ print('Loading example img list ...')
+ return sorted(glob('./assets/example_images/**/*.png', recursive=True))
+
+
+def get_example_txt_list():
+ print('Loading example txt list ...')
+ txt_list = list()
+ for line in open('./assets/example_prompts.txt', encoding='utf-8'):
+ txt_list.append(line.strip())
+ return txt_list
+
+
+def get_example_mv_list():
+ print('Loading example mv list ...')
+ mv_list = list()
+ root = './assets/example_mv_images'
+ for mv_dir in os.listdir(root):
+ view_list = []
+ for view in ['front', 'back', 'left', 'right']:
+ path = os.path.join(root, mv_dir, f'{view}.png')
+ if os.path.exists(path):
+ view_list.append(path)
+ else:
+ view_list.append(None)
+ mv_list.append(view_list)
+ return mv_list
+
+
+def gen_save_folder(max_size=200):
+ os.makedirs(SAVE_DIR, exist_ok=True)
+
+ # 获取所有文件夹路径
+ dirs = [f for f in Path(SAVE_DIR).iterdir() if f.is_dir()]
+
+ # 如果文件夹数量超过 max_size,删除创建时间最久的文件夹
+ if len(dirs) >= max_size:
+ # 按创建时间排序,最久的排在前面
+ oldest_dir = min(dirs, key=lambda x: x.stat().st_ctime)
+ shutil.rmtree(oldest_dir)
+ print(f"Removed the oldest folder: {oldest_dir}")
+
+ # 生成一个新的 uuid 文件夹名称
+ new_folder = os.path.join(SAVE_DIR, str(uuid.uuid4()))
+ os.makedirs(new_folder, exist_ok=True)
+ print(f"Created new folder: {new_folder}")
+
+ return new_folder
+
+
+def export_mesh(mesh, save_folder, textured=False, type='glb'):
+ if textured:
+ path = os.path.join(save_folder, f'textured_mesh.{type}')
+ else:
+ path = os.path.join(save_folder, f'white_mesh.{type}')
+ if type not in ['glb', 'obj']:
+ mesh.export(path)
+ else:
+ mesh.export(path, include_normals=textured)
+ return path
+
+
+def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
+ if randomize_seed:
+ seed = random.randint(0, MAX_SEED)
+ return seed
+
+
+def build_model_viewer_html(save_folder, height=660, width=790, textured=False):
+ # Remove first folder from path to make relative path
+ if textured:
+ related_path = f"./textured_mesh.glb"
+ template_name = './assets/modelviewer-textured-template.html'
+ output_html_path = os.path.join(save_folder, f'textured_mesh.html')
+ else:
+ related_path = f"./white_mesh.glb"
+ template_name = './assets/modelviewer-template.html'
+ output_html_path = os.path.join(save_folder, f'white_mesh.html')
+ offset = 50 if textured else 10
+ with open(os.path.join(CURRENT_DIR, template_name), 'r', encoding='utf-8') as f:
+ template_html = f.read()
+
+ with open(output_html_path, 'w', encoding='utf-8') as f:
+ template_html = template_html.replace('#height#', f'{height - offset}')
+ template_html = template_html.replace('#width#', f'{width}')
+ template_html = template_html.replace('#src#', f'{related_path}/')
+ f.write(template_html)
+
+ rel_path = os.path.relpath(output_html_path, SAVE_DIR)
+ iframe_tag = f''
+ print(
+ f'Find html file {output_html_path}, {os.path.exists(output_html_path)}, relative HTML path is /static/{rel_path}')
+
+ return f"""
+
+ {iframe_tag}
+
+ """
+
+@spaces.GPU(duration=40)
+def _gen_shape(
+ caption=None,
+ image=None,
+ mv_image_front=None,
+ mv_image_back=None,
+ mv_image_left=None,
+ mv_image_right=None,
+ steps=50,
+ guidance_scale=7.5,
+ seed=1234,
+ octree_resolution=256,
+ check_box_rembg=False,
+ num_chunks=200000,
+ randomize_seed: bool = False,
+):
+ if not MV_MODE and image is None and caption is None:
+ raise gr.Error("Please provide either a caption or an image.")
+ if MV_MODE:
+ if mv_image_front is None and mv_image_back is None and mv_image_left is None and mv_image_right is None:
+ raise gr.Error("Please provide at least one view image.")
+ image = {}
+ if mv_image_front:
+ image['front'] = mv_image_front
+ if mv_image_back:
+ image['back'] = mv_image_back
+ if mv_image_left:
+ image['left'] = mv_image_left
+ if mv_image_right:
+ image['right'] = mv_image_right
+
+ seed = int(randomize_seed_fn(seed, randomize_seed))
+
+ octree_resolution = int(octree_resolution)
+ if caption: print('prompt is', caption)
+ save_folder = gen_save_folder()
+ stats = {
+ 'model': {
+ 'shapegen': f'{args.model_path}/{args.subfolder}',
+ 'texgen': f'{args.texgen_model_path}',
+ },
+ 'params': {
+ 'caption': caption,
+ 'steps': steps,
+ 'guidance_scale': guidance_scale,
+ 'seed': seed,
+ 'octree_resolution': octree_resolution,
+ 'check_box_rembg': check_box_rembg,
+ 'num_chunks': num_chunks,
+ }
+ }
+ time_meta = {}
+
+ if image is None:
+ start_time = time.time()
+ try:
+ image = t2i_worker(caption)
+ except Exception as e:
+ raise gr.Error(f"Text to 3D is disable. Please enable it by `python gradio_app.py --enable_t23d`.")
+ time_meta['text2image'] = time.time() - start_time
+
+ # remove disk io to make responding faster, uncomment at your will.
+ # image.save(os.path.join(save_folder, 'input.png'))
+ if MV_MODE:
+ start_time = time.time()
+ for k, v in image.items():
+ if check_box_rembg or v.mode == "RGB":
+ img = rmbg_worker(v.convert('RGB'))
+ image[k] = img
+ time_meta['remove background'] = time.time() - start_time
+ else:
+ if check_box_rembg or image.mode == "RGB":
+ start_time = time.time()
+ image = rmbg_worker(image.convert('RGB'))
+ time_meta['remove background'] = time.time() - start_time
+
+ # remove disk io to make responding faster, uncomment at your will.
+ # image.save(os.path.join(save_folder, 'rembg.png'))
+
+ # image to white model
+ start_time = time.time()
+
+ generator = torch.Generator()
+ generator = generator.manual_seed(int(seed))
+ outputs = i23d_worker(
+ image=image,
+ num_inference_steps=steps,
+ guidance_scale=guidance_scale,
+ generator=generator,
+ octree_resolution=octree_resolution,
+ num_chunks=num_chunks,
+ output_type='mesh'
+ )
+ time_meta['shape generation'] = time.time() - start_time
+ logger.info("---Shape generation takes %s seconds ---" % (time.time() - start_time))
+
+ tmp_start = time.time()
+ mesh = export_to_trimesh(outputs)[0]
+ time_meta['export to trimesh'] = time.time() - tmp_start
+
+ stats['number_of_faces'] = mesh.faces.shape[0]
+ stats['number_of_vertices'] = mesh.vertices.shape[0]
+
+ stats['time'] = time_meta
+ main_image = image if not MV_MODE else image['front']
+ return mesh, main_image, save_folder, stats, seed
+
+@spaces.GPU(duration=90)
+def generation_all(
+ caption=None,
+ image=None,
+ mv_image_front=None,
+ mv_image_back=None,
+ mv_image_left=None,
+ mv_image_right=None,
+ steps=50,
+ guidance_scale=7.5,
+ seed=1234,
+ octree_resolution=256,
+ check_box_rembg=False,
+ num_chunks=200000,
+ randomize_seed: bool = False,
+):
+ start_time_0 = time.time()
+ mesh, image, save_folder, stats, seed = _gen_shape(
+ caption,
+ image,
+ mv_image_front=mv_image_front,
+ mv_image_back=mv_image_back,
+ mv_image_left=mv_image_left,
+ mv_image_right=mv_image_right,
+ steps=steps,
+ guidance_scale=guidance_scale,
+ seed=seed,
+ octree_resolution=octree_resolution,
+ check_box_rembg=check_box_rembg,
+ num_chunks=num_chunks,
+ randomize_seed=randomize_seed,
+ )
+ path = export_mesh(mesh, save_folder, textured=False)
+
+ # tmp_time = time.time()
+ # mesh = floater_remove_worker(mesh)
+ # mesh = degenerate_face_remove_worker(mesh)
+ # logger.info("---Postprocessing takes %s seconds ---" % (time.time() - tmp_time))
+ # stats['time']['postprocessing'] = time.time() - tmp_time
+
+ tmp_time = time.time()
+ mesh = face_reduce_worker(mesh)
+ logger.info("---Face Reduction takes %s seconds ---" % (time.time() - tmp_time))
+ stats['time']['face reduction'] = time.time() - tmp_time
+
+ tmp_time = time.time()
+ textured_mesh = texgen_worker(mesh, image)
+ logger.info("---Texture Generation takes %s seconds ---" % (time.time() - tmp_time))
+ stats['time']['texture generation'] = time.time() - tmp_time
+ stats['time']['total'] = time.time() - start_time_0
+
+ textured_mesh.metadata['extras'] = stats
+ path_textured = export_mesh(textured_mesh, save_folder, textured=True)
+ model_viewer_html_textured = build_model_viewer_html(save_folder, height=HTML_HEIGHT, width=HTML_WIDTH,
+ textured=True)
+ if args.low_vram_mode:
+ torch.cuda.empty_cache()
+ return (
+ gr.update(value=path),
+ gr.update(value=path_textured),
+ model_viewer_html_textured,
+ stats,
+ seed,
+ )
+
+@spaces.GPU(duration=40)
+def shape_generation(
+ caption=None,
+ image=None,
+ mv_image_front=None,
+ mv_image_back=None,
+ mv_image_left=None,
+ mv_image_right=None,
+ steps=50,
+ guidance_scale=7.5,
+ seed=1234,
+ octree_resolution=256,
+ check_box_rembg=False,
+ num_chunks=200000,
+ randomize_seed: bool = False,
+):
+ start_time_0 = time.time()
+ mesh, image, save_folder, stats, seed = _gen_shape(
+ caption,
+ image,
+ mv_image_front=mv_image_front,
+ mv_image_back=mv_image_back,
+ mv_image_left=mv_image_left,
+ mv_image_right=mv_image_right,
+ steps=steps,
+ guidance_scale=guidance_scale,
+ seed=seed,
+ octree_resolution=octree_resolution,
+ check_box_rembg=check_box_rembg,
+ num_chunks=num_chunks,
+ randomize_seed=randomize_seed,
+ )
+ stats['time']['total'] = time.time() - start_time_0
+ mesh.metadata['extras'] = stats
+
+ path = export_mesh(mesh, save_folder, textured=False)
+ model_viewer_html = build_model_viewer_html(save_folder, height=HTML_HEIGHT, width=HTML_WIDTH)
+ if args.low_vram_mode:
+ torch.cuda.empty_cache()
+ return (
+ gr.update(value=path),
+ model_viewer_html,
+ stats,
+ seed,
+ )
+
+
+def build_app():
+ title = 'Hunyuan3D-2: High Resolution Textured 3D Assets Generation'
+ if MV_MODE:
+ title = 'Hunyuan3D-2mv: Image to 3D Generation with 1-4 Views'
+ if 'mini' in args.subfolder:
+ title = 'Hunyuan3D-2mini: Strong 0.6B Image to Shape Generator'
+ if TURBO_MODE:
+ title = title.replace(':', '-Turbo: Fast ')
+
+ title_html = f"""
+
+
+ {title}
+
+
+ Tencent Hunyuan3D Team
+
+
+ """
+ custom_css = """
+ .app.svelte-wpkpf6.svelte-wpkpf6:not(.fill_width) {
+ max-width: 1480px;
+ }
+ .mv-image button .wrap {
+ font-size: 10px;
+ }
+
+ .mv-image .icon-wrap {
+ width: 20px;
+ }
+
+ """
+
+ with gr.Blocks(theme=gr.themes.Base(), title='Hunyuan-3D-2.0', analytics_enabled=False, css=custom_css) as demo:
+ gr.HTML(title_html)
+
+ with gr.Row():
+ with gr.Column(scale=3):
+ with gr.Tabs(selected='tab_img_prompt') as tabs_prompt:
+ with gr.Tab('Image Prompt', id='tab_img_prompt', visible=not MV_MODE) as tab_ip:
+ image = gr.Image(label='Image', type='pil', image_mode='RGBA', height=290)
+
+ with gr.Tab('Text Prompt', id='tab_txt_prompt', visible=HAS_T2I and not MV_MODE) as tab_tp:
+ caption = gr.Textbox(label='Text Prompt',
+ placeholder='HunyuanDiT will be used to generate image.',
+ info='Example: A 3D model of a cute cat, white background')
+ with gr.Tab('MultiView Prompt', visible=MV_MODE) as tab_mv:
+ # gr.Label('Please upload at least one front image.')
+ with gr.Row():
+ mv_image_front = gr.Image(label='Front', type='pil', image_mode='RGBA', height=140,
+ min_width=100, elem_classes='mv-image')
+ mv_image_back = gr.Image(label='Back', type='pil', image_mode='RGBA', height=140,
+ min_width=100, elem_classes='mv-image')
+ with gr.Row():
+ mv_image_left = gr.Image(label='Left', type='pil', image_mode='RGBA', height=140,
+ min_width=100, elem_classes='mv-image')
+ mv_image_right = gr.Image(label='Right', type='pil', image_mode='RGBA', height=140,
+ min_width=100, elem_classes='mv-image')
+
+ with gr.Row():
+ btn = gr.Button(value='Gen Shape', variant='primary', min_width=100)
+ btn_all = gr.Button(value='Gen Textured Shape',
+ variant='primary',
+ visible=HAS_TEXTUREGEN,
+ min_width=100)
+
+ with gr.Group():
+ file_out = gr.File(label="File", visible=False)
+ file_out2 = gr.File(label="File", visible=False)
+
+ with gr.Tabs(selected='tab_options' if TURBO_MODE else 'tab_export'):
+ with gr.Tab("Options", id='tab_options', visible=TURBO_MODE):
+ gen_mode = gr.Radio(label='Generation Mode',
+ info='Recommendation: Turbo for most cases, Fast for very complex cases, Standard seldom use.',
+ choices=['Turbo', 'Fast', 'Standard'], value='Turbo')
+ decode_mode = gr.Radio(label='Decoding Mode',
+ info='The resolution for exporting mesh from generated vectset',
+ choices=['Low', 'Standard', 'High'],
+ value='Standard')
+ with gr.Tab('Advanced Options', id='tab_advanced_options'):
+ with gr.Row():
+ check_box_rembg = gr.Checkbox(value=True, label='Remove Background', min_width=100)
+ randomize_seed = gr.Checkbox(label="Randomize seed", value=True, min_width=100)
+ seed = gr.Slider(
+ label="Seed",
+ minimum=0,
+ maximum=MAX_SEED,
+ step=1,
+ value=1234,
+ min_width=100,
+ )
+ with gr.Row():
+ num_steps = gr.Slider(maximum=100,
+ minimum=1,
+ value=5 if 'turbo' in args.subfolder else 30,
+ step=1, label='Inference Steps')
+ octree_resolution = gr.Slider(maximum=512, minimum=16, value=256, label='Octree Resolution')
+ with gr.Row():
+ cfg_scale = gr.Number(value=5.0, label='Guidance Scale', min_width=100)
+ num_chunks = gr.Slider(maximum=5000000, minimum=1000, value=8000,
+ label='Number of Chunks', min_width=100)
+ with gr.Tab("Export", id='tab_export'):
+ with gr.Row():
+ file_type = gr.Dropdown(label='File Type', choices=SUPPORTED_FORMATS,
+ value='glb', min_width=100)
+ reduce_face = gr.Checkbox(label='Simplify Mesh', value=False, min_width=100)
+ export_texture = gr.Checkbox(label='Include Texture', value=False,
+ visible=False, min_width=100)
+ target_face_num = gr.Slider(maximum=1000000, minimum=100, value=10000,
+ label='Target Face Number')
+ with gr.Row():
+ confirm_export = gr.Button(value="Transform", min_width=100)
+ file_export = gr.DownloadButton(label="Download", variant='primary',
+ interactive=False, min_width=100)
+
+ with gr.Column(scale=6):
+ with gr.Tabs(selected='gen_mesh_panel') as tabs_output:
+ with gr.Tab('Generated Mesh', id='gen_mesh_panel'):
+ html_gen_mesh = gr.HTML(HTML_OUTPUT_PLACEHOLDER, label='Output')
+ with gr.Tab('Exporting Mesh', id='export_mesh_panel'):
+ html_export_mesh = gr.HTML(HTML_OUTPUT_PLACEHOLDER, label='Output')
+ with gr.Tab('Mesh Statistic', id='stats_panel'):
+ stats = gr.Json({}, label='Mesh Stats')
+
+ with gr.Column(scale=3 if MV_MODE else 2):
+ with gr.Tabs(selected='tab_img_gallery') as gallery:
+ with gr.Tab('Image to 3D Gallery', id='tab_img_gallery', visible=not MV_MODE) as tab_gi:
+ with gr.Row():
+ gr.Examples(examples=example_is, inputs=[image],
+ label=None, examples_per_page=18)
+
+ with gr.Tab('Text to 3D Gallery', id='tab_txt_gallery', visible=HAS_T2I and not MV_MODE) as tab_gt:
+ with gr.Row():
+ gr.Examples(examples=example_ts, inputs=[caption],
+ label=None, examples_per_page=18)
+ with gr.Tab('MultiView to 3D Gallery', id='tab_mv_gallery', visible=MV_MODE) as tab_mv:
+ with gr.Row():
+ gr.Examples(examples=example_mvs,
+ inputs=[mv_image_front, mv_image_back, mv_image_left, mv_image_right],
+ label=None, examples_per_page=6)
+
+ gr.HTML(f"""
+
+ Activated Model - Shape Generation ({args.model_path}/{args.subfolder}) ; Texture Generation ({'Hunyuan3D-2' if HAS_TEXTUREGEN else 'Unavailable'})
+
+ """)
+ if not HAS_TEXTUREGEN:
+ gr.HTML("""
+
+
Warning:
+ Texture synthesis is disable due to missing requirements,
+ please install requirements following
README.md to activate it.
+
+ """)
+ if not args.enable_t23d:
+ gr.HTML("""
+
+ Warning:
+ Text to 3D is disable. To activate it, please run `python gradio_app.py --enable_t23d`.
+
+ """)
+
+ tab_ip.select(fn=lambda: gr.update(selected='tab_img_gallery'), outputs=gallery)
+ if HAS_T2I:
+ tab_tp.select(fn=lambda: gr.update(selected='tab_txt_gallery'), outputs=gallery)
+
+ btn.click(
+ shape_generation,
+ inputs=[
+ caption,
+ image,
+ mv_image_front,
+ mv_image_back,
+ mv_image_left,
+ mv_image_right,
+ num_steps,
+ cfg_scale,
+ seed,
+ octree_resolution,
+ check_box_rembg,
+ num_chunks,
+ randomize_seed,
+ ],
+ outputs=[file_out, html_gen_mesh, stats, seed]
+ ).then(
+ lambda: (gr.update(visible=False, value=False), gr.update(interactive=True), gr.update(interactive=True),
+ gr.update(interactive=False)),
+ outputs=[export_texture, reduce_face, confirm_export, file_export],
+ ).then(
+ lambda: gr.update(selected='gen_mesh_panel'),
+ outputs=[tabs_output],
+ )
+
+ btn_all.click(
+ generation_all,
+ inputs=[
+ caption,
+ image,
+ mv_image_front,
+ mv_image_back,
+ mv_image_left,
+ mv_image_right,
+ num_steps,
+ cfg_scale,
+ seed,
+ octree_resolution,
+ check_box_rembg,
+ num_chunks,
+ randomize_seed,
+ ],
+ outputs=[file_out, file_out2, html_gen_mesh, stats, seed]
+ ).then(
+ lambda: (gr.update(visible=True, value=True), gr.update(interactive=False), gr.update(interactive=True),
+ gr.update(interactive=False)),
+ outputs=[export_texture, reduce_face, confirm_export, file_export],
+ ).then(
+ lambda: gr.update(selected='gen_mesh_panel'),
+ outputs=[tabs_output],
+ )
+
+ def on_gen_mode_change(value):
+ if value == 'Turbo':
+ return gr.update(value=5)
+ elif value == 'Fast':
+ return gr.update(value=10)
+ else:
+ return gr.update(value=30)
+
+ gen_mode.change(on_gen_mode_change, inputs=[gen_mode], outputs=[num_steps])
+
+ def on_decode_mode_change(value):
+ if value == 'Low':
+ return gr.update(value=196)
+ elif value == 'Standard':
+ return gr.update(value=256)
+ else:
+ return gr.update(value=384)
+
+ decode_mode.change(on_decode_mode_change, inputs=[decode_mode], outputs=[octree_resolution])
+
+ def on_export_click(file_out, file_out2, file_type, reduce_face, export_texture, target_face_num):
+ if file_out is None:
+ raise gr.Error('Please generate a mesh first.')
+
+ print(f'exporting {file_out}')
+ print(f'reduce face to {target_face_num}')
+ if export_texture:
+ mesh = trimesh.load(file_out2)
+ save_folder = gen_save_folder()
+ path = export_mesh(mesh, save_folder, textured=True, type=file_type)
+
+ # for preview
+ save_folder = gen_save_folder()
+ _ = export_mesh(mesh, save_folder, textured=True)
+ model_viewer_html = build_model_viewer_html(save_folder, height=HTML_HEIGHT, width=HTML_WIDTH,
+ textured=True)
+ else:
+ mesh = trimesh.load(file_out)
+ mesh = floater_remove_worker(mesh)
+ mesh = degenerate_face_remove_worker(mesh)
+ if reduce_face:
+ mesh = face_reduce_worker(mesh, target_face_num)
+ save_folder = gen_save_folder()
+ path = export_mesh(mesh, save_folder, textured=False, type=file_type)
+
+ # for preview
+ save_folder = gen_save_folder()
+ _ = export_mesh(mesh, save_folder, textured=False)
+ model_viewer_html = build_model_viewer_html(save_folder, height=HTML_HEIGHT, width=HTML_WIDTH,
+ textured=False)
+ print(f'export to {path}')
+ return model_viewer_html, gr.update(value=path, interactive=True)
+
+ confirm_export.click(
+ lambda: gr.update(selected='export_mesh_panel'),
+ outputs=[tabs_output],
+ ).then(
+ on_export_click,
+ inputs=[file_out, file_out2, file_type, reduce_face, export_texture, target_face_num],
+ outputs=[html_export_mesh, file_export]
+ )
+
+ return demo
+
+
+if __name__ == '__main__':
+ import argparse
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--model_path", type=str, default='tencent/Hunyuan3D-2')
+ parser.add_argument("--subfolder", type=str, default='hunyuan3d-dit-v2-0')
+ parser.add_argument("--texgen_model_path", type=str, default='tencent/Hunyuan3D-2')
+ parser.add_argument('--port', type=int, default=7860)
+ parser.add_argument('--host', type=str, default='0.0.0.0')
+ parser.add_argument('--device', type=str, default='cuda')
+ parser.add_argument('--mc_algo', type=str, default='mc')
+ parser.add_argument('--cache-path', type=str, default='gradio_cache')
+ parser.add_argument('--enable_t23d', action='store_true')
+ parser.add_argument('--disable_tex', action='store_true')
+ parser.add_argument('--enable_flashvdm', action='store_true')
+ parser.add_argument('--compile', action='store_true')
+ parser.add_argument('--low_vram_mode', action='store_true')
+ args = parser.parse_args()
+
+ args.enable_flashvdm = True
+ args.enable_t23d = False
+
+ SAVE_DIR = args.cache_path
+ os.makedirs(SAVE_DIR, exist_ok=True)
+
+ CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
+ MV_MODE = 'mv' in args.model_path
+ TURBO_MODE = 'turbo' in args.subfolder
+
+ HTML_HEIGHT = 690 if MV_MODE else 650
+ HTML_WIDTH = 500
+ HTML_OUTPUT_PLACEHOLDER = f"""
+
+
+
Welcome to Hunyuan3D!
+
No mesh here.
+
+
+ """
+
+ INPUT_MESH_HTML = """
+
+
+ """
+ example_is = get_example_img_list()
+ example_ts = get_example_txt_list()
+ example_mvs = get_example_mv_list()
+
+ SUPPORTED_FORMATS = ['glb', 'obj', 'ply', 'stl']
+
+ HAS_TEXTUREGEN = False
+ if not args.disable_tex:
+ try:
+ from hy3dgen.texgen import Hunyuan3DPaintPipeline
+
+ texgen_worker = Hunyuan3DPaintPipeline.from_pretrained(args.texgen_model_path)
+ if args.low_vram_mode:
+ texgen_worker.enable_model_cpu_offload()
+ # Not help much, ignore for now.
+ # if args.compile:
+ # texgen_worker.models['delight_model'].pipeline.unet.compile()
+ # texgen_worker.models['delight_model'].pipeline.vae.compile()
+ # texgen_worker.models['multiview_model'].pipeline.unet.compile()
+ # texgen_worker.models['multiview_model'].pipeline.vae.compile()
+ HAS_TEXTUREGEN = True
+ except Exception as e:
+ print(e)
+ print("Failed to load texture generator.")
+ print('Please try to install requirements by following README.md')
+ HAS_TEXTUREGEN = False
+
+ HAS_T2I = True
+ if args.enable_t23d:
+ from hy3dgen.text2image import HunyuanDiTPipeline
+
+ t2i_worker = HunyuanDiTPipeline('Tencent-Hunyuan/HunyuanDiT-v1.1-Diffusers-Distilled')
+ HAS_T2I = True
+
+ from hy3dgen.shapegen import FaceReducer, FloaterRemover, DegenerateFaceRemover, MeshSimplifier, \
+ Hunyuan3DDiTFlowMatchingPipeline
+ from hy3dgen.shapegen.pipelines import export_to_trimesh
+ from hy3dgen.rembg import BackgroundRemover
+
+ rmbg_worker = BackgroundRemover()
+ i23d_worker = Hunyuan3DDiTFlowMatchingPipeline.from_pretrained(
+ args.model_path,
+ subfolder=args.subfolder,
+ use_safetensors=True,
+ device=args.device,
+ )
+ if args.enable_flashvdm:
+ mc_algo = 'mc' if args.device in ['cpu', 'mps'] else args.mc_algo
+ i23d_worker.enable_flashvdm(mc_algo=mc_algo)
+ if args.compile:
+ i23d_worker.compile()
+
+ floater_remove_worker = FloaterRemover()
+ degenerate_face_remove_worker = DegenerateFaceRemover()
+ face_reduce_worker = FaceReducer()
+
+ # https://discuss.huggingface.co/t/how-to-serve-an-html-file/33921/2
+ # create a FastAPI app
+ app = FastAPI()
+ # create a static directory to store the static files
+ static_dir = Path(SAVE_DIR).absolute()
+ static_dir.mkdir(parents=True, exist_ok=True)
+ app.mount("/static", StaticFiles(directory=static_dir, html=True), name="static")
+ shutil.copytree('./assets/env_maps', os.path.join(static_dir, 'env_maps'), dirs_exist_ok=True)
+
+ if args.low_vram_mode:
+ torch.cuda.empty_cache()
+ demo = build_app()
+ app = gr.mount_gradio_app(app, demo, path="/")
+ uvicorn.run(app, host=args.host, port=args.port)
diff --git a/hg_app.py b/hg_app.py
new file mode 100644
index 0000000000000000000000000000000000000000..d080222cc7798d5ba24b601d01ad6966eda9e926
--- /dev/null
+++ b/hg_app.py
@@ -0,0 +1,439 @@
+# pip install gradio==4.44.1
+import argparse
+parser = argparse.ArgumentParser()
+parser.add_argument('--port', type=int, default=8080)
+parser.add_argument('--cache-path', type=str, default='gradio_cache')
+parser.add_argument('--enable_t23d', default=False)
+parser.add_argument('--local', action="store_true")
+args = parser.parse_args()
+
+print(f"Running on {'local' if args.local else 'huggingface'}")
+if not args.local:
+ import os
+ import spaces
+ import subprocess
+ import sys
+ import shlex
+
+ print("cd /home/user/app/hy3dgen/texgen/differentiable_renderer/ && bash compile_mesh_painter.sh")
+ os.system("cd /home/user/app/hy3dgen/texgen/differentiable_renderer/ && bash compile_mesh_painter.sh")
+ print('install custom')
+ subprocess.run(shlex.split("pip install custom_rasterizer-0.1-cp310-cp310-linux_x86_64.whl"), check=True)
+
+ IP = "0.0.0.0"
+ PORT = 7860
+
+else:
+ IP = "0.0.0.0"
+ PORT = 8080
+ class spaces:
+ class GPU:
+ def __init__(self, duration=60):
+ self.duration = duration
+ def __call__(self, func):
+ return func
+
+import os
+import shutil
+import time
+from glob import glob
+from pathlib import Path
+from PIL import Image
+from datetime import datetime
+import uuid
+import gradio as gr
+import torch
+import uvicorn
+from fastapi import FastAPI
+from fastapi.staticfiles import StaticFiles
+
+
+def start_session(req: gr.Request):
+ save_folder = os.path.join(SAVE_DIR, str(req.session_hash))
+ os.makedirs(save_folder, exist_ok=True)
+
+def end_session(req: gr.Request):
+ save_folder = os.path.join(SAVE_DIR, str(req.session_hash))
+ shutil.rmtree(save_folder)
+
+def get_example_img_list():
+ print('Loading example img list ...')
+ return sorted(glob('./assets/example_images/*.png'))
+
+
+def get_example_txt_list():
+ print('Loading example txt list ...')
+ txt_list = list()
+ for line in open('./assets/example_prompts.txt'):
+ txt_list.append(line.strip())
+ return txt_list
+
+
+def export_mesh(mesh, save_folder, textured=False):
+ if textured:
+ path = os.path.join(save_folder, f'textured_mesh.glb')
+ else:
+ path = os.path.join(save_folder, f'white_mesh.glb')
+ mesh.export(path, include_normals=textured)
+ return path
+
+def build_model_viewer_html(save_folder, height=660, width=790, textured=False):
+ if textured:
+ related_path = f"./textured_mesh.glb"
+ template_name = './assets/modelviewer-textured-template.html'
+ output_html_path = os.path.join(save_folder, f'{uuid.uuid4()}_textured_mesh.html')
+ else:
+ related_path = f"./white_mesh.glb"
+ template_name = './assets/modelviewer-template.html'
+ output_html_path = os.path.join(save_folder, f'{uuid.uuid4()}_white_mesh.html')
+
+ with open(os.path.join(CURRENT_DIR, template_name), 'r') as f:
+ template_html = f.read()
+ obj_html = f"""
+
+
+
+
+ """
+
+ with open(output_html_path, 'w') as f:
+ f.write(template_html.replace('', obj_html))
+
+ output_html_path = output_html_path.replace(SAVE_DIR + '/', '')
+ iframe_tag = f''
+ print(f'Find html {output_html_path}, {os.path.exists(output_html_path)}')
+
+ # rel_path = os.path.relpath(output_html_path, SAVE_DIR)
+ # iframe_tag = f''
+ # print(f'Find html file {output_html_path}, {os.path.exists(output_html_path)}, relative HTML path is /static/{rel_path}')
+
+ return f"""
+
+ {iframe_tag}
+
+ """
+
+
+@spaces.GPU(duration=100)
+def _gen_shape(
+ caption: str,
+ image: Image.Image,
+ steps: int,
+ guidance_scale: float,
+ seed: int,
+ octree_resolution: int,
+ check_box_rembg: bool,
+ req: gr.Request,
+):
+ if caption: print('prompt is', caption)
+ save_folder = os.path.join(SAVE_DIR, str(req.session_hash))
+ os.makedirs(save_folder, exist_ok=True)
+
+ stats = {}
+ time_meta = {}
+ start_time_0 = time.time()
+
+ if image is None:
+ start_time = time.time()
+ try:
+ image = t2i_worker(caption)
+ except Exception as e:
+ raise gr.Error(f"Text to 3D is disable. Please enable it by `python gradio_app.py --enable_t23d`.")
+ time_meta['text2image'] = time.time() - start_time
+
+ image.save(os.path.join(save_folder, 'input.png'))
+
+ print(f"[{datetime.now()}][HunYuan3D-2]]", str(req.session_hash), image.mode)
+ if check_box_rembg or image.mode == "RGB":
+ start_time = time.time()
+ image = rmbg_worker(image.convert('RGB'))
+ time_meta['rembg'] = time.time() - start_time
+
+ image.save(os.path.join(save_folder, 'rembg.png'))
+
+ # image to white model
+ start_time = time.time()
+
+ generator = torch.Generator()
+ generator = generator.manual_seed(int(seed))
+ mesh = i23d_worker(
+ image=image,
+ num_inference_steps=steps,
+ guidance_scale=guidance_scale,
+ generator=generator,
+ octree_resolution=octree_resolution
+ )[0]
+
+ mesh = FloaterRemover()(mesh)
+ mesh = DegenerateFaceRemover()(mesh)
+ mesh = FaceReducer()(mesh)
+
+ stats['number_of_faces'] = mesh.faces.shape[0]
+ stats['number_of_vertices'] = mesh.vertices.shape[0]
+
+ time_meta['image_to_textured_3d'] = {'total': time.time() - start_time}
+ time_meta['total'] = time.time() - start_time_0
+ stats['time'] = time_meta
+
+ torch.cuda.empty_cache()
+ return mesh, save_folder, image
+
+@spaces.GPU(duration=150)
+def generation_all(
+ caption: str,
+ image: Image.Image,
+ steps: int,
+ guidance_scale: float,
+ seed: int,
+ octree_resolution: int,
+ check_box_rembg: bool,
+ req: gr.Request,
+):
+ mesh, save_folder, image = _gen_shape(
+ caption,
+ image,
+ steps=steps,
+ guidance_scale=guidance_scale,
+ seed=seed,
+ octree_resolution=octree_resolution,
+ check_box_rembg=check_box_rembg,
+ req=req
+ )
+ path = export_mesh(mesh, save_folder, textured=False)
+ model_viewer_html = build_model_viewer_html(save_folder, height=596, width=700)
+
+ textured_mesh = texgen_worker(mesh, image)
+ path_textured = export_mesh(textured_mesh, save_folder, textured=True)
+ model_viewer_html_textured = build_model_viewer_html(save_folder, height=596, width=700, textured=True)
+
+ torch.cuda.empty_cache()
+ return (
+ path,
+ path_textured,
+ model_viewer_html,
+ model_viewer_html_textured,
+ )
+
+@spaces.GPU(duration=100)
+def shape_generation(
+ caption: str,
+ image: Image.Image,
+ steps: int,
+ guidance_scale: float,
+ seed: int,
+ octree_resolution: int,
+ check_box_rembg: bool,
+ req: gr.Request,
+):
+ mesh, save_folder, image = _gen_shape(
+ caption,
+ image,
+ steps=steps,
+ guidance_scale=guidance_scale,
+ seed=seed,
+ octree_resolution=octree_resolution,
+ check_box_rembg=check_box_rembg,
+ req=req,
+ )
+
+ path = export_mesh(mesh, save_folder, textured=False)
+ model_viewer_html = build_model_viewer_html(save_folder, height=596, width=700)
+
+ return (
+ path,
+ model_viewer_html,
+ )
+
+
+def build_app():
+ title_html = """
+
+
+ Hunyuan3D-2: Scaling Diffusion Models for High Resolution Textured 3D Assets Generation
+
+
+ Tencent Hunyuan3D Team
+
+
+ """
+
+ with gr.Blocks(theme=gr.themes.Base(), title='Hunyuan-3D-2.0', delete_cache=(1000,1000)) as demo:
+ gr.HTML(title_html)
+
+ with gr.Row():
+ with gr.Column(scale=2):
+ with gr.Tabs() as tabs_prompt:
+ with gr.Tab('Image Prompt', id='tab_img_prompt') as tab_ip:
+ image = gr.Image(label='Image', type='pil', image_mode='RGBA', height=290)
+ with gr.Row():
+ check_box_rembg = gr.Checkbox(value=True, label='Remove Background')
+
+ with gr.Tab('Text Prompt', id='tab_txt_prompt', visible=HAS_T2I) as tab_tp:
+ caption = gr.Textbox(label='Text Prompt',
+ placeholder='HunyuanDiT will be used to generate image.',
+ info='Example: A 3D model of a cute cat, white background')
+
+ with gr.Accordion('Advanced Options', open=False):
+ num_steps = gr.Slider(maximum=50, minimum=20, value=50, step=1, label='Inference Steps')
+ octree_resolution = gr.Dropdown([256, 384, 512], value=256, label='Octree Resolution')
+ cfg_scale = gr.Number(value=5.5, label='Guidance Scale')
+ seed = gr.Slider(maximum=1e7, minimum=0, value=1234, label='Seed')
+
+ with gr.Group():
+ btn = gr.Button(value='Generate Shape Only', variant='primary')
+ btn_all = gr.Button(value='Generate Shape and Texture', variant='primary', visible=HAS_TEXTUREGEN)
+
+ # with gr.Group():
+ # file_out = gr.File(label="File", visible=False)
+ # file_out2 = gr.File(label="File", visible=False)
+
+ with gr.Group():
+ file_out = gr.DownloadButton(label="Download White Mesh", interactive=False)
+ file_out2 = gr.DownloadButton(label="Download Textured Mesh", interactive=False)
+
+ with gr.Column(scale=5):
+ with gr.Tabs():
+ with gr.Tab('Generated Mesh') as mesh1:
+ html_output1 = gr.HTML(HTML_OUTPUT_PLACEHOLDER, label='Output')
+ with gr.Tab('Generated Textured Mesh') as mesh2:
+ html_output2 = gr.HTML(HTML_OUTPUT_PLACEHOLDER, label='Output')
+
+ with gr.Column(scale=2):
+ with gr.Tabs() as gallery:
+ with gr.Tab('Image to 3D Gallery', id='tab_img_gallery') as tab_gi:
+ with gr.Row():
+ gr.Examples(examples=example_is, inputs=[image],
+ label="Image Prompts", examples_per_page=18)
+
+ with gr.Tab('Text to 3D Gallery', id='tab_txt_gallery', visible=HAS_T2I) as tab_gt:
+ with gr.Row():
+ gr.Examples(examples=example_ts, inputs=[caption],
+ label="Text Prompts", examples_per_page=18)
+
+ if not HAS_TEXTUREGEN:
+ gr.HTML("""
+
+ Warning:
+ Texture synthesis is disable due to missing requirements,
+ please install requirements following README.md to activate it.
+
+ """)
+ if not args.enable_t23d:
+ gr.HTML("""
+
+ Warning:
+ Text to 3D is disable. To activate it, please run `python gradio_app.py --enable_t23d`.
+
+ """)
+
+ tab_gi.select(fn=lambda: gr.update(selected='tab_img_prompt'), outputs=tabs_prompt)
+ if HAS_T2I:
+ tab_gt.select(fn=lambda: gr.update(selected='tab_txt_prompt'), outputs=tabs_prompt)
+
+ btn.click(
+ shape_generation,
+ inputs=[
+ caption,
+ image,
+ num_steps,
+ cfg_scale,
+ seed,
+ octree_resolution,
+ check_box_rembg,
+ ],
+ outputs=[file_out, html_output1]
+ ).then(
+ lambda: gr.Button(interactive=True),
+ outputs=[file_out],
+ )
+
+ btn_all.click(
+ generation_all,
+ inputs=[
+ caption,
+ image,
+ num_steps,
+ cfg_scale,
+ seed,
+ octree_resolution,
+ check_box_rembg,
+ ],
+ outputs=[file_out, file_out2, html_output1, html_output2]
+ ).then(
+ lambda: (gr.Button(interactive=True),gr.Button(interactive=True)),
+ outputs=[file_out, file_out2],
+ )
+
+ # demo.load(start_session)
+ # demo.unload(end_session)
+
+ return demo
+
+
+if __name__ == '__main__':
+
+ CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
+ SAVE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), args.cache_path)
+ os.makedirs(SAVE_DIR, exist_ok=True)
+
+ HTML_OUTPUT_PLACEHOLDER = """
+
+ """
+
+ INPUT_MESH_HTML = """
+
+
+ """
+ example_is = get_example_img_list()
+ example_ts = get_example_txt_list()
+
+ try:
+ from hy3dgen.texgen import Hunyuan3DPaintPipeline
+
+ texgen_worker = Hunyuan3DPaintPipeline.from_pretrained('tencent/Hunyuan3D-2')
+ HAS_TEXTUREGEN = True
+ except Exception as e:
+ print(e)
+ print("Failed to load texture generator.")
+ print('Please try to install requirements by following README.md')
+ HAS_TEXTUREGEN = False
+
+ HAS_T2I = False
+ if args.enable_t23d:
+ from hy3dgen.text2image import HunyuanDiTPipeline
+
+ t2i_worker = HunyuanDiTPipeline('Tencent-Hunyuan/HunyuanDiT-v1.1-Diffusers-Distilled')
+ HAS_T2I = True
+
+ from hy3dgen.shapegen import FaceReducer, FloaterRemover, DegenerateFaceRemover, \
+ Hunyuan3DDiTFlowMatchingPipeline
+ from hy3dgen.rembg import BackgroundRemover
+
+ rmbg_worker = BackgroundRemover()
+ i23d_worker = Hunyuan3DDiTFlowMatchingPipeline.from_pretrained('tencent/Hunyuan3D-2')
+ floater_remove_worker = FloaterRemover()
+ degenerate_face_remove_worker = DegenerateFaceRemover()
+ face_reduce_worker = FaceReducer()
+
+ # https://discuss.huggingface.co/t/how-to-serve-an-html-file/33921/2
+ # create a FastAPI app
+ app = FastAPI()
+ # create a static directory to store the static files
+ static_dir = Path('./gradio_cache')
+ static_dir.mkdir(parents=True, exist_ok=True)
+ app.mount("/static", StaticFiles(directory=static_dir), name="static")
+
+ demo = build_app()
+ demo.queue(max_size=10)
+ app = gr.mount_gradio_app(app, demo, path="/")
+ uvicorn.run(app, host=IP, port=PORT)
diff --git a/hg_app_bak.py b/hg_app_bak.py
new file mode 100644
index 0000000000000000000000000000000000000000..d52989d81c6541bf18fc0047898650746923da6f
--- /dev/null
+++ b/hg_app_bak.py
@@ -0,0 +1,402 @@
+# pip install gradio==4.44.1
+if True:
+ import os
+ import spaces
+ import subprocess
+ import sys
+ import shlex
+ print("cd /home/user/app/hy3dgen/texgen/differentiable_renderer/ && bash compile_mesh_painter.sh")
+ os.system("cd /home/user/app/hy3dgen/texgen/differentiable_renderer/ && bash compile_mesh_painter.sh")
+ print('install custom')
+ subprocess.run(shlex.split("pip install custom_rasterizer-0.1-cp310-cp310-linux_x86_64.whl"), check=True)
+ IP = "0.0.0.0"
+ PORT = 7860
+else:
+ IP = "0.0.0.0"
+ PORT = 8080
+ class spaces:
+ class GPU:
+ def __init__(self, duration=60):
+ self.duration = duration
+ def __call__(self, func):
+ return func
+
+import os
+import shutil
+import time
+from glob import glob
+import gradio as gr
+import torch
+from gradio_litmodel3d import LitModel3D
+
+
+def get_example_img_list():
+ print('Loading example img list ...')
+ return sorted(glob('./assets/example_images/*.png'))
+
+
+def get_example_txt_list():
+ print('Loading example txt list ...')
+ txt_list = list()
+ for line in open('./assets/example_prompts.txt'):
+ txt_list.append(line.strip())
+ return txt_list
+
+
+def gen_save_folder(max_size=6000):
+ os.makedirs(SAVE_DIR, exist_ok=True)
+ exists = set(int(_) for _ in os.listdir(SAVE_DIR) if not _.startswith("."))
+ cur_id = min(set(range(max_size)) - exists) if len(exists) < max_size else -1
+ if os.path.exists(f"{SAVE_DIR}/{(cur_id + 1) % max_size}"):
+ shutil.rmtree(f"{SAVE_DIR}/{(cur_id + 1) % max_size}")
+ print(f"remove {SAVE_DIR}/{(cur_id + 1) % max_size} success !!!")
+ save_folder = f"{SAVE_DIR}/{max(0, cur_id)}"
+ os.makedirs(save_folder, exist_ok=True)
+ print(f"mkdir {save_folder} suceess !!!")
+ return save_folder
+
+
+def export_mesh(mesh, save_folder, textured=False):
+ if textured:
+ path = os.path.join(save_folder, f'textured_mesh.glb')
+ else:
+ path = os.path.join(save_folder, f'white_mesh.glb')
+ mesh.export(path, include_normals=textured)
+ return path
+
+
+def build_model_viewer_html(save_folder, height=660, width=790, textured=False):
+ if textured:
+ related_path = f"./textured_mesh.glb"
+ template_name = './assets/modelviewer-textured-template.html'
+ output_html_path = os.path.join(save_folder, f'textured_mesh.html')
+ else:
+ related_path = f"./white_mesh.glb"
+ template_name = './assets/modelviewer-template.html'
+ output_html_path = os.path.join(save_folder, f'white_mesh.html')
+
+ with open(os.path.join(CURRENT_DIR, template_name), 'r') as f:
+ template_html = f.read()
+ obj_html = f"""
+
+
+
+
+ """
+
+ with open(output_html_path, 'w') as f:
+ f.write(template_html.replace('', obj_html))
+
+ iframe_tag = f''
+ print(f'Find html {output_html_path}, {os.path.exists(output_html_path)}')
+
+ return f"""
+
+ {iframe_tag}
+
+ """
+
+@spaces.GPU(duration=60)
+def _gen_shape(
+ caption,
+ image,
+ steps=50,
+ guidance_scale=7.5,
+ seed=1234,
+ octree_resolution=256,
+ check_box_rembg=False,
+):
+ if caption: print('prompt is', caption)
+ save_folder = gen_save_folder()
+ stats = {}
+ time_meta = {}
+ start_time_0 = time.time()
+
+ image_path = ''
+ if image is None:
+ start_time = time.time()
+ image = t2i_worker(caption)
+ time_meta['text2image'] = time.time() - start_time
+
+ image.save(os.path.join(save_folder, 'input.png'))
+
+ print(image.mode)
+ if check_box_rembg or image.mode == "RGB":
+ start_time = time.time()
+ image = rmbg_worker(image.convert('RGB'))
+ time_meta['rembg'] = time.time() - start_time
+
+ image.save(os.path.join(save_folder, 'rembg.png'))
+
+ # image to white model
+ start_time = time.time()
+
+ generator = torch.Generator()
+ generator = generator.manual_seed(int(seed))
+ mesh = i23d_worker(
+ image=image,
+ num_inference_steps=steps,
+ guidance_scale=guidance_scale,
+ generator=generator,
+ octree_resolution=octree_resolution
+ )[0]
+
+ mesh = FloaterRemover()(mesh)
+ mesh = DegenerateFaceRemover()(mesh)
+ mesh = FaceReducer()(mesh)
+
+ stats['number_of_faces'] = mesh.faces.shape[0]
+ stats['number_of_vertices'] = mesh.vertices.shape[0]
+
+ time_meta['image_to_textured_3d'] = {'total': time.time() - start_time}
+ time_meta['total'] = time.time() - start_time_0
+ stats['time'] = time_meta
+ return mesh, save_folder, image
+
+@spaces.GPU(duration=80)
+def generation_all(
+ caption,
+ image,
+ steps=50,
+ guidance_scale=7.5,
+ seed=1234,
+ octree_resolution=256,
+ check_box_rembg=False
+):
+ mesh, save_folder, image = _gen_shape(
+ caption,
+ image,
+ steps=steps,
+ guidance_scale=guidance_scale,
+ seed=seed,
+ octree_resolution=octree_resolution,
+ check_box_rembg=check_box_rembg
+ )
+ path = export_mesh(mesh, save_folder, textured=False)
+ model_viewer_html = build_model_viewer_html(save_folder, height=596, width=700)
+
+ textured_mesh = texgen_worker(mesh, image)
+ path_textured = export_mesh(textured_mesh, save_folder, textured=True)
+ model_viewer_html_textured = build_model_viewer_html(save_folder, height=596, width=700, textured=True)
+
+ return (
+ gr.update(value=path, visible=True),
+ gr.update(value=path_textured, visible=True),
+ gr.update(value=path, visible=True),
+ gr.update(value=path_textured, visible=True),
+ # model_viewer_html,
+ # model_viewer_html_textured,
+ )
+
+@spaces.GPU(duration=30)
+def shape_generation(
+ caption,
+ image,
+ steps=50,
+ guidance_scale=7.5,
+ seed=1234,
+ octree_resolution=256,
+ check_box_rembg=False,
+):
+ mesh, save_folder, image = _gen_shape(
+ caption,
+ image,
+ steps=steps,
+ guidance_scale=guidance_scale,
+ seed=seed,
+ octree_resolution=octree_resolution,
+ check_box_rembg=check_box_rembg
+ )
+
+ path = export_mesh(mesh, save_folder, textured=False)
+ model_viewer_html = build_model_viewer_html(save_folder, height=596, width=700)
+
+ return (
+ gr.update(value=path, visible=True),
+ gr.update(value=path, visible=True),
+ # model_viewer_html,
+ )
+
+
+def build_app():
+ title_html = """
+
+
+ Hunyuan3D-2: Scaling Diffusion Models for High Resolution Textured 3D Assets Generation
+
+
+ Tencent Hunyuan3D Team
+
+
+ """
+ css = """
+ .json-output {
+ height: 578px;
+ }
+ .json-output .json-holder {
+ height: 538px;
+ overflow-y: scroll;
+ }
+ """
+
+ with gr.Blocks(theme=gr.themes.Base(), css=css, title='Hunyuan-3D-2.0') as demo:
+ # if not gr.__version__.startswith('4'): gr.HTML(title_html)
+ gr.HTML(title_html)
+
+ with gr.Row():
+ with gr.Column(scale=2):
+ with gr.Tabs() as tabs_prompt:
+ with gr.Tab('Image Prompt', id='tab_img_prompt') as tab_ip:
+ image = gr.Image(label='Image', type='pil', image_mode='RGBA', height=290)
+ with gr.Row():
+ check_box_rembg = gr.Checkbox(value=True, label='Remove Background')
+
+ with gr.Tab('Text Prompt', id='tab_txt_prompt') as tab_tp:
+ caption = gr.Textbox(label='Text Prompt',
+ placeholder='HunyuanDiT will be used to generate image.',
+ info='Example: A 3D model of a cute cat, white background')
+
+ with gr.Accordion('Advanced Options', open=False):
+ num_steps = gr.Slider(maximum=50, minimum=20, value=30, step=1, label='Inference Steps')
+ octree_resolution = gr.Dropdown([256, 384, 512], value=256, label='Octree Resolution')
+ cfg_scale = gr.Number(value=5.5, label='Guidance Scale')
+ seed = gr.Slider(maximum=1e7, minimum=0, value=1234, label='Seed')
+
+ with gr.Group():
+ btn = gr.Button(value='Generate Shape Only', variant='primary')
+ btn_all = gr.Button(value='Generate Shape and Texture', variant='primary')
+
+ with gr.Group():
+ file_out = gr.File(label="File", visible=False)
+ file_out2 = gr.File(label="File", visible=False)
+
+ with gr.Column(scale=5):
+ with gr.Tabs():
+ with gr.Tab('Generated Mesh') as mesh1:
+ mesh_output1 = LitModel3D(
+ label="3D Model1",
+ exposure=10.0,
+ height=600,
+ visible=True,
+ clear_color=[0.0, 0.0, 0.0, 0.0],
+ tonemapping="aces",
+ contrast=1.0,
+ scale=1.0,
+ )
+ # html_output1 = gr.HTML(HTML_OUTPUT_PLACEHOLDER, label='Output')
+ with gr.Tab('Generated Textured Mesh') as mesh2:
+ # html_output2 = gr.HTML(HTML_OUTPUT_PLACEHOLDER, label='Output')
+ mesh_output2 = LitModel3D(
+ label="3D Model2",
+ exposure=10.0,
+ height=600,
+ visible=True,
+ clear_color=[0.0, 0.0, 0.0, 0.0],
+ tonemapping="aces",
+ contrast=1.0,
+ scale=1.0,
+ )
+
+ with gr.Column(scale=2):
+ with gr.Tabs() as gallery:
+ with gr.Tab('Image to 3D Gallery', id='tab_img_gallery') as tab_gi:
+ with gr.Row():
+ gr.Examples(examples=example_is, inputs=[image],
+ label="Image Prompts", examples_per_page=18)
+
+ with gr.Tab('Text to 3D Gallery', id='tab_txt_gallery') as tab_gt:
+ with gr.Row():
+ gr.Examples(examples=example_ts, inputs=[caption],
+ label="Text Prompts", examples_per_page=18)
+
+ tab_gi.select(fn=lambda: gr.update(selected='tab_img_prompt'), outputs=tabs_prompt)
+ tab_gt.select(fn=lambda: gr.update(selected='tab_txt_prompt'), outputs=tabs_prompt)
+
+ btn.click(
+ shape_generation,
+ inputs=[
+ caption,
+ image,
+ num_steps,
+ cfg_scale,
+ seed,
+ octree_resolution,
+ check_box_rembg,
+ ],
+ # outputs=[file_out, html_output1]
+ outputs=[file_out, mesh_output1]
+ ).then(
+ lambda: gr.update(visible=True),
+ outputs=[file_out],
+ )
+
+ btn_all.click(
+ generation_all,
+ inputs=[
+ caption,
+ image,
+ num_steps,
+ cfg_scale,
+ seed,
+ octree_resolution,
+ check_box_rembg,
+ ],
+ # outputs=[file_out, file_out2, html_output1, html_output2]
+ outputs=[file_out, file_out2, mesh_output1, mesh_output2]
+ ).then(
+ lambda: (gr.update(visible=True), gr.update(visible=True)),
+ outputs=[file_out, file_out2],
+ )
+
+ return demo
+
+
+if __name__ == '__main__':
+ import argparse
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--port', type=int, default=8080)
+ parser.add_argument('--cache-path', type=str, default='./gradio_cache')
+ args = parser.parse_args()
+
+ SAVE_DIR = args.cache_path
+ os.makedirs(SAVE_DIR, exist_ok=True)
+
+ CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
+
+ HTML_OUTPUT_PLACEHOLDER = """
+
+ """
+
+ INPUT_MESH_HTML = """
+
+
+ """
+ example_is = get_example_img_list()
+ example_ts = get_example_txt_list()
+
+ from hy3dgen.text2image import HunyuanDiTPipeline
+ from hy3dgen.shapegen import FaceReducer, FloaterRemover, DegenerateFaceRemover, \
+ Hunyuan3DDiTFlowMatchingPipeline
+ from hy3dgen.texgen import Hunyuan3DPaintPipeline
+ from hy3dgen.rembg import BackgroundRemover
+
+ rmbg_worker = BackgroundRemover()
+ t2i_worker = HunyuanDiTPipeline()
+ i23d_worker = Hunyuan3DDiTFlowMatchingPipeline.from_pretrained('tencent/Hunyuan3D-2')
+ texgen_worker = Hunyuan3DPaintPipeline.from_pretrained('tencent/Hunyuan3D-2')
+ floater_remove_worker = FloaterRemover()
+ degenerate_face_remove_worker = DegenerateFaceRemover()
+ face_reduce_worker = FaceReducer()
+
+ demo = build_app()
+ demo.queue().launch(server_name=IP,server_port=PORT)
diff --git a/hy3dgen/__init__.py b/hy3dgen/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3cde306b834c182f04339203fb5a78d846ac613b
--- /dev/null
+++ b/hy3dgen/__init__.py
@@ -0,0 +1,13 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the respective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
\ No newline at end of file
diff --git a/hy3dgen/rembg.py b/hy3dgen/rembg.py
new file mode 100644
index 0000000000000000000000000000000000000000..8704047a89d815ff435bbdaa95496426b6c3f910
--- /dev/null
+++ b/hy3dgen/rembg.py
@@ -0,0 +1,25 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the respective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+from PIL import Image
+from rembg import remove, new_session
+
+
+class BackgroundRemover():
+ def __init__(self):
+ self.session = new_session()
+
+ def __call__(self, image: Image.Image):
+ output = remove(image, session=self.session, bgcolor=[255, 255, 255, 0])
+ return output
diff --git a/hy3dgen/shapegen/__init__.py b/hy3dgen/shapegen/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f62e193ec28eb835edb4ceee330ad96a21db86b8
--- /dev/null
+++ b/hy3dgen/shapegen/__init__.py
@@ -0,0 +1,17 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the respective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+from .pipelines import Hunyuan3DDiTPipeline, Hunyuan3DDiTFlowMatchingPipeline
+from .postprocessors import FaceReducer, FloaterRemover, DegenerateFaceRemover, MeshSimplifier
+from .preprocessors import ImageProcessorV2, IMAGE_PROCESSORS, DEFAULT_IMAGEPROCESSOR
diff --git a/hy3dgen/shapegen/models/__init__.py b/hy3dgen/shapegen/models/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..03011fa6a1237b59afcff2b7a5c20d307b07c6ac
--- /dev/null
+++ b/hy3dgen/shapegen/models/__init__.py
@@ -0,0 +1,28 @@
+# Open Source Model Licensed under the Apache License Version 2.0
+# and Other Licenses of the Third-Party Components therein:
+# The below Model in this distribution may have been modified by THL A29 Limited
+# ("Tencent Modifications"). All Tencent Modifications are Copyright (C) 2024 THL A29 Limited.
+
+# Copyright (C) 2024 THL A29 Limited, a Tencent company. All rights reserved.
+# The below software and/or models in this distribution may have been
+# modified by THL A29 Limited ("Tencent Modifications").
+# All Tencent Modifications are Copyright (C) THL A29 Limited.
+
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the respective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+
+from .autoencoders import ShapeVAE
+from .conditioner import DualImageEncoder, SingleImageEncoder, DinoImageEncoder, CLIPImageEncoder
+from .denoisers import Hunyuan3DDiT
diff --git a/hy3dgen/shapegen/models/autoencoders/__init__.py b/hy3dgen/shapegen/models/autoencoders/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..40ea0875b32ceab6ab254289cfdb4367f458211d
--- /dev/null
+++ b/hy3dgen/shapegen/models/autoencoders/__init__.py
@@ -0,0 +1,20 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the respective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+from .attention_blocks import CrossAttentionDecoder
+from .attention_processors import FlashVDMCrossAttentionProcessor, CrossAttentionProcessor, \
+ FlashVDMTopMCrossAttentionProcessor
+from .model import ShapeVAE, VectsetVAE
+from .surface_extractors import SurfaceExtractors, MCSurfaceExtractor, DMCSurfaceExtractor, Latent2MeshOutput
+from .volume_decoders import HierarchicalVolumeDecoding, FlashVDMVolumeDecoding, VanillaVolumeDecoder
diff --git a/hy3dgen/shapegen/models/autoencoders/attention_blocks.py b/hy3dgen/shapegen/models/autoencoders/attention_blocks.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff4d5936f4b08777b3918cb4a6d758fd89bc2548
--- /dev/null
+++ b/hy3dgen/shapegen/models/autoencoders/attention_blocks.py
@@ -0,0 +1,493 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the respective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+
+import os
+from typing import Optional
+
+import torch
+import torch.nn as nn
+from einops import rearrange
+
+from .attention_processors import CrossAttentionProcessor
+from ...utils import logger
+
+scaled_dot_product_attention = nn.functional.scaled_dot_product_attention
+
+if os.environ.get('USE_SAGEATTN', '0') == '1':
+ try:
+ from sageattention import sageattn
+ except ImportError:
+ raise ImportError('Please install the package "sageattention" to use this USE_SAGEATTN.')
+ scaled_dot_product_attention = sageattn
+
+
+class FourierEmbedder(nn.Module):
+ """The sin/cosine positional embedding. Given an input tensor `x` of shape [n_batch, ..., c_dim], it converts
+ each feature dimension of `x[..., i]` into:
+ [
+ sin(x[..., i]),
+ sin(f_1*x[..., i]),
+ sin(f_2*x[..., i]),
+ ...
+ sin(f_N * x[..., i]),
+ cos(x[..., i]),
+ cos(f_1*x[..., i]),
+ cos(f_2*x[..., i]),
+ ...
+ cos(f_N * x[..., i]),
+ x[..., i] # only present if include_input is True.
+ ], here f_i is the frequency.
+
+ Denote the space is [0 / num_freqs, 1 / num_freqs, 2 / num_freqs, 3 / num_freqs, ..., (num_freqs - 1) / num_freqs].
+ If logspace is True, then the frequency f_i is [2^(0 / num_freqs), ..., 2^(i / num_freqs), ...];
+ Otherwise, the frequencies are linearly spaced between [1.0, 2^(num_freqs - 1)].
+
+ Args:
+ num_freqs (int): the number of frequencies, default is 6;
+ logspace (bool): If logspace is True, then the frequency f_i is [..., 2^(i / num_freqs), ...],
+ otherwise, the frequencies are linearly spaced between [1.0, 2^(num_freqs - 1)];
+ input_dim (int): the input dimension, default is 3;
+ include_input (bool): include the input tensor or not, default is True.
+
+ Attributes:
+ frequencies (torch.Tensor): If logspace is True, then the frequency f_i is [..., 2^(i / num_freqs), ...],
+ otherwise, the frequencies are linearly spaced between [1.0, 2^(num_freqs - 1);
+
+ out_dim (int): the embedding size, if include_input is True, it is input_dim * (num_freqs * 2 + 1),
+ otherwise, it is input_dim * num_freqs * 2.
+
+ """
+
+ def __init__(self,
+ num_freqs: int = 6,
+ logspace: bool = True,
+ input_dim: int = 3,
+ include_input: bool = True,
+ include_pi: bool = True) -> None:
+
+ """The initialization"""
+
+ super().__init__()
+
+ if logspace:
+ frequencies = 2.0 ** torch.arange(
+ num_freqs,
+ dtype=torch.float32
+ )
+ else:
+ frequencies = torch.linspace(
+ 1.0,
+ 2.0 ** (num_freqs - 1),
+ num_freqs,
+ dtype=torch.float32
+ )
+
+ if include_pi:
+ frequencies *= torch.pi
+
+ self.register_buffer("frequencies", frequencies, persistent=False)
+ self.include_input = include_input
+ self.num_freqs = num_freqs
+
+ self.out_dim = self.get_dims(input_dim)
+
+ def get_dims(self, input_dim):
+ temp = 1 if self.include_input or self.num_freqs == 0 else 0
+ out_dim = input_dim * (self.num_freqs * 2 + temp)
+
+ return out_dim
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """ Forward process.
+
+ Args:
+ x: tensor of shape [..., dim]
+
+ Returns:
+ embedding: an embedding of `x` of shape [..., dim * (num_freqs * 2 + temp)]
+ where temp is 1 if include_input is True and 0 otherwise.
+ """
+
+ if self.num_freqs > 0:
+ embed = (x[..., None].contiguous() * self.frequencies).view(*x.shape[:-1], -1)
+ if self.include_input:
+ return torch.cat((x, embed.sin(), embed.cos()), dim=-1)
+ else:
+ return torch.cat((embed.sin(), embed.cos()), dim=-1)
+ else:
+ return x
+
+
+class DropPath(nn.Module):
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
+ """
+
+ def __init__(self, drop_prob: float = 0., scale_by_keep: bool = True):
+ super(DropPath, self).__init__()
+ self.drop_prob = drop_prob
+ self.scale_by_keep = scale_by_keep
+
+ def forward(self, x):
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
+
+ This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
+ the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
+ changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
+ 'survival rate' as the argument.
+
+ """
+ if self.drop_prob == 0. or not self.training:
+ return x
+ keep_prob = 1 - self.drop_prob
+ shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
+ random_tensor = x.new_empty(shape).bernoulli_(keep_prob)
+ if keep_prob > 0.0 and self.scale_by_keep:
+ random_tensor.div_(keep_prob)
+ return x * random_tensor
+
+ def extra_repr(self):
+ return f'drop_prob={round(self.drop_prob, 3):0.3f}'
+
+
+class MLP(nn.Module):
+ def __init__(
+ self, *,
+ width: int,
+ expand_ratio: int = 4,
+ output_width: int = None,
+ drop_path_rate: float = 0.0
+ ):
+ super().__init__()
+ self.width = width
+ self.c_fc = nn.Linear(width, width * expand_ratio)
+ self.c_proj = nn.Linear(width * expand_ratio, output_width if output_width is not None else width)
+ self.gelu = nn.GELU()
+ self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
+
+ def forward(self, x):
+ return self.drop_path(self.c_proj(self.gelu(self.c_fc(x))))
+
+
+class QKVMultiheadCrossAttention(nn.Module):
+    """Multi-head cross attention over pre-projected queries and fused kv.
+
+    The attention math is delegated to `attn_processor`, which callers may
+    replace at runtime (e.g. with a FlashVDM top-k processor).
+    """
+
+    def __init__(
+        self,
+        *,
+        heads: int,
+        n_data: Optional[int] = None,
+        width=None,
+        qk_norm=False,
+        norm_layer=nn.LayerNorm
+    ):
+        super().__init__()
+        self.heads = heads
+        self.n_data = n_data
+        # Per-head normalization of queries/keys (identity when qk_norm is off).
+        self.q_norm = norm_layer(width // heads, elementwise_affine=True, eps=1e-6) if qk_norm else nn.Identity()
+        self.k_norm = norm_layer(width // heads, elementwise_affine=True, eps=1e-6) if qk_norm else nn.Identity()
+
+        self.attn_processor = CrossAttentionProcessor()
+
+    def forward(self, q, kv):
+        """Attend queries `q` [B, n_ctx, width] to fused `kv` [B, n_data, 2*width].
+
+        `kv` packs keys and values concatenated on the channel dim (see
+        MultiheadCrossAttention.c_kv); they are split per head before attention.
+        """
+        _, n_ctx, _ = q.shape
+        bs, n_data, width = kv.shape
+        # kv holds K and V concatenated, hence the extra factor of 2.
+        attn_ch = width // self.heads // 2
+        q = q.view(bs, n_ctx, self.heads, -1)
+        kv = kv.view(bs, n_data, self.heads, -1)
+        k, v = torch.split(kv, attn_ch, dim=-1)
+
+        q = self.q_norm(q)
+        k = self.k_norm(k)
+        # [b, n, h, d] -> [b, h, n, d] as expected by the attention processor.
+        q, k, v = map(lambda t: rearrange(t, 'b n h d -> b h n d', h=self.heads), (q, k, v))
+        out = self.attn_processor(self, q, k, v)
+        out = out.transpose(1, 2).reshape(bs, n_ctx, -1)
+        return out
+
+
+class MultiheadCrossAttention(nn.Module):
+ def __init__(
+ self,
+ *,
+ width: int,
+ heads: int,
+ qkv_bias: bool = True,
+ n_data: Optional[int] = None,
+ data_width: Optional[int] = None,
+ norm_layer=nn.LayerNorm,
+ qk_norm: bool = False,
+ kv_cache: bool = False,
+ ):
+ super().__init__()
+ self.n_data = n_data
+ self.width = width
+ self.heads = heads
+ self.data_width = width if data_width is None else data_width
+ self.c_q = nn.Linear(width, width, bias=qkv_bias)
+ self.c_kv = nn.Linear(self.data_width, width * 2, bias=qkv_bias)
+ self.c_proj = nn.Linear(width, width)
+ self.attention = QKVMultiheadCrossAttention(
+ heads=heads,
+ n_data=n_data,
+ width=width,
+ norm_layer=norm_layer,
+ qk_norm=qk_norm
+ )
+ self.kv_cache = kv_cache
+ self.data = None
+
+ def forward(self, x, data):
+ x = self.c_q(x)
+ if self.kv_cache:
+ if self.data is None:
+ self.data = self.c_kv(data)
+ logger.info('Save kv cache,this should be called only once for one mesh')
+ data = self.data
+ else:
+ data = self.c_kv(data)
+ x = self.attention(x, data)
+ x = self.c_proj(x)
+ return x
+
+
+class ResidualCrossAttentionBlock(nn.Module):
+ def __init__(
+ self,
+ *,
+ n_data: Optional[int] = None,
+ width: int,
+ heads: int,
+ mlp_expand_ratio: int = 4,
+ data_width: Optional[int] = None,
+ qkv_bias: bool = True,
+ norm_layer=nn.LayerNorm,
+ qk_norm: bool = False
+ ):
+ super().__init__()
+
+ if data_width is None:
+ data_width = width
+
+ self.attn = MultiheadCrossAttention(
+ n_data=n_data,
+ width=width,
+ heads=heads,
+ data_width=data_width,
+ qkv_bias=qkv_bias,
+ norm_layer=norm_layer,
+ qk_norm=qk_norm
+ )
+ self.ln_1 = norm_layer(width, elementwise_affine=True, eps=1e-6)
+ self.ln_2 = norm_layer(data_width, elementwise_affine=True, eps=1e-6)
+ self.ln_3 = norm_layer(width, elementwise_affine=True, eps=1e-6)
+ self.mlp = MLP(width=width, expand_ratio=mlp_expand_ratio)
+
+ def forward(self, x: torch.Tensor, data: torch.Tensor):
+ x = x + self.attn(self.ln_1(x), self.ln_2(data))
+ x = x + self.mlp(self.ln_3(x))
+ return x
+
+
+class QKVMultiheadAttention(nn.Module):
+    """Self-attention core operating on a fused query/key/value tensor."""
+
+    def __init__(
+        self,
+        *,
+        heads: int,
+        n_ctx: int,
+        width=None,
+        qk_norm=False,
+        norm_layer=nn.LayerNorm
+    ):
+        super().__init__()
+        self.heads = heads
+        self.n_ctx = n_ctx
+        # Per-head normalization of queries/keys (identity when qk_norm is off).
+        self.q_norm = norm_layer(width // heads, elementwise_affine=True, eps=1e-6) if qk_norm else nn.Identity()
+        self.k_norm = norm_layer(width // heads, elementwise_affine=True, eps=1e-6) if qk_norm else nn.Identity()
+
+    def forward(self, qkv):
+        """Split fused `qkv` [B, n_ctx, 3 * width] into heads and attend."""
+        bs, n_ctx, width = qkv.shape
+        # qkv packs Q, K and V concatenated, hence the factor of 3.
+        attn_ch = width // self.heads // 3
+        qkv = qkv.view(bs, n_ctx, self.heads, -1)
+        q, k, v = torch.split(qkv, attn_ch, dim=-1)
+
+        q = self.q_norm(q)
+        k = self.k_norm(k)
+
+        # [b, n, h, d] -> [b, h, n, d] for scaled_dot_product_attention.
+        q, k, v = map(lambda t: rearrange(t, 'b n h d -> b h n d', h=self.heads), (q, k, v))
+        out = scaled_dot_product_attention(q, k, v).transpose(1, 2).reshape(bs, n_ctx, -1)
+        return out
+
+
+class MultiheadAttention(nn.Module):
+ def __init__(
+ self,
+ *,
+ n_ctx: int,
+ width: int,
+ heads: int,
+ qkv_bias: bool,
+ norm_layer=nn.LayerNorm,
+ qk_norm: bool = False,
+ drop_path_rate: float = 0.0
+ ):
+ super().__init__()
+ self.n_ctx = n_ctx
+ self.width = width
+ self.heads = heads
+ self.c_qkv = nn.Linear(width, width * 3, bias=qkv_bias)
+ self.c_proj = nn.Linear(width, width)
+ self.attention = QKVMultiheadAttention(
+ heads=heads,
+ n_ctx=n_ctx,
+ width=width,
+ norm_layer=norm_layer,
+ qk_norm=qk_norm
+ )
+ self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
+
+ def forward(self, x):
+ x = self.c_qkv(x)
+ x = self.attention(x)
+ x = self.drop_path(self.c_proj(x))
+ return x
+
+
+class ResidualAttentionBlock(nn.Module):
+ def __init__(
+ self,
+ *,
+ n_ctx: int,
+ width: int,
+ heads: int,
+ qkv_bias: bool = True,
+ norm_layer=nn.LayerNorm,
+ qk_norm: bool = False,
+ drop_path_rate: float = 0.0,
+ ):
+ super().__init__()
+ self.attn = MultiheadAttention(
+ n_ctx=n_ctx,
+ width=width,
+ heads=heads,
+ qkv_bias=qkv_bias,
+ norm_layer=norm_layer,
+ qk_norm=qk_norm,
+ drop_path_rate=drop_path_rate
+ )
+ self.ln_1 = norm_layer(width, elementwise_affine=True, eps=1e-6)
+ self.mlp = MLP(width=width, drop_path_rate=drop_path_rate)
+ self.ln_2 = norm_layer(width, elementwise_affine=True, eps=1e-6)
+
+ def forward(self, x: torch.Tensor):
+ x = x + self.attn(self.ln_1(x))
+ x = x + self.mlp(self.ln_2(x))
+ return x
+
+
+class Transformer(nn.Module):
+ def __init__(
+ self,
+ *,
+ n_ctx: int,
+ width: int,
+ layers: int,
+ heads: int,
+ qkv_bias: bool = True,
+ norm_layer=nn.LayerNorm,
+ qk_norm: bool = False,
+ drop_path_rate: float = 0.0
+ ):
+ super().__init__()
+ self.n_ctx = n_ctx
+ self.width = width
+ self.layers = layers
+ self.resblocks = nn.ModuleList(
+ [
+ ResidualAttentionBlock(
+ n_ctx=n_ctx,
+ width=width,
+ heads=heads,
+ qkv_bias=qkv_bias,
+ norm_layer=norm_layer,
+ qk_norm=qk_norm,
+ drop_path_rate=drop_path_rate
+ )
+ for _ in range(layers)
+ ]
+ )
+
+ def forward(self, x: torch.Tensor):
+ for block in self.resblocks:
+ x = block(x)
+ return x
+
+
+class CrossAttentionDecoder(nn.Module):
+
+ def __init__(
+ self,
+ *,
+ num_latents: int,
+ out_channels: int,
+ fourier_embedder: FourierEmbedder,
+ width: int,
+ heads: int,
+ mlp_expand_ratio: int = 4,
+ downsample_ratio: int = 1,
+ enable_ln_post: bool = True,
+ qkv_bias: bool = True,
+ qk_norm: bool = False,
+ label_type: str = "binary"
+ ):
+ super().__init__()
+
+ self.enable_ln_post = enable_ln_post
+ self.fourier_embedder = fourier_embedder
+ self.downsample_ratio = downsample_ratio
+ self.query_proj = nn.Linear(self.fourier_embedder.out_dim, width)
+ if self.downsample_ratio != 1:
+ self.latents_proj = nn.Linear(width * downsample_ratio, width)
+ if self.enable_ln_post == False:
+ qk_norm = False
+ self.cross_attn_decoder = ResidualCrossAttentionBlock(
+ n_data=num_latents,
+ width=width,
+ mlp_expand_ratio=mlp_expand_ratio,
+ heads=heads,
+ qkv_bias=qkv_bias,
+ qk_norm=qk_norm
+ )
+
+ if self.enable_ln_post:
+ self.ln_post = nn.LayerNorm(width)
+ self.output_proj = nn.Linear(width, out_channels)
+ self.label_type = label_type
+ self.count = 0
+
+ def set_cross_attention_processor(self, processor):
+ self.cross_attn_decoder.attn.attention.attn_processor = processor
+
+ def set_default_cross_attention_processor(self):
+ self.cross_attn_decoder.attn.attention.attn_processor = CrossAttentionProcessor
+
+ def forward(self, queries=None, query_embeddings=None, latents=None):
+ if query_embeddings is None:
+ query_embeddings = self.query_proj(self.fourier_embedder(queries).to(latents.dtype))
+ self.count += query_embeddings.shape[1]
+ if self.downsample_ratio != 1:
+ latents = self.latents_proj(latents)
+ x = self.cross_attn_decoder(query_embeddings, latents)
+ if self.enable_ln_post:
+ x = self.ln_post(x)
+ occ = self.output_proj(x)
+ return occ
diff --git a/hy3dgen/shapegen/models/autoencoders/attention_processors.py b/hy3dgen/shapegen/models/autoencoders/attention_processors.py
new file mode 100644
index 0000000000000000000000000000000000000000..b04556008a6bcf7236ec67b303aeec033e43f4ee
--- /dev/null
+++ b/hy3dgen/shapegen/models/autoencoders/attention_processors.py
@@ -0,0 +1,96 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the respective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+import os
+
+import torch
+import torch.nn.functional as F
+
+scaled_dot_product_attention = F.scaled_dot_product_attention
+if os.environ.get('CA_USE_SAGEATTN', '0') == '1':
+ try:
+ from sageattention import sageattn
+ except ImportError:
+ raise ImportError('Please install the package "sageattention" to use this USE_SAGEATTN.')
+ scaled_dot_product_attention = sageattn
+
+
+class CrossAttentionProcessor:
+ def __call__(self, attn, q, k, v):
+ out = scaled_dot_product_attention(q, k, v)
+ return out
+
+
+class FlashVDMCrossAttentionProcessor:
+    """Cross-attention processor that attends queries to only their top-k keys.
+
+    `self.topk` is tri-state:
+      * True  -- select one top-k key subset from a strided query subsample;
+      * False -- plain full attention;
+      * (idx, counts) -- queries arrive grouped (per grid cell); a separate
+        top-k subset is selected for each group.
+    """
+
+    def __init__(self, topk=None):
+        self.topk = topk
+
+    def __call__(self, attn, q, k, v):
+        # Heuristic top-k budget based on the number of latent tokens.
+        if k.shape[-2] == 3072:
+            topk = 1024
+        elif k.shape[-2] == 512:
+            topk = 256
+        else:
+            topk = k.shape[-2] // 3
+
+        if self.topk is True:
+            # Score all keys using every 100th query, average over queries.
+            q1 = q[:, :, ::100, :]
+            sim = q1 @ k.transpose(-1, -2)
+            sim = torch.mean(sim, -2)
+            topk_ind = torch.topk(sim, dim=-1, k=topk).indices.squeeze(-2).unsqueeze(-1)
+            topk_ind = topk_ind.expand(-1, -1, -1, v.shape[-1])
+            v0 = torch.gather(v, dim=-2, index=topk_ind)
+            k0 = torch.gather(k, dim=-2, index=topk_ind)
+            out = scaled_dot_product_attention(q, k0, v0)
+        elif self.topk is False:
+            out = scaled_dot_product_attention(q, k, v)
+        else:
+            # (idx, counts): attend each contiguous query chunk to its own
+            # top-k key/value subset.
+            idx, counts = self.topk
+            start = 0
+            outs = []
+            for grid_coord, count in zip(idx, counts):
+                end = start + count
+                q_chunk = q[:, :, start:end, :]
+                k0, v0 = self.select_topkv(q_chunk, k, v, topk)
+                out = scaled_dot_product_attention(q_chunk, k0, v0)
+                outs.append(out)
+                start += count
+            out = torch.cat(outs, dim=-2)
+        # Every subsequent call falls back to full attention.
+        self.topk = False
+        return out
+
+    def select_topkv(self, q_chunk, k, v, topk):
+        """Pick the top-k keys/values for this chunk by mean dot-product score."""
+        q1 = q_chunk[:, :, ::50, :]
+        sim = q1 @ k.transpose(-1, -2)
+        sim = torch.mean(sim, -2)
+        topk_ind = torch.topk(sim, dim=-1, k=topk).indices.squeeze(-2).unsqueeze(-1)
+        topk_ind = topk_ind.expand(-1, -1, -1, v.shape[-1])
+        v0 = torch.gather(v, dim=-2, index=topk_ind)
+        k0 = torch.gather(k, dim=-2, index=topk_ind)
+        return k0, v0
+
+
+class FlashVDMTopMCrossAttentionProcessor(FlashVDMCrossAttentionProcessor):
+    """Variant selecting an adaptive number of keys ("top-M") per query chunk.
+
+    Instead of a fixed k, keeps every key that receives softmax weight above
+    1e-6 from any subsampled query, so the kept-set size varies per chunk.
+    (`topk` is accepted for interface compatibility but unused here.)
+    """
+
+    def select_topkv(self, q_chunk, k, v, topk):
+        q1 = q_chunk[:, :, ::30, :]
+        sim = q1 @ k.transpose(-1, -2)
+        # sim = sim.to(torch.float32)
+        sim = sim.softmax(-1)
+        sim = torch.mean(sim, 1)
+        # Indices of all keys that received non-negligible attention mass.
+        activated_token = torch.where(sim > 1e-6)[2]
+        index = torch.unique(activated_token, return_counts=True)[0].unsqueeze(0).unsqueeze(0).unsqueeze(-1)
+        index = index.expand(-1, v.shape[1], -1, v.shape[-1])
+        v0 = torch.gather(v, dim=-2, index=index)
+        k0 = torch.gather(k, dim=-2, index=index)
+        return k0, v0
diff --git a/hy3dgen/shapegen/models/autoencoders/model.py b/hy3dgen/shapegen/models/autoencoders/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c8eb60d68b13b9aad5331e9618dce22c864e6a8
--- /dev/null
+++ b/hy3dgen/shapegen/models/autoencoders/model.py
@@ -0,0 +1,189 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the respective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+import os
+
+import torch
+import torch.nn as nn
+import yaml
+
+from .attention_blocks import FourierEmbedder, Transformer, CrossAttentionDecoder
+from .surface_extractors import MCSurfaceExtractor, SurfaceExtractors
+from .volume_decoders import VanillaVolumeDecoder, FlashVDMVolumeDecoding, HierarchicalVolumeDecoding
+from ...utils import logger, synchronize_timer, smart_load_model
+
+
+class VectsetVAE(nn.Module):
+    """Base VAE over vector sets: checkpoint loading plus mesh extraction.
+
+    Holds a pluggable volume decoder and surface extractor. Subclasses are
+    expected to provide the network and a `geo_decoder` attribute used by
+    `latents2mesh` (see ShapeVAE below).
+    """
+
+    @classmethod
+    @synchronize_timer('VectsetVAE Model Loading')
+    def from_single_file(
+        cls,
+        ckpt_path,
+        config_path,
+        device='cuda',
+        dtype=torch.float16,
+        use_safetensors=None,
+        **kwargs,
+    ):
+        """Build a model from an explicit checkpoint + YAML config pair.
+
+        Args:
+            ckpt_path: path to a .ckpt (or .safetensors) weights file.
+            config_path: YAML file whose 'params' section holds model kwargs.
+            device, dtype: placement for the loaded model.
+            use_safetensors: if truthy, swap the '.ckpt' suffix for '.safetensors'.
+            **kwargs: overrides merged into the config's 'params'.
+
+        Raises:
+            FileNotFoundError: if the resolved checkpoint path does not exist.
+        """
+        # load config
+        with open(config_path, 'r') as f:
+            config = yaml.safe_load(f)
+
+        # load ckpt
+        if use_safetensors:
+            ckpt_path = ckpt_path.replace('.ckpt', '.safetensors')
+        if not os.path.exists(ckpt_path):
+            raise FileNotFoundError(f"Model file {ckpt_path} not found")
+
+        logger.info(f"Loading model from {ckpt_path}")
+        if use_safetensors:
+            # Deferred import: safetensors is only needed on this path.
+            import safetensors.torch
+            ckpt = safetensors.torch.load_file(ckpt_path, device='cpu')
+        else:
+            ckpt = torch.load(ckpt_path, map_location='cpu', weights_only=True)
+
+        model_kwargs = config['params']
+        model_kwargs.update(kwargs)
+
+        model = cls(**model_kwargs)
+        model.load_state_dict(ckpt)
+        model.to(device=device, dtype=dtype)
+        return model
+
+    @classmethod
+    def from_pretrained(
+        cls,
+        model_path,
+        device='cuda',
+        dtype=torch.float16,
+        use_safetensors=True,
+        variant='fp16',
+        subfolder='hunyuan3d-vae-v2-0',
+        **kwargs,
+    ):
+        """Resolve a pretrained model id/path via `smart_load_model` and load it.
+
+        Delegates the actual loading to `from_single_file` after the config and
+        checkpoint paths have been located.
+        """
+        config_path, ckpt_path = smart_load_model(
+            model_path,
+            subfolder=subfolder,
+            use_safetensors=use_safetensors,
+            variant=variant
+        )
+
+        return cls.from_single_file(
+            ckpt_path,
+            config_path,
+            device=device,
+            dtype=dtype,
+            use_safetensors=use_safetensors,
+            **kwargs
+        )
+
+    def __init__(
+        self,
+        volume_decoder=None,
+        surface_extractor=None
+    ):
+        """Store (or default) the volume decoder and surface extractor."""
+        super().__init__()
+        if volume_decoder is None:
+            volume_decoder = VanillaVolumeDecoder()
+        if surface_extractor is None:
+            surface_extractor = MCSurfaceExtractor()
+        self.volume_decoder = volume_decoder
+        self.surface_extractor = surface_extractor
+
+    def latents2mesh(self, latents: torch.FloatTensor, **kwargs):
+        """Decode latents into a logit grid, then extract meshes from it.
+
+        NOTE: relies on `self.geo_decoder`, which is defined by subclasses
+        (e.g. ShapeVAE) -- this base class does not create it.
+        """
+        with synchronize_timer('Volume decoding'):
+            grid_logits = self.volume_decoder(latents, self.geo_decoder, **kwargs)
+        with synchronize_timer('Surface extraction'):
+            outputs = self.surface_extractor(grid_logits, **kwargs)
+        return outputs
+
+    def enable_flashvdm_decoder(
+        self,
+        enabled: bool = True,
+        adaptive_kv_selection=True,
+        topk_mode='mean',
+        mc_algo='dmc',
+    ):
+        """Toggle the FlashVDM fast decoding path.
+
+        Args:
+            enabled: switch FlashVDM on; False restores the vanilla decoders.
+            adaptive_kv_selection: use adaptive top-k volume decoding when True,
+                hierarchical volume decoding otherwise.
+            topk_mode: top-k selection strategy forwarded to the decoder.
+            mc_algo: key into the SurfaceExtractors registry.
+
+        Raises:
+            ValueError: if `mc_algo` is not a registered surface extractor.
+        """
+        if enabled:
+            if adaptive_kv_selection:
+                self.volume_decoder = FlashVDMVolumeDecoding(topk_mode)
+            else:
+                self.volume_decoder = HierarchicalVolumeDecoding()
+            if mc_algo not in SurfaceExtractors.keys():
+                raise ValueError(f'Unsupported mc_algo {mc_algo}, available: {list(SurfaceExtractors.keys())}')
+            self.surface_extractor = SurfaceExtractors[mc_algo]()
+        else:
+            self.volume_decoder = VanillaVolumeDecoder()
+            self.surface_extractor = MCSurfaceExtractor()
+
+
+class ShapeVAE(VectsetVAE):
+    """Shape VAE decoder: latents -> transformer features, plus a geo head.
+
+    `forward` runs only the post-KL projection and transformer; occupancy
+    decoding is done by `geo_decoder` via the inherited `latents2mesh`.
+    """
+
+    def __init__(
+        self,
+        *,
+        num_latents: int,
+        embed_dim: int,
+        width: int,
+        heads: int,
+        num_decoder_layers: int,
+        geo_decoder_downsample_ratio: int = 1,
+        geo_decoder_mlp_expand_ratio: int = 4,
+        geo_decoder_ln_post: bool = True,
+        num_freqs: int = 8,
+        include_pi: bool = True,
+        qkv_bias: bool = True,
+        qk_norm: bool = False,
+        label_type: str = "binary",
+        drop_path_rate: float = 0.0,
+        scale_factor: float = 1.0,
+    ):
+        super().__init__()
+        self.geo_decoder_ln_post = geo_decoder_ln_post
+
+        self.fourier_embedder = FourierEmbedder(num_freqs=num_freqs, include_pi=include_pi)
+
+        # Project KL-sampled latents (embed_dim) up to the transformer width.
+        self.post_kl = nn.Linear(embed_dim, width)
+
+        self.transformer = Transformer(
+            n_ctx=num_latents,
+            width=width,
+            layers=num_decoder_layers,
+            heads=heads,
+            qkv_bias=qkv_bias,
+            qk_norm=qk_norm,
+            drop_path_rate=drop_path_rate
+        )
+
+        # Cross-attention head used by latents2mesh (width/heads shrink with
+        # the downsample ratio).
+        self.geo_decoder = CrossAttentionDecoder(
+            fourier_embedder=self.fourier_embedder,
+            out_channels=1,
+            num_latents=num_latents,
+            mlp_expand_ratio=geo_decoder_mlp_expand_ratio,
+            downsample_ratio=geo_decoder_downsample_ratio,
+            enable_ln_post=self.geo_decoder_ln_post,
+            width=width // geo_decoder_downsample_ratio,
+            heads=heads // geo_decoder_downsample_ratio,
+            qkv_bias=qkv_bias,
+            qk_norm=qk_norm,
+            label_type=label_type,
+        )
+
+        self.scale_factor = scale_factor
+        self.latent_shape = (num_latents, embed_dim)
+
+    def forward(self, latents):
+        """Project latents and run the transformer; returns decoded features."""
+        latents = self.post_kl(latents)
+        latents = self.transformer(latents)
+        return latents
diff --git a/hy3dgen/shapegen/models/autoencoders/surface_extractors.py b/hy3dgen/shapegen/models/autoencoders/surface_extractors.py
new file mode 100644
index 0000000000000000000000000000000000000000..93ae12b9abca854a7dc45c3d6086f8783261f12f
--- /dev/null
+++ b/hy3dgen/shapegen/models/autoencoders/surface_extractors.py
@@ -0,0 +1,100 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the respective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+from typing import Union, Tuple, List
+
+import numpy as np
+import torch
+from skimage import measure
+
+
+class Latent2MeshOutput:
+
+ def __init__(self, mesh_v=None, mesh_f=None):
+ self.mesh_v = mesh_v
+ self.mesh_f = mesh_f
+
+
+def center_vertices(vertices):
+ """Translate the vertices so that bounding box is centered at zero."""
+ vert_min = vertices.min(dim=0)[0]
+ vert_max = vertices.max(dim=0)[0]
+ vert_center = 0.5 * (vert_min + vert_max)
+ return vertices - vert_center
+
+
+class SurfaceExtractor:
+ def _compute_box_stat(self, bounds: Union[Tuple[float], List[float], float], octree_resolution: int):
+ if isinstance(bounds, float):
+ bounds = [-bounds, -bounds, -bounds, bounds, bounds, bounds]
+
+ bbox_min, bbox_max = np.array(bounds[0:3]), np.array(bounds[3:6])
+ bbox_size = bbox_max - bbox_min
+ grid_size = [int(octree_resolution) + 1, int(octree_resolution) + 1, int(octree_resolution) + 1]
+ return grid_size, bbox_min, bbox_size
+
+ def run(self, *args, **kwargs):
+ return NotImplementedError
+
+ def __call__(self, grid_logits, **kwargs):
+ outputs = []
+ for i in range(grid_logits.shape[0]):
+ try:
+ vertices, faces = self.run(grid_logits[i], **kwargs)
+ vertices = vertices.astype(np.float32)
+ faces = np.ascontiguousarray(faces)
+ outputs.append(Latent2MeshOutput(mesh_v=vertices, mesh_f=faces))
+
+ except Exception:
+ import traceback
+ traceback.print_exc()
+ outputs.append(None)
+
+ return outputs
+
+
+class MCSurfaceExtractor(SurfaceExtractor):
+    """Classic marching cubes (scikit-image, lewiner) surface extraction."""
+
+    def run(self, grid_logit, *, mc_level, bounds, octree_resolution, **kwargs):
+        """Extract the iso-surface at `mc_level` and map it to world coords."""
+        vertices, faces, normals, _ = measure.marching_cubes(
+            grid_logit.cpu().numpy(),
+            mc_level,
+            method="lewiner"
+        )
+        grid_size, bbox_min, bbox_size = self._compute_box_stat(bounds, octree_resolution)
+        # Map voxel coordinates into the world-space bounding box.
+        vertices = vertices / grid_size * bbox_size + bbox_min
+        return vertices, faces
+
+
+class DMCSurfaceExtractor(SurfaceExtractor):
+ def run(self, grid_logit, *, octree_resolution, **kwargs):
+ device = grid_logit.device
+ if not hasattr(self, 'dmc'):
+ try:
+ from diso import DiffDMC
+ except:
+ raise ImportError("Please install diso via `pip install diso`, or set mc_algo to 'mc'")
+ self.dmc = DiffDMC(dtype=torch.float32).to(device)
+ sdf = -grid_logit / octree_resolution
+ sdf = sdf.to(torch.float32).contiguous()
+ verts, faces = self.dmc(sdf, deform=None, return_quads=False, normalize=True)
+ verts = center_vertices(verts)
+ vertices = verts.detach().cpu().numpy()
+ faces = faces.detach().cpu().numpy()[:, ::-1]
+ return vertices, faces
+
+
+# Registry mapping `mc_algo` keys to surface-extractor classes
+# (see VectsetVAE.enable_flashvdm_decoder).
+SurfaceExtractors = {
+    'mc': MCSurfaceExtractor,
+    'dmc': DMCSurfaceExtractor,
+}
diff --git a/hy3dgen/shapegen/models/autoencoders/volume_decoders.py b/hy3dgen/shapegen/models/autoencoders/volume_decoders.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c43925bc22cc277e50b2b49a8831aa76118f264
--- /dev/null
+++ b/hy3dgen/shapegen/models/autoencoders/volume_decoders.py
@@ -0,0 +1,435 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the respective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+from typing import Union, Tuple, List, Callable
+
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from einops import repeat
+from tqdm import tqdm
+
+from .attention_blocks import CrossAttentionDecoder
+from .attention_processors import FlashVDMCrossAttentionProcessor, FlashVDMTopMCrossAttentionProcessor
+from ...utils import logger
+
+
def extract_near_surface_volume_fn(input_tensor: torch.Tensor, alpha: float):
    """Mark voxels that lie near the implicit surface of a dense scalar grid.

    A voxel is flagged when, after adding the iso-level offset ``alpha``, its
    sign differs from the sign of at least one of its six face neighbours
    (borders use replicated edge values).

    Args:
        input_tensor: dense scalar field of shape (D, D, D).
        alpha: iso-level offset added before the sign comparison.

    Returns:
        Int32 tensor of shape (D, D, D): 1 for valid near-surface voxels, else 0.
    """
    # Apply the iso-level offset; values at or below -9000 are invalid fill.
    val = input_tensor + alpha
    valid_mask = val > -9000

    def get_neighbor(t, shift, axis):
        """Shift `t` by `shift` voxels along `axis`, replicating at borders."""
        if shift == 0:
            return t.clone()

        # Padding amounts in [x-lo, x-hi, y-lo, y-hi, z-lo, z-hi] order;
        # reversed below to match F.pad's last-dimension-first convention.
        pad_dims = [0, 0, 0, 0, 0, 0]
        if axis == 0:
            pad_idx = 0 if shift > 0 else 1
            pad_dims[pad_idx] = abs(shift)
        elif axis == 1:
            pad_idx = 2 if shift > 0 else 3
            pad_dims[pad_idx] = abs(shift)
        elif axis == 2:
            pad_idx = 4 if shift > 0 else 5
            pad_dims[pad_idx] = abs(shift)

        # F.pad with mode='replicate' requires batch and channel dims.
        padded = F.pad(t.unsqueeze(0).unsqueeze(0), pad_dims[::-1], mode='replicate')

        # Slice back down to the original (D, D, D) shape after the shift.
        slice_dims = [slice(None)] * 3
        if axis == 0:
            slice_dims[0] = slice(shift, None) if shift > 0 else slice(None, shift)
        elif axis == 1:
            slice_dims[1] = slice(shift, None) if shift > 0 else slice(None, shift)
        elif axis == 2:
            slice_dims[2] = slice(shift, None) if shift > 0 else slice(None, shift)

        padded = padded.squeeze(0).squeeze(0)
        # Index with a tuple: indexing with a plain list of slices is
        # deprecated advanced indexing.
        return padded[tuple(slice_dims)]

    # Six face neighbours (one shift per grid dimension).
    left = get_neighbor(val, 1, axis=0)
    right = get_neighbor(val, -1, axis=0)
    back = get_neighbor(val, 1, axis=1)
    front = get_neighbor(val, -1, axis=1)
    down = get_neighbor(val, 1, axis=2)
    up = get_neighbor(val, -1, axis=2)

    def safe_where(neighbor):
        # Invalid neighbours fall back to the centre value so they never
        # trigger a spurious sign change.
        return torch.where(neighbor > -9000, neighbor, val)

    left = safe_where(left)
    right = safe_where(right)
    back = safe_where(back)
    front = safe_where(front)
    down = safe_where(down)
    up = safe_where(up)

    # Compare signs in float32 for consistent precision.
    sign = torch.sign(val.to(torch.float32))
    neighbors_sign = torch.stack([
        torch.sign(left.to(torch.float32)),
        torch.sign(right.to(torch.float32)),
        torch.sign(back.to(torch.float32)),
        torch.sign(front.to(torch.float32)),
        torch.sign(down.to(torch.float32)),
        torch.sign(up.to(torch.float32)),
    ], dim=0)

    # Near-surface = at least one neighbour disagrees with the centre sign.
    same_sign = torch.all(neighbors_sign == sign, dim=0)
    mask = (~same_sign).to(torch.int32)
    return mask * valid_mask.to(torch.int32)
+
+
def generate_dense_grid_points(
    bbox_min: np.ndarray,
    bbox_max: np.ndarray,
    octree_resolution: int,
    indexing: str = "ij",
):
    """Sample a dense (res+1)^3 lattice of 3D points spanning the bounding box.

    Returns:
        xyz: float32 array of shape (res+1, res+1, res+1, 3) of point coords.
        grid_size: [res+1, res+1, res+1].
        length: per-axis extent of the box (bbox_max - bbox_min).
    """
    points_per_axis = int(octree_resolution) + 1
    axes = [
        np.linspace(bbox_min[d], bbox_max[d], points_per_axis, dtype=np.float32)
        for d in range(3)
    ]
    mesh = np.meshgrid(*axes, indexing=indexing)
    xyz = np.stack(mesh, axis=-1)
    grid_size = [points_per_axis, points_per_axis, points_per_axis]
    return xyz, grid_size, bbox_max - bbox_min
+
+
class VanillaVolumeDecoder:
    """Decode latents into a dense logit volume by brute-force chunked queries."""

    @torch.no_grad()
    def __call__(
        self,
        latents: torch.FloatTensor,
        geo_decoder: Callable,
        bounds: Union[Tuple[float], List[float], float] = 1.01,
        num_chunks: int = 10000,
        octree_resolution: int = None,
        enable_pbar: bool = True,
        **kwargs,
    ):
        """Query `geo_decoder` on every lattice point inside `bounds`.

        Returns a float tensor of shape (batch, res+1, res+1, res+1).
        """
        device, dtype = latents.device, latents.dtype
        batch_size = latents.shape[0]

        # 1. Build the dense grid of query points covering the bounding box.
        if isinstance(bounds, float):
            bounds = [-bounds, -bounds, -bounds, bounds, bounds, bounds]
        bbox_min, bbox_max = np.array(bounds[0:3]), np.array(bounds[3:6])
        xyz_samples, grid_size, length = generate_dense_grid_points(
            bbox_min=bbox_min,
            bbox_max=bbox_max,
            octree_resolution=octree_resolution,
            indexing="ij",
        )
        all_queries = torch.from_numpy(xyz_samples).to(device, dtype=dtype).contiguous().reshape(-1, 3)

        # 2. Decode in chunks of `num_chunks` points to bound peak memory.
        chunk_logits = []
        for begin in tqdm(range(0, all_queries.shape[0], num_chunks),
                          desc=f"Volume Decoding", disable=not enable_pbar):
            chunk = all_queries[begin: begin + num_chunks, :]
            chunk = repeat(chunk, "p c -> b p c", b=batch_size)
            chunk_logits.append(geo_decoder(queries=chunk, latents=latents))

        grid_logits = torch.cat(chunk_logits, dim=1)
        return grid_logits.view((batch_size, *grid_size)).float()
+
+
class HierarchicalVolumeDecoding:
    """Coarse-to-fine volume decoding.

    Decodes a coarse logit grid first, then at each finer level re-queries the
    decoder only near the surface (sign changes / small-magnitude logits),
    filling never-queried cells with a -10000 sentinel that is converted to
    NaN at the end.
    """

    @torch.no_grad()
    def __call__(
        self,
        latents: torch.FloatTensor,
        geo_decoder: Callable,
        bounds: Union[Tuple[float], List[float], float] = 1.01,
        num_chunks: int = 10000,
        mc_level: float = 0.0,
        octree_resolution: int = None,
        min_resolution: int = 63,
        enable_pbar: bool = True,
        **kwargs,
    ):
        device = latents.device
        dtype = latents.dtype

        # Build the resolution ladder: halve until below min_resolution, then
        # decode from coarsest to finest.
        resolutions = []
        if octree_resolution < min_resolution:
            resolutions.append(octree_resolution)
        while octree_resolution >= min_resolution:
            resolutions.append(octree_resolution)
            octree_resolution = octree_resolution // 2
        resolutions.reverse()

        # 1. generate query points
        if isinstance(bounds, float):
            bounds = [-bounds, -bounds, -bounds, bounds, bounds, bounds]
        bbox_min = np.array(bounds[0:3])
        bbox_max = np.array(bounds[3:6])
        bbox_size = bbox_max - bbox_min

        xyz_samples, grid_size, length = generate_dense_grid_points(
            bbox_min=bbox_min,
            bbox_max=bbox_max,
            octree_resolution=resolutions[0],
            indexing="ij"
        )

        # 3x3x3 all-ones conv used as a binary dilation of candidate masks.
        dilate = nn.Conv3d(1, 1, 3, padding=1, bias=False, device=device, dtype=dtype)
        dilate.weight = torch.nn.Parameter(torch.ones(dilate.weight.shape, dtype=dtype, device=device))

        grid_size = np.array(grid_size)
        xyz_samples = torch.from_numpy(xyz_samples).to(device, dtype=dtype).contiguous().reshape(-1, 3)

        # 2. latents to 3d volume (dense decode at the coarsest resolution)
        batch_logits = []
        batch_size = latents.shape[0]
        for start in tqdm(range(0, xyz_samples.shape[0], num_chunks),
                          desc=f"Hierarchical Volume Decoding [r{resolutions[0] + 1}]"):
            queries = xyz_samples[start: start + num_chunks, :]
            batch_queries = repeat(queries, "p c -> b p c", b=batch_size)
            logits = geo_decoder(queries=batch_queries, latents=latents)
            batch_logits.append(logits)

        grid_logits = torch.cat(batch_logits, dim=1).view((batch_size, grid_size[0], grid_size[1], grid_size[2]))

        # Refine level by level, querying only near-surface voxels.
        # NOTE(review): the squeeze(0)/[0] indexing below assumes batch_size == 1 — confirm callers.
        for octree_depth_now in resolutions[1:]:
            grid_size = np.array([octree_depth_now + 1] * 3)
            resolution = bbox_size / octree_depth_now
            next_index = torch.zeros(tuple(grid_size), dtype=dtype, device=device)
            next_logits = torch.full(next_index.shape, -10000., dtype=dtype, device=device)
            # Candidates: sign changes at the iso level plus small-magnitude logits.
            curr_points = extract_near_surface_volume_fn(grid_logits.squeeze(0), mc_level)
            curr_points += grid_logits.squeeze(0).abs() < 0.95

            # Dilate the candidate set except at the final level.
            if octree_depth_now == resolutions[-1]:
                expand_num = 0
            else:
                expand_num = 1
            for i in range(expand_num):
                curr_points = dilate(curr_points.unsqueeze(0).to(dtype)).squeeze(0)
            (cidx_x, cidx_y, cidx_z) = torch.where(curr_points > 0)
            # Coarse voxel (i,j,k) maps to (2i,2j,2k) on the doubled-resolution grid.
            next_index[cidx_x * 2, cidx_y * 2, cidx_z * 2] = 1
            for i in range(2 - expand_num):
                next_index = dilate(next_index.unsqueeze(0)).squeeze(0)
            nidx = torch.where(next_index > 0)

            # Convert selected grid indices back to world coordinates.
            next_points = torch.stack(nidx, dim=1)
            next_points = (next_points * torch.tensor(resolution, dtype=next_points.dtype, device=device) +
                           torch.tensor(bbox_min, dtype=next_points.dtype, device=device))
            batch_logits = []
            for start in tqdm(range(0, next_points.shape[0], num_chunks),
                              desc=f"Hierarchical Volume Decoding [r{octree_depth_now + 1}]"):
                queries = next_points[start: start + num_chunks, :]
                batch_queries = repeat(queries, "p c -> b p c", b=batch_size)
                logits = geo_decoder(queries=batch_queries.to(latents.dtype), latents=latents)
                batch_logits.append(logits)
            grid_logits = torch.cat(batch_logits, dim=1)
            next_logits[nidx] = grid_logits[0, ..., 0]
            grid_logits = next_logits.unsqueeze(0)
        # Never-queried sentinel cells become NaN for downstream marching cubes.
        grid_logits[grid_logits == -10000.] = float('nan')

        return grid_logits
+
+
class FlashVDMVolumeDecoding:
    """Hierarchical volume decoding accelerated with FlashVDM cross-attention.

    Queries are grouped into spatially-local cells so the attention processor
    can restrict each group to its most relevant latent tokens.
    """

    def __init__(self, topk_mode='mean'):
        # Two processor variants: 'mean' pooling vs. top-M merging.
        if topk_mode not in ['mean', 'merge']:
            raise ValueError(f'Unsupported topk_mode {topk_mode}, available: {["mean", "merge"]}')

        if topk_mode == 'mean':
            self.processor = FlashVDMCrossAttentionProcessor()
        else:
            self.processor = FlashVDMTopMCrossAttentionProcessor()

    @torch.no_grad()
    def __call__(
        self,
        latents: torch.FloatTensor,
        geo_decoder: CrossAttentionDecoder,
        bounds: Union[Tuple[float], List[float], float] = 1.01,
        num_chunks: int = 10000,
        mc_level: float = 0.0,
        octree_resolution: int = None,
        min_resolution: int = 63,
        mini_grid_num: int = 4,
        enable_pbar: bool = True,
        **kwargs,
    ):
        # Install the FlashVDM processor on the decoder's cross-attention.
        processor = self.processor
        geo_decoder.set_cross_attention_processor(processor)

        device = latents.device
        dtype = latents.dtype

        # Resolution ladder: halve down to min_resolution, decode coarse->fine.
        resolutions = []
        if octree_resolution < min_resolution:
            resolutions.append(octree_resolution)
        while octree_resolution >= min_resolution:
            resolutions.append(octree_resolution)
            octree_resolution = octree_resolution // 2
        resolutions.reverse()
        # Snap the coarsest level so its (res+1) points split evenly into mini grids.
        resolutions[0] = round(resolutions[0] / mini_grid_num) * mini_grid_num - 1
        for i, resolution in enumerate(resolutions[1:]):
            resolutions[i + 1] = resolutions[0] * 2 ** (i + 1)

        logger.info(f"FlashVDMVolumeDecoding Resolution: {resolutions}")

        # 1. generate query points
        if isinstance(bounds, float):
            bounds = [-bounds, -bounds, -bounds, bounds, bounds, bounds]
        bbox_min = np.array(bounds[0:3])
        bbox_max = np.array(bounds[3:6])
        bbox_size = bbox_max - bbox_min

        xyz_samples, grid_size, length = generate_dense_grid_points(
            bbox_min=bbox_min,
            bbox_max=bbox_max,
            octree_resolution=resolutions[0],
            indexing="ij"
        )

        # 3x3x3 all-ones conv used as a binary dilation of candidate masks.
        dilate = nn.Conv3d(1, 1, 3, padding=1, bias=False, device=device, dtype=dtype)
        dilate.weight = torch.nn.Parameter(torch.ones(dilate.weight.shape, dtype=dtype, device=device))

        grid_size = np.array(grid_size)

        # 2. latents to 3d volume
        # Partition the dense grid into mini_grid_num^3 spatially-local sub-grids
        # so each decoder call covers a compact region.
        xyz_samples = torch.from_numpy(xyz_samples).to(device, dtype=dtype)
        batch_size = latents.shape[0]
        mini_grid_size = xyz_samples.shape[0] // mini_grid_num
        xyz_samples = xyz_samples.view(
            mini_grid_num, mini_grid_size,
            mini_grid_num, mini_grid_size,
            mini_grid_num, mini_grid_size, 3
        ).permute(
            0, 2, 4, 1, 3, 5, 6
        ).reshape(
            -1, mini_grid_size * mini_grid_size * mini_grid_size, 3
        )
        batch_logits = []
        num_batchs = max(num_chunks // xyz_samples.shape[1], 1)
        # NOTE(review): latents.squeeze(0) below assumes batch_size == 1 — confirm callers.
        for start in tqdm(range(0, xyz_samples.shape[0], num_batchs),
                          desc=f"FlashVDM Volume Decoding", disable=not enable_pbar):
            queries = xyz_samples[start: start + num_batchs, :]
            batch = queries.shape[0]
            batch_latents = repeat(latents.squeeze(0), "p c -> b p c", b=batch)
            processor.topk = True
            logits = geo_decoder(queries=queries, latents=batch_latents)
            batch_logits.append(logits)
        # Undo the mini-grid partition to recover the dense logit volume.
        grid_logits = torch.cat(batch_logits, dim=0).reshape(
            mini_grid_num, mini_grid_num, mini_grid_num,
            mini_grid_size, mini_grid_size,
            mini_grid_size
        ).permute(0, 3, 1, 4, 2, 5).contiguous().view(
            (batch_size, grid_size[0], grid_size[1], grid_size[2])
        )

        # 3. Coarse-to-fine refinement: only re-query voxels near the surface.
        for octree_depth_now in resolutions[1:]:
            grid_size = np.array([octree_depth_now + 1] * 3)
            resolution = bbox_size / octree_depth_now
            next_index = torch.zeros(tuple(grid_size), dtype=dtype, device=device)
            next_logits = torch.full(next_index.shape, -10000., dtype=dtype, device=device)
            # Candidates: sign changes at the iso level plus small-magnitude logits.
            curr_points = extract_near_surface_volume_fn(grid_logits.squeeze(0), mc_level)
            curr_points += grid_logits.squeeze(0).abs() < 0.95

            # Dilate the candidate set except at the final level.
            if octree_depth_now == resolutions[-1]:
                expand_num = 0
            else:
                expand_num = 1
            for i in range(expand_num):
                curr_points = dilate(curr_points.unsqueeze(0).to(dtype)).squeeze(0)
            (cidx_x, cidx_y, cidx_z) = torch.where(curr_points > 0)

            # Coarse voxel (i,j,k) maps to (2i,2j,2k) on the doubled grid.
            next_index[cidx_x * 2, cidx_y * 2, cidx_z * 2] = 1
            for i in range(2 - expand_num):
                next_index = dilate(next_index.unsqueeze(0)).squeeze(0)
            nidx = torch.where(next_index > 0)

            # Convert selected grid indices back to world coordinates.
            next_points = torch.stack(nidx, dim=1)
            next_points = (next_points * torch.tensor(resolution, dtype=torch.float32, device=device) +
                           torch.tensor(bbox_min, dtype=torch.float32, device=device))

            # Bucket the points into a query_grid_num^3 spatial hash and sort by
            # cell id so each decoder call sees spatially-coherent queries.
            query_grid_num = 6
            min_val = next_points.min(axis=0).values
            max_val = next_points.max(axis=0).values
            vol_queries_index = (next_points - min_val) / (max_val - min_val) * (query_grid_num - 0.001)
            index = torch.floor(vol_queries_index).long()
            index = index[..., 0] * (query_grid_num ** 2) + index[..., 1] * query_grid_num + index[..., 2]
            index = index.sort()
            next_points = next_points[index.indices].unsqueeze(0).contiguous()
            unique_values = torch.unique(index.values, return_counts=True)
            grid_logits = torch.zeros((next_points.shape[1]), dtype=latents.dtype, device=latents.device)
            input_grid = [[], []]
            logits_grid_list = []
            start_num = 0
            sum_num = 0
            # Greedily pack consecutive cells into chunks of at most num_chunks points.
            for grid_index, count in zip(unique_values[0].cpu().tolist(), unique_values[1].cpu().tolist()):
                if sum_num + count < num_chunks or sum_num == 0:
                    sum_num += count
                    input_grid[0].append(grid_index)
                    input_grid[1].append(count)
                else:
                    processor.topk = input_grid
                    logits_grid = geo_decoder(queries=next_points[:, start_num:start_num + sum_num], latents=latents)
                    start_num = start_num + sum_num
                    logits_grid_list.append(logits_grid)
                    input_grid = [[grid_index], [count]]
                    sum_num = count
            # Flush the final partially-filled chunk.
            if sum_num > 0:
                processor.topk = input_grid
                logits_grid = geo_decoder(queries=next_points[:, start_num:start_num + sum_num], latents=latents)
                logits_grid_list.append(logits_grid)
            logits_grid = torch.cat(logits_grid_list, dim=1)
            # Scatter the sorted logits back to the pre-sort point order.
            grid_logits[index.indices] = logits_grid.squeeze(0).squeeze(-1)
            next_logits[nidx] = grid_logits
            grid_logits = next_logits.unsqueeze(0)

        # Never-queried sentinel cells become NaN for downstream marching cubes.
        grid_logits[grid_logits == -10000.] = float('nan')

        return grid_logits
diff --git a/hy3dgen/shapegen/models/conditioner.py b/hy3dgen/shapegen/models/conditioner.py
new file mode 100644
index 0000000000000000000000000000000000000000..6b79b9b5260e570c0c804899a97f5ecdb0a4b538
--- /dev/null
+++ b/hy3dgen/shapegen/models/conditioner.py
@@ -0,0 +1,257 @@
+# Open Source Model Licensed under the Apache License Version 2.0
+# and Other Licenses of the Third-Party Components therein:
+# The below Model in this distribution may have been modified by THL A29 Limited
+# ("Tencent Modifications"). All Tencent Modifications are Copyright (C) 2024 THL A29 Limited.
+
+# Copyright (C) 2024 THL A29 Limited, a Tencent company. All rights reserved.
+# The below software and/or models in this distribution may have been
+# modified by THL A29 Limited ("Tencent Modifications").
+# All Tencent Modifications are Copyright (C) THL A29 Limited.
+
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the respective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+import numpy as np
+import torch
+import torch.nn as nn
+from torchvision import transforms
+from transformers import (
+ CLIPVisionModelWithProjection,
+ CLIPVisionConfig,
+ Dinov2Model,
+ Dinov2Config,
+)
+
+
def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
    """Classic 1-D sin/cos positional embedding.

    Args:
        embed_dim: output dimension per position (must be even).
        pos: array of positions; flattened to shape (M,).

    Returns:
        (M, embed_dim) float64 array: first half sine, second half cosine.
    """
    assert embed_dim % 2 == 0
    half = embed_dim // 2
    # Geometric frequency ladder: 10000^(-2i/embed_dim) for i in [0, half).
    freqs = 1. / 10000 ** (np.arange(half, dtype=np.float64) / (embed_dim / 2.))
    angles = np.outer(pos.reshape(-1), freqs)  # (M, half)
    return np.concatenate([np.sin(angles), np.cos(angles)], axis=1)
+
+
class ImageEncoder(nn.Module):
    """Frozen vision backbone producing patch-token features for conditioning.

    Subclasses provide MODEL_CLASS / MODEL_CONFIG_CLASS and the normalization
    `mean` / `std` class attributes.
    """

    def __init__(
        self,
        version=None,
        config=None,
        use_cls_token=True,
        image_size=224,
        **kwargs,
    ):
        super().__init__()

        # Build from a pretrained checkpoint (`version`) or a raw config dict.
        if config is None:
            self.model = self.MODEL_CLASS.from_pretrained(version)
        else:
            self.model = self.MODEL_CLASS(self.MODEL_CONFIG_CLASS.from_dict(config))
        self.model.eval()
        self.model.requires_grad_(False)

        self.use_cls_token = use_cls_token
        # Patch size of 14 is assumed for the ViT backbone — TODO confirm for
        # any backbone with a different patch size.
        self.size = image_size // 14
        self.num_patches = (image_size // 14) ** 2
        if self.use_cls_token:
            self.num_patches += 1

        self.transform = transforms.Compose(
            [
                transforms.Resize(image_size, transforms.InterpolationMode.BILINEAR, antialias=True),
                transforms.CenterCrop(image_size),
                transforms.Normalize(
                    mean=self.mean,
                    std=self.std,
                ),
            ]
        )

    def forward(self, image, mask=None, value_range=(-1, 1), **kwargs):
        """Encode `image` to patch tokens; rescales from `value_range` to [0, 1]."""
        if value_range is not None:
            low, high = value_range
            image = (image - low) / (high - low)

        image = image.to(self.model.device, dtype=self.model.dtype)
        outputs = self.model(self.transform(image))

        tokens = outputs.last_hidden_state
        if not self.use_cls_token:
            # Drop the leading CLS token from the sequence.
            tokens = tokens[:, 1:, :]
        return tokens

    def unconditional_embedding(self, batch_size, **kwargs):
        """Zero embedding matching forward's output shape (classifier-free guidance)."""
        ref_param = next(self.model.parameters())
        return torch.zeros(
            batch_size,
            self.num_patches,
            self.model.config.hidden_size,
            device=ref_param.device,
            dtype=ref_param.dtype,
        )
+
+
class CLIPImageEncoder(ImageEncoder):
    # CLIP vision backbone with projection head; mean/std are the standard
    # CLIP preprocessing normalization constants.
    MODEL_CLASS = CLIPVisionModelWithProjection
    MODEL_CONFIG_CLASS = CLIPVisionConfig
    mean = [0.48145466, 0.4578275, 0.40821073]
    std = [0.26862954, 0.26130258, 0.27577711]
+
+
class DinoImageEncoder(ImageEncoder):
    # DINOv2 backbone; mean/std are the standard ImageNet normalization constants.
    MODEL_CLASS = Dinov2Model
    MODEL_CONFIG_CLASS = Dinov2Config
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
+
+
class DinoImageEncoderMV(DinoImageEncoder):
    """Multi-view DINOv2 encoder.

    Adds a fixed sinusoidal per-view embedding to the patch features so the
    downstream decoder can distinguish the views.

    Args:
        version: pretrained checkpoint id/path (used when `config` is None).
        config: model config dict used to build the backbone from scratch.
        use_cls_token: keep the CLS token in the patch sequence.
        image_size: square input resolution fed to the backbone.
        view_num: maximum number of views covered by the embedding table.
    """

    def __init__(
        self,
        version=None,
        config=None,
        use_cls_token=True,
        image_size=224,
        view_num=4,
        **kwargs,
    ):
        super().__init__(version, config, use_cls_token, image_size, **kwargs)
        self.view_num = view_num
        # (Removed a no-op `self.num_patches = self.num_patches` self-assignment.)
        # Build a (1, view_num, num_patches, hidden) table: one sinusoidal
        # embedding per view, repeated across patch positions. Kept as a plain
        # tensor (not a registered buffer); forward() moves it to the right
        # device/dtype on every call.
        pos = np.arange(self.view_num, dtype=np.float32)
        view_embedding = torch.from_numpy(
            get_1d_sincos_pos_embed_from_grid(self.model.config.hidden_size, pos)).float()
        view_embedding = view_embedding.unsqueeze(1).repeat(1, self.num_patches, 1)
        self.view_embed = view_embedding.unsqueeze(0)

    def forward(self, image, mask=None, value_range=(-1, 1), view_idxs=None):
        """Encode a batch of multi-view images.

        Args:
            image: (bs, num_views, C, H, W) tensor; rescaled from `value_range` to [0, 1].
            mask: unused; kept for interface compatibility.
            value_range: input range to rescale from; None skips rescaling.
            view_idxs: optional per-sample lists of view indices selecting rows
                of the view-embedding table.

        Returns:
            (bs, num_views * seq_len, hidden) features with view embeddings added.
        """
        if value_range is not None:
            low, high = value_range
            image = (image - low) / (high - low)

        image = image.to(self.model.device, dtype=self.model.dtype)

        # Fold views into the batch dim for a single backbone pass.
        bs, num_views, c, h, w = image.shape
        image = image.view(bs * num_views, c, h, w)

        inputs = self.transform(image)
        outputs = self.model(inputs)

        last_hidden_state = outputs.last_hidden_state
        last_hidden_state = last_hidden_state.view(
            bs, num_views, last_hidden_state.shape[-2],
            last_hidden_state.shape[-1]
        )

        view_embedding = self.view_embed.to(last_hidden_state.dtype).to(last_hidden_state.device)
        if view_idxs is not None:
            # Per-sample view selection from the embedding table.
            assert len(view_idxs) == bs
            view_embeddings = []
            for i in range(bs):
                view_idx = view_idxs[i]
                assert num_views == len(view_idx)
                view_embeddings.append(self.view_embed[:, view_idx, ...])
            view_embedding = torch.cat(view_embeddings, 0).to(last_hidden_state.dtype).to(last_hidden_state.device)

        if num_views != self.view_num:
            # Fewer views than the table covers: take the leading rows.
            view_embedding = view_embedding[:, :num_views, ...]
        last_hidden_state = last_hidden_state + view_embedding
        # Flatten views back into one long token sequence.
        last_hidden_state = last_hidden_state.view(bs, num_views * last_hidden_state.shape[-2],
                                                   last_hidden_state.shape[-1])
        return last_hidden_state

    def unconditional_embedding(self, batch_size, view_idxs=None, **kwargs):
        """Zero embedding matching forward's output shape (classifier-free guidance)."""
        device = next(self.model.parameters()).device
        dtype = next(self.model.parameters()).dtype
        zero = torch.zeros(
            batch_size,
            self.num_patches * len(view_idxs[0]),
            self.model.config.hidden_size,
            device=device,
            dtype=dtype,
        )
        return zero
+
+
def build_image_encoder(config):
    """Instantiate an image encoder from a config dict.

    Args:
        config: dict with 'type' (encoder class name) and 'kwargs'.

    Raises:
        ValueError: if config['type'] names an unknown encoder.
    """
    encoder_type = config['type']
    if encoder_type == 'CLIPImageEncoder':
        return CLIPImageEncoder(**config['kwargs'])
    if encoder_type == 'DinoImageEncoder':
        return DinoImageEncoder(**config['kwargs'])
    if encoder_type == 'DinoImageEncoderMV':
        return DinoImageEncoderMV(**config['kwargs'])
    raise ValueError(f'Unknown image encoder type: {encoder_type}')
+
+
class DualImageEncoder(nn.Module):
    """Pairs a main and an additional image encoder behind one dict-output API."""

    def __init__(
        self,
        main_image_encoder,
        additional_image_encoder,
    ):
        super().__init__()
        self.main_image_encoder = build_image_encoder(main_image_encoder)
        self.additional_image_encoder = build_image_encoder(additional_image_encoder)

    def forward(self, image, mask=None, **kwargs):
        """Encode with both encoders; keys: 'main' and 'additional'."""
        return {
            'main': self.main_image_encoder(image, mask=mask, **kwargs),
            'additional': self.additional_image_encoder(image, mask=mask, **kwargs),
        }

    def unconditional_embedding(self, batch_size, **kwargs):
        """Zero embeddings from both encoders (classifier-free guidance)."""
        return {
            'main': self.main_image_encoder.unconditional_embedding(batch_size, **kwargs),
            'additional': self.additional_image_encoder.unconditional_embedding(batch_size, **kwargs),
        }
+
+
class SingleImageEncoder(nn.Module):
    """Wraps one image encoder behind the same dict-output API as DualImageEncoder."""

    def __init__(
        self,
        main_image_encoder,
    ):
        super().__init__()
        self.main_image_encoder = build_image_encoder(main_image_encoder)

    def forward(self, image, mask=None, **kwargs):
        """Encode `image`; output dict has the single key 'main'."""
        return {'main': self.main_image_encoder(image, mask=mask, **kwargs)}

    def unconditional_embedding(self, batch_size, **kwargs):
        """Zero embedding from the wrapped encoder (classifier-free guidance)."""
        return {'main': self.main_image_encoder.unconditional_embedding(batch_size, **kwargs)}
diff --git a/hy3dgen/shapegen/models/denoisers/__init__.py b/hy3dgen/shapegen/models/denoisers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f62f353998aae8e6af0f8e43460f4ae00cf1c1f4
--- /dev/null
+++ b/hy3dgen/shapegen/models/denoisers/__init__.py
@@ -0,0 +1,15 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the respective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+from .hunyuan3ddit import Hunyuan3DDiT
diff --git a/hy3dgen/shapegen/models/denoisers/hunyuan3ddit.py b/hy3dgen/shapegen/models/denoisers/hunyuan3ddit.py
new file mode 100644
index 0000000000000000000000000000000000000000..461d4106ee9115203a27ee20f882a64ddc617675
--- /dev/null
+++ b/hy3dgen/shapegen/models/denoisers/hunyuan3ddit.py
@@ -0,0 +1,410 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the respective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+import math
+import os
+from dataclasses import dataclass
+from typing import List, Tuple, Optional
+
+import torch
+from einops import rearrange
+from torch import Tensor, nn
+
# Default to PyTorch's fused SDPA kernel; when the USE_SAGEATTN env var is set
# to "1", swap in SageAttention as a drop-in replacement.
scaled_dot_product_attention = nn.functional.scaled_dot_product_attention
if os.environ.get('USE_SAGEATTN', '0') == '1':
    try:
        from sageattention import sageattn
    except ImportError:
        raise ImportError('Please install the package "sageattention" to use this USE_SAGEATTN.')
    scaled_dot_product_attention = sageattn
+
+
def attention(q: Tensor, k: Tensor, v: Tensor, **kwargs) -> Tensor:
    """Scaled-dot-product attention over (B, H, L, D) tensors, returning the
    heads merged into the channel dim as (B, L, H*D).

    Extra keyword arguments (e.g. `pe`) are accepted for call-site
    compatibility but ignored.
    """
    out = scaled_dot_product_attention(q, k, v)
    bsz, heads, seq, dim = out.shape
    # Equivalent to rearrange(out, "B H L D -> B L (H D)").
    return out.transpose(1, 2).reshape(bsz, seq, heads * dim)
+
+
def timestep_embedding(t: Tensor, dim, max_period=10000, time_factor: float = 1000.0):
    """Build sinusoidal timestep embeddings (cosines first, then sines).

    Args:
        t: 1-D tensor of N (possibly fractional) timestep indices.
        dim: embedding width; odd widths get one trailing zero column.
        max_period: controls the minimum embedding frequency.
        time_factor: multiplier applied to `t` before embedding.

    Returns:
        (N, dim) tensor, cast back to `t`'s dtype when `t` is floating point.
    """
    scaled = time_factor * t
    half = dim // 2
    exponents = -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
    freqs = torch.exp(exponents).to(scaled.device)

    phases = scaled[:, None].float() * freqs[None]
    emb = torch.cat([torch.cos(phases), torch.sin(phases)], dim=-1)
    if dim % 2:
        # Pad odd widths with a zero column.
        emb = torch.cat([emb, torch.zeros_like(emb[:, :1])], dim=-1)
    if torch.is_floating_point(scaled):
        emb = emb.to(scaled)
    return emb
+
+
class GELU(nn.Module):
    """GELU activation with a configurable approximation mode (default 'tanh')."""

    def __init__(self, approximate='tanh'):
        super().__init__()
        self.approximate = approximate

    def forward(self, x: Tensor) -> Tensor:
        # `.contiguous()` guards against non-contiguous inputs from upstream slicing.
        mode = self.approximate
        return nn.functional.gelu(x.contiguous(), approximate=mode)
+
+
class MLPEmbedder(nn.Module):
    """Two-layer MLP (Linear -> SiLU -> Linear) mapping in_dim to hidden_dim."""

    def __init__(self, in_dim: int, hidden_dim: int):
        super().__init__()
        self.in_layer = nn.Linear(in_dim, hidden_dim, bias=True)
        self.silu = nn.SiLU()
        self.out_layer = nn.Linear(hidden_dim, hidden_dim, bias=True)

    def forward(self, x: Tensor) -> Tensor:
        hidden = self.silu(self.in_layer(x))
        return self.out_layer(hidden)
+
+
class RMSNorm(torch.nn.Module):
    """Root-mean-square norm with a learnable per-channel scale (no bias)."""

    def __init__(self, dim: int):
        super().__init__()
        self.scale = nn.Parameter(torch.ones(dim))

    def forward(self, x: Tensor):
        orig_dtype = x.dtype
        # Normalize in float32 for numerical stability, then cast back.
        xf = x.float()
        inv_rms = torch.rsqrt(xf.pow(2).mean(dim=-1, keepdim=True) + 1e-6)
        return (xf * inv_rms).to(dtype=orig_dtype) * self.scale
+
+
class QKNorm(torch.nn.Module):
    """Independent RMS-normalization of queries and keys, cast to v's dtype."""

    def __init__(self, dim: int):
        super().__init__()
        self.query_norm = RMSNorm(dim)
        self.key_norm = RMSNorm(dim)

    def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tuple[Tensor, Tensor]:
        normed_q = self.query_norm(q)
        normed_k = self.key_norm(k)
        return normed_q.to(v), normed_k.to(v)
+
+
class SelfAttention(nn.Module):
    """Multi-head self-attention with QK RMS-normalization."""

    def __init__(
        self,
        dim: int,
        num_heads: int = 8,
        qkv_bias: bool = False,
    ):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.norm = QKNorm(head_dim)
        self.proj = nn.Linear(dim, dim)

    def forward(self, x: Tensor, pe: Tensor) -> Tensor:
        # Project once, then split into per-head q/k/v.
        q, k, v = rearrange(self.qkv(x), "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
        q, k = self.norm(q, k, v)
        out = attention(q, k, v, pe=pe)
        return self.proj(out)
+
+
@dataclass
class ModulationOut:
    # One modulation triple produced by `Modulation`: additive shift,
    # multiplicative scale, and residual gate.
    shift: Tensor
    scale: Tensor
    gate: Tensor
+
+
class Modulation(nn.Module):
    """Projects a conditioning vector into shift/scale/gate modulation triples.

    With double=True, produces two triples (attention + MLP); otherwise one.
    """

    def __init__(self, dim: int, double: bool):
        super().__init__()
        self.is_double = double
        self.multiplier = 6 if double else 3
        self.lin = nn.Linear(dim, self.multiplier * dim, bias=True)

    def forward(self, vec: Tensor) -> Tuple[ModulationOut, Optional[ModulationOut]]:
        chunks = self.lin(nn.functional.silu(vec))[:, None, :].chunk(self.multiplier, dim=-1)
        first = ModulationOut(*chunks[:3])
        second = ModulationOut(*chunks[3:]) if self.is_double else None
        return first, second
+
+
class DoubleStreamBlock(nn.Module):
    """Two-stream DiT block (image + text tokens).

    Each stream has its own modulation, QKV projection and MLP; the two
    streams attend jointly over the concatenated sequence (text first).
    """

    def __init__(
        self,
        hidden_size: int,
        num_heads: int,
        mlp_ratio: float,
        qkv_bias: bool = False,
    ):
        super().__init__()
        mlp_hidden_dim = int(hidden_size * mlp_ratio)
        self.num_heads = num_heads
        self.hidden_size = hidden_size
        # Image stream: modulation, pre-norm + attention, pre-norm + MLP.
        self.img_mod = Modulation(hidden_size, double=True)
        self.img_norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.img_attn = SelfAttention(dim=hidden_size, num_heads=num_heads, qkv_bias=qkv_bias)

        self.img_norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.img_mlp = nn.Sequential(
            nn.Linear(hidden_size, mlp_hidden_dim, bias=True),
            GELU(approximate="tanh"),
            nn.Linear(mlp_hidden_dim, hidden_size, bias=True),
        )

        # Text stream mirrors the image stream.
        self.txt_mod = Modulation(hidden_size, double=True)
        self.txt_norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.txt_attn = SelfAttention(dim=hidden_size, num_heads=num_heads, qkv_bias=qkv_bias)

        self.txt_norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.txt_mlp = nn.Sequential(
            nn.Linear(hidden_size, mlp_hidden_dim, bias=True),
            GELU(approximate="tanh"),
            nn.Linear(mlp_hidden_dim, hidden_size, bias=True),
        )

    def forward(self, img: Tensor, txt: Tensor, vec: Tensor, pe: Tensor) -> Tuple[Tensor, Tensor]:
        """Run one joint block; returns the updated (img, txt) token streams."""
        # Each stream gets two modulation triples: one for attention, one for MLP.
        img_mod1, img_mod2 = self.img_mod(vec)
        txt_mod1, txt_mod2 = self.txt_mod(vec)

        # Image stream: modulated pre-norm, then per-stream QKV + QK-norm.
        img_modulated = self.img_norm1(img)
        img_modulated = (1 + img_mod1.scale) * img_modulated + img_mod1.shift
        img_qkv = self.img_attn.qkv(img_modulated)
        img_q, img_k, img_v = rearrange(img_qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
        img_q, img_k = self.img_attn.norm(img_q, img_k, img_v)

        # Text stream, same recipe.
        txt_modulated = self.txt_norm1(txt)
        txt_modulated = (1 + txt_mod1.scale) * txt_modulated + txt_mod1.shift
        txt_qkv = self.txt_attn.qkv(txt_modulated)
        txt_q, txt_k, txt_v = rearrange(txt_qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
        txt_q, txt_k = self.txt_attn.norm(txt_q, txt_k, txt_v)

        # Joint attention over the concatenated sequence (text tokens first).
        q = torch.cat((txt_q, img_q), dim=2)
        k = torch.cat((txt_k, img_k), dim=2)
        v = torch.cat((txt_v, img_v), dim=2)

        attn = attention(q, k, v, pe=pe)
        txt_attn, img_attn = attn[:, : txt.shape[1]], attn[:, txt.shape[1]:]

        # Gated residual updates: attention output, then MLP, per stream.
        img = img + img_mod1.gate * self.img_attn.proj(img_attn)
        img = img + img_mod2.gate * self.img_mlp((1 + img_mod2.scale) * self.img_norm2(img) + img_mod2.shift)

        txt = txt + txt_mod1.gate * self.txt_attn.proj(txt_attn)
        txt = txt + txt_mod2.gate * self.txt_mlp((1 + txt_mod2.scale) * self.txt_norm2(txt) + txt_mod2.shift)
        return img, txt
+
+
class SingleStreamBlock(nn.Module):
    """
    A DiT block with parallel linear layers as described in
    https://arxiv.org/abs/2302.05442 and adapted modulation interface.
    """

    def __init__(
        self,
        hidden_size: int,
        num_heads: int,
        mlp_ratio: float = 4.0,
        qk_scale: Optional[float] = None,
    ):
        super().__init__()
        head_dim = hidden_size // num_heads

        self.hidden_dim = hidden_size
        self.num_heads = num_heads
        self.scale = qk_scale or head_dim ** -0.5
        self.mlp_hidden_dim = int(hidden_size * mlp_ratio)

        # Fused input projection: produces [qkv | mlp_in] in one matmul.
        self.linear1 = nn.Linear(hidden_size, hidden_size * 3 + self.mlp_hidden_dim)
        # Fused output projection: consumes [attn_out | mlp_out].
        self.linear2 = nn.Linear(hidden_size + self.mlp_hidden_dim, hidden_size)

        self.norm = QKNorm(head_dim)

        self.hidden_size = hidden_size
        self.pre_norm = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)

        self.mlp_act = GELU(approximate="tanh")
        self.modulation = Modulation(hidden_size, double=False)

    def forward(self, x: Tensor, vec: Tensor, pe: Tensor) -> Tensor:
        """Apply one gated attention+MLP update conditioned on ``vec``."""
        mod, _ = self.modulation(vec)

        normed = (1 + mod.scale) * self.pre_norm(x) + mod.shift
        fused = self.linear1(normed)
        qkv, mlp_in = fused.split([3 * self.hidden_size, self.mlp_hidden_dim], dim=-1)

        q, k, v = rearrange(qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
        q, k = self.norm(q, k, v)

        # Attention and MLP run in parallel off the same pre-norm input,
        # then are fused by a single output projection.
        attn_out = attention(q, k, v, pe=pe)
        fused_out = torch.cat((attn_out, self.mlp_act(mlp_in)), 2)
        return x + mod.gate * self.linear2(fused_out)
+
+
class LastLayer(nn.Module):
    """Final adaLN-modulated projection from hidden states to patch outputs."""

    def __init__(self, hidden_size: int, patch_size: int, out_channels: int):
        super().__init__()
        # No learned affine on the norm: scale/shift come from `vec` instead.
        self.norm_final = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.linear = nn.Linear(hidden_size, patch_size * patch_size * out_channels, bias=True)
        self.adaLN_modulation = nn.Sequential(nn.SiLU(), nn.Linear(hidden_size, 2 * hidden_size, bias=True))

    def forward(self, x: Tensor, vec: Tensor) -> Tensor:
        """Modulate `x` per-sample by shift/scale derived from `vec`, then project."""
        shift, scale = torch.chunk(self.adaLN_modulation(vec), 2, dim=1)
        modulated = self.norm_final(x) * (1 + scale[:, None, :]) + shift[:, None, :]
        return self.linear(modulated)
+
+
class Hunyuan3DDiT(nn.Module):
    """Hunyuan3D DiT denoiser: a Flux-style dual/single-stream transformer.

    Latent tokens and conditioning tokens first pass through `depth`
    double-stream blocks (separate weights, joint attention), are then
    concatenated and refined by `depth_single_blocks` single-stream blocks,
    and finally projected back to `out_channels` per token.

    Args:
        in_channels: channel width of the input latent tokens.
        context_in_dim: channel width of the raw conditioning tokens.
        hidden_size: transformer width; must be divisible by `num_heads`.
        axes_dim: per-axis positional dims; must sum to
            `hidden_size // num_heads`. `None` means the historic default
            of a single 64-dim axis.
        theta: rotary base frequency (kept for config compatibility).
        time_factor: scale applied inside `timestep_embedding`.
        guidance_embed: if True, a guidance-strength embedding is added to
            the timestep embedding (guidance-distilled models).
        ckpt_path: optional checkpoint to restore at construction time.
    """

    def __init__(
        self,
        in_channels: int = 64,
        context_in_dim: int = 1536,
        hidden_size: int = 1024,
        mlp_ratio: float = 4.0,
        num_heads: int = 16,
        depth: int = 16,
        depth_single_blocks: int = 32,
        axes_dim: Optional[List[int]] = None,
        theta: int = 10_000,
        qkv_bias: bool = True,
        time_factor: float = 1000,
        guidance_embed: bool = False,
        ckpt_path: Optional[str] = None,
        **kwargs,
    ):
        super().__init__()
        # Avoid the mutable-default-argument pitfall: the previous default
        # `axes_dim=[64]` was a single shared list across all instances.
        if axes_dim is None:
            axes_dim = [64]
        self.in_channels = in_channels
        self.context_in_dim = context_in_dim
        self.hidden_size = hidden_size
        self.mlp_ratio = mlp_ratio
        self.num_heads = num_heads
        self.depth = depth
        self.depth_single_blocks = depth_single_blocks
        self.axes_dim = axes_dim
        self.theta = theta
        self.qkv_bias = qkv_bias
        self.time_factor = time_factor
        self.out_channels = self.in_channels
        self.guidance_embed = guidance_embed

        if hidden_size % num_heads != 0:
            raise ValueError(
                f"Hidden size {hidden_size} must be divisible by num_heads {num_heads}"
            )
        pe_dim = hidden_size // num_heads
        if sum(axes_dim) != pe_dim:
            raise ValueError(f"Got {axes_dim} but expected positional dim {pe_dim}")

        # Input adapters: latents, sinusoidal time embedding, conditioning
        # tokens, and (optionally) a guidance-strength embedding.
        self.latent_in = nn.Linear(self.in_channels, self.hidden_size, bias=True)
        self.time_in = MLPEmbedder(in_dim=256, hidden_dim=self.hidden_size)
        self.cond_in = nn.Linear(context_in_dim, self.hidden_size)
        self.guidance_in = (
            MLPEmbedder(in_dim=256, hidden_dim=self.hidden_size) if guidance_embed else nn.Identity()
        )

        self.double_blocks = nn.ModuleList(
            [
                DoubleStreamBlock(
                    self.hidden_size,
                    self.num_heads,
                    mlp_ratio=mlp_ratio,
                    qkv_bias=qkv_bias,
                )
                for _ in range(depth)
            ]
        )

        self.single_blocks = nn.ModuleList(
            [
                SingleStreamBlock(
                    self.hidden_size,
                    self.num_heads,
                    mlp_ratio=mlp_ratio,
                )
                for _ in range(depth_single_blocks)
            ]
        )

        self.final_layer = LastLayer(self.hidden_size, 1, self.out_channels)

        if ckpt_path is not None:
            self._load_checkpoint(ckpt_path)

    def _load_checkpoint(self, ckpt_path: str) -> None:
        """Restore weights from a Lightning or DeepSpeed checkpoint file.

        Handles two layouts: a plain dict of tensors with keys prefixed by
        `_forward_module.` (DeepSpeed), or a dict with a `state_dict` entry
        whose keys may be prefixed by `model.` (Lightning wrapper).
        """
        print('restored denoiser ckpt', ckpt_path)

        # NOTE(security): torch.load unpickles arbitrary objects — only load
        # checkpoints from trusted sources.
        ckpt = torch.load(ckpt_path, map_location="cpu")
        if 'state_dict' not in ckpt:
            # deepspeed ckpt: strip only the LEADING '_forward_module.' prefix.
            # The previous str.replace() removed every occurrence anywhere in
            # the key, which can corrupt keys containing the substring.
            state_dict = {}
            for k in ckpt.keys():
                new_k = k[len('_forward_module.'):] if k.startswith('_forward_module.') else k
                state_dict[new_k] = ckpt[k]
        else:
            state_dict = ckpt["state_dict"]

        final_state_dict = {}
        for k, v in state_dict.items():
            if k.startswith('model.'):
                # Strip only the leading 'model.' prefix (see note above).
                final_state_dict[k[len('model.'):]] = v
            else:
                final_state_dict[k] = v
        missing, unexpected = self.load_state_dict(final_state_dict, strict=False)
        print('unexpected keys:', unexpected)
        print('missing keys:', missing)

    def forward(
        self,
        x,
        t,
        contexts,
        **kwargs,
    ) -> Tensor:
        """Predict the denoising output for latent tokens `x` at time `t`.

        Args:
            x: latent tokens, shape (batch, tokens, in_channels).
            t: diffusion timesteps, shape (batch,).
            contexts: dict carrying the conditioning tokens under key 'main'.
            **kwargs: may carry `guidance` (per-sample strength tensor),
                required when `guidance_embed` is enabled.

        Raises:
            ValueError: if `guidance_embed` is set but no `guidance` given.
        """
        cond = contexts['main']
        latent = self.latent_in(x)

        vec = self.time_in(timestep_embedding(t, 256, self.time_factor).to(dtype=latent.dtype))
        if self.guidance_embed:
            guidance = kwargs.get('guidance', None)
            if guidance is None:
                raise ValueError("Didn't get guidance strength for guidance distilled model.")
            vec = vec + self.guidance_in(timestep_embedding(guidance, 256, self.time_factor))

        cond = self.cond_in(cond)
        pe = None  # no positional encoding is applied in this variant

        for block in self.double_blocks:
            latent, cond = block(img=latent, txt=cond, vec=vec, pe=pe)

        # Single-stream blocks see [cond; latent] as one fused sequence;
        # the cond tokens are dropped again before the final projection.
        latent = torch.cat((cond, latent), 1)
        for block in self.single_blocks:
            latent = block(latent, vec=vec, pe=pe)

        latent = latent[:, cond.shape[1]:, ...]
        latent = self.final_layer(latent, vec)
        return latent
diff --git a/hy3dgen/shapegen/pipelines.py b/hy3dgen/shapegen/pipelines.py
new file mode 100644
index 0000000000000000000000000000000000000000..03bfb24661133dc0278894dffecc64132a2c8ece
--- /dev/null
+++ b/hy3dgen/shapegen/pipelines.py
@@ -0,0 +1,765 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the repsective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+import copy
+import importlib
+import inspect
+import os
+from typing import List, Optional, Union
+
+import numpy as np
+import torch
+import trimesh
+import yaml
+from PIL import Image
+from diffusers.utils.torch_utils import randn_tensor
+from diffusers.utils.import_utils import is_accelerate_version, is_accelerate_available
+from tqdm import tqdm
+
+from .models.autoencoders import ShapeVAE
+from .models.autoencoders import SurfaceExtractors
+from .utils import logger, synchronize_timer, smart_load_model
+
+
def retrieve_timesteps(
    scheduler,
    num_inference_steps: Optional[int] = None,
    device: Optional[Union[str, torch.device]] = None,
    timesteps: Optional[List[int]] = None,
    sigmas: Optional[List[float]] = None,
    **kwargs,
):
    """
    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.

    Args:
        scheduler (`SchedulerMixin`):
            The scheduler to get timesteps from.
        num_inference_steps (`int`):
            The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
            must be `None`.
        device (`str` or `torch.device`, *optional*):
            The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
        timesteps (`List[int]`, *optional*):
            Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
            `num_inference_steps` and `sigmas` must be `None`.
        sigmas (`List[float]`, *optional*):
            Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
            `num_inference_steps` and `timesteps` must be `None`.

    Returns:
        `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
        second element is the number of inference steps.
    """
    if timesteps is not None and sigmas is not None:
        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")

    def _supports(param_name):
        # Whether this scheduler's set_timesteps accepts the given kwarg.
        return param_name in inspect.signature(scheduler.set_timesteps).parameters

    if timesteps is not None:
        if not _supports("timesteps"):
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" timestep schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
        num_inference_steps = len(scheduler.timesteps)
    elif sigmas is not None:
        if not _supports("sigmas"):
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" sigmas schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
        num_inference_steps = len(scheduler.timesteps)
    else:
        # Note: here the caller-supplied step count is returned as-is,
        # even if the scheduler materializes a different number of steps.
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
    return scheduler.timesteps, num_inference_steps
+
+
@synchronize_timer('Export to trimesh')
def export_to_trimesh(mesh_output):
    """Convert raw latents2mesh output(s) into `trimesh.Trimesh` object(s).

    Face winding is flipped (columns reversed) before conversion; the input
    objects are mutated in place. A list input maps element-wise, with
    `None` entries passed through unchanged.
    """
    def _convert(raw):
        raw.mesh_f = raw.mesh_f[:, ::-1]
        return trimesh.Trimesh(raw.mesh_v, raw.mesh_f)

    if isinstance(mesh_output, list):
        return [None if item is None else _convert(item) for item in mesh_output]
    return _convert(mesh_output)
+
+
def get_obj_from_str(string, reload=False):
    """Resolve a dotted import path like ``"package.module.Name"`` to the attribute.

    With ``reload=True`` the containing module is re-imported first, so a
    fresh definition is returned.
    """
    module_name, attr_name = string.rsplit(".", 1)
    if reload:
        importlib.reload(importlib.import_module(module_name))
    return getattr(importlib.import_module(module_name, package=None), attr_name)
+
+
def instantiate_from_config(config, **kwargs):
    """Instantiate the class named by ``config["target"]``.

    Constructor arguments are the merge of ``kwargs`` and
    ``config["params"]``; config params take precedence on key clashes.

    Raises:
        KeyError: if ``config`` has no ``target`` entry.
    """
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    cls = get_obj_from_str(config["target"])
    merged = dict(kwargs)
    merged.update(config.get("params", dict()))
    return cls(**merged)
+
+
class Hunyuan3DDiTPipeline:
    """Base image-to-3D shape generation pipeline.

    Combines an image conditioner, a DiT denoiser (`model`), a shape VAE,
    an image processor, and a noise scheduler; `__call__` runs a
    classifier-free-guidance diffusion loop and decodes the final latents
    to a mesh via `_export`.
    """

    # Order in which whole models are moved onto the accelerator under
    # `enable_model_cpu_offload`.
    model_cpu_offload_seq = "conditioner->model->vae"
    _exclude_from_cpu_offload = []

    @classmethod
    @synchronize_timer('Hunyuan3DDiTPipeline Model Loading')
    def from_single_file(
        cls,
        ckpt_path,
        config_path,
        device='cuda',
        dtype=torch.float16,
        use_safetensors=None,
        **kwargs,
    ):
        """Build a pipeline from one checkpoint file plus a YAML config.

        Args:
            ckpt_path: path to a `.ckpt` (torch) or `.safetensors` file;
                with `use_safetensors` the `.ckpt` suffix is swapped.
            config_path: YAML with `model`/`vae`/`conditioner`/
                `image_processor`/`scheduler` sub-configs.

        Raises:
            FileNotFoundError: if the resolved checkpoint path is missing.
        """
        # load config
        with open(config_path, 'r') as f:
            config = yaml.safe_load(f)

        # load ckpt
        if use_safetensors:
            ckpt_path = ckpt_path.replace('.ckpt', '.safetensors')
        if not os.path.exists(ckpt_path):
            raise FileNotFoundError(f"Model file {ckpt_path} not found")
        logger.info(f"Loading model from {ckpt_path}")

        if use_safetensors:
            # parse safetensors: flat keys look like "<model_name>.<param_path>";
            # regroup them into one state-dict per component name.
            import safetensors.torch
            safetensors_ckpt = safetensors.torch.load_file(ckpt_path, device='cpu')
            ckpt = {}
            for key, value in safetensors_ckpt.items():
                model_name = key.split('.')[0]
                new_key = key[len(model_name) + 1:]
                if model_name not in ckpt:
                    ckpt[model_name] = {}
                ckpt[model_name][new_key] = value
        else:
            ckpt = torch.load(ckpt_path, map_location='cpu', weights_only=True)
        # load model
        model = instantiate_from_config(config['model'])
        model.load_state_dict(ckpt['model'])
        vae = instantiate_from_config(config['vae'])
        vae.load_state_dict(ckpt['vae'])
        conditioner = instantiate_from_config(config['conditioner'])
        # conditioner weights are optional (may be frozen pretrained encoders)
        if 'conditioner' in ckpt:
            conditioner.load_state_dict(ckpt['conditioner'])
        image_processor = instantiate_from_config(config['image_processor'])
        scheduler = instantiate_from_config(config['scheduler'])

        model_kwargs = dict(
            vae=vae,
            model=model,
            scheduler=scheduler,
            conditioner=conditioner,
            image_processor=image_processor,
            device=device,
            dtype=dtype,
        )
        model_kwargs.update(kwargs)

        return cls(
            **model_kwargs
        )

    @classmethod
    def from_pretrained(
        cls,
        model_path,
        device='cuda',
        dtype=torch.float16,
        use_safetensors=True,
        variant='fp16',
        subfolder='hunyuan3d-dit-v2-0',
        **kwargs,
    ):
        """Resolve `model_path` (hub id or local dir) via `smart_load_model`
        and delegate to `from_single_file`. The resolution arguments are kept
        in `from_pretrained_kwargs` so `enable_flashvdm` can re-load VAEs.
        """
        kwargs['from_pretrained_kwargs'] = dict(
            model_path=model_path,
            subfolder=subfolder,
            use_safetensors=use_safetensors,
            variant=variant,
            dtype=dtype,
            device=device,
        )
        config_path, ckpt_path = smart_load_model(
            model_path,
            subfolder=subfolder,
            use_safetensors=use_safetensors,
            variant=variant
        )
        return cls.from_single_file(
            ckpt_path,
            config_path,
            device=device,
            dtype=dtype,
            use_safetensors=use_safetensors,
            **kwargs
        )

    def __init__(
        self,
        vae,
        model,
        scheduler,
        conditioner,
        image_processor,
        device='cuda',
        dtype=torch.float16,
        **kwargs
    ):
        """Store the pipeline components and move them to `device`/`dtype`."""
        self.vae = vae
        self.model = model
        self.scheduler = scheduler
        self.conditioner = conditioner
        self.image_processor = image_processor
        self.kwargs = kwargs
        self.to(device, dtype)

    def compile(self):
        """Wrap the heavy modules with `torch.compile` for faster inference."""
        self.vae = torch.compile(self.vae)
        self.model = torch.compile(self.model)
        self.conditioner = torch.compile(self.conditioner)

    def enable_flashvdm(
        self,
        enabled: bool = True,
        adaptive_kv_selection=True,
        topk_mode='mean',
        mc_algo='dmc',
        replace_vae=True,
    ):
        """Toggle the FlashVDM fast decoder, optionally swapping in the
        matching turbo (or standard) VAE for the detected base model.

        Requires the pipeline to have been built via `from_pretrained`
        (uses `from_pretrained_kwargs` to identify the model family).
        """
        if enabled:
            model_path = self.kwargs['from_pretrained_kwargs']['model_path']
            turbo_vae_mapping = {
                'Hunyuan3D-2': ('tencent/Hunyuan3D-2', 'hunyuan3d-vae-v2-0-turbo'),
                'Hunyuan3D-2mv': ('tencent/Hunyuan3D-2', 'hunyuan3d-vae-v2-0-turbo'),
                'Hunyuan3D-2mini': ('tencent/Hunyuan3D-2mini', 'hunyuan3d-vae-v2-mini-turbo'),
            }
            model_name = model_path.split('/')[-1]
            if replace_vae and model_name in turbo_vae_mapping:
                model_path, subfolder = turbo_vae_mapping[model_name]
                self.vae = ShapeVAE.from_pretrained(
                    model_path, subfolder=subfolder,
                    use_safetensors=self.kwargs['from_pretrained_kwargs']['use_safetensors'],
                    device=self.device,
                )
            self.vae.enable_flashvdm_decoder(
                enabled=enabled,
                adaptive_kv_selection=adaptive_kv_selection,
                topk_mode=topk_mode,
                mc_algo=mc_algo
            )
        else:
            # restore the standard (non-turbo) VAE and disable the decoder
            model_path = self.kwargs['from_pretrained_kwargs']['model_path']
            vae_mapping = {
                'Hunyuan3D-2': ('tencent/Hunyuan3D-2', 'hunyuan3d-vae-v2-0'),
                'Hunyuan3D-2mv': ('tencent/Hunyuan3D-2', 'hunyuan3d-vae-v2-0'),
                'Hunyuan3D-2mini': ('tencent/Hunyuan3D-2mini', 'hunyuan3d-vae-v2-mini'),
            }
            model_name = model_path.split('/')[-1]
            if model_name in vae_mapping:
                model_path, subfolder = vae_mapping[model_name]
                self.vae = ShapeVAE.from_pretrained(model_path, subfolder=subfolder)
            self.vae.enable_flashvdm_decoder(enabled=False)

    def to(self, device=None, dtype=None):
        """Move all neural components to `device` and/or cast to `dtype`."""
        if dtype is not None:
            self.dtype = dtype
            self.vae.to(dtype=dtype)
            self.model.to(dtype=dtype)
            self.conditioner.to(dtype=dtype)
        if device is not None:
            self.device = torch.device(device)
            self.vae.to(device)
            self.model.to(device)
            self.conditioner.to(device)

    @property
    def _execution_device(self):
        r"""
        Returns the device on which the pipeline's models will be executed. After calling
        [`~DiffusionPipeline.enable_sequential_cpu_offload`] the execution device can only be inferred from
        Accelerate's module hooks.
        """
        # NOTE(review): `self.components` is never defined in this class —
        # this helper appears copied from diffusers' DiffusionPipeline and
        # will raise AttributeError unless a `components` mapping is
        # provided elsewhere; confirm before relying on offload support.
        for name, model in self.components.items():
            if not isinstance(model, torch.nn.Module) or name in self._exclude_from_cpu_offload:
                continue

            if not hasattr(model, "_hf_hook"):
                return self.device
            for module in model.modules():
                if (
                    hasattr(module, "_hf_hook")
                    and hasattr(module._hf_hook, "execution_device")
                    and module._hf_hook.execution_device is not None
                ):
                    return torch.device(module._hf_hook.execution_device)
        return self.device

    def enable_model_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[torch.device, str] = "cuda"):
        r"""
        Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
        to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
        method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
        `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.

        Arguments:
            gpu_id (`int`, *optional*):
                The ID of the accelerator that shall be used in inference. If not specified, it will default to 0.
            device (`torch.Device` or `str`, *optional*, defaults to "cuda"):
                The PyTorch device type of the accelerator that shall be used in inference. If not specified, it will
                default to "cuda".
        """
        # NOTE(review): relies on `self.components` (see _execution_device).
        if self.model_cpu_offload_seq is None:
            raise ValueError(
                "Model CPU offload cannot be enabled because no `model_cpu_offload_seq` class attribute is set."
            )

        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        torch_device = torch.device(device)
        device_index = torch_device.index

        if gpu_id is not None and device_index is not None:
            raise ValueError(
                f"You have passed both `gpu_id`={gpu_id} and an index as part of the passed device `device`={device}"
                f"Cannot pass both. Please make sure to either not define `gpu_id` or not pass the index as part of the device: `device`={torch_device.type}"
            )

        # _offload_gpu_id should be set to passed gpu_id (or id in passed `device`) or default to previously set id or default to 0
        # NOTE(review): `gpu_id or ...` treats gpu_id=0 as "not given" —
        # falsy-zero quirk inherited from diffusers; confirm acceptable.
        self._offload_gpu_id = gpu_id or torch_device.index or getattr(self, "_offload_gpu_id", 0)

        device_type = torch_device.type
        device = torch.device(f"{device_type}:{self._offload_gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu")
            device_mod = getattr(torch, self.device.type, None)
            if hasattr(device_mod, "empty_cache") and device_mod.is_available():
                device_mod.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        all_model_components = {k: v for k, v in self.components.items() if isinstance(v, torch.nn.Module)}

        self._all_hooks = []
        hook = None
        for model_str in self.model_cpu_offload_seq.split("->"):
            model = all_model_components.pop(model_str, None)
            if not isinstance(model, torch.nn.Module):
                continue

            _, hook = cpu_offload_with_hook(model, device, prev_module_hook=hook)
            self._all_hooks.append(hook)

        # CPU offload models that are not in the seq chain unless they are explicitly excluded
        # these models will stay on CPU until maybe_free_model_hooks is called
        # some models cannot be in the seq chain because they are iteratively called, such as controlnet
        for name, model in all_model_components.items():
            if not isinstance(model, torch.nn.Module):
                continue

            if name in self._exclude_from_cpu_offload:
                model.to(device)
            else:
                _, hook = cpu_offload_with_hook(model, device)
                self._all_hooks.append(hook)

    def maybe_free_model_hooks(self):
        r"""
        Function that offloads all components, removes all model hooks that were added when using
        `enable_model_cpu_offload` and then applies them again. In case the model has not been offloaded this function
        is a no-op. Make sure to add this function to the end of the `__call__` function of your pipeline so that it
        functions correctly when applying enable_model_cpu_offload.
        """
        if not hasattr(self, "_all_hooks") or len(self._all_hooks) == 0:
            # `enable_model_cpu_offload` has not be called, so silently do nothing
            return

        for hook in self._all_hooks:
            # offload model and remove hook from model
            hook.offload()
            hook.remove()

        # make sure the model is in the same state as before calling it
        self.enable_model_cpu_offload()

    @synchronize_timer('Encode cond')
    def encode_cond(self, image, additional_cond_inputs, do_classifier_free_guidance, dual_guidance):
        """Encode conditioning images, optionally appending unconditional
        embeddings for classifier-free (and dual) guidance.

        The returned structure stacks along the batch dim:
        [cond] or [cond, uncond] or [cond, uncond-main-only, uncond].
        """
        bsz = image.shape[0]
        cond = self.conditioner(image=image, **additional_cond_inputs)

        if do_classifier_free_guidance:
            un_cond = self.conditioner.unconditional_embedding(bsz, **additional_cond_inputs)

            if dual_guidance:
                # middle row: main conditioning dropped, 'additional' kept
                un_cond_drop_main = copy.deepcopy(un_cond)
                un_cond_drop_main['additional'] = cond['additional']

                def cat_recursive(a, b, c):
                    # walk nested dicts, concatenating leaf tensors batch-wise
                    if isinstance(a, torch.Tensor):
                        return torch.cat([a, b, c], dim=0).to(self.dtype)
                    out = {}
                    for k in a.keys():
                        out[k] = cat_recursive(a[k], b[k], c[k])
                    return out

                cond = cat_recursive(cond, un_cond_drop_main, un_cond)
            else:
                def cat_recursive(a, b):
                    if isinstance(a, torch.Tensor):
                        return torch.cat([a, b], dim=0).to(self.dtype)
                    out = {}
                    for k in a.keys():
                        out[k] = cat_recursive(a[k], b[k])
                    return out

                cond = cat_recursive(cond, un_cond)
        return cond

    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]

        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    def prepare_latents(self, batch_size, dtype, device, generator, latents=None):
        """Sample (or adopt) initial latents of shape
        (batch_size, *vae.latent_shape), scaled by the scheduler's
        `init_noise_sigma` when present.
        """
        shape = (batch_size, *self.vae.latent_shape)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            latents = latents.to(device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * getattr(self.scheduler, 'init_noise_sigma', 1.0)
        return latents

    def prepare_image(self, image) -> dict:
        """Run the image processor on one image (or a list) and collate the
        per-image output dicts, concatenating tensor values batch-wise.
        """
        if isinstance(image, str) and not os.path.exists(image):
            raise FileNotFoundError(f"Couldn't find image at path {image}")

        if not isinstance(image, list):
            image = [image]

        outputs = []
        for img in image:
            output = self.image_processor(img)
            outputs.append(output)

        # collate list-of-dicts into dict-of-batched-values
        cond_input = {k: [] for k in outputs[0].keys()}
        for output in outputs:
            for key, value in output.items():
                cond_input[key].append(value)
        for key, value in cond_input.items():
            if isinstance(value[0], torch.Tensor):
                cond_input[key] = torch.cat(value, dim=0)

        return cond_input

    def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
        """
        See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298

        Args:
            w (`torch.Tensor`):
                1-D tensor of guidance scales to embed (scaled by 1000).
            embedding_dim (`int`, *optional*, defaults to 512):
                dimension of the embeddings to generate
            dtype:
                data type of the generated embeddings

        Returns:
            `torch.FloatTensor`: Embedding vectors with shape `(len(w), embedding_dim)`
        """
        assert len(w.shape) == 1
        w = w * 1000.0

        # standard sinusoidal embedding over log-spaced frequencies
        half_dim = embedding_dim // 2
        emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
        emb = w.to(dtype)[:, None] * emb[None, :]
        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
        if embedding_dim % 2 == 1:  # zero pad
            emb = torch.nn.functional.pad(emb, (0, 1))
        assert emb.shape == (w.shape[0], embedding_dim)
        return emb

    def set_surface_extractor(self, mc_algo):
        """Deprecated shim: swap the VAE's surface extractor by algorithm name."""
        if mc_algo is None:
            return
        logger.info('The parameters `mc_algo` is deprecated, and will be removed in future versions.\n'
                    'Please use: \n'
                    'from hy3dgen.shapegen.models.autoencoders import SurfaceExtractors\n'
                    'pipeline.vae.surface_extractor = SurfaceExtractors[mc_algo]() instead\n')
        if mc_algo not in SurfaceExtractors.keys():
            raise ValueError(f"Unknown mc_algo {mc_algo}")
        self.vae.surface_extractor = SurfaceExtractors[mc_algo]()

    @torch.no_grad()
    def __call__(
        self,
        image: Union[str, List[str], Image.Image] = None,
        num_inference_steps: int = 50,
        timesteps: List[int] = None,
        sigmas: List[float] = None,
        eta: float = 0.0,
        guidance_scale: float = 7.5,
        dual_guidance_scale: float = 10.5,
        dual_guidance: bool = True,
        generator=None,
        box_v=1.01,
        octree_resolution=384,
        mc_level=-1 / 512,
        num_chunks=8000,
        mc_algo=None,
        output_type: Optional[str] = "trimesh",
        enable_pbar=True,
        **kwargs,
    ) -> List[List[trimesh.Trimesh]]:
        """Run the full image → latent diffusion → mesh pipeline.

        Returns the `_export` result: trimesh meshes, raw VAE output, or
        latents depending on `output_type`.
        """
        # NOTE(review): `callback_steps` defaults to None, so passing a
        # `callback` without `callback_steps` raises TypeError on
        # `i % callback_steps` below — confirm intended contract.
        callback = kwargs.pop("callback", None)
        callback_steps = kwargs.pop("callback_steps", None)

        self.set_surface_extractor(mc_algo)

        device = self.device
        dtype = self.dtype
        # CFG is disabled for guidance-distilled models (cond-proj present)
        do_classifier_free_guidance = guidance_scale >= 0 and \
                                      getattr(self.model, 'guidance_cond_proj_dim', None) is None
        dual_guidance = dual_guidance_scale >= 0 and dual_guidance

        cond_inputs = self.prepare_image(image)
        image = cond_inputs.pop('image')
        # NOTE(review): cond is built with dual_guidance=False (2×bsz rows
        # under CFG), yet the loop below triples the latent batch when
        # `dual_guidance` is True — these look inconsistent; verify before
        # enabling dual guidance.
        cond = self.encode_cond(
            image=image,
            additional_cond_inputs=cond_inputs,
            do_classifier_free_guidance=do_classifier_free_guidance,
            dual_guidance=False,
        )
        batch_size = image.shape[0]

        t_dtype = torch.long
        timesteps, num_inference_steps = retrieve_timesteps(
            self.scheduler, num_inference_steps, device, timesteps, sigmas)

        latents = self.prepare_latents(batch_size, dtype, device, generator)
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        guidance_cond = None
        if getattr(self.model, 'guidance_cond_proj_dim', None) is not None:
            # LCM-style: embed (guidance_scale - 1) instead of running CFG
            logger.info('Using lcm guidance scale')
            guidance_scale_tensor = torch.tensor(guidance_scale - 1).repeat(batch_size)
            guidance_cond = self.get_guidance_scale_embedding(
                guidance_scale_tensor, embedding_dim=self.model.guidance_cond_proj_dim
            ).to(device=device, dtype=latents.dtype)
        with synchronize_timer('Diffusion Sampling'):
            for i, t in enumerate(tqdm(timesteps, disable=not enable_pbar, desc="Diffusion Sampling:", leave=False)):
                # expand the latents if we are doing classifier free guidance
                if do_classifier_free_guidance:
                    latent_model_input = torch.cat([latents] * (3 if dual_guidance else 2))
                else:
                    latent_model_input = latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                timestep_tensor = torch.tensor([t], dtype=t_dtype, device=device)
                timestep_tensor = timestep_tensor.expand(latent_model_input.shape[0])
                noise_pred = self.model(latent_model_input, timestep_tensor, cond, guidance_cond=guidance_cond)

                # no drop, drop clip, all drop
                if do_classifier_free_guidance:
                    if dual_guidance:
                        noise_pred_clip, noise_pred_dino, noise_pred_uncond = noise_pred.chunk(3)
                        noise_pred = (
                            noise_pred_uncond
                            + guidance_scale * (noise_pred_clip - noise_pred_dino)
                            + dual_guidance_scale * (noise_pred_dino - noise_pred_uncond)
                        )
                    else:
                        noise_pred_cond, noise_pred_uncond = noise_pred.chunk(2)
                        noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)

                # compute the previous noisy sample x_t -> x_t-1
                outputs = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs)
                latents = outputs.prev_sample

                if callback is not None and i % callback_steps == 0:
                    step_idx = i // getattr(self.scheduler, "order", 1)
                    callback(step_idx, t, outputs)

        return self._export(
            latents,
            output_type,
            box_v, mc_level, num_chunks, octree_resolution, mc_algo,
        )

    def _export(
        self,
        latents,
        output_type='trimesh',
        box_v=1.01,
        mc_level=0.0,
        num_chunks=20000,
        octree_resolution=256,
        mc_algo='mc',
        enable_pbar=True
    ):
        """Decode latents to mesh output.

        For `output_type == 'latent'` the raw latents are returned; otherwise
        latents are unscaled, decoded by the VAE, surface-extracted, and
        (for 'trimesh') converted to `trimesh.Trimesh` objects.
        """
        if not output_type == "latent":
            latents = 1. / self.vae.scale_factor * latents
            latents = self.vae(latents)
            outputs = self.vae.latents2mesh(
                latents,
                bounds=box_v,
                mc_level=mc_level,
                num_chunks=num_chunks,
                octree_resolution=octree_resolution,
                mc_algo=mc_algo,
                enable_pbar=enable_pbar,
            )
        else:
            outputs = latents

        if output_type == 'trimesh':
            outputs = export_to_trimesh(outputs)

        return outputs
+
+
class Hunyuan3DDiTFlowMatchingPipeline(Hunyuan3DDiTPipeline):
    """Flow-matching variant of the Hunyuan3D shape pipeline.

    Overrides `__call__` to sample with a flow-matching scheduler (sigmas
    running from 0 to 1, timesteps normalized to [0, 1] for the model)
    instead of the DDPM-style loop of the base class.
    """

    @torch.inference_mode()
    def __call__(
        self,
        image: Union[str, List[str], Image.Image, dict, List[dict]] = None,
        num_inference_steps: int = 50,
        timesteps: List[int] = None,
        sigmas: List[float] = None,
        eta: float = 0.0,
        guidance_scale: float = 5.0,
        generator=None,
        box_v=1.01,
        octree_resolution=384,
        mc_level=0.0,
        mc_algo=None,
        num_chunks=8000,
        output_type: Optional[str] = "trimesh",
        enable_pbar=True,
        **kwargs,
    ) -> List[List[trimesh.Trimesh]]:
        """Generate 3D shape(s) from input image(s) via flow matching.

        Accepts `callback`/`callback_steps` through kwargs; the callback is
        invoked as `callback(step_idx, t, outputs)` every `callback_steps`
        steps (default: every step).
        """
        callback = kwargs.pop("callback", None)
        callback_steps = kwargs.pop("callback_steps", None)
        # Fix: previously a callback passed without callback_steps crashed
        # with `TypeError: unsupported operand ... % NoneType` in the loop;
        # default to invoking the callback on every step.
        if callback is not None and callback_steps is None:
            callback_steps = 1

        self.set_surface_extractor(mc_algo)

        device = self.device
        dtype = self.dtype
        # Guidance-distilled models embed the scale instead of running CFG.
        do_classifier_free_guidance = guidance_scale >= 0 and not (
            hasattr(self.model, 'guidance_embed') and
            self.model.guidance_embed is True
        )

        cond_inputs = self.prepare_image(image)
        image = cond_inputs.pop('image')
        cond = self.encode_cond(
            image=image,
            additional_cond_inputs=cond_inputs,
            do_classifier_free_guidance=do_classifier_free_guidance,
            dual_guidance=False,
        )
        batch_size = image.shape[0]

        # 5. Prepare timesteps
        # NOTE: this is slightly different from common usage, we start from 0.
        sigmas = np.linspace(0, 1, num_inference_steps) if sigmas is None else sigmas
        timesteps, num_inference_steps = retrieve_timesteps(
            self.scheduler,
            num_inference_steps,
            device,
            sigmas=sigmas,
        )
        latents = self.prepare_latents(batch_size, dtype, device, generator)

        guidance = None
        if hasattr(self.model, 'guidance_embed') and \
                self.model.guidance_embed is True:
            guidance = torch.tensor([guidance_scale] * batch_size, device=device, dtype=dtype)

        with synchronize_timer('Diffusion Sampling'):
            for i, t in enumerate(tqdm(timesteps, disable=not enable_pbar, desc="Diffusion Sampling:")):
                # expand the latents if we are doing classifier free guidance
                if do_classifier_free_guidance:
                    latent_model_input = torch.cat([latents] * 2)
                else:
                    latent_model_input = latents

                # NOTE: we assume model get timesteps ranged from 0 to 1
                timestep = t.expand(latent_model_input.shape[0]).to(
                    latents.dtype) / self.scheduler.config.num_train_timesteps
                noise_pred = self.model(latent_model_input, timestep, cond, guidance=guidance)

                if do_classifier_free_guidance:
                    noise_pred_cond, noise_pred_uncond = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)

                # compute the previous noisy sample x_t -> x_t-1
                outputs = self.scheduler.step(noise_pred, t, latents)
                latents = outputs.prev_sample

                if callback is not None and i % callback_steps == 0:
                    step_idx = i // getattr(self.scheduler, "order", 1)
                    callback(step_idx, t, outputs)

        return self._export(
            latents,
            output_type,
            box_v, mc_level, num_chunks, octree_resolution, mc_algo,
            enable_pbar=enable_pbar,
        )
diff --git a/hy3dgen/shapegen/postprocessors.py b/hy3dgen/shapegen/postprocessors.py
new file mode 100644
index 0000000000000000000000000000000000000000..6ab910d3b63e5ac4d295a7773639eb9493155fdb
--- /dev/null
+++ b/hy3dgen/shapegen/postprocessors.py
@@ -0,0 +1,202 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the respective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+import os
+import tempfile
+from typing import Union
+
+import numpy as np
+import pymeshlab
+import torch
+import trimesh
+
+from .models.autoencoders import Latent2MeshOutput
+from .utils import synchronize_timer
+
+
def load_mesh(path):
    """Load a mesh from disk.

    ``.glb`` files go through trimesh (which understands glTF scenes); every
    other extension is loaded into a pymeshlab.MeshSet.
    """
    if not path.endswith(".glb"):
        meshset = pymeshlab.MeshSet()
        meshset.load_new_mesh(path)
        return meshset
    return trimesh.load(path)
+
+
def reduce_face(mesh: pymeshlab.MeshSet, max_facenum: int = 200000):
    """Decimate ``mesh`` in place down to at most ``max_facenum`` faces.

    Uses pymeshlab's quadric edge-collapse decimation; a mesh already under
    the budget is returned untouched.
    """
    # Nothing to do when the mesh is already within the face budget.
    if mesh.current_mesh().face_number() < max_facenum:
        return mesh

    decimation_params = dict(
        targetfacenum=max_facenum,
        qualitythr=1.0,
        preserveboundary=True,
        boundaryweight=3,
        preservenormal=True,
        preservetopology=True,
        autoclean=True,
    )
    mesh.apply_filter("meshing_decimation_quadric_edge_collapse", **decimation_params)
    return mesh
+
+
def remove_floater(mesh: pymeshlab.MeshSet):
    """Delete small disconnected components ("floaters") from ``mesh`` in place.

    Selects faces belonging to components below 0.5% of the total face count,
    extends the selection to vertices, then removes the selection.
    """
    cleanup_pipeline = [
        ("compute_selection_by_small_disconnected_components_per_face", {"nbfaceratio": 0.005}),
        ("compute_selection_transfer_face_to_vertex", {"inclusive": False}),
        ("meshing_remove_selected_vertices_and_faces", {}),
    ]
    for filter_name, params in cleanup_pipeline:
        mesh.apply_filter(filter_name, **params)
    return mesh
+
+
def pymeshlab2trimesh(mesh: pymeshlab.MeshSet):
    """Convert a pymeshlab.MeshSet into a single trimesh.Trimesh.

    The conversion round-trips through a temporary PLY file. If trimesh loads
    the file as a Scene, all geometries are concatenated into one Trimesh.

    Args:
        mesh (pymeshlab.MeshSet): mesh set whose current mesh is exported.

    Returns:
        trimesh.Trimesh: the loaded (possibly merged) mesh.
    """
    temp_file = tempfile.NamedTemporaryFile(suffix='.ply', delete=False)
    # Close the handle before save/load so other writers own the file
    # (required on Windows, harmless elsewhere).
    temp_file.close()
    try:
        mesh.save_current_mesh(temp_file.name)
        mesh = trimesh.load(temp_file.name)
    finally:
        # Fix: the original used delete=False and never removed the file,
        # leaking one temp file per call.
        os.unlink(temp_file.name)
    # Check the type of the loaded object: if it is a Scene, merge all of its
    # geometries into a single Trimesh.
    if isinstance(mesh, trimesh.Scene):
        combined_mesh = trimesh.Trimesh()
        for geom in mesh.geometry.values():
            combined_mesh = trimesh.util.concatenate([combined_mesh, geom])
        mesh = combined_mesh
    return mesh
+
+
def trimesh2pymeshlab(mesh: trimesh.Trimesh):
    """Convert a trimesh.Trimesh (or trimesh Scene) into a pymeshlab.MeshSet.

    A Scene is first flattened by summing its geometries; the result is then
    round-tripped through a temporary PLY file.

    Args:
        mesh: trimesh.Trimesh or trimesh.scene.Scene to convert.

    Returns:
        pymeshlab.MeshSet: mesh set containing the converted mesh.
    """
    # Flatten a Scene into one Trimesh by summing its geometries.
    if isinstance(mesh, trimesh.scene.Scene):
        temp_mesh = None
        for obj in mesh.geometry.values():
            temp_mesh = obj if temp_mesh is None else temp_mesh + obj
        mesh = temp_mesh
    temp_file = tempfile.NamedTemporaryFile(suffix='.ply', delete=False)
    # Close the handle so export/load own the file (Windows-safe).
    temp_file.close()
    try:
        mesh.export(temp_file.name)
        meshset = pymeshlab.MeshSet()
        meshset.load_new_mesh(temp_file.name)
    finally:
        # Fix: the original used delete=False and never removed the file,
        # leaking one temp file per call.
        os.unlink(temp_file.name)
    return meshset
+
+
def export_mesh(input, output):
    """Convert a processed pymeshlab.MeshSet back into the caller's original
    mesh representation.

    Args:
        input: the object the caller originally passed in; only its type is
            inspected to pick the output representation.
        output (pymeshlab.MeshSet): the processed mesh to convert.

    Returns:
        pymeshlab.MeshSet, Latent2MeshOutput or trimesh.Trimesh, matching the
        type of ``input``.
    """
    if isinstance(input, pymeshlab.MeshSet):
        mesh = output
    elif isinstance(input, Latent2MeshOutput):
        # Fix: the original rebound ``output`` to an empty Latent2MeshOutput
        # and then called ``output.current_mesh()`` on it, which that class
        # does not provide (AttributeError). Read the geometry from the
        # processed MeshSet instead.
        converted = Latent2MeshOutput()
        converted.mesh_v = output.current_mesh().vertex_matrix()
        converted.mesh_f = output.current_mesh().face_matrix()
        mesh = converted
    else:
        mesh = pymeshlab2trimesh(output)
    return mesh
+
+
def import_mesh(mesh: Union[pymeshlab.MeshSet, trimesh.Trimesh, Latent2MeshOutput, str]) -> pymeshlab.MeshSet:
    """Coerce any supported mesh representation into a pymeshlab.MeshSet.

    Accepts a file path, a Latent2MeshOutput, a trimesh mesh/scene, or an
    existing MeshSet (returned unchanged).
    """
    if isinstance(mesh, str):
        # File path: dispatch on extension via load_mesh.
        mesh = load_mesh(mesh)
    elif isinstance(mesh, Latent2MeshOutput):
        # Raw vertex/face arrays: wrap them into a fresh MeshSet.
        meshset = pymeshlab.MeshSet()
        converted = pymeshlab.Mesh(vertex_matrix=mesh.mesh_v, face_matrix=mesh.mesh_f)
        meshset.add_mesh(converted, "converted_mesh")
        mesh = meshset

    # load_mesh may have produced a trimesh object for .glb inputs.
    if isinstance(mesh, (trimesh.Trimesh, trimesh.scene.Scene)):
        mesh = trimesh2pymeshlab(mesh)

    return mesh
+
+
class FaceReducer:
    """Callable post-processor that caps a mesh's face count via decimation."""

    @synchronize_timer('FaceReducer')
    def __call__(
        self,
        mesh: Union[pymeshlab.MeshSet, trimesh.Trimesh, Latent2MeshOutput, str],
        max_facenum: int = 40000
    ) -> Union[pymeshlab.MeshSet, trimesh.Trimesh]:
        """Decimate ``mesh`` to at most ``max_facenum`` faces, returning the
        same kind of object that was passed in."""
        # Normalize to a MeshSet, decimate, then convert back to the caller's
        # original representation.
        meshset = import_mesh(mesh)
        meshset = reduce_face(meshset, max_facenum=max_facenum)
        return export_mesh(mesh, meshset)
+
+
class FloaterRemover:
    """Callable post-processor that strips small disconnected components."""

    @synchronize_timer('FloaterRemover')
    def __call__(
        self,
        mesh: Union[pymeshlab.MeshSet, trimesh.Trimesh, Latent2MeshOutput, str],
    ) -> Union[pymeshlab.MeshSet, trimesh.Trimesh, Latent2MeshOutput]:
        """Remove floaters from ``mesh``, returning the same kind of object
        that was passed in."""
        meshset = remove_floater(import_mesh(mesh))
        return export_mesh(mesh, meshset)
+
+
class DegenerateFaceRemover:
    """Callable post-processor that cleans degenerate faces by round-tripping
    the mesh through a PLY file (pymeshlab re-imports drop malformed
    elements — presumably duplicate/zero-area faces; confirm against
    pymeshlab's PLY importer)."""

    @synchronize_timer('DegenerateFaceRemover')
    def __call__(
        self,
        mesh: Union[pymeshlab.MeshSet, trimesh.Trimesh, Latent2MeshOutput, str],
    ) -> Union[pymeshlab.MeshSet, trimesh.Trimesh, Latent2MeshOutput]:
        """Clean ``mesh``, returning the same kind of object passed in."""
        ms = import_mesh(mesh)

        temp_file = tempfile.NamedTemporaryFile(suffix='.ply', delete=False)
        # Close the handle before save/load (Windows-safe).
        temp_file.close()
        try:
            ms.save_current_mesh(temp_file.name)
            ms = pymeshlab.MeshSet()
            ms.load_new_mesh(temp_file.name)
        finally:
            # Fix: the original used delete=False and never removed the file,
            # leaking one temp file per call.
            os.unlink(temp_file.name)

        mesh = export_mesh(mesh, ms)
        return mesh
+
+
def mesh_normalize(mesh):
    """Normalize mesh vertices so they fit a sphere of diameter 1.2.

    Centers the vertices on the bounding-box center, then scales them so the
    farthest vertex lies at radius 0.6 (= 1.2 / 2) from the origin. The mesh
    is modified in place and also returned.

    Args:
        mesh: any object with a ``vertices`` array-like attribute (e.g. a
            trimesh.Trimesh).

    Returns:
        The same mesh object with normalized vertices.
    """
    scale_factor = 1.2
    vtx_pos = np.asarray(mesh.vertices)
    # Fix: the original computed `(vtx_pos - 0).max(0)[0]`. On a numpy array
    # `.max(0)` already returns the per-axis maxima, so the trailing `[0]`
    # (a torch-style idiom, where `.max(0)` returns (values, indices)) kept
    # only the x-axis extremum and made `center` a scalar instead of the
    # per-axis bounding-box center.
    max_bb = vtx_pos.max(0)
    min_bb = vtx_pos.min(0)

    center = (max_bb + min_bb) / 2

    # Largest distance from the center, doubled, i.e. the bounding diameter.
    scale = np.linalg.norm(vtx_pos - center, axis=1).max() * 2.0

    vtx_pos = (vtx_pos - center) * (scale_factor / float(scale))
    mesh.vertices = vtx_pos

    return mesh
+
+
class MeshSimplifier:
    """Runs an external mesh-simplifier binary over a trimesh mesh.

    The mesh is exported to a temporary OBJ, processed by the executable,
    re-imported, merged (if a Scene comes back) and normalized to a sphere.
    """

    def __init__(self, executable: str = None):
        # Default to the binary bundled next to this module.
        if executable is None:
            CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
            executable = os.path.join(CURRENT_DIR, "mesh_simplifier.bin")
        self.executable = executable

    @synchronize_timer('MeshSimplifier')
    def __call__(
        self,
        mesh: Union[trimesh.Trimesh],
    ) -> Union[trimesh.Trimesh]:
        """Simplify ``mesh`` and return the normalized result."""
        temp_input = tempfile.NamedTemporaryFile(suffix='.obj', delete=False)
        temp_output = tempfile.NamedTemporaryFile(suffix='.obj', delete=False)
        # Close the handles so the export and the external binary own the
        # files (required on Windows).
        temp_input.close()
        temp_output.close()
        try:
            mesh.export(temp_input.name)
            # NOTE(review): shell invocation with unquoted paths. The tempfile
            # names are safe, but an ``executable`` path containing spaces or
            # shell metacharacters would break or be misinterpreted; consider
            # subprocess.run([...], shell=False).
            os.system(f'{self.executable} {temp_input.name} {temp_output.name}')
            ms = trimesh.load(temp_output.name, process=False)
        finally:
            # Fix: the original used delete=False and never removed the files,
            # leaking two temp files per call.
            os.unlink(temp_input.name)
            os.unlink(temp_output.name)
        # Merge a Scene result into a single Trimesh.
        if isinstance(ms, trimesh.Scene):
            combined_mesh = trimesh.Trimesh()
            for geom in ms.geometry.values():
                combined_mesh = trimesh.util.concatenate([combined_mesh, geom])
            ms = combined_mesh
        ms = mesh_normalize(ms)
        return ms
diff --git a/hy3dgen/shapegen/preprocessors.py b/hy3dgen/shapegen/preprocessors.py
new file mode 100644
index 0000000000000000000000000000000000000000..649dbd565db1f2345b8f45a94cfde189cc360b3a
--- /dev/null
+++ b/hy3dgen/shapegen/preprocessors.py
@@ -0,0 +1,167 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the respective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+import cv2
+import numpy as np
+import torch
+from PIL import Image
+from einops import repeat, rearrange
+
+
def array_to_tensor(np_array):
    """Convert an HWC uint8 image array to a 1xCxHxW float tensor in [-1, 1]."""
    image_pt = torch.tensor(np_array).float()
    # Map pixel values from [0, 255] to [-1, 1].
    image_pt = image_pt / 255 * 2 - 1
    # HWC -> CHW, then prepend a singleton batch dimension.
    image_pt = image_pt.permute(2, 0, 1)
    return image_pt.unsqueeze(0)
+
+
class ImageProcessorV2:
    """Preprocess a single input image for shape generation.

    The foreground is recentered on a square canvas with a configurable empty
    border, composited onto a white background, resized to ``size`` x ``size``
    and (optionally) converted, with its alpha mask, to [-1, 1] tensors.
    """

    def __init__(self, size=512, border_ratio=None):
        # size: side length of the square output image.
        # border_ratio: when not None, overrides the border_ratio argument
        # passed to __call__ (see __call__).
        self.size = size
        self.border_ratio = border_ratio

    @staticmethod
    def recenter(image, border_ratio: float = 0.2):
        """ recenter an image to leave some empty space at the image border.

        Args:
            image (ndarray): input image, float/uint8 [H, W, 3/4]
            mask (ndarray): alpha mask, bool [H, W]
            border_ratio (float, optional): border ratio, image will be resized to (1 - border_ratio). Defaults to 0.2.

        Returns:
            ndarray: output image, float/uint8 [H, W, 3/4]
        """

        # Use the alpha channel as the foreground mask; without one, treat the
        # whole image as opaque foreground and append a synthetic alpha.
        if image.shape[-1] == 4:
            mask = image[..., 3]
        else:
            mask = np.ones_like(image[..., 0:1]) * 255
            image = np.concatenate([image, mask], axis=-1)
            mask = mask[..., 0]

        H, W, C = image.shape

        # Square canvas sized to the larger input dimension.
        size = max(H, W)
        result = np.zeros((size, size, C), dtype=np.uint8)

        # Tight bounding box of the non-zero mask region.
        # NOTE(review): a fully transparent image raises here (min() of an
        # empty array) before ever reaching the h/w == 0 check below.
        coords = np.nonzero(mask)
        x_min, x_max = coords[0].min(), coords[0].max()
        y_min, y_max = coords[1].min(), coords[1].max()
        h = x_max - x_min
        w = y_max - y_min
        if h == 0 or w == 0:
            raise ValueError('input image is empty')
        # Scale the foreground so its longer side fills (1 - border_ratio)
        # of the canvas, then center it.
        desired_size = int(size * (1 - border_ratio))
        scale = desired_size / max(h, w)
        h2 = int(h * scale)
        w2 = int(w * scale)
        x2_min = (size - h2) // 2
        x2_max = x2_min + h2

        y2_min = (size - w2) // 2
        y2_max = y2_min + w2

        result[x2_min:x2_max, y2_min:y2_max] = cv2.resize(image[x_min:x_max, y_min:y_max], (w2, h2),
                                                          interpolation=cv2.INTER_AREA)

        # Composite the RGB channels over a white background weighted by alpha.
        bg = np.ones((result.shape[0], result.shape[1], 3), dtype=np.uint8) * 255

        mask = result[..., 3:].astype(np.float32) / 255
        result = result[..., :3] * mask + bg * (1 - mask)

        mask = mask * 255
        result = result.clip(0, 255).astype(np.uint8)
        mask = mask.clip(0, 255).astype(np.uint8)
        return result, mask

    def load_image(self, image, border_ratio=0.15, to_tensor=True):
        """Load, recenter and resize an image given as file path or PIL image.

        Returns an (image, mask) pair, as tensors when ``to_tensor`` is True.
        NOTE(review): inputs that are neither ``str`` nor ``PIL.Image`` skip
        both branches and fail below on the undefined ``mask``.
        """
        if isinstance(image, str):
            # cv2 reads BGR(A); convert to RGB after recentering.
            image = cv2.imread(image, cv2.IMREAD_UNCHANGED)
            image, mask = self.recenter(image, border_ratio=border_ratio)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        elif isinstance(image, Image.Image):
            # PIL images are already RGB-ordered; just add alpha.
            image = image.convert("RGBA")
            image = np.asarray(image)
            image, mask = self.recenter(image, border_ratio=border_ratio)

        image = cv2.resize(image, (self.size, self.size), interpolation=cv2.INTER_CUBIC)
        # Nearest-neighbor keeps the mask binary-ish after resizing.
        mask = cv2.resize(mask, (self.size, self.size), interpolation=cv2.INTER_NEAREST)
        mask = mask[..., np.newaxis]

        if to_tensor:
            image = array_to_tensor(image)
            mask = array_to_tensor(mask)
        return image, mask

    def __call__(self, image, border_ratio=0.15, to_tensor=True, **kwargs):
        """Preprocess ``image``; returns a dict with 'image' and 'mask'."""
        # A border_ratio fixed at construction time wins over the call-site value.
        if self.border_ratio is not None:
            border_ratio = self.border_ratio
        image, mask = self.load_image(image, border_ratio=border_ratio, to_tensor=to_tensor)
        outputs = {
            'image': image,
            'mask': mask
        }
        return outputs
+
+
class MVImageProcessorV2(ImageProcessorV2):
    """
    view order: front, front clockwise 90, back, front clockwise 270
    """
    # Signals to callers that __call__'s output dict carries 'view_idxs'.
    return_view_idx = True

    def __init__(self, size=512, border_ratio=None):
        super().__init__(size, border_ratio)
        # Canonical index for each supported view tag; unknown tags raise
        # KeyError in __call__.
        self.view2idx = {
            'front': 0,
            'left': 1,
            'back': 2,
            'right': 3
        }

    def __call__(self, image_dict, border_ratio=0.15, to_tensor=True, **kwargs):
        """Preprocess a dict of {view_tag: image} into stacked tensors.

        Returns a dict with 'image' (1, V, C, H, W after cat/unsqueeze),
        'mask' and the sorted 'view_idxs'.
        """
        if self.border_ratio is not None:
            border_ratio = self.border_ratio

        images = []
        masks = []
        view_idxs = []
        for idx, (view_tag, image) in enumerate(image_dict.items()):
            view_idxs.append(self.view2idx[view_tag])
            image, mask = self.load_image(image, border_ratio=border_ratio, to_tensor=to_tensor)
            images.append(image)
            masks.append(mask)

        # Jointly sort all three lists by view index so the outputs follow the
        # canonical front/left/back/right order regardless of dict ordering.
        zipped_lists = zip(view_idxs, images, masks)
        sorted_zipped_lists = sorted(zipped_lists)
        view_idxs, images, masks = zip(*sorted_zipped_lists)

        # NOTE(review): torch.cat presumes to_tensor=True; numpy inputs
        # (to_tensor=False) would fail here — confirm intended usage.
        image = torch.cat(images, 0).unsqueeze(0)
        mask = torch.cat(masks, 0).unsqueeze(0)
        outputs = {
            'image': image,
            'mask': mask,
            'view_idxs': view_idxs
        }
        return outputs
+
+
# Registry mapping processor version tags to their implementations; consumers
# look up by tag and fall back to DEFAULT_IMAGEPROCESSOR.
IMAGE_PROCESSORS = {
    "v2": ImageProcessorV2,
    'mv_v2': MVImageProcessorV2,
}

DEFAULT_IMAGEPROCESSOR = 'v2'
diff --git a/hy3dgen/shapegen/schedulers.py b/hy3dgen/shapegen/schedulers.py
new file mode 100644
index 0000000000000000000000000000000000000000..54eb2e57696d57fd0e567f2c341f7ced2970955c
--- /dev/null
+++ b/hy3dgen/shapegen/schedulers.py
@@ -0,0 +1,480 @@
+# Copyright 2024 Stability AI, Katherine Crowson and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the respective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+import math
+from dataclasses import dataclass
+from typing import List, Optional, Tuple, Union
+
+import numpy as np
+import torch
+from diffusers.configuration_utils import ConfigMixin, register_to_config
+from diffusers.schedulers.scheduling_utils import SchedulerMixin
+from diffusers.utils import BaseOutput, logging
+
+logger = logging.get_logger(__name__) # pylint: disable=invalid-name
+
+
@dataclass
class FlowMatchEulerDiscreteSchedulerOutput(BaseOutput):
    """
    Output class for the scheduler's `step` function output.

    Args:
        prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the
            denoising loop.
    """

    # Sample advanced by one Euler step; feed back as the next model input.
    prev_sample: torch.FloatTensor
+
+
class FlowMatchEulerDiscreteScheduler(SchedulerMixin, ConfigMixin):
    """
    NOTE: this is very similar to diffusers.FlowMatchEulerDiscreteScheduler. Except our timesteps are reversed

    Euler scheduler.

    This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic
    methods the library implements for all schedulers such as loading and saving.

    Args:
        num_train_timesteps (`int`, defaults to 1000):
            The number of diffusion steps to train the model.
        timestep_spacing (`str`, defaults to `"linspace"`):
            The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and
            Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.
        shift (`float`, defaults to 1.0):
            The shift value for the timestep schedule.
    """

    _compatibles = []
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        shift: float = 1.0,
        use_dynamic_shifting=False,
    ):
        # Ascending 1..num_train_timesteps — reversed relative to stock
        # diffusers, which counts down (see class NOTE above).
        timesteps = np.linspace(1, num_train_timesteps, num_train_timesteps, dtype=np.float32).copy()
        timesteps = torch.from_numpy(timesteps).to(dtype=torch.float32)

        sigmas = timesteps / num_train_timesteps
        if not use_dynamic_shifting:
            # when use_dynamic_shifting is True, we apply the timestep shifting on the fly based on the image resolution
            sigmas = shift * sigmas / (1 + (shift - 1) * sigmas)

        self.timesteps = sigmas * num_train_timesteps

        self._step_index = None
        self._begin_index = None

        self.sigmas = sigmas.to("cpu")  # to avoid too much CPU/GPU communication
        # NOTE(review): with the ascending schedule above, sigmas[-1] is the
        # largest value and sigmas[0] the smallest, so these names carry the
        # opposite numeric meaning from stock diffusers — set_timesteps below
        # relies on exactly this pairing; confirm before reusing elsewhere.
        self.sigma_min = self.sigmas[-1].item()
        self.sigma_max = self.sigmas[0].item()

    @property
    def step_index(self):
        """
        The index counter for current timestep. It will increase 1 after each scheduler step.
        """
        return self._step_index

    @property
    def begin_index(self):
        """
        The index for the first timestep. It should be set from pipeline with `set_begin_index` method.
        """
        return self._begin_index

    # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index
    def set_begin_index(self, begin_index: int = 0):
        """
        Sets the begin index for the scheduler. This function should be run from pipeline before the inference.

        Args:
            begin_index (`int`):
                The begin index for the scheduler.
        """
        self._begin_index = begin_index

    def scale_noise(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
        noise: Optional[torch.FloatTensor] = None,
    ) -> torch.FloatTensor:
        """
        Forward process in flow-matching

        Args:
            sample (`torch.FloatTensor`):
                The input sample.
            timestep (`int`, *optional*):
                The current timestep in the diffusion chain.

        Returns:
            `torch.FloatTensor`:
                A scaled input sample.
        """
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=sample.device, dtype=sample.dtype)

        if sample.device.type == "mps" and torch.is_floating_point(timestep):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(sample.device, dtype=torch.float32)
            timestep = timestep.to(sample.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(sample.device)
            timestep = timestep.to(sample.device)

        # self.begin_index is None when scheduler is used for training, or pipeline does not implement set_begin_index
        if self.begin_index is None:
            step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timestep]
        elif self.step_index is not None:
            # add_noise is called after first denoising step (for inpainting)
            step_indices = [self.step_index] * timestep.shape[0]
        else:
            # add noise is called before first denoising step to create initial latent(img2img)
            step_indices = [self.begin_index] * timestep.shape[0]

        sigma = sigmas[step_indices].flatten()
        # Broadcast sigma over the sample's trailing dimensions.
        while len(sigma.shape) < len(sample.shape):
            sigma = sigma.unsqueeze(-1)

        # Linear interpolation between data and noise (flow-matching forward).
        sample = sigma * noise + (1.0 - sigma) * sample

        return sample

    def _sigma_to_t(self, sigma):
        # Timesteps are sigmas scaled by the training horizon.
        return sigma * self.config.num_train_timesteps

    def time_shift(self, mu: float, sigma: float, t: torch.Tensor):
        # Resolution-dependent shifting of the sigma schedule.
        # NOTE(review): `t` may be a numpy array when called from
        # set_timesteps; the expression is valid for both.
        return math.exp(mu) / (math.exp(mu) + (1 / t - 1) ** sigma)

    def set_timesteps(
        self,
        num_inference_steps: int = None,
        device: Union[str, torch.device] = None,
        sigmas: Optional[List[float]] = None,
        mu: Optional[float] = None,
    ):
        """
        Sets the discrete timesteps used for the diffusion chain (to be run before inference).

        Args:
            num_inference_steps (`int`):
                The number of diffusion steps used when generating samples with a pre-trained model.
            device (`str` or `torch.device`, *optional*):
                The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
        """

        if self.config.use_dynamic_shifting and mu is None:
            raise ValueError(" you have a pass a value for `mu` when `use_dynamic_shifting` is set to be `True`")

        if sigmas is None:
            self.num_inference_steps = num_inference_steps
            # Span the sigma range registered at construction time.
            timesteps = np.linspace(
                self._sigma_to_t(self.sigma_max), self._sigma_to_t(self.sigma_min), num_inference_steps
            )

            sigmas = timesteps / self.config.num_train_timesteps

        if self.config.use_dynamic_shifting:
            sigmas = self.time_shift(mu, 1.0, sigmas)
        else:
            sigmas = self.config.shift * sigmas / (1 + (self.config.shift - 1) * sigmas)

        sigmas = torch.from_numpy(sigmas).to(dtype=torch.float32, device=device)
        timesteps = sigmas * self.config.num_train_timesteps

        self.timesteps = timesteps.to(device=device)
        # Append a terminal sigma of 1 (not 0 as in stock diffusers) because
        # this schedule runs in reverse; step() reads sigmas[i + 1].
        self.sigmas = torch.cat([sigmas, torch.ones(1, device=sigmas.device)])

        self._step_index = None
        self._begin_index = None

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        pos = 1 if len(indices) > 1 else 0

        return indices[pos].item()

    def _init_step_index(self, timestep):
        # Lazily resolve the step index from the timestep on the first step(),
        # unless the pipeline pinned one via set_begin_index.
        if self.begin_index is None:
            if isinstance(timestep, torch.Tensor):
                timestep = timestep.to(self.timesteps.device)
            self._step_index = self.index_for_timestep(timestep)
        else:
            self._step_index = self._begin_index

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
        sample: torch.FloatTensor,
        s_churn: float = 0.0,
        s_tmin: float = 0.0,
        s_tmax: float = float("inf"),
        s_noise: float = 1.0,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[FlowMatchEulerDiscreteSchedulerOutput, Tuple]:
        """
        Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
        process from the learned model outputs (most often the predicted noise).

        Args:
            model_output (`torch.FloatTensor`):
                The direct output from learned diffusion model.
            timestep (`float`):
                The current discrete timestep in the diffusion chain.
            sample (`torch.FloatTensor`):
                A current instance of a sample created by the diffusion process.
            s_churn (`float`):
            s_tmin  (`float`):
            s_tmax  (`float`):
            s_noise (`float`, defaults to 1.0):
                Scaling factor for noise added to the sample.
            generator (`torch.Generator`, *optional*):
                A random number generator.
            return_dict (`bool`):
                Whether or not to return a [`~schedulers.scheduling_euler_discrete.EulerDiscreteSchedulerOutput`] or
                tuple.

        Returns:
            [`~schedulers.scheduling_euler_discrete.EulerDiscreteSchedulerOutput`] or `tuple`:
                If return_dict is `True`, [`~schedulers.scheduling_euler_discrete.EulerDiscreteSchedulerOutput`] is
                returned, otherwise a tuple is returned where the first element is the sample tensor.
        """

        if (
            isinstance(timestep, int)
            or isinstance(timestep, torch.IntTensor)
            or isinstance(timestep, torch.LongTensor)
        ):
            raise ValueError(
                (
                    "Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to"
                    " `EulerDiscreteScheduler.step()` is not supported. Make sure to pass"
                    " one of the `scheduler.timesteps` as a timestep."
                ),
            )

        if self.step_index is None:
            self._init_step_index(timestep)

        # Upcast to avoid precision issues when computing prev_sample
        sample = sample.to(torch.float32)

        sigma = self.sigmas[self.step_index]
        sigma_next = self.sigmas[self.step_index + 1]

        # Explicit Euler update of the flow ODE: x += dsigma * v_pred.
        prev_sample = sample + (sigma_next - sigma) * model_output

        # Cast sample back to model compatible dtype
        prev_sample = prev_sample.to(model_output.dtype)

        # upon completion increase step index by one
        self._step_index += 1

        if not return_dict:
            return (prev_sample,)

        return FlowMatchEulerDiscreteSchedulerOutput(prev_sample=prev_sample)

    def __len__(self):
        return self.config.num_train_timesteps
+
+
@dataclass
class ConsistencyFlowMatchEulerDiscreteSchedulerOutput(BaseOutput):
    """Output of ConsistencyFlowMatchEulerDiscreteScheduler.step."""

    # Sample advanced one Euler step along the flow.
    prev_sample: torch.FloatTensor
    # Single-shot prediction of the fully denoised sample from the current state.
    pred_original_sample: torch.FloatTensor
+
+
class ConsistencyFlowMatchEulerDiscreteScheduler(SchedulerMixin, ConfigMixin):
    """Flow-matching Euler scheduler discretized on a fixed PCM-style grid of
    `pcm_timesteps` sigma values; `step` also exposes a one-shot denoised
    prediction (`pred_original_sample`) for consistency-model style sampling."""

    _compatibles = []
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        pcm_timesteps: int = 50,
    ):
        # Uniform sigma grid over [0, 1], subsampled at `pcm_timesteps`
        # evenly spaced indices (always including index 0).
        sigmas = np.linspace(0, 1, num_train_timesteps)
        step_ratio = num_train_timesteps // pcm_timesteps

        euler_timesteps = (np.arange(1, pcm_timesteps) * step_ratio).round().astype(np.int64) - 1
        euler_timesteps = np.asarray([0] + euler_timesteps.tolist())

        self.euler_timesteps = euler_timesteps
        self.sigmas = sigmas[self.euler_timesteps]
        self.sigmas = torch.from_numpy((self.sigmas.copy()))
        self.timesteps = self.sigmas * num_train_timesteps
        self._step_index = None
        self._begin_index = None
        self.sigmas = self.sigmas.to("cpu")  # to avoid too much CPU/GPU communication

    @property
    def step_index(self):
        """
        The index counter for current timestep. It will increase 1 after each scheduler step.
        """
        return self._step_index

    @property
    def begin_index(self):
        """
        The index for the first timestep. It should be set from pipeline with `set_begin_index` method.
        """
        return self._begin_index

    # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index
    def set_begin_index(self, begin_index: int = 0):
        """
        Sets the begin index for the scheduler. This function should be run from pipeline before the inference.

        Args:
            begin_index (`int`):
                The begin index for the scheduler.
        """
        self._begin_index = begin_index

    def _sigma_to_t(self, sigma):
        # Timesteps are sigmas scaled by the training horizon.
        return sigma * self.config.num_train_timesteps

    def set_timesteps(
        self,
        num_inference_steps: int = None,
        device: Union[str, torch.device] = None,
        sigmas: Optional[List[float]] = None,
    ):
        """
        Sets the discrete timesteps used for the diffusion chain (to be run before inference).

        Args:
            num_inference_steps (`int`):
                The number of diffusion steps used when generating samples with a pre-trained model.
            device (`str` or `torch.device`, *optional*):
                The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
        """
        # NOTE(review): a `sigmas` argument is only used here to infer the
        # step count; the actual sigma values come from the fixed PCM grid.
        self.num_inference_steps = num_inference_steps if num_inference_steps is not None else len(sigmas)
        # Pick evenly spaced entries of the precomputed PCM grid.
        inference_indices = np.linspace(
            0, self.config.pcm_timesteps, num=self.num_inference_steps, endpoint=False
        )
        inference_indices = np.floor(inference_indices).astype(np.int64)
        inference_indices = torch.from_numpy(inference_indices).long()

        self.sigmas_ = self.sigmas[inference_indices]
        timesteps = self.sigmas_ * self.config.num_train_timesteps
        self.timesteps = timesteps.to(device=device)
        # Append the terminal sigma of 1 read by step() as sigmas_[i + 1].
        self.sigmas_ = torch.cat(
            [self.sigmas_, torch.ones(1, device=self.sigmas_.device)]
        )

        self._step_index = None
        self._begin_index = None

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        pos = 1 if len(indices) > 1 else 0

        return indices[pos].item()

    def _init_step_index(self, timestep):
        # Lazily resolve the step index from the timestep on the first step(),
        # unless the pipeline pinned one via set_begin_index.
        if self.begin_index is None:
            if isinstance(timestep, torch.Tensor):
                timestep = timestep.to(self.timesteps.device)
            self._step_index = self.index_for_timestep(timestep)
        else:
            self._step_index = self._begin_index

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[ConsistencyFlowMatchEulerDiscreteSchedulerOutput, Tuple]:
        """Advance `sample` one Euler step and also return the one-shot
        denoised prediction; see the output dataclass for field meanings."""
        if (
            isinstance(timestep, int)
            or isinstance(timestep, torch.IntTensor)
            or isinstance(timestep, torch.LongTensor)
        ):
            raise ValueError(
                (
                    "Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to"
                    " `EulerDiscreteScheduler.step()` is not supported. Make sure to pass"
                    " one of the `scheduler.timesteps` as a timestep."
                ),
            )

        if self.step_index is None:
            self._init_step_index(timestep)

        # Upcast to avoid precision issues in the update below.
        sample = sample.to(torch.float32)

        sigma = self.sigmas_[self.step_index]
        sigma_next = self.sigmas_[self.step_index + 1]

        # Euler update of the flow ODE: x += dsigma * v_pred.
        prev_sample = sample + (sigma_next - sigma) * model_output
        prev_sample = prev_sample.to(model_output.dtype)

        # One-shot jump to the end of the schedule (sigma -> 1).
        pred_original_sample = sample + (1.0 - sigma) * model_output
        pred_original_sample = pred_original_sample.to(model_output.dtype)

        self._step_index += 1

        if not return_dict:
            return (prev_sample,)

        return ConsistencyFlowMatchEulerDiscreteSchedulerOutput(prev_sample=prev_sample,
                                                                pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
diff --git a/hy3dgen/shapegen/utils.py b/hy3dgen/shapegen/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ac28c8df5154107f271433f798e46439121ab7c
--- /dev/null
+++ b/hy3dgen/shapegen/utils.py
@@ -0,0 +1,126 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the respective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+import logging
+import os
+from functools import wraps
+
+import torch
+
+
def get_logger(name):
    """Return an INFO-level logger with a single console handler.

    Args:
        name (str): logger name passed to ``logging.getLogger``.

    Returns:
        logging.Logger: the (cached) logger for ``name``.
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)

    # Fix: only attach a handler the first time this name is requested.
    # ``logging.getLogger`` returns the same object for the same name, and
    # the original added a fresh StreamHandler on every call, so repeated
    # calls duplicated every log line.
    if not logger.handlers:
        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.INFO)

        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        console_handler.setFormatter(formatter)
        logger.addHandler(console_handler)
    return logger


# Module-level logger shared across this package.
# NOTE(review): 'shapgen' looks like a typo for 'shapegen', but renaming it
# would change runtime behavior for anyone configuring the logger by name.
logger = get_logger('hy3dgen.shapgen')
+
+
class synchronize_timer:
    """ Synchronized timer to count the inference time of `nn.Module.forward`.

    Timing only runs when the environment variable ``HY3DGEN_DEBUG`` is '1';
    otherwise entering/exiting the context is a no-op and the elapsed time
    stays ``None``.

    Supports both context manager and decorator usage.

    Example as context manager:
        ```python
        with synchronize_timer('name') as t:
            run()
        ```

    Example as decorator:
        ```python
        @synchronize_timer('Export to trimesh')
        def export_to_trimesh(mesh_output):
            pass
        ```
    """

    def __init__(self, name=None):
        # name: label used when logging the elapsed time; None disables logging.
        self.name = name
        # Fix: initialize the elapsed time so the accessor returned by
        # __enter__ yields None instead of raising AttributeError when
        # HY3DGEN_DEBUG is not enabled (the original never set self.time
        # in that case).
        self.time = None

    def __enter__(self):
        """Context manager entry: start timing."""
        if os.environ.get('HY3DGEN_DEBUG', '0') == '1':
            # CUDA events measure GPU time without forcing an early sync.
            self.start = torch.cuda.Event(enable_timing=True)
            self.end = torch.cuda.Event(enable_timing=True)
            self.start.record()
        # Accessor so callers can read the elapsed time (ms) after exit.
        return lambda: self.time

    def __exit__(self, exc_type, exc_value, exc_tb):
        """Context manager exit: stop timing and log results."""
        if os.environ.get('HY3DGEN_DEBUG', '0') == '1':
            self.end.record()
            torch.cuda.synchronize()
            self.time = self.start.elapsed_time(self.end)
            if self.name is not None:
                logger.info(f'{self.name} takes {self.time} ms')

    def __call__(self, func):
        """Decorator: wrap the function to time its execution."""

        @wraps(func)
        def wrapper(*args, **kwargs):
            with self:
                result = func(*args, **kwargs)
            return result

        return wrapper
+
+
def smart_load_model(
    model_path,
    subfolder,
    use_safetensors,
    variant,
):
    """Resolve config and checkpoint paths for a model, downloading the
    requested subfolder from the Hugging Face Hub when no local copy exists.

    Args:
        model_path (str): repo id (e.g. 'tencent/Hunyuan3D-2') or local name.
        subfolder (str): model subdirectory inside the repo/cache.
        use_safetensors (bool): pick 'model*.safetensors' over 'model*.ckpt'.
        variant (str | None): optional checkpoint variant inserted as
            'model.<variant>.<ext>'.

    Returns:
        tuple[str, str]: (config.yaml path, checkpoint path).

    Raises:
        RuntimeError: huggingface_hub is not installed and no local copy exists.
        FileNotFoundError: the model cannot be found locally after download.
    """
    original_model_path = model_path
    # First look for a local copy under $HY3DGEN_MODELS (default ~/.cache/hy3dgen).
    base_dir = os.environ.get('HY3DGEN_MODELS', '~/.cache/hy3dgen')
    model_path = os.path.expanduser(os.path.join(base_dir, model_path, subfolder))
    logger.info(f'Try to load model from local path: {model_path}')
    if not os.path.exists(model_path):
        logger.info('Model path not exists, try to download from huggingface')
        try:
            from huggingface_hub import snapshot_download
            # Only download the requested subfolder of the repo.
            path = snapshot_download(
                repo_id=original_model_path,
                allow_patterns=[f"{subfolder}/*"],
            )
            model_path = os.path.join(path, subfolder)
        except ImportError as err:
            logger.warning(
                "You need to install HuggingFace Hub to load models from the hub."
            )
            # Fix: chain the original ImportError so the root cause survives.
            raise RuntimeError(f"Model path {model_path} not found") from err
        # (The original also had `except Exception as e: raise e`, which is a
        # no-op re-raise and was removed; other exceptions still propagate.)

    if not os.path.exists(model_path):
        raise FileNotFoundError(f"Model path {original_model_path} not found")

    extension = 'ckpt' if not use_safetensors else 'safetensors'
    variant = '' if variant is None else f'.{variant}'
    ckpt_name = f'model{variant}.{extension}'
    config_path = os.path.join(model_path, 'config.yaml')
    ckpt_path = os.path.join(model_path, ckpt_name)
    return config_path, ckpt_path
diff --git a/hy3dgen/texgen/__init__.py b/hy3dgen/texgen/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6e6dfce6e2ea84adeebdd9fac6f2c6635cd401ab
--- /dev/null
+++ b/hy3dgen/texgen/__init__.py
@@ -0,0 +1,16 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the respective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+
+from .pipelines import Hunyuan3DPaintPipeline, Hunyuan3DTexGenConfig
diff --git a/hy3dgen/texgen/custom_rasterizer/custom_rasterizer/__init__.py b/hy3dgen/texgen/custom_rasterizer/custom_rasterizer/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..da0dc9a613b9c5f265067c2fe571cccc89e6d37d
--- /dev/null
+++ b/hy3dgen/texgen/custom_rasterizer/custom_rasterizer/__init__.py
@@ -0,0 +1,22 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the respective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+'''
+from .hierarchy import BuildHierarchy, BuildHierarchyWithColor
+from .io_obj import LoadObj, LoadObjWithTexture
+from .render import rasterize, interpolate
+'''
+from .io_glb import *
+from .io_obj import *
+from .render import *
diff --git a/hy3dgen/texgen/custom_rasterizer/custom_rasterizer/io_glb.py b/hy3dgen/texgen/custom_rasterizer/custom_rasterizer/io_glb.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d2894a792c03164a26506f8a39e89403136014e
--- /dev/null
+++ b/hy3dgen/texgen/custom_rasterizer/custom_rasterizer/io_glb.py
@@ -0,0 +1,238 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the repsective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+import base64
+import io
+import os
+
+import numpy as np
+from PIL import Image as PILImage
+from pygltflib import GLTF2
+from scipy.spatial.transform import Rotation as R
+
+
+# Function to extract buffer data
def get_buffer_data(gltf, buffer_view):
    """Return the raw bytes a glTF bufferView refers to."""
    source_buffer = gltf.buffers[buffer_view.buffer]
    raw = gltf.get_data_from_buffer_uri(source_buffer.uri)
    # byteOffset is optional in glTF and defaults to 0.
    start = buffer_view.byteOffset or 0
    return raw[start:start + buffer_view.byteLength]
+
+
+# Function to extract attribute data
def get_attribute_data(gltf, accessor_index):
    """Decode a glTF accessor into a writable (count, num_components) array.

    Handles optional accessor/bufferView byte offsets and interleaved
    (strided) vertex data. Tightly packed data is decoded in a single
    vectorized ``np.frombuffer`` call instead of a Python loop.
    """
    accessor = gltf.accessors[accessor_index]
    buffer_view = gltf.bufferViews[accessor.bufferView]
    buffer_data = get_buffer_data(gltf, buffer_view)

    # glTF componentType code -> numpy dtype.
    comptype = {5120: np.int8, 5121: np.uint8, 5122: np.int16, 5123: np.uint16, 5125: np.uint32, 5126: np.float32}
    dtype = comptype[accessor.componentType]

    # Accessor element type -> number of components per element.
    t2n = {'SCALAR': 1, 'VEC2': 2, 'VEC3': 3, 'VEC4': 4, 'MAT2': 4, 'MAT3': 9, 'MAT4': 16}
    num_components = t2n[accessor.type]

    byte_offset = accessor.byteOffset if accessor.byteOffset else 0
    item_size = np.dtype(dtype).itemsize
    tight_stride = num_components * item_size
    # byteStride is only set for interleaved data; default is tight packing.
    byte_stride = buffer_view.byteStride if buffer_view.byteStride else tight_stride
    count = accessor.count

    if byte_stride == tight_stride:
        # Contiguous elements: decode all of them at once. Copy to keep the
        # result writable, matching the behavior of the element-wise path.
        flat = np.frombuffer(buffer_data, dtype=dtype,
                             count=count * num_components, offset=byte_offset)
        return flat.reshape(count, num_components).copy()

    # Interleaved data: gather each element at its stride offset.
    attribute_data = np.zeros((count, num_components), dtype=dtype)
    for i in range(count):
        start = byte_offset + i * byte_stride
        attribute_data[i] = np.frombuffer(buffer_data[start:start + tight_stride], dtype=dtype)
    return attribute_data
+
+
+# Function to extract image data
def get_image_data(gltf, image, folder):
    """Return the raw encoded bytes of a glTF image.

    Supports base64 data URIs, external files (resolved relative to
    ``folder``), and images embedded in a bufferView.
    """
    if image.uri:
        if image.uri.startswith('data:'):
            # Inline base64-encoded data URI.
            header, encoded = image.uri.split(',', 1)
            data = base64.b64decode(encoded)
        else:
            # External image file, possibly relative to the glTF's folder.
            fn = image.uri
            if not os.path.isabs(fn):
                # os.path.join instead of '/'-concatenation for portability.
                fn = os.path.join(folder, fn)
            with open(fn, 'rb') as f:
                data = f.read()
    else:
        # Image embedded in the binary buffer.
        buffer_view = gltf.bufferViews[image.bufferView]
        data = get_buffer_data(gltf, buffer_view)
    return data
+
+
+# Function to convert triangle strip to triangles
def convert_triangle_strip_to_triangles(indices):
    """Expand a TRIANGLE_STRIP index list into explicit (n, 3) triangles.

    Every other triangle is flipped so winding order stays consistent.
    """
    tris = [
        [indices[k], indices[k + 1], indices[k + 2]] if k % 2 == 0
        else [indices[k], indices[k + 2], indices[k + 1]]
        for k in range(len(indices) - 2)
    ]
    return np.array(tris).reshape(-1, 3)
+
+
+# Function to convert triangle fan to triangles
def convert_triangle_fan_to_triangles(indices):
    """Expand a TRIANGLE_FAN index list into explicit (n, 3) triangles.

    Every triangle shares the first index as the fan's hub vertex.
    """
    tris = [[indices[0], indices[k], indices[k + 1]]
            for k in range(1, len(indices) - 1)]
    return np.array(tris).reshape(-1, 3)
+
+
+# Function to get the transformation matrix from a node
def get_node_transform(node):
    """Build a node's local 4x4 transform from its matrix or TRS fields."""
    if node.matrix:
        # glTF stores matrices column-major; transpose to row-major.
        return np.array(node.matrix).reshape(4, 4).T
    transform = np.eye(4)
    if node.translation:
        transform[:3, 3] = node.translation
    if node.rotation:
        # NOTE: scipy's from_quat expects scalar-last (x, y, z, w) — assumed
        # to match the quaternion layout stored in the node.
        transform[:3, :3] = R.from_quat(node.rotation).as_matrix()
    if node.scale:
        transform = transform @ np.diag(node.scale + [1])
    return transform
+
+
def get_world_transform(gltf, node_index, parents, world_transforms):
    """Compute (and cache) a node's world transform by walking up its parents.

    ``parents[i]`` holds the parent index, -1 for a root node, or -2 once
    node ``i``'s world transform has been cached in ``world_transforms``.
    """
    parent = parents[node_index]
    if parent == -2:  # already computed
        return world_transforms[node_index]

    local = get_node_transform(gltf.nodes[node_index])
    if parent == -1:
        # Root node: world transform is just the local one.
        world = local
    else:
        world = get_world_transform(gltf, parent, parents, world_transforms) @ local
    world_transforms[node_index] = world
    parents[node_index] = -2  # mark cached
    return world
+
+
def _load_texture_image(gltf, texture_index, images, folder):
    """Decode the image behind ``texture_index`` to RGB, cached by image index."""
    texture = gltf.textures[texture_index]
    image_index = texture.source
    if image_index not in images:
        image = gltf.images[image_index]
        image_data = get_image_data(gltf, image, folder)
        pil_image = PILImage.open(io.BytesIO(image_data))
        if pil_image.mode != 'RGB':
            pil_image = pil_image.convert('RGB')
        images[image_index] = pil_image
    return image_index


def LoadGlb(path):
    """Load a .glb file into a list of primitive dicts plus decoded textures.

    Each primitive dict may contain:
      'F'   (n, 3) triangle indices
      'V'   world-space vertex positions
      'VC'  per-vertex RGB colors (alpha dropped)
      'UV'  texture coordinates
      'TEX' key into the returned ``images`` dict
      'MC'  constant base color when no texture is present

    Returns:
        (primitives, images) where ``images`` maps image index -> PIL RGB image.
    """
    gltf = GLTF2().load(path)
    folder = os.path.dirname(path)

    primitives = []
    images = {}

    # Resolve the node hierarchy so vertices can be placed in world space.
    world_transforms = [np.identity(4) for _ in range(len(gltf.nodes))]
    parents = [-1] * len(gltf.nodes)
    for node_index, node in enumerate(gltf.nodes):
        for child in node.children:
            parents[child] = node_index

    for node_index, node in enumerate(gltf.nodes):
        if node.mesh is None:
            continue
        world_transform = get_world_transform(gltf, node_index, parents, world_transforms)
        mesh = gltf.meshes[node.mesh]
        for primitive in mesh.primitives:
            attributes = primitive.attributes.__dict__
            mode = primitive.mode if primitive.mode is not None else 4  # default: TRIANGLES
            result = {}
            if primitive.indices is not None:
                indices = get_attribute_data(gltf, primitive.indices)
                if mode == 4:  # TRIANGLES
                    face_indices = indices.reshape(-1, 3)
                elif mode == 5:  # TRIANGLE_STRIP
                    face_indices = convert_triangle_strip_to_triangles(indices)
                elif mode == 6:  # TRIANGLE_FAN
                    face_indices = convert_triangle_fan_to_triangles(indices)
                else:
                    # Points/lines and other modes are not supported.
                    continue
                result['F'] = face_indices

            # Vertex positions, transformed into world space.
            if 'POSITION' in attributes and attributes['POSITION'] is not None:
                positions = get_attribute_data(gltf, attributes['POSITION'])
                positions_homogeneous = np.hstack([positions, np.ones((positions.shape[0], 1))])
                result['V'] = (world_transform @ positions_homogeneous.T).T[:, :3]

            # Per-vertex colors (drop alpha if present).
            if 'COLOR_0' in attributes and attributes['COLOR_0'] is not None:
                colors = get_attribute_data(gltf, attributes['COLOR_0'])
                if colors.shape[-1] > 3:
                    colors = colors[..., :3]
                result['VC'] = colors

            # Texture coordinates.
            if 'TEXCOORD_0' in attributes and attributes['TEXCOORD_0'] is not None:
                result['UV'] = get_attribute_data(gltf, attributes['TEXCOORD_0'])

            if primitive.material is not None:
                material = gltf.materials[primitive.material]
                pbr = material.pbrMetallicRoughness
                # Prefer the base-color texture, fall back to the emissive
                # texture, then to a constant material color.
                if pbr is not None and pbr.baseColorTexture is not None:
                    result['TEX'] = _load_texture_image(
                        gltf, pbr.baseColorTexture.index, images, folder)
                elif material.emissiveTexture is not None:
                    result['TEX'] = _load_texture_image(
                        gltf, material.emissiveTexture.index, images, folder)
                else:
                    if pbr is not None:
                        result['MC'] = pbr.baseColorFactor
                    else:
                        result['MC'] = np.array([0.8, 0.8, 0.8], dtype=np.float32)

            primitives.append(result)

    return primitives, images
+
+
def RotatePrimitives(primitives, transform):
    """Apply a linear transform to every primitive's vertices, in place."""
    for prim in primitives:
        if 'V' in prim:
            prim['V'] = prim['V'] @ transform.T
+
+
if __name__ == '__main__':
    # Smoke test: parse a sample GLB file when run as a script.
    path = 'data/test.glb'
    LoadGlb(path)
diff --git a/hy3dgen/texgen/custom_rasterizer/custom_rasterizer/io_obj.py b/hy3dgen/texgen/custom_rasterizer/custom_rasterizer/io_obj.py
new file mode 100644
index 0000000000000000000000000000000000000000..b76e2367853d8c7b0a64b503a46e9d3f8629c859
--- /dev/null
+++ b/hy3dgen/texgen/custom_rasterizer/custom_rasterizer/io_obj.py
@@ -0,0 +1,66 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the respective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+import cv2
+import numpy as np
+
+
def LoadObj(fn):
    """Load a triangle-only Wavefront OBJ file.

    Only 'v' (position) and 'f' (face) records are parsed; faces must be
    triangles with plain vertex indices (1-based in the file, converted
    to 0-based here).

    Args:
        fn: Path to the .obj file.

    Returns:
        (vertices, faces): float32 array of shape (nv, 3) and int32 array
        of shape (nf, 3).
    """
    vertices = []
    faces = []
    # Read all lines inside a context manager so the handle is closed
    # promptly (the original leaked the open file object).
    with open(fn) as obj_file:
        lines = [line.strip() for line in obj_file]
    for line in lines:
        words = line.split()
        if not words:
            continue
        if words[0] == 'v':
            vertices.append([float(words[i]) for i in range(1, 4)])
        elif words[0] == 'f':
            # OBJ indices are 1-based.
            faces.append([int(words[i]) - 1 for i in range(1, 4)])

    return np.array(vertices).astype('float32'), np.array(faces).astype('int32')
+
+
def LoadObjWithTexture(fn, tex_fn):
    """Load an OBJ file with UVs plus its texture image.

    Faces may be polygons; they are fan-triangulated. Face corners must be
    'v/vt' (position index / texture index) pairs.

    Args:
        fn: Path to the .obj file.
        tex_fn: Path to the texture image file.

    Returns:
        (vertices, vertex_textures, faces, face_textures, tex_image) with
        float32/int32 arrays and ``tex_image`` as an RGB numpy image.
    """
    vertices = []
    vertex_textures = []
    faces = []
    face_textures = []
    # Context manager fixes the leaked file handle of the original version.
    with open(fn) as obj_file:
        lines = [line.strip() for line in obj_file]
    for line in lines:
        words = [w for w in line.split(' ') if w != '']
        if len(words) == 0:
            continue
        if words[0] == 'v':
            vertices.append([float(w) for w in words[1:]])
        elif words[0] == 'vt':
            vertex_textures.append([float(w) for w in words[1:]])
        elif words[0] == 'f':
            pos_ids = []
            tex_ids = []
            for token in words[1:]:
                parts = token.split('/')
                pos_ids.append(int(parts[0]) - 1)  # OBJ indices are 1-based
                tex_ids.append(int(parts[1]) - 1)
            # Fan-triangulate polygons with more than three corners.
            for i in range(2, len(pos_ids)):
                faces.append([pos_ids[0], pos_ids[i - 1], pos_ids[i]])
                face_textures.append([tex_ids[0], tex_ids[i - 1], tex_ids[i]])

    # OpenCV loads BGR; convert to RGB to match the rest of the pipeline.
    tex_image = cv2.cvtColor(cv2.imread(tex_fn), cv2.COLOR_BGR2RGB)
    return (np.array(vertices).astype('float32'),
            np.array(vertex_textures).astype('float32'),
            np.array(faces).astype('int32'),
            np.array(face_textures).astype('int32'),
            tex_image)
diff --git a/hy3dgen/texgen/custom_rasterizer/custom_rasterizer/render.py b/hy3dgen/texgen/custom_rasterizer/custom_rasterizer/render.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec767f80a0a8a72fb3130e5ed21be091ebcf0a49
--- /dev/null
+++ b/hy3dgen/texgen/custom_rasterizer/custom_rasterizer/render.py
@@ -0,0 +1,31 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the respective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+import custom_rasterizer_kernel
+import torch
+
+
def rasterize(pos, tri, resolution, clamp_depth=None, use_depth_prior=0):
    """Rasterize a triangle mesh into face indices and barycentric weights.

    Args:
        pos: Vertex positions; only ``pos[0]`` (the first batch entry) is
            rasterized. Must be on the same device as ``tri``.
        tri: Triangle index tensor.
        resolution: (height, width) of the output image.
        clamp_depth: Optional depth-prior tensor; an empty tensor is used
            when None.
        use_depth_prior: Non-zero to enable the depth prior in the kernel.

    Returns:
        (findices, barycentric) as produced by the CUDA kernel — per-pixel
        face indices (0 appears to mean "no face", given how ``interpolate``
        treats it) and barycentric weights.
    """
    # The original default was `clamp_depth=torch.zeros(0)`: a tensor
    # allocated once at import time and shared between all calls (the
    # mutable-default-argument pitfall). Use a None sentinel instead.
    if clamp_depth is None:
        clamp_depth = torch.zeros(0)
    assert pos.device == tri.device
    findices, barycentric = custom_rasterizer_kernel.rasterize_image(
        pos[0], tri, clamp_depth, resolution[1], resolution[0], 1e-6, use_depth_prior)
    return findices, barycentric
+
+
def interpolate(col, findices, barycentric, tri):
    """Blend per-vertex attributes at rasterized pixels.

    ``findices`` holds face index + 1, with 0 treated as background; those
    pixels are remapped onto face 0 before the gather.
    """
    # Map 1-based face indices to 0-based; background (0) also maps to 0.
    face_ids = findices - 1 + (findices == 0)
    corner_vals = col[0, tri.long()[face_ids.long()]]
    weighted = barycentric.view(*barycentric.shape, 1) * corner_vals
    blended = torch.sum(weighted, axis=-2)
    return blended.view(1, *blended.shape)
diff --git a/hy3dgen/texgen/custom_rasterizer/setup.py b/hy3dgen/texgen/custom_rasterizer/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..fdcd04b8fb635495d6d3ccb2d70bed0cb559e872
--- /dev/null
+++ b/hy3dgen/texgen/custom_rasterizer/setup.py
@@ -0,0 +1,26 @@
from setuptools import setup, find_packages
from torch.utils.cpp_extension import BuildExtension, CUDAExtension

# build custom rasterizer
# build with `python setup.py install`
# nvcc is needed

# CUDA extension combining C++ rasterization/grid-neighbor sources with a
# .cu kernel; compiled via torch's BuildExtension (which invokes nvcc).
custom_rasterizer_module = CUDAExtension('custom_rasterizer_kernel', [
    'lib/custom_rasterizer_kernel/rasterizer.cpp',
    'lib/custom_rasterizer_kernel/grid_neighbor.cpp',
    'lib/custom_rasterizer_kernel/rasterizer_gpu.cu',
])

setup(
    packages=find_packages(),
    version='0.1',
    name='custom_rasterizer',
    include_package_data=True,
    package_dir={'': '.'},
    ext_modules=[
        custom_rasterizer_module,
    ],
    cmdclass={
        'build_ext': BuildExtension
    }
)
diff --git a/hy3dgen/texgen/differentiable_renderer/__init__.py b/hy3dgen/texgen/differentiable_renderer/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3cde306b834c182f04339203fb5a78d846ac613b
--- /dev/null
+++ b/hy3dgen/texgen/differentiable_renderer/__init__.py
@@ -0,0 +1,13 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the respective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
\ No newline at end of file
diff --git a/hy3dgen/texgen/differentiable_renderer/camera_utils.py b/hy3dgen/texgen/differentiable_renderer/camera_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..eeb66e43150ae5136f69b9ddd34b97a0ab32c97f
--- /dev/null
+++ b/hy3dgen/texgen/differentiable_renderer/camera_utils.py
@@ -0,0 +1,106 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the repsective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+import math
+
+import numpy as np
+import torch
+
+
def transform_pos(mtx, pos, keepdim=False):
    """Transform positions by a 4x4 matrix, homogenizing 3D input first.

    Args:
        mtx: 4x4 transform, numpy array or torch tensor.
        pos: (N, 3) or (N, 4) positions.
        keepdim: If False, prepend a batch dimension to the result.
    """
    if isinstance(mtx, np.ndarray):
        matrix = torch.from_numpy(mtx).to(pos.device)
    else:
        matrix = mtx
    if pos.shape[-1] == 3:
        # Append w=1 to make the positions homogeneous.
        ones = torch.ones([pos.shape[0], 1]).to(pos.device)
        homogeneous = torch.cat([pos, ones], axis=1)
    else:
        homogeneous = pos

    transformed = torch.matmul(homogeneous, matrix.t())
    return transformed if keepdim else transformed[None, ...]
+
+
def get_mv_matrix(elev, azim, camera_distance, center=None):
    """Build a world-to-camera (model-view) matrix from spherical angles.

    Args:
        elev: Elevation in degrees (sign is flipped internally).
        azim: Azimuth in degrees (offset by 90 internally).
        camera_distance: Distance from the camera to ``center``.
        center: Look-at point; the origin when None.

    Returns:
        4x4 float32 world-to-camera matrix.
    """
    elev = -elev
    azim += 90

    elev_rad = math.radians(elev)
    azim_rad = math.radians(azim)

    # Spherical -> Cartesian camera position (world up is +Z).
    camera_position = np.array([camera_distance * math.cos(elev_rad) * math.cos(azim_rad),
                                camera_distance * math.cos(elev_rad) * math.sin(azim_rad),
                                camera_distance * math.sin(elev_rad)])

    center = np.array([0, 0, 0]) if center is None else np.array(center)

    # Orthonormal camera frame: forward (lookat), right, corrected up.
    lookat = center - camera_position
    lookat = lookat / np.linalg.norm(lookat)
    right = np.cross(lookat, np.array([0, 0, 1.0]))
    right = right / np.linalg.norm(right)
    up = np.cross(right, lookat)
    up = up / np.linalg.norm(up)

    # Camera-to-world: rotation columns plus the camera position.
    c2w = np.concatenate(
        [np.stack([right, up, -lookat], axis=-1), camera_position[:, None]], axis=-1)

    # Invert the rigid transform to obtain world-to-camera.
    w2c = np.zeros((4, 4))
    rot_t = np.transpose(c2w[:3, :3], (1, 0))
    w2c[:3, :3] = rot_t
    w2c[:3, 3:] = -np.matmul(rot_t, c2w[:3, 3:])
    w2c[3, 3] = 1.0

    return w2c.astype(np.float32)
+
+
def get_orthographic_projection_matrix(
        left=-1, right=1, bottom=-1, top=1, near=0, far=2):
    """Build an OpenGL-style orthographic projection matrix.

    Args:
        left, right: X extents of the view volume.
        bottom, top: Y extents of the view volume.
        near, far: Z clip-plane distances.

    Returns:
        4x4 float32 orthographic projection matrix.
    """
    proj = np.eye(4, dtype=np.float32)
    proj[0, 0] = 2 / (right - left)
    proj[1, 1] = 2 / (top - bottom)
    proj[2, 2] = -2 / (far - near)
    proj[0, 3] = -(right + left) / (right - left)
    proj[1, 3] = -(top + bottom) / (top - bottom)
    proj[2, 3] = -(far + near) / (far - near)
    return proj
+
+
def get_perspective_projection_matrix(fovy, aspect_wh, near, far):
    """Build an OpenGL-style perspective projection matrix.

    Args:
        fovy: Vertical field of view in degrees.
        aspect_wh: Width / height aspect ratio.
        near, far: Positive near/far clip-plane distances.

    Returns:
        4x4 float32 perspective projection matrix.
    """
    half_tan = math.tan(math.radians(fovy) / 2.0)
    proj = np.zeros((4, 4), dtype=np.float32)
    proj[0, 0] = 1.0 / (half_tan * aspect_wh)
    proj[1, 1] = 1.0 / half_tan
    proj[2, 2] = -(far + near) / (far - near)
    proj[2, 3] = -2.0 * far * near / (far - near)
    proj[3, 2] = -1.0
    return proj
diff --git a/hy3dgen/texgen/differentiable_renderer/compile_mesh_painter.bat b/hy3dgen/texgen/differentiable_renderer/compile_mesh_painter.bat
new file mode 100644
index 0000000000000000000000000000000000000000..00ce6e82b82c5bf967a3a711f94eb1de94fe2700
--- /dev/null
+++ b/hy3dgen/texgen/differentiable_renderer/compile_mesh_painter.bat
@@ -0,0 +1,3 @@
REM Query pybind11 for its include flags, then build mesh_processor.cpp as a
REM Windows extension (.pyd) with g++; links against Python 3.12.
FOR /F "tokens=*" %%i IN ('python -m pybind11 --includes') DO SET PYINCLUDES=%%i
echo %PYINCLUDES%
g++ -O3 -Wall -shared -std=c++11 -fPIC %PYINCLUDES% mesh_processor.cpp -o mesh_processor.pyd -lpython3.12
\ No newline at end of file
diff --git a/hy3dgen/texgen/differentiable_renderer/mesh_processor.cpp b/hy3dgen/texgen/differentiable_renderer/mesh_processor.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..eb4ace39a72faab6e92462c3a3becddd1710098c
--- /dev/null
+++ b/hy3dgen/texgen/differentiable_renderer/mesh_processor.cpp
@@ -0,0 +1,161 @@
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace py = pybind11;
+using namespace std;
+
+std::pair,
+ py::array_t> meshVerticeInpaint_smooth(py::array_t texture,
+py::array_t mask,
+ py::array_t vtx_pos, py::array_t vtx_uv,
+ py::array_t pos_idx, py::array_t uv_idx) {
+ auto texture_buf = texture.request();
+ auto mask_buf = mask.request();
+ auto vtx_pos_buf = vtx_pos.request();
+ auto vtx_uv_buf = vtx_uv.request();
+ auto pos_idx_buf = pos_idx.request();
+ auto uv_idx_buf = uv_idx.request();
+
+ int texture_height = texture_buf.shape[0];
+ int texture_width = texture_buf.shape[1];
+ int texture_channel = texture_buf.shape[2];
+ float* texture_ptr = static_cast(texture_buf.ptr);
+ uint8_t* mask_ptr = static_cast(mask_buf.ptr);
+
+ int vtx_num = vtx_pos_buf.shape[0];
+ float* vtx_pos_ptr = static_cast(vtx_pos_buf.ptr);
+ float* vtx_uv_ptr = static_cast(vtx_uv_buf.ptr);
+ int* pos_idx_ptr = static_cast(pos_idx_buf.ptr);
+ int* uv_idx_ptr = static_cast(uv_idx_buf.ptr);
+
+ vector vtx_mask(vtx_num, 0.0f);
+ vector> vtx_color(vtx_num, vector(texture_channel, 0.0f));
+ vector uncolored_vtxs;
+
+ vector> G(vtx_num);
+
+ for (int i = 0; i < uv_idx_buf.shape[0]; ++i) {
+ for (int k = 0; k < 3; ++k) {
+ int vtx_uv_idx = uv_idx_ptr[i * 3 + k];
+ int vtx_idx = pos_idx_ptr[i * 3 + k];
+ int uv_v = round(vtx_uv_ptr[vtx_uv_idx * 2] * (texture_width - 1));
+ int uv_u = round((1.0 - vtx_uv_ptr[vtx_uv_idx * 2 + 1]) * (texture_height - 1));
+
+ if (mask_ptr[uv_u * texture_width + uv_v] > 0) {
+ vtx_mask[vtx_idx] = 1.0f;
+ for (int c = 0; c < texture_channel; ++c) {
+ vtx_color[vtx_idx][c] = texture_ptr[(uv_u * texture_width + uv_v) * texture_channel + c];
+ }
+ }else{
+ uncolored_vtxs.push_back(vtx_idx);
+ }
+
+ G[pos_idx_ptr[i * 3 + k]].push_back(pos_idx_ptr[i * 3 + (k + 1) % 3]);
+ }
+ }
+
+ int smooth_count = 2;
+ int last_uncolored_vtx_count = 0;
+ while (smooth_count>0) {
+ int uncolored_vtx_count = 0;
+
+ for (int vtx_idx : uncolored_vtxs) {
+
+ vector sum_color(texture_channel, 0.0f);
+ float total_weight = 0.0f;
+
+ array vtx_0 = {vtx_pos_ptr[vtx_idx * 3],
+vtx_pos_ptr[vtx_idx * 3 + 1], vtx_pos_ptr[vtx_idx * 3 + 2]};
+ for (int connected_idx : G[vtx_idx]) {
+ if (vtx_mask[connected_idx] > 0) {
+ array vtx1 = {vtx_pos_ptr[connected_idx * 3],
+ vtx_pos_ptr[connected_idx * 3 + 1], vtx_pos_ptr[connected_idx * 3 + 2]};
+ float dist_weight = 1.0f / max(sqrt(pow(vtx_0[0] - vtx1[0], 2) + pow(vtx_0[1] - vtx1[1], 2) + \
+ pow(vtx_0[2] - vtx1[2], 2)), 1E-4);
+ dist_weight = dist_weight * dist_weight;
+ for (int c = 0; c < texture_channel; ++c) {
+ sum_color[c] += vtx_color[connected_idx][c] * dist_weight;
+ }
+ total_weight += dist_weight;
+ }
+ }
+
+ if (total_weight > 0.0f) {
+ for (int c = 0; c < texture_channel; ++c) {
+ vtx_color[vtx_idx][c] = sum_color[c] / total_weight;
+ }
+ vtx_mask[vtx_idx] = 1.0f;
+ } else {
+ uncolored_vtx_count++;
+ }
+
+ }
+
+ if(last_uncolored_vtx_count==uncolored_vtx_count){
+ smooth_count--;
+ }else{
+ smooth_count++;
+ }
+ last_uncolored_vtx_count = uncolored_vtx_count;
+ }
+
+ // Create new arrays for the output
+ py::array_t new_texture(texture_buf.size);
+ py::array_t new_mask(mask_buf.size);
+
+ auto new_texture_buf = new_texture.request();
+ auto new_mask_buf = new_mask.request();
+
+ float* new_texture_ptr = static_cast(new_texture_buf.ptr);
+ uint8_t* new_mask_ptr = static_cast(new_mask_buf.ptr);
+ // Copy original texture and mask to new arrays
+ std::copy(texture_ptr, texture_ptr + texture_buf.size, new_texture_ptr);
+ std::copy(mask_ptr, mask_ptr + mask_buf.size, new_mask_ptr);
+
+ for (int face_idx = 0; face_idx < uv_idx_buf.shape[0]; ++face_idx) {
+ for (int k = 0; k < 3; ++k) {
+ int vtx_uv_idx = uv_idx_ptr[face_idx * 3 + k];
+ int vtx_idx = pos_idx_ptr[face_idx * 3 + k];
+
+ if (vtx_mask[vtx_idx] == 1.0f) {
+ int uv_v = round(vtx_uv_ptr[vtx_uv_idx * 2] * (texture_width - 1));
+ int uv_u = round((1.0 - vtx_uv_ptr[vtx_uv_idx * 2 + 1]) * (texture_height - 1));
+
+ for (int c = 0; c < texture_channel; ++c) {
+ new_texture_ptr[(uv_u * texture_width + uv_v) * texture_channel + c] = vtx_color[vtx_idx][c];
+ }
+ new_mask_ptr[uv_u * texture_width + uv_v] = 255;
+ }
+ }
+ }
+
+ // Reshape the new arrays to match the original texture and mask shapes
+ new_texture.resize({texture_height, texture_width, 3});
+ new_mask.resize({texture_height, texture_width});
+ return std::make_pair(new_texture, new_mask);
+}
+
+
+std::pair, py::array_t> meshVerticeInpaint(py::array_t texture,
+ py::array_t mask,
+ py::array_t vtx_pos, py::array_t vtx_uv,
+ py::array_t pos_idx, py::array_t uv_idx, const std::string& method = "smooth") {
+ if (method == "smooth") {
+ return meshVerticeInpaint_smooth(texture, mask, vtx_pos, vtx_uv, pos_idx, uv_idx);
+ } else {
+ throw std::invalid_argument("Invalid method. Use 'smooth' or 'forward'.");
+ }
+}
+
+PYBIND11_MODULE(mesh_processor, m) {
+ m.def("meshVerticeInpaint", &meshVerticeInpaint, "A function to process mesh",
+ py::arg("texture"), py::arg("mask"),
+ py::arg("vtx_pos"), py::arg("vtx_uv"),
+ py::arg("pos_idx"), py::arg("uv_idx"),
+ py::arg("method") = "smooth");
+}
\ No newline at end of file
diff --git a/hy3dgen/texgen/differentiable_renderer/mesh_processor.py b/hy3dgen/texgen/differentiable_renderer/mesh_processor.py
new file mode 100644
index 0000000000000000000000000000000000000000..dc546bb5bfe85cade6af3265c8d4f316162810f7
--- /dev/null
+++ b/hy3dgen/texgen/differentiable_renderer/mesh_processor.py
@@ -0,0 +1,84 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the respective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+import numpy as np
+
def meshVerticeInpaint_smooth(texture, mask, vtx_pos, vtx_uv, pos_idx, uv_idx):
    """Inpaint unmasked texels by propagating vertex colors along mesh edges.

    Vertices whose UV texel is masked take their color from the texture;
    the rest iteratively average inverse-square-distance-weighted colors of
    adjacent colored vertices. The result is splatted back into copies of
    the texture and mask.

    Returns:
        (new_texture, new_mask): copies of the inputs with inpainted texels
        written and their mask entries set to 255.
    """
    tex_h, tex_w, tex_c = texture.shape
    n_vtx = vtx_pos.shape[0]

    colored = np.zeros(n_vtx, dtype=np.float32)  # 1.0 once a vertex has a color
    colors = [np.zeros(tex_c, dtype=np.float32) for _ in range(n_vtx)]
    pending = []                                 # vertices awaiting a color
    adjacency = [[] for _ in range(n_vtx)]       # directed edge list per vertex

    def _texel(uv_index):
        # Map a UV coordinate to integer texel (row, col); v axis is flipped.
        col = int(round(vtx_uv[uv_index, 0] * (tex_w - 1)))
        row = int(round((1.0 - vtx_uv[uv_index, 1]) * (tex_h - 1)))
        return row, col

    # Seed vertex colors from masked texels and build the adjacency list.
    for face in range(uv_idx.shape[0]):
        for corner in range(3):
            row, col = _texel(uv_idx[face, corner])
            vtx = pos_idx[face, corner]
            if mask[row, col] > 0:
                colored[vtx] = 1.0
                colors[vtx] = texture[row, col]
            else:
                pending.append(vtx)
            adjacency[pos_idx[face, corner]].append(pos_idx[face, (corner + 1) % 3])

    # Diffuse colors; stop once the uncolored count stops shrinking twice.
    budget = 2
    prev_pending = 0
    while budget > 0:
        still_pending = 0
        for vtx in pending:
            acc = np.zeros(tex_c, dtype=np.float32)
            weight_sum = 0.0
            origin = vtx_pos[vtx]
            for nbr in adjacency[vtx]:
                if colored[nbr] > 0:
                    # Inverse-square distance weighting, clamped near zero.
                    d = np.sqrt(np.sum((origin - vtx_pos[nbr]) ** 2))
                    w = 1.0 / max(d, 1e-4)
                    w *= w
                    acc += colors[nbr] * w
                    weight_sum += w
            if weight_sum > 0:
                colors[vtx] = acc / weight_sum
                colored[vtx] = 1.0
            else:
                still_pending += 1

        if prev_pending == still_pending:
            budget -= 1
        else:
            budget += 1
        prev_pending = still_pending

    # Splat every colored vertex back into copies of the texture and mask.
    new_texture = texture.copy()
    new_mask = mask.copy()
    for face in range(uv_idx.shape[0]):
        for corner in range(3):
            vtx = pos_idx[face, corner]
            if colored[vtx] == 1.0:
                row, col = _texel(uv_idx[face, corner])
                new_texture[row, col] = colors[vtx]
                new_mask[row, col] = 255
    return new_texture, new_mask
+
def meshVerticeInpaint(texture, mask, vtx_pos, vtx_uv, pos_idx, uv_idx, method="smooth"):
    """Dispatch texture inpainting to the requested method.

    Only ``"smooth"`` is implemented; any other value raises ValueError.
    """
    if method != "smooth":
        raise ValueError("Invalid method. Use 'smooth' or 'forward'.")
    return meshVerticeInpaint_smooth(texture, mask, vtx_pos, vtx_uv, pos_idx, uv_idx)
\ No newline at end of file
diff --git a/hy3dgen/texgen/differentiable_renderer/mesh_render.py b/hy3dgen/texgen/differentiable_renderer/mesh_render.py
new file mode 100644
index 0000000000000000000000000000000000000000..d23a1a58661e5ed0ee7eb07de440daf4efac681f
--- /dev/null
+++ b/hy3dgen/texgen/differentiable_renderer/mesh_render.py
@@ -0,0 +1,823 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the respective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+import cv2
+import numpy as np
+import torch
+import torch.nn.functional as F
+import trimesh
+from PIL import Image
+
+from .camera_utils import (
+ transform_pos,
+ get_mv_matrix,
+ get_orthographic_projection_matrix,
+ get_perspective_projection_matrix,
+)
+from .mesh_processor import meshVerticeInpaint
+from .mesh_utils import load_mesh, save_mesh
+
+
def stride_from_shape(shape):
    """Row-major (C-order) strides for `shape`, e.g. (2, 3, 4) -> [12, 4, 1]."""
    strides = [1]
    # Walk the trailing dimensions from last to second; each stride is the
    # product of all dimensions to its right.
    for extent in shape[:0:-1]:
        strides.append(strides[-1] * extent)
    strides.reverse()
    return strides
+
+
def scatter_add_nd_with_count(input, count, indices, values, weights=None):
    """Accumulate `values` into an N-D grid at integer `indices`.

    input   : [..., C] accumulator tensor (updated in place through a flat view).
    count   : [..., 1] running sum of per-entry weights per cell.
    indices : [N, D] long tensor of coordinates into the leading D dims.
    values  : [N, C] payload rows to add.
    weights : [N, 1] optional per-row weights; defaults to ones.

    Returns (input, count) viewed back to their original shapes.
    """
    spatial_shape = input.shape[:-1]
    num_dims = indices.shape[-1]
    num_channels = input.shape[-1]
    assert len(spatial_shape) == num_dims

    # Row-major strides for the spatial dims, used to linearise coordinates.
    strides = [1]
    for extent in spatial_shape[:0:-1]:
        strides.append(strides[-1] * extent)
    strides.reverse()
    stride_t = torch.tensor(strides, dtype=torch.long, device=indices.device)

    flat_input = input.view(-1, num_channels)   # [prod(spatial), C]
    flat_count = count.view(-1, 1)
    flat_idx = (indices * stride_t).sum(-1)     # [N] linear cell index

    if weights is None:
        weights = torch.ones_like(values[..., :1])

    flat_input.scatter_add_(0, flat_idx.unsqueeze(1).repeat(1, num_channels), values)
    flat_count.scatter_add_(0, flat_idx.unsqueeze(1), weights)

    return flat_input.view(*spatial_shape, num_channels), flat_count.view(*spatial_shape, 1)
+
+
def linear_grid_put_2d(H, W, coords, values, return_count=False):
    """Splat per-point `values` onto an H x W grid with bilinear weights.

    coords : [N, 2] float positions in [0, 1], (row, col) order.
    values : [N, C] payload per point.

    Returns the weight-normalised grid [H, W, C]; with `return_count` True,
    returns the raw (unnormalised) grid plus the [H, W, 1] weight counts.
    """
    C = values.shape[-1]
    scale = torch.tensor([H - 1, W - 1], dtype=torch.float32, device=coords.device)
    fpos = coords * scale

    # Top-left corner of each point's bilinear footprint, clamped so the
    # +1 offsets below stay inside the grid.
    base = fpos.floor().long()
    base[:, 0].clamp_(0, H - 2)
    base[:, 1].clamp_(0, W - 2)

    fh = fpos[..., 0] - base[..., 0].float()
    fw = fpos[..., 1] - base[..., 1].float()

    grid = torch.zeros(H, W, C, device=values.device, dtype=values.dtype)
    hits = torch.zeros(H, W, 1, device=values.device, dtype=values.dtype)
    ones = torch.ones_like(values[..., :1])

    # The four bilinear corners and their weights.
    corners = [
        ((0, 0), (1 - fh) * (1 - fw)),
        ((0, 1), (1 - fh) * fw),
        ((1, 0), fh * (1 - fw)),
        ((1, 1), fh * fw),
    ]
    for offset, weight in corners:
        corner_idx = base + torch.tensor(offset, dtype=torch.long, device=base.device)
        w_col = weight.unsqueeze(1)
        grid, hits = scatter_add_nd_with_count(
            grid, hits, corner_idx, values * w_col, ones * w_col)

    if return_count:
        return grid, hits

    covered = (hits.squeeze(-1) > 0)
    grid[covered] = grid[covered] / hits[covered].repeat(1, C)
    return grid
+
+
+class MeshRender():
+ def __init__(
+ self,
+ camera_distance=1.45, camera_type='orth',
+ default_resolution=1024, texture_size=1024,
+ use_antialias=True, max_mip_level=None, filter_mode='linear',
+ bake_mode='linear', raster_mode='cr', device='cuda'):
+
+ self.device = device
+
+ self.set_default_render_resolution(default_resolution)
+ self.set_default_texture_resolution(texture_size)
+
+ self.camera_distance = camera_distance
+ self.use_antialias = use_antialias
+ self.max_mip_level = max_mip_level
+ self.filter_mode = filter_mode
+
+ self.bake_angle_thres = 75
+ self.bake_unreliable_kernel_size = int(
+ (2 / 512) * max(self.default_resolution[0], self.default_resolution[1]))
+ self.bake_mode = bake_mode
+
+ self.raster_mode = raster_mode
+ if self.raster_mode == 'cr':
+ import custom_rasterizer as cr
+ self.raster = cr
+ else:
+ raise f'No raster named {self.raster_mode}'
+
+ if camera_type == 'orth':
+ self.ortho_scale = 1.2
+ self.camera_proj_mat = get_orthographic_projection_matrix(
+ left=-self.ortho_scale * 0.5, right=self.ortho_scale * 0.5,
+ bottom=-self.ortho_scale * 0.5, top=self.ortho_scale * 0.5,
+ near=0.1, far=100
+ )
+ elif camera_type == 'perspective':
+ self.camera_proj_mat = get_perspective_projection_matrix(
+ 49.13, self.default_resolution[1] / self.default_resolution[0],
+ 0.01, 100.0
+ )
+ else:
+ raise f'No camera type {camera_type}'
+
+ def raster_rasterize(self, pos, tri, resolution, ranges=None, grad_db=True):
+
+ if self.raster_mode == 'cr':
+ rast_out_db = None
+ if pos.dim() == 2:
+ pos = pos.unsqueeze(0)
+ findices, barycentric = self.raster.rasterize(pos, tri, resolution)
+ rast_out = torch.cat((barycentric, findices.unsqueeze(-1)), dim=-1)
+ rast_out = rast_out.unsqueeze(0)
+ else:
+ raise f'No raster named {self.raster_mode}'
+
+ return rast_out, rast_out_db
+
+ def raster_interpolate(self, uv, rast_out, uv_idx, rast_db=None, diff_attrs=None):
+
+ if self.raster_mode == 'cr':
+ textd = None
+ barycentric = rast_out[0, ..., :-1]
+ findices = rast_out[0, ..., -1]
+ if uv.dim() == 2:
+ uv = uv.unsqueeze(0)
+ textc = self.raster.interpolate(uv, findices, barycentric, uv_idx)
+ else:
+ raise f'No raster named {self.raster_mode}'
+
+ return textc, textd
+
+ def raster_texture(self, tex, uv, uv_da=None, mip_level_bias=None, mip=None, filter_mode='auto',
+ boundary_mode='wrap', max_mip_level=None):
+
+ if self.raster_mode == 'cr':
+ raise f'Texture is not implemented in cr'
+ else:
+ raise f'No raster named {self.raster_mode}'
+
+ return color
+
+ def raster_antialias(self, color, rast, pos, tri, topology_hash=None, pos_gradient_boost=1.0):
+
+ if self.raster_mode == 'cr':
+ # Antialias has not been supported yet
+ color = color
+ else:
+ raise f'No raster named {self.raster_mode}'
+
+ return color
+
+ def load_mesh(
+ self,
+ mesh,
+ scale_factor=1.15,
+ auto_center=True,
+ ):
+ vtx_pos, pos_idx, vtx_uv, uv_idx, texture_data = load_mesh(mesh)
+ self.mesh_copy = mesh
+ self.set_mesh(vtx_pos, pos_idx,
+ vtx_uv=vtx_uv, uv_idx=uv_idx,
+ scale_factor=scale_factor, auto_center=auto_center
+ )
+ if texture_data is not None:
+ self.set_texture(texture_data)
+
+ def save_mesh(self):
+ texture_data = self.get_texture()
+ texture_data = Image.fromarray((texture_data * 255).astype(np.uint8))
+ return save_mesh(self.mesh_copy, texture_data)
+
+ def set_mesh(
+ self,
+ vtx_pos, pos_idx,
+ vtx_uv=None, uv_idx=None,
+ scale_factor=1.15, auto_center=True
+ ):
+
+ self.vtx_pos = torch.from_numpy(vtx_pos).to(self.device).float()
+ self.pos_idx = torch.from_numpy(pos_idx).to(self.device).to(torch.int)
+ if (vtx_uv is not None) and (uv_idx is not None):
+ self.vtx_uv = torch.from_numpy(vtx_uv).to(self.device).float()
+ self.uv_idx = torch.from_numpy(uv_idx).to(self.device).to(torch.int)
+ else:
+ self.vtx_uv = None
+ self.uv_idx = None
+
+ self.vtx_pos[:, [0, 1]] = -self.vtx_pos[:, [0, 1]]
+ self.vtx_pos[:, [1, 2]] = self.vtx_pos[:, [2, 1]]
+ if (vtx_uv is not None) and (uv_idx is not None):
+ self.vtx_uv[:, 1] = 1.0 - self.vtx_uv[:, 1]
+
+ if auto_center:
+ max_bb = (self.vtx_pos - 0).max(0)[0]
+ min_bb = (self.vtx_pos - 0).min(0)[0]
+ center = (max_bb + min_bb) / 2
+ scale = torch.norm(self.vtx_pos - center, dim=1).max() * 2.0
+ self.vtx_pos = (self.vtx_pos - center) * \
+ (scale_factor / float(scale))
+ self.scale_factor = scale_factor
+
+ def set_texture(self, tex):
+ if isinstance(tex, np.ndarray):
+ tex = Image.fromarray((tex * 255).astype(np.uint8))
+ elif isinstance(tex, torch.Tensor):
+ tex = tex.cpu().numpy()
+ tex = Image.fromarray((tex * 255).astype(np.uint8))
+
+ tex = tex.resize(self.texture_size).convert('RGB')
+ tex = np.array(tex) / 255.0
+ self.tex = torch.from_numpy(tex).to(self.device)
+ self.tex = self.tex.float()
+
+ def set_default_render_resolution(self, default_resolution):
+ if isinstance(default_resolution, int):
+ default_resolution = (default_resolution, default_resolution)
+ self.default_resolution = default_resolution
+
+ def set_default_texture_resolution(self, texture_size):
+ if isinstance(texture_size, int):
+ texture_size = (texture_size, texture_size)
+ self.texture_size = texture_size
+
+ def get_mesh(self):
+ vtx_pos = self.vtx_pos.cpu().numpy()
+ pos_idx = self.pos_idx.cpu().numpy()
+ vtx_uv = self.vtx_uv.cpu().numpy()
+ uv_idx = self.uv_idx.cpu().numpy()
+
+ # 坐标变换的逆变换
+ vtx_pos[:, [1, 2]] = vtx_pos[:, [2, 1]]
+ vtx_pos[:, [0, 1]] = -vtx_pos[:, [0, 1]]
+
+ vtx_uv[:, 1] = 1.0 - vtx_uv[:, 1]
+ return vtx_pos, pos_idx, vtx_uv, uv_idx
+
+ def get_texture(self):
+ return self.tex.cpu().numpy()
+
+ def to(self, device):
+ self.device = device
+
+ for attr_name in dir(self):
+ attr_value = getattr(self, attr_name)
+ if isinstance(attr_value, torch.Tensor):
+ setattr(self, attr_name, attr_value.to(self.device))
+
+ def color_rgb_to_srgb(self, image):
+ if isinstance(image, Image.Image):
+ image_rgb = torch.tesnor(
+ np.array(image) /
+ 255.0).float().to(
+ self.device)
+ elif isinstance(image, np.ndarray):
+ image_rgb = torch.tensor(image).float()
+ else:
+ image_rgb = image.to(self.device)
+
+ image_srgb = torch.where(
+ image_rgb <= 0.0031308,
+ 12.92 * image_rgb,
+ 1.055 * torch.pow(image_rgb, 1 / 2.4) - 0.055
+ )
+
+ if isinstance(image, Image.Image):
+ image_srgb = Image.fromarray(
+ (image_srgb.cpu().numpy() *
+ 255).astype(
+ np.uint8))
+ elif isinstance(image, np.ndarray):
+ image_srgb = image_srgb.cpu().numpy()
+ else:
+ image_srgb = image_srgb.to(image.device)
+
+ return image_srgb
+
+ def _render(
+ self,
+ glctx,
+ mvp,
+ pos,
+ pos_idx,
+ uv,
+ uv_idx,
+ tex,
+ resolution,
+ max_mip_level,
+ keep_alpha,
+ filter_mode
+ ):
+ pos_clip = transform_pos(mvp, pos)
+ if isinstance(resolution, (int, float)):
+ resolution = [resolution, resolution]
+ rast_out, rast_out_db = self.raster_rasterize(
+ glctx, pos_clip, pos_idx, resolution=resolution)
+
+ tex = tex.contiguous()
+ if filter_mode == 'linear-mipmap-linear':
+ texc, texd = self.raster_interpolate(
+ uv[None, ...], rast_out, uv_idx, rast_db=rast_out_db, diff_attrs='all')
+ color = self.raster_texture(
+ tex[None, ...], texc, texd, filter_mode='linear-mipmap-linear', max_mip_level=max_mip_level)
+ else:
+ texc, _ = self.raster_interpolate(uv[None, ...], rast_out, uv_idx)
+ color = self.raster_texture(tex[None, ...], texc, filter_mode=filter_mode)
+
+ visible_mask = torch.clamp(rast_out[..., -1:], 0, 1)
+ color = color * visible_mask # Mask out background.
+ if self.use_antialias:
+ color = self.raster_antialias(color, rast_out, pos_clip, pos_idx)
+
+ if keep_alpha:
+ color = torch.cat([color, visible_mask], dim=-1)
+ return color[0, ...]
+
+ def render(
+ self,
+ elev,
+ azim,
+ camera_distance=None,
+ center=None,
+ resolution=None,
+ tex=None,
+ keep_alpha=True,
+ bgcolor=None,
+ filter_mode=None,
+ return_type='th'
+ ):
+
+ proj = self.camera_proj_mat
+ r_mv = get_mv_matrix(
+ elev=elev,
+ azim=azim,
+ camera_distance=self.camera_distance if camera_distance is None else camera_distance,
+ center=center)
+ r_mvp = np.matmul(proj, r_mv).astype(np.float32)
+ if tex is not None:
+ if isinstance(tex, Image.Image):
+ tex = torch.tensor(np.array(tex) / 255.0)
+ elif isinstance(tex, np.ndarray):
+ tex = torch.tensor(tex)
+ if tex.dim() == 2:
+ tex = tex.unsqueeze(-1)
+ tex = tex.float().to(self.device)
+ image = self._render(r_mvp, self.vtx_pos, self.pos_idx, self.vtx_uv, self.uv_idx,
+ self.tex if tex is None else tex,
+ self.default_resolution if resolution is None else resolution,
+ self.max_mip_level, True, filter_mode if filter_mode else self.filter_mode)
+ mask = (image[..., [-1]] == 1).float()
+ if bgcolor is None:
+ bgcolor = [0 for _ in range(image.shape[-1] - 1)]
+ image = image * mask + (1 - mask) * \
+ torch.tensor(bgcolor + [0]).to(self.device)
+ if keep_alpha == False:
+ image = image[..., :-1]
+ if return_type == 'np':
+ image = image.cpu().numpy()
+ elif return_type == 'pl':
+ image = image.squeeze(-1).cpu().numpy() * 255
+ image = Image.fromarray(image.astype(np.uint8))
+ return image
+
    def render_normal(
            self,
            elev,
            azim,
            camera_distance=None,
            center=None,
            resolution=None,
            bg_color=[1, 1, 1],
            use_abs_coor=False,
            normalize_rgb=True,
            return_type='th'
    ):
        """Render a per-pixel normal map of the loaded mesh for one viewpoint.

        elev/azim: camera angles passed through to get_mv_matrix.
        use_abs_coor: compute normals from object-space triangles instead of
        camera-space ones.
        normalize_rgb: remap normals from [-1, 1] into [0, 1] for image output.
        return_type: 'th' tensor, 'np' numpy array, or 'pl' PIL image.
        NOTE(review): bg_color is a mutable default argument — safe only while
        callers never mutate it.
        """
        pos_camera, pos_clip = self.get_pos_from_mvp(elev, azim, camera_distance, center)
        if resolution is None:
            resolution = self.default_resolution
        if isinstance(resolution, (int, float)):
            resolution = [resolution, resolution]
        rast_out, rast_out_db = self.raster_rasterize(
            pos_clip, self.pos_idx, resolution=resolution)

        if use_abs_coor:
            mesh_triangles = self.vtx_pos[self.pos_idx[:, :3], :]
        else:
            # Perspective divide, then gather triangle vertices in camera space.
            pos_camera = pos_camera[:, :3] / pos_camera[:, 3:4]
            mesh_triangles = pos_camera[self.pos_idx[:, :3], :]
        # Face normals from the cross product of two triangle edge vectors.
        face_normals = F.normalize(
            torch.cross(mesh_triangles[:, 1, :] - mesh_triangles[:, 0, :],
                        mesh_triangles[:, 2, :] - mesh_triangles[:, 0, :],
                        dim=-1),
            dim=-1)

        # Average adjacent face normals into per-vertex normals (CPU, via trimesh).
        vertex_normals = trimesh.geometry.mean_vertex_normals(vertex_count=self.vtx_pos.shape[0],
                                                              faces=self.pos_idx.cpu(),
                                                              face_normals=face_normals.cpu(), )
        vertex_normals = torch.from_numpy(
            vertex_normals).float().to(self.device).contiguous()

        # Interpolate normal values across the rasterized pixels
        normal, _ = self.raster_interpolate(
            vertex_normals[None, ...], rast_out, self.pos_idx)

        # Last rast channel is the face index; >0 marks covered pixels.
        visible_mask = torch.clamp(rast_out[..., -1:], 0, 1)
        normal = normal * visible_mask + \
            torch.tensor(bg_color, dtype=torch.float32, device=self.device) * (1 - visible_mask)  # Mask out background.

        if normalize_rgb:
            normal = (normal + 1) * 0.5
        if self.use_antialias:
            normal = self.raster_antialias(normal, rast_out, pos_clip, self.pos_idx)

        image = normal[0, ...]
        if return_type == 'np':
            image = image.cpu().numpy()
        elif return_type == 'pl':
            image = image.cpu().numpy() * 255
            image = Image.fromarray(image.astype(np.uint8))

        return image
+
    def convert_normal_map(self, image):
        """Remap a normal-map image between axis conventions.

        Accepts a PIL image or uint8 numpy array and returns a PIL image.
        Pure-white pixels are treated as background and reset to the neutral
        normal color (127, 127, 255).
        NOTE(review): mutates the input array in place when a numpy array is
        passed — confirm callers do not reuse it.
        """
        # blue is front, red is left, green is top
        if isinstance(image, Image.Image):
            image = np.array(image)
        # Background = exactly white in all three channels.
        mask = (image == [255, 255, 255]).all(axis=-1)

        image = (image / 255.0) * 2.0 - 1.0

        # Channel remap: flip y, swap y/z, flip x (mirrors set_mesh's axis remap).
        image[..., [1]] = -image[..., [1]]
        image[..., [1, 2]] = image[..., [2, 1]]
        image[..., [0]] = -image[..., [0]]

        image = (image + 1.0) * 0.5

        image = (image * 255).astype(np.uint8)
        image[mask] = [127, 127, 255]

        return Image.fromarray(image)
+
+ def get_pos_from_mvp(self, elev, azim, camera_distance, center):
+ proj = self.camera_proj_mat
+ r_mv = get_mv_matrix(
+ elev=elev,
+ azim=azim,
+ camera_distance=self.camera_distance if camera_distance is None else camera_distance,
+ center=center)
+
+ pos_camera = transform_pos(r_mv, self.vtx_pos, keepdim=True)
+ pos_clip = transform_pos(proj, pos_camera)
+
+ return pos_camera, pos_clip
+
    def render_depth(
            self,
            elev,
            azim,
            camera_distance=None,
            center=None,
            resolution=None,
            return_type='th'
    ):
        """Render a min-max-normalized depth map ([0, 1] over visible pixels).

        return_type: 'th' tensor, 'np' numpy array, or 'pl' grayscale PIL image.
        NOTE(review): raises if no pixel is visible (max()/min() over an empty
        selection) — confirm callers always render a non-empty view.
        """
        pos_camera, pos_clip = self.get_pos_from_mvp(elev, azim, camera_distance, center)

        if resolution is None:
            resolution = self.default_resolution
        if isinstance(resolution, (int, float)):
            resolution = [resolution, resolution]
        rast_out, rast_out_db = self.raster_rasterize(
            pos_clip, self.pos_idx, resolution=resolution)

        # Perspective divide; per-vertex camera-space z becomes the "texture"
        # to interpolate.
        pos_camera = pos_camera[:, :3] / pos_camera[:, 3:4]
        tex_depth = pos_camera[:, 2].reshape(1, -1, 1).contiguous()

        # Interpolate depth values across the rasterized pixels
        depth, _ = self.raster_interpolate(tex_depth, rast_out, self.pos_idx)

        # Normalize using only visible pixels so background does not skew the range.
        visible_mask = torch.clamp(rast_out[..., -1:], 0, 1)
        depth_max, depth_min = depth[visible_mask >
                                     0].max(), depth[visible_mask > 0].min()
        depth = (depth - depth_min) / (depth_max - depth_min)

        depth = depth * visible_mask  # Mask out background.
        if self.use_antialias:
            depth = self.raster_antialias(depth, rast_out, pos_clip, self.pos_idx)

        image = depth[0, ...]
        if return_type == 'np':
            image = image.cpu().numpy()
        elif return_type == 'pl':
            image = image.squeeze(-1).cpu().numpy() * 255
            image = Image.fromarray(image.astype(np.uint8))
        return image
+
    def render_position(self, elev, azim, camera_distance=None, center=None,
                        resolution=None, bg_color=[1, 1, 1], return_type='th'):
        """Render remapped object-space positions for one viewpoint.

        bg_color fills pixels not covered by the mesh.
        return_type: 'th' tensor, 'np' numpy array, or 'pl' PIL image.
        NOTE(review): bg_color is a mutable default argument — safe only while
        callers never mutate it. Requires self.scale_factor, which set_mesh
        only assigns when auto_center=True.
        """
        pos_camera, pos_clip = self.get_pos_from_mvp(elev, azim, camera_distance, center)
        if resolution is None:
            resolution = self.default_resolution
        if isinstance(resolution, (int, float)):
            resolution = [resolution, resolution]
        rast_out, rast_out_db = self.raster_rasterize(
            pos_clip, self.pos_idx, resolution=resolution)

        # Map positions into a displayable range; given set_mesh's normalization
        # this presumably lands inside [0, 1] — verify against set_mesh.
        tex_position = 0.5 - self.vtx_pos[:, :3] / self.scale_factor
        tex_position = tex_position.contiguous()

        # Interpolate position values across the rasterized pixels
        position, _ = self.raster_interpolate(
            tex_position[None, ...], rast_out, self.pos_idx)

        # Last rast channel is the face index; clamp to a 0/1 coverage mask.
        visible_mask = torch.clamp(rast_out[..., -1:], 0, 1)

        position = position * visible_mask + \
            torch.tensor(bg_color, dtype=torch.float32, device=self.device) * (1 - visible_mask)  # Mask out background.
        if self.use_antialias:
            position = self.raster_antialias(position, rast_out, pos_clip, self.pos_idx)

        image = position[0, ...]

        if return_type == 'np':
            image = image.cpu().numpy()
        elif return_type == 'pl':
            image = image.squeeze(-1).cpu().numpy() * 255
            image = Image.fromarray(image.astype(np.uint8))
        return image
+
+ def render_uvpos(self, return_type='th'):
+ image = self.uv_feature_map(self.vtx_pos * 0.5 + 0.5)
+ if return_type == 'np':
+ image = image.cpu().numpy()
+ elif return_type == 'pl':
+ image = image.cpu().numpy() * 255
+ image = Image.fromarray(image.astype(np.uint8))
+ return image
+
    def uv_feature_map(self, vert_feat, bg=None):
        """Rasterize per-vertex features into UV (texture) space.

        vert_feat: [V, C] per-vertex features; returns a texture_size x C map.
        bg: optional fill value for texels not covered by any UV triangle.
        """
        # Treat UVs as clip-space xy in [-1, 1] with z = 0 and w = 1, so the
        # 3D rasterizer effectively performs a flat 2D rasterization.
        vtx_uv = self.vtx_uv * 2 - 1.0
        vtx_uv = torch.cat(
            [vtx_uv, torch.zeros_like(self.vtx_uv)], dim=1).unsqueeze(0)
        vtx_uv[..., -1] = 1
        uv_idx = self.uv_idx
        rast_out, rast_out_db = self.raster_rasterize(
            vtx_uv, uv_idx, resolution=self.texture_size)
        feat_map, _ = self.raster_interpolate(vert_feat[None, ...], rast_out, uv_idx)
        feat_map = feat_map[0, ...]
        if bg is not None:
            # Last rast channel is the face index; 0 marks uncovered texels.
            visible_mask = torch.clamp(rast_out[..., -1:], 0, 1)[0, ...]
            feat_map[visible_mask == 0] = bg
        return feat_map
+
+ def render_sketch_from_geometry(self, normal_image, depth_image):
+ normal_image_np = normal_image.cpu().numpy()
+ depth_image_np = depth_image.cpu().numpy()
+
+ normal_image_np = (normal_image_np * 255).astype(np.uint8)
+ depth_image_np = (depth_image_np * 255).astype(np.uint8)
+ normal_image_np = cv2.cvtColor(normal_image_np, cv2.COLOR_RGB2GRAY)
+
+ normal_edges = cv2.Canny(normal_image_np, 80, 150)
+ depth_edges = cv2.Canny(depth_image_np, 30, 80)
+
+ combined_edges = np.maximum(normal_edges, depth_edges)
+
+ sketch_image = torch.from_numpy(combined_edges).to(
+ normal_image.device).float() / 255.0
+ sketch_image = sketch_image.unsqueeze(-1)
+
+ return sketch_image
+
+ def render_sketch_from_depth(self, depth_image):
+ depth_image_np = depth_image.cpu().numpy()
+ depth_image_np = (depth_image_np * 255).astype(np.uint8)
+ depth_edges = cv2.Canny(depth_image_np, 30, 80)
+ combined_edges = depth_edges
+ sketch_image = torch.from_numpy(combined_edges).to(
+ depth_image.device).float() / 255.0
+ sketch_image = sketch_image.unsqueeze(-1)
+ return sketch_image
+
+ def back_project(self, image, elev, azim,
+ camera_distance=None, center=None, method=None):
+ if isinstance(image, Image.Image):
+ image = torch.tensor(np.array(image) / 255.0)
+ elif isinstance(image, np.ndarray):
+ image = torch.tensor(image)
+ if image.dim() == 2:
+ image = image.unsqueeze(-1)
+ image = image.float().to(self.device)
+ resolution = image.shape[:2]
+ channel = image.shape[-1]
+ texture = torch.zeros(self.texture_size + (channel,)).to(self.device)
+ cos_map = torch.zeros(self.texture_size + (1,)).to(self.device)
+
+ proj = self.camera_proj_mat
+ r_mv = get_mv_matrix(
+ elev=elev,
+ azim=azim,
+ camera_distance=self.camera_distance if camera_distance is None else camera_distance,
+ center=center)
+ pos_camera = transform_pos(r_mv, self.vtx_pos, keepdim=True)
+ pos_clip = transform_pos(proj, pos_camera)
+ pos_camera = pos_camera[:, :3] / pos_camera[:, 3:4]
+ v0 = pos_camera[self.pos_idx[:, 0], :]
+ v1 = pos_camera[self.pos_idx[:, 1], :]
+ v2 = pos_camera[self.pos_idx[:, 2], :]
+ face_normals = F.normalize(
+ torch.cross(
+ v1 - v0,
+ v2 - v0,
+ dim=-1),
+ dim=-1)
+ vertex_normals = trimesh.geometry.mean_vertex_normals(vertex_count=self.vtx_pos.shape[0],
+ faces=self.pos_idx.cpu(),
+ face_normals=face_normals.cpu(), )
+ vertex_normals = torch.from_numpy(
+ vertex_normals).float().to(self.device).contiguous()
+ tex_depth = pos_camera[:, 2].reshape(1, -1, 1).contiguous()
+ rast_out, rast_out_db = self.raster_rasterize(
+ pos_clip, self.pos_idx, resolution=resolution)
+ visible_mask = torch.clamp(rast_out[..., -1:], 0, 1)[0, ...]
+
+ normal, _ = self.raster_interpolate(
+ vertex_normals[None, ...], rast_out, self.pos_idx)
+ normal = normal[0, ...]
+ uv, _ = self.raster_interpolate(self.vtx_uv[None, ...], rast_out, self.uv_idx)
+ depth, _ = self.raster_interpolate(tex_depth, rast_out, self.pos_idx)
+ depth = depth[0, ...]
+
+ depth_max, depth_min = depth[visible_mask >
+ 0].max(), depth[visible_mask > 0].min()
+ depth_normalized = (depth - depth_min) / (depth_max - depth_min)
+ depth_image = depth_normalized * visible_mask # Mask out background.
+
+ sketch_image = self.render_sketch_from_depth(depth_image)
+
+ lookat = torch.tensor([[0, 0, -1]], device=self.device)
+ cos_image = torch.nn.functional.cosine_similarity(
+ lookat, normal.view(-1, 3))
+ cos_image = cos_image.view(normal.shape[0], normal.shape[1], 1)
+
+ cos_thres = np.cos(self.bake_angle_thres / 180 * np.pi)
+ cos_image[cos_image < cos_thres] = 0
+
+ # shrink
+ kernel_size = self.bake_unreliable_kernel_size * 2 + 1
+ kernel = torch.ones(
+ (1, 1, kernel_size, kernel_size), dtype=torch.float32).to(
+ sketch_image.device)
+
+ visible_mask = visible_mask.permute(2, 0, 1).unsqueeze(0).float()
+ visible_mask = F.conv2d(
+ 1.0 - visible_mask,
+ kernel,
+ padding=kernel_size // 2)
+ visible_mask = 1.0 - (visible_mask > 0).float() # 二值化
+ visible_mask = visible_mask.squeeze(0).permute(1, 2, 0)
+
+ sketch_image = sketch_image.permute(2, 0, 1).unsqueeze(0)
+ sketch_image = F.conv2d(sketch_image, kernel, padding=kernel_size // 2)
+ sketch_image = (sketch_image > 0).float() # 二值化
+ sketch_image = sketch_image.squeeze(0).permute(1, 2, 0)
+ visible_mask = visible_mask * (sketch_image < 0.5)
+
+ cos_image[visible_mask == 0] = 0
+
+ method = self.bake_mode if method is None else method
+
+ if method == 'linear':
+ proj_mask = (visible_mask != 0).view(-1)
+ uv = uv.squeeze(0).contiguous().view(-1, 2)[proj_mask]
+ image = image.squeeze(0).contiguous().view(-1, channel)[proj_mask]
+ cos_image = cos_image.contiguous().view(-1, 1)[proj_mask]
+ sketch_image = sketch_image.contiguous().view(-1, 1)[proj_mask]
+
+ texture = linear_grid_put_2d(
+ self.texture_size[1], self.texture_size[0], uv[..., [1, 0]], image)
+ cos_map = linear_grid_put_2d(
+ self.texture_size[1], self.texture_size[0], uv[..., [1, 0]], cos_image)
+ boundary_map = linear_grid_put_2d(
+ self.texture_size[1], self.texture_size[0], uv[..., [1, 0]], sketch_image)
+ else:
+ raise f'No bake mode {method}'
+
+ return texture, cos_map, boundary_map
+
+ def bake_texture(self, colors, elevs, azims,
+ camera_distance=None, center=None, exp=6, weights=None):
+ for i in range(len(colors)):
+ if isinstance(colors[i], Image.Image):
+ colors[i] = torch.tensor(
+ np.array(
+ colors[i]) / 255.0,
+ device=self.device).float()
+ if weights is None:
+ weights = [1.0 for _ in range(colors)]
+ textures = []
+ cos_maps = []
+ for color, elev, azim, weight in zip(colors, elevs, azims, weights):
+ texture, cos_map, _ = self.back_project(
+ color, elev, azim, camera_distance, center)
+ cos_map = weight * (cos_map ** exp)
+ textures.append(texture)
+ cos_maps.append(cos_map)
+
+ texture_merge, trust_map_merge = self.fast_bake_texture(
+ textures, cos_maps)
+ return texture_merge, trust_map_merge
+
+ @torch.no_grad()
+ def fast_bake_texture(self, textures, cos_maps):
+
+ channel = textures[0].shape[-1]
+ texture_merge = torch.zeros(
+ self.texture_size + (channel,)).to(self.device)
+ trust_map_merge = torch.zeros(self.texture_size + (1,)).to(self.device)
+ for texture, cos_map in zip(textures, cos_maps):
+ view_sum = (cos_map > 0).sum()
+ painted_sum = ((cos_map > 0) * (trust_map_merge > 0)).sum()
+ if painted_sum / view_sum > 0.99:
+ continue
+ texture_merge += texture * cos_map
+ trust_map_merge += cos_map
+ texture_merge = texture_merge / torch.clamp(trust_map_merge, min=1E-8)
+
+ return texture_merge, trust_map_merge > 1E-8
+
+ def uv_inpaint(self, texture, mask):
+
+ if isinstance(texture, torch.Tensor):
+ texture_np = texture.cpu().numpy()
+ elif isinstance(texture, np.ndarray):
+ texture_np = texture
+ elif isinstance(texture, Image.Image):
+ texture_np = np.array(texture) / 255.0
+
+ vtx_pos, pos_idx, vtx_uv, uv_idx = self.get_mesh()
+
+ texture_np, mask = meshVerticeInpaint(
+ texture_np, mask, vtx_pos, vtx_uv, pos_idx, uv_idx)
+
+ texture_np = cv2.inpaint(
+ (texture_np *
+ 255).astype(
+ np.uint8),
+ 255 -
+ mask,
+ 3,
+ cv2.INPAINT_NS)
+
+ return texture_np
diff --git a/hy3dgen/texgen/differentiable_renderer/mesh_utils.py b/hy3dgen/texgen/differentiable_renderer/mesh_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea0f566d08bb11f1d5f95be7c37638204e437966
--- /dev/null
+++ b/hy3dgen/texgen/differentiable_renderer/mesh_utils.py
@@ -0,0 +1,34 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the respective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+import trimesh
+
+
def load_mesh(mesh):
    """Pull vertex/face/UV arrays out of a trimesh-like mesh object.

    UV faces reuse the position faces. Embedded texture data is not
    extracted here, so the last element is always None.
    """
    vtx_pos = getattr(mesh, 'vertices', None)
    pos_idx = getattr(mesh, 'faces', None)
    vtx_uv = getattr(mesh.visual, 'uv', None)
    uv_idx = pos_idx

    return vtx_pos, pos_idx, vtx_uv, uv_idx, None
+
+
def save_mesh(mesh, texture_data):
    """Attach `texture_data` to `mesh` as a textured material and return the mesh."""
    material = trimesh.visual.texture.SimpleMaterial(
        image=texture_data, diffuse=(255, 255, 255))
    mesh.visual = trimesh.visual.TextureVisuals(
        uv=mesh.visual.uv, image=texture_data, material=material)
    return mesh
diff --git a/hy3dgen/texgen/differentiable_renderer/setup.py b/hy3dgen/texgen/differentiable_renderer/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..b7218647bb78658625e2506555a329aece8e9788
--- /dev/null
+++ b/hy3dgen/texgen/differentiable_renderer/setup.py
@@ -0,0 +1,62 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the respective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+from setuptools import setup, Extension
+import pybind11
+import sys
+import platform
+
def get_platform_specific_args():
    """Compiler/linker flags for the current OS.

    Returns a (compile_args, link_args, extra_include_dirs) triple; raises
    RuntimeError on unsupported platforms.
    """
    system = platform.system().lower()
    cpp_std = 'c++14'  # Make configurable if needed

    if sys.platform == 'win32':
        return (
            ['/O2', f'/std:{cpp_std}', '/EHsc', '/MP',
             '/DWIN32_LEAN_AND_MEAN', '/bigobj'],
            [],
            [],
        )
    if system == 'linux':
        return (
            ['-O3', f'-std={cpp_std}', '-fPIC', '-Wall', '-Wextra', '-pthread'],
            ['-fPIC', '-pthread'],
            [],
        )
    if sys.platform == 'darwin':
        mac_common = ['-fPIC', '-stdlib=libc++', '-mmacosx-version-min=10.14']
        return (
            ['-O3', f'-std={cpp_std}', '-fPIC', '-Wall', '-Wextra',
             '-stdlib=libc++', '-mmacosx-version-min=10.14'],
            mac_common + ['-dynamiclib'],
            [],
        )
    raise RuntimeError(f"Unsupported platform: {system}")
+
# Resolve per-platform toolchain flags once, at import time.
extra_compile_args, extra_link_args, platform_includes = get_platform_specific_args()
# pybind11 headers from both the environment and the user site.
include_dirs = [pybind11.get_include(), pybind11.get_include(user=True)]
include_dirs.extend(platform_includes)

# Single C++ extension wrapping mesh_processor.cpp.
ext_modules = [
    Extension(
        "mesh_processor",
        ["mesh_processor.cpp"],
        include_dirs=include_dirs,
        language='c++',
        extra_compile_args=extra_compile_args,
        extra_link_args=extra_link_args,
    ),
]

setup(
    name="mesh_processor",
    ext_modules=ext_modules,
    install_requires=['pybind11>=2.6.0'],
    python_requires='>=3.6',
)
\ No newline at end of file
diff --git a/hy3dgen/texgen/hunyuanpaint/__init__.py b/hy3dgen/texgen/hunyuanpaint/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3cde306b834c182f04339203fb5a78d846ac613b
--- /dev/null
+++ b/hy3dgen/texgen/hunyuanpaint/__init__.py
@@ -0,0 +1,13 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the repsective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
\ No newline at end of file
diff --git a/hy3dgen/texgen/hunyuanpaint/pipeline.py b/hy3dgen/texgen/hunyuanpaint/pipeline.py
new file mode 100644
index 0000000000000000000000000000000000000000..c22d8961470d2f2024e67cc46bf75e417276fc8b
--- /dev/null
+++ b/hy3dgen/texgen/hunyuanpaint/pipeline.py
@@ -0,0 +1,546 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the respective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+from typing import Any, Callable, Dict, List, Optional, Union
+
+import numpy
+import numpy as np
+import torch
+import torch.distributed
+import torch.utils.checkpoint
+from PIL import Image
+from diffusers import (
+ AutoencoderKL,
+ DiffusionPipeline,
+ ImagePipelineOutput
+)
+from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback
+from diffusers.image_processor import PipelineImageInput
+from diffusers.image_processor import VaeImageProcessor
+from diffusers.pipelines.stable_diffusion.pipeline_output import StableDiffusionPipelineOutput
+from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipeline, retrieve_timesteps, \
+ rescale_noise_cfg
+from diffusers.schedulers import KarrasDiffusionSchedulers
+from diffusers.utils import deprecate
+from einops import rearrange
+from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
+
+from .unet.modules import UNet2p5DConditionModel
+
+
+def to_rgb_image(maybe_rgba: Image.Image):
+    """Return an RGB version of *maybe_rgba*.
+
+    RGB images are returned unchanged. RGBA images are composited onto a
+    constant mid-gray background using their alpha channel. Any other mode
+    raises ValueError.
+    """
+    if maybe_rgba.mode == 'RGB':
+        return maybe_rgba
+    elif maybe_rgba.mode == 'RGBA':
+        rgba = maybe_rgba
+        # randint(127, 128) always yields 127, i.e. a constant gray background.
+        img = numpy.random.randint(127, 128, size=[rgba.size[1], rgba.size[0], 3], dtype=numpy.uint8)
+        img = Image.fromarray(img, 'RGB')
+        img.paste(rgba, mask=rgba.getchannel('A'))
+        return img
+    else:
+        raise ValueError("Unsupported image type.", maybe_rgba.mode)
+
+
+class HunyuanPaintPipeline(StableDiffusionPipeline):
+    """Multiview texture-painting pipeline built on Stable Diffusion with a
+    2.5D-conditioned UNet (:class:`UNet2p5DConditionModel`)."""
+
+    def __init__(
+        self,
+        vae: AutoencoderKL,
+        text_encoder: CLIPTextModel,
+        tokenizer: CLIPTokenizer,
+        unet: UNet2p5DConditionModel,
+        scheduler: KarrasDiffusionSchedulers,
+        feature_extractor: CLIPImageProcessor,
+        safety_checker=None,
+        use_torch_compile=False,
+    ):
+        # Call DiffusionPipeline.__init__ directly, bypassing
+        # StableDiffusionPipeline.__init__ and its safety-checker validation.
+        DiffusionPipeline.__init__(self)
+
+        # The safety checker is deliberately disabled regardless of what the
+        # caller passed in.
+        safety_checker = None
+        self.register_modules(
+            vae=torch.compile(vae) if use_torch_compile else vae,
+            text_encoder=text_encoder,
+            tokenizer=tokenizer,
+            unet=unet,
+            scheduler=scheduler,
+            safety_checker=safety_checker,
+            feature_extractor=torch.compile(feature_extractor) if use_torch_compile else feature_extractor,
+        )
+        # Spatial downscale factor between pixel space and VAE latent space.
+        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
+        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
+
+    @torch.no_grad()
+    def encode_images(self, images):
+        """Encode a batch of multiview images into scaled VAE latents.
+
+        Args:
+            images: tensor of shape (B, N, C, H, W) with values in [0, 1],
+                where N is the number of views per sample.
+
+        Returns:
+            Latents of shape (B, N, c, h, w), multiplied by the VAE's
+            configured scaling factor.
+        """
+        B = images.shape[0]
+        # Fold the view dimension into the batch so the VAE sees 4D input.
+        images = rearrange(images, 'b n c h w -> (b n) c h w')
+
+        dtype = next(self.vae.parameters()).dtype
+        # Map [0, 1] pixels to the [-1, 1] range the VAE expects.
+        images = (images - 0.5) * 2.0
+        posterior = self.vae.encode(images.to(dtype)).latent_dist
+        latents = posterior.sample() * self.vae.config.scaling_factor
+
+        # Restore the (batch, view) split.
+        latents = rearrange(latents, '(b n) c h w -> b n c h w', b=B)
+        return latents
+
+    @torch.no_grad()
+    def __call__(
+        self,
+        image: Image.Image = None,
+        prompt=None,
+        negative_prompt='watermark, ugly, deformed, noisy, blurry, low contrast',
+        *args,
+        num_images_per_prompt: Optional[int] = 1,
+        guidance_scale=2.0,
+        output_type: Optional[str] = "pil",
+        width=512,
+        height=512,
+        num_inference_steps=28,
+        return_dict=True,
+        **cached_condition,
+    ):
+        """Generate multiview texture images conditioned on a reference image.
+
+        Extra conditioning (normal/position images, camera info, ...) is passed
+        through ``**cached_condition`` to :meth:`denoise` and on to the UNet.
+        Only batch size 1 and ``num_images_per_prompt == 1`` are supported.
+        Raises ValueError when no image is given.
+        """
+        device = self._execution_device
+
+        if image is None:
+            raise ValueError("Inputting embeddings not supported for this pipeline. Please pass an image.")
+        assert not isinstance(image, torch.Tensor)
+
+        image = to_rgb_image(image)
+
+        # HWC uint8 -> (1, 1, C, H, W) float in [0, 1] for the VAE encoder.
+        image_vae = torch.tensor(np.array(image) / 255.0)
+        image_vae = image_vae.unsqueeze(0).permute(0, 3, 1, 2).unsqueeze(0)
+        image_vae = image_vae.to(device=device, dtype=self.vae.dtype)
+
+        batch_size = image_vae.shape[0]
+        assert batch_size == 1
+        assert num_images_per_prompt == 1
+
+        ref_latents = self.encode_images(image_vae)
+
+        def convert_pil_list_to_tensor(images):
+            # Composite RGBA views onto a white background and stack them into
+            # a (B, N, C, H, W) half-precision tensor.
+            # NOTE(review): hardcodes the "cuda" device rather than using
+            # self._execution_device — confirm on CPU-only setups.
+            bg_c = [1., 1., 1.]
+            images_tensor = []
+            for batch_imgs in images:
+                view_imgs = []
+                for pil_img in batch_imgs:
+                    img = numpy.asarray(pil_img, dtype=numpy.float32) / 255.
+                    if img.shape[2] > 3:
+                        alpha = img[:, :, 3:]
+                        img = img[:, :, :3] * alpha + bg_c * (1 - alpha)
+                    img = torch.from_numpy(img).permute(2, 0, 1).unsqueeze(0).contiguous().half().to("cuda")
+                    view_imgs.append(img)
+                view_imgs = torch.cat(view_imgs, dim=0)
+                images_tensor.append(view_imgs.unsqueeze(0))
+
+            images_tensor = torch.cat(images_tensor, dim=0)
+            return images_tensor
+
+        # Encode optional conditioning images into VAE latents in place.
+        if "normal_imgs" in cached_condition:
+
+            if isinstance(cached_condition["normal_imgs"], List):
+                cached_condition["normal_imgs"] = convert_pil_list_to_tensor(cached_condition["normal_imgs"])
+
+            cached_condition['normal_imgs'] = self.encode_images(cached_condition["normal_imgs"])
+
+        if "position_imgs" in cached_condition:
+
+            if isinstance(cached_condition["position_imgs"], List):
+                cached_condition["position_imgs"] = convert_pil_list_to_tensor(cached_condition["position_imgs"])
+
+            cached_condition["position_imgs"] = self.encode_images(cached_condition["position_imgs"])
+
+        # Normalize camera info to int64 tensors on the execution device.
+        if 'camera_info_gen' in cached_condition:
+            camera_info = cached_condition['camera_info_gen']  # B,N
+            if isinstance(camera_info, List):
+                camera_info = torch.tensor(camera_info)
+            camera_info = camera_info.to(device).to(torch.int64)
+            cached_condition['camera_info_gen'] = camera_info
+        if 'camera_info_ref' in cached_condition:
+            camera_info = cached_condition['camera_info_ref']  # B,N
+            if isinstance(camera_info, List):
+                camera_info = torch.tensor(camera_info)
+            camera_info = camera_info.to(device).to(torch.int64)
+            cached_condition['camera_info_ref'] = camera_info
+
+        cached_condition['ref_latents'] = ref_latents
+
+        if guidance_scale > 1:
+            # Classifier-free guidance: duplicate every condition so the first
+            # half of the batch is the unconditional branch (zeroed reference,
+            # ref_scale 0.0) and the second half is conditional (ref_scale 1.0).
+            negative_ref_latents = torch.zeros_like(cached_condition['ref_latents'])
+            cached_condition['ref_latents'] = torch.cat([negative_ref_latents, cached_condition['ref_latents']])
+            cached_condition['ref_scale'] = torch.as_tensor([0.0, 1.0]).to(cached_condition['ref_latents'])
+            if "normal_imgs" in cached_condition:
+                cached_condition['normal_imgs'] = torch.cat(
+                    (cached_condition['normal_imgs'], cached_condition['normal_imgs']))
+
+            if "position_imgs" in cached_condition:
+                cached_condition['position_imgs'] = torch.cat(
+                    (cached_condition['position_imgs'], cached_condition['position_imgs']))
+
+            if 'position_maps' in cached_condition:
+                cached_condition['position_maps'] = torch.cat(
+                    (cached_condition['position_maps'], cached_condition['position_maps']))
+
+            if 'camera_info_gen' in cached_condition:
+                cached_condition['camera_info_gen'] = torch.cat(
+                    (cached_condition['camera_info_gen'], cached_condition['camera_info_gen']))
+            if 'camera_info_ref' in cached_condition:
+                cached_condition['camera_info_ref'] = torch.cat(
+                    (cached_condition['camera_info_ref'], cached_condition['camera_info_ref']))
+
+        # Text conditioning comes from a learned embedding stored on the UNet,
+        # not from the tokenizer/text encoder; the negative branch is zeros.
+        prompt_embeds = self.unet.learned_text_clip_gen.repeat(num_images_per_prompt, 1, 1)
+        negative_prompt_embeds = torch.zeros_like(prompt_embeds)
+
+        latents: torch.Tensor = self.denoise(
+            None,
+            *args,
+            cross_attention_kwargs=None,
+            guidance_scale=guidance_scale,
+            num_images_per_prompt=num_images_per_prompt,
+            prompt_embeds=prompt_embeds,
+            negative_prompt_embeds=negative_prompt_embeds,
+            num_inference_steps=num_inference_steps,
+            output_type='latent',
+            width=width,
+            height=height,
+            **cached_condition
+        ).images
+
+        if not output_type == "latent":
+            image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
+        else:
+            image = latents
+
+        image = self.image_processor.postprocess(image, output_type=output_type)
+        if not return_dict:
+            return (image,)
+
+        return ImagePipelineOutput(images=image)
+
+    def denoise(
+        self,
+        prompt: Union[str, List[str]] = None,
+        height: Optional[int] = None,
+        width: Optional[int] = None,
+        num_inference_steps: int = 50,
+        timesteps: List[int] = None,
+        sigmas: List[float] = None,
+        guidance_scale: float = 7.5,
+        negative_prompt: Optional[Union[str, List[str]]] = None,
+        num_images_per_prompt: Optional[int] = 1,
+        eta: float = 0.0,
+        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+        latents: Optional[torch.Tensor] = None,
+        prompt_embeds: Optional[torch.Tensor] = None,
+        negative_prompt_embeds: Optional[torch.Tensor] = None,
+        ip_adapter_image: Optional[PipelineImageInput] = None,
+        ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
+        output_type: Optional[str] = "pil",
+        return_dict: bool = True,
+        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
+        guidance_rescale: float = 0.0,
+        clip_skip: Optional[int] = None,
+        callback_on_step_end: Optional[
+            Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
+        ] = None,
+        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
+        **kwargs,
+    ):
+        r"""
+        The call function to the pipeline for generation.
+
+        Args:
+            prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
+            height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
+                The height in pixels of the generated image.
+            width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
+                The width in pixels of the generated image.
+            num_inference_steps (`int`, *optional*, defaults to 50):
+                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+                expense of slower inference.
+            timesteps (`List[int]`, *optional*):
+                Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
+                in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
+                passed will be used. Must be in descending order.
+            sigmas (`List[float]`, *optional*):
+                Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
+                their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
+                will be used.
+            guidance_scale (`float`, *optional*, defaults to 7.5):
+                A higher guidance scale value encourages the model to generate images closely linked to the text
+                `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
+            negative_prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts to guide what to not include in image generation. If not defined, you need to
+                pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
+            num_images_per_prompt (`int`, *optional*, defaults to 1):
+                The number of images to generate per prompt.
+            eta (`float`, *optional*, defaults to 0.0):
+                Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
+                to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
+            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
+                generation deterministic.
+            latents (`torch.Tensor`, *optional*):
+                Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
+                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+                tensor is generated by sampling using the supplied random `generator`.
+            prompt_embeds (`torch.Tensor`, *optional*):
+                Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
+                provided, text embeddings are generated from the `prompt` input argument.
+            negative_prompt_embeds (`torch.Tensor`, *optional*):
+                Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
+                not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
+            ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
+            ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*):
+                Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of
+                IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should
+                contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not
+                provided, embeddings are computed from the `ip_adapter_image` input argument.
+            output_type (`str`, *optional*, defaults to `"pil"`):
+                The output format of the generated image. Choose between `PIL.Image` or `np.array`.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
+                plain tuple.
+            cross_attention_kwargs (`dict`, *optional*):
+                A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
+                [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
+            guidance_rescale (`float`, *optional*, defaults to 0.0):
+                Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
+                Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
+                using zero terminal SNR.
+            clip_skip (`int`, *optional*):
+                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
+                the output of the pre-final layer will be used for computing the prompt embeddings.
+            callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
+                A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of
+                each denoising step during the inference. with the following arguments: `callback_on_step_end(self:
+                DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a
+                list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
+            callback_on_step_end_tensor_inputs (`List`, *optional*):
+                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
+                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
+                `._callback_tensor_inputs` attribute of your pipeline class.
+
+        Examples:
+
+        Returns:
+            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
+                If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
+                otherwise a `tuple` is returned where the first element is a list with the generated images and the
+                second element is a list of `bool`s indicating whether the corresponding generated image contains
+                "not-safe-for-work" (nsfw) content.
+        """
+        # NOTE(review): kwargs must contain 'num_in_batch' (views per sample);
+        # it is read below for latent preparation and the multiview reshapes.
+
+        callback = kwargs.pop("callback", None)
+        callback_steps = kwargs.pop("callback_steps", None)
+
+        if callback is not None:
+            deprecate(
+                "callback",
+                "1.0.0",
+                "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
+            )
+        if callback_steps is not None:
+            deprecate(
+                "callback_steps",
+                "1.0.0",
+                "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
+            )
+
+        if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
+            callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
+
+        # 0. Default height and width to unet
+        height = height or self.unet.config.sample_size * self.vae_scale_factor
+        width = width or self.unet.config.sample_size * self.vae_scale_factor
+        # to deal with lora scaling and other possible forward hooks
+
+        # 1. Check inputs. Raise error if not correct
+        self.check_inputs(
+            prompt,
+            height,
+            width,
+            callback_steps,
+            negative_prompt,
+            prompt_embeds,
+            negative_prompt_embeds,
+            ip_adapter_image,
+            ip_adapter_image_embeds,
+            callback_on_step_end_tensor_inputs,
+        )
+
+        self._guidance_scale = guidance_scale
+        self._guidance_rescale = guidance_rescale
+        self._clip_skip = clip_skip
+        self._cross_attention_kwargs = cross_attention_kwargs
+        self._interrupt = False
+
+        # 2. Define call parameters
+        if prompt is not None and isinstance(prompt, str):
+            batch_size = 1
+        elif prompt is not None and isinstance(prompt, list):
+            batch_size = len(prompt)
+        else:
+            batch_size = prompt_embeds.shape[0]
+
+        device = self._execution_device
+
+        # 3. Encode input prompt
+        lora_scale = (
+            self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
+        )
+
+        prompt_embeds, negative_prompt_embeds = self.encode_prompt(
+            prompt,
+            device,
+            num_images_per_prompt,
+            self.do_classifier_free_guidance,
+            negative_prompt,
+            prompt_embeds=prompt_embeds,
+            negative_prompt_embeds=negative_prompt_embeds,
+            lora_scale=lora_scale,
+            clip_skip=self.clip_skip,
+        )
+
+        # For classifier free guidance, we need to do two forward passes.
+        # Here we concatenate the unconditional and text embeddings into a single batch
+        # to avoid doing two forward passes
+        if self.do_classifier_free_guidance:
+            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
+
+        if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
+            image_embeds = self.prepare_ip_adapter_image_embeds(
+                ip_adapter_image,
+                ip_adapter_image_embeds,
+                device,
+                batch_size * num_images_per_prompt,
+                self.do_classifier_free_guidance,
+            )
+
+        # 4. Prepare timesteps
+        timesteps, num_inference_steps = retrieve_timesteps(
+            self.scheduler, num_inference_steps, device, timesteps, sigmas
+        )
+        assert num_images_per_prompt == 1
+        # 5. Prepare latent variables
+        # One latent per view: batch is expanded by the number of views.
+        num_channels_latents = self.unet.config.in_channels
+        latents = self.prepare_latents(
+            batch_size * kwargs['num_in_batch'],  # num_images_per_prompt,
+            num_channels_latents,
+            height,
+            width,
+            prompt_embeds.dtype,
+            device,
+            generator,
+            latents,
+        )
+
+        # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
+        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
+
+        # 6.1 Add image embeds for IP-Adapter
+        added_cond_kwargs = (
+            {"image_embeds": image_embeds}
+            if (ip_adapter_image is not None or ip_adapter_image_embeds is not None)
+            else None
+        )
+
+        # 6.2 Optionally get Guidance Scale Embedding
+        timestep_cond = None
+        if self.unet.config.time_cond_proj_dim is not None:
+            guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
+            timestep_cond = self.get_guidance_scale_embedding(
+                guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
+            ).to(device=device, dtype=latents.dtype)
+
+        # 7. Denoising loop
+        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
+        self._num_timesteps = len(timesteps)
+        with self.progress_bar(total=num_inference_steps) as progress_bar:
+            for i, t in enumerate(timesteps):
+                if self.interrupt:
+                    continue
+
+                # expand the latents if we are doing classifier free guidance
+                # (CFG duplication happens on the batch dim, before re-folding
+                # the view dim back in for the UNet call).
+                latents = rearrange(latents, '(b n) c h w -> b n c h w', n=kwargs['num_in_batch'])
+                latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
+                latent_model_input = rearrange(latent_model_input, 'b n c h w -> (b n) c h w')
+                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
+                latent_model_input = rearrange(latent_model_input, '(b n) c h w ->b n c h w', n=kwargs['num_in_batch'])
+
+                # predict the noise residual
+
+                noise_pred = self.unet(
+                    latent_model_input,
+                    t,
+                    encoder_hidden_states=prompt_embeds,
+                    timestep_cond=timestep_cond,
+                    cross_attention_kwargs=self.cross_attention_kwargs,
+                    added_cond_kwargs=added_cond_kwargs,
+                    return_dict=False, **kwargs
+                )[0]
+                latents = rearrange(latents, 'b n c h w -> (b n) c h w')
+                # perform guidance
+                if self.do_classifier_free_guidance:
+                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+                    noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+                if self.do_classifier_free_guidance and self.guidance_rescale > 0.0:
+                    # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
+                    noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale)
+
+                # compute the previous noisy sample x_t -> x_t-1
+                # Only the first num_channels_latents channels are stepped;
+                # any extra conditioning channels are dropped here.
+                latents = \
+                    self.scheduler.step(noise_pred, t, latents[:, :num_channels_latents, :, :], **extra_step_kwargs,
+                                        return_dict=False)[0]
+
+                if callback_on_step_end is not None:
+                    callback_kwargs = {}
+                    for k in callback_on_step_end_tensor_inputs:
+                        callback_kwargs[k] = locals()[k]
+                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
+
+                    latents = callback_outputs.pop("latents", latents)
+                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
+                    negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
+
+                # call the callback, if provided
+                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
+                    progress_bar.update()
+                    if callback is not None and i % callback_steps == 0:
+                        step_idx = i // getattr(self.scheduler, "order", 1)
+                        callback(step_idx, t, latents)
+
+        if not output_type == "latent":
+            image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[
+                0
+            ]
+            image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
+        else:
+            image = latents
+            has_nsfw_concept = None
+
+        if has_nsfw_concept is None:
+            do_denormalize = [True] * image.shape[0]
+        else:
+            do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
+
+        image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
+
+        # Offload all models
+        self.maybe_free_model_hooks()
+
+        if not return_dict:
+            return (image, has_nsfw_concept)
+
+        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
diff --git a/hy3dgen/texgen/hunyuanpaint/unet/__init__.py b/hy3dgen/texgen/hunyuanpaint/unet/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3cde306b834c182f04339203fb5a78d846ac613b
--- /dev/null
+++ b/hy3dgen/texgen/hunyuanpaint/unet/__init__.py
@@ -0,0 +1,13 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the respective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
\ No newline at end of file
diff --git a/hy3dgen/texgen/hunyuanpaint/unet/modules.py b/hy3dgen/texgen/hunyuanpaint/unet/modules.py
new file mode 100644
index 0000000000000000000000000000000000000000..1475ee4ae11a9acff37e79006b533db76c1b0f13
--- /dev/null
+++ b/hy3dgen/texgen/hunyuanpaint/unet/modules.py
@@ -0,0 +1,429 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the respective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+import copy
+import json
+import os
+from typing import Any, Dict, Optional
+
+import torch
+import torch.nn as nn
+from diffusers.models import UNet2DConditionModel
+from diffusers.models.attention_processor import Attention
+from diffusers.models.transformers.transformer_2d import BasicTransformerBlock
+from einops import rearrange
+
+
+def _chunked_feed_forward(ff: nn.Module, hidden_states: torch.Tensor, chunk_dim: int, chunk_size: int):
+    """Apply *ff* to *hidden_states* in chunks along *chunk_dim* to save memory.
+
+    Raises ValueError if the chunked dimension is not divisible by *chunk_size*.
+    """
+    # "feed_forward_chunk_size" can be used to save memory
+    if hidden_states.shape[chunk_dim] % chunk_size != 0:
+        raise ValueError(
+            f"`hidden_states` dimension to be chunked: {hidden_states.shape[chunk_dim]} has to be divisible by chunk size: {chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
+        )
+
+    num_chunks = hidden_states.shape[chunk_dim] // chunk_size
+    ff_output = torch.cat(
+        [ff(hid_slice) for hid_slice in hidden_states.chunk(num_chunks, dim=chunk_dim)],
+        dim=chunk_dim,
+    )
+    return ff_output
+
+
+class Basic2p5DTransformerBlock(torch.nn.Module):
+    """Wraps a diffusers ``BasicTransformerBlock`` and augments it with
+    optional multiview attention (MA) and reference-view attention (RA).
+
+    Attribute lookups that miss on this wrapper fall through to the wrapped
+    transformer via ``__getattr__``, which is why ``self.dim``,
+    ``self.num_attention_heads`` etc. below resolve on the inner block.
+    """
+
+    def __init__(self, transformer: BasicTransformerBlock, layer_name, use_ma=True, use_ra=True) -> None:
+        super().__init__()
+        self.transformer = transformer
+        self.layer_name = layer_name
+        self.use_ma = use_ma
+        self.use_ra = use_ra
+
+        # multiview attn
+        if self.use_ma:
+            self.attn_multiview = Attention(
+                query_dim=self.dim,
+                heads=self.num_attention_heads,
+                dim_head=self.attention_head_dim,
+                dropout=self.dropout,
+                bias=self.attention_bias,
+                cross_attention_dim=None,
+                upcast_attention=self.attn1.upcast_attention,
+                out_bias=True,
+            )
+
+        # ref attn
+        if self.use_ra:
+            self.attn_refview = Attention(
+                query_dim=self.dim,
+                heads=self.num_attention_heads,
+                dim_head=self.attention_head_dim,
+                dropout=self.dropout,
+                bias=self.attention_bias,
+                cross_attention_dim=None,
+                upcast_attention=self.attn1.upcast_attention,
+                out_bias=True,
+            )
+
+    def __getattr__(self, name: str):
+        """Delegate missing attributes to the wrapped transformer block.
+
+        ``nn.Module.__getattr__`` is tried first (parameters/buffers/
+        submodules); anything it cannot resolve is looked up on
+        ``self.transformer``.
+        """
+        try:
+            return super().__getattr__(name)
+        except AttributeError:
+            return getattr(self.transformer, name)
+
+    def forward(
+        self,
+        hidden_states: torch.Tensor,
+        attention_mask: Optional[torch.Tensor] = None,
+        encoder_hidden_states: Optional[torch.Tensor] = None,
+        encoder_attention_mask: Optional[torch.Tensor] = None,
+        timestep: Optional[torch.LongTensor] = None,
+        cross_attention_kwargs: Dict[str, Any] = None,
+        class_labels: Optional[torch.LongTensor] = None,
+        added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
+    ) -> torch.Tensor:
+        """Transformer block forward with extra reference/multiview attention.
+
+        Control values ('num_in_batch', 'mode', 'mva_scale', 'ref_scale',
+        'condition_embed_dict') are smuggled in via cross_attention_kwargs and
+        popped before the kwargs are forwarded to the attention layers.
+        NOTE(review): the `'w' in mode` / `'r' in mode` checks assume `mode`
+        is a string when this block is reached — confirm callers always set it.
+        """
+
+        # Notice that normalization is always applied before the real computation in the following blocks.
+        # 0. Self-Attention
+        batch_size = hidden_states.shape[0]
+
+        cross_attention_kwargs = cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {}
+        num_in_batch = cross_attention_kwargs.pop('num_in_batch', 1)
+        mode = cross_attention_kwargs.pop('mode', None)
+        mva_scale = cross_attention_kwargs.pop('mva_scale', 1.0)
+        ref_scale = cross_attention_kwargs.pop('ref_scale', 1.0)
+        condition_embed_dict = cross_attention_kwargs.pop("condition_embed_dict", None)
+
+        if self.norm_type == "ada_norm":
+            norm_hidden_states = self.norm1(hidden_states, timestep)
+        elif self.norm_type == "ada_norm_zero":
+            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
+                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
+            )
+        elif self.norm_type in ["layer_norm", "layer_norm_i2vgen"]:
+            norm_hidden_states = self.norm1(hidden_states)
+        elif self.norm_type == "ada_norm_continuous":
+            norm_hidden_states = self.norm1(hidden_states, added_cond_kwargs["pooled_text_emb"])
+        elif self.norm_type == "ada_norm_single":
+            shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (
+                self.scale_shift_table[None] + timestep.reshape(batch_size, 6, -1)
+            ).chunk(6, dim=1)
+            norm_hidden_states = self.norm1(hidden_states)
+            norm_hidden_states = norm_hidden_states * (1 + scale_msa) + shift_msa
+        else:
+            raise ValueError("Incorrect norm used")
+
+        if self.pos_embed is not None:
+            norm_hidden_states = self.pos_embed(norm_hidden_states)
+
+        # 1. Prepare GLIGEN inputs
+        cross_attention_kwargs = cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {}
+        gligen_kwargs = cross_attention_kwargs.pop("gligen", None)
+
+        attn_output = self.attn1(
+            norm_hidden_states,
+            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
+            attention_mask=attention_mask,
+            **cross_attention_kwargs,
+        )
+
+        if self.norm_type == "ada_norm_zero":
+            attn_output = gate_msa.unsqueeze(1) * attn_output
+        elif self.norm_type == "ada_norm_single":
+            attn_output = gate_msa * attn_output
+
+        hidden_states = attn_output + hidden_states
+        if hidden_states.ndim == 4:
+            hidden_states = hidden_states.squeeze(1)
+
+        # 1.2 Reference Attention
+        # 'w' (write) mode caches this layer's normalized states so a later
+        # 'r' (read) pass can attend to them as the reference view.
+        if 'w' in mode:
+            condition_embed_dict[self.layer_name] = rearrange(norm_hidden_states, '(b n) l c -> b (n l) c',
+                                                              n=num_in_batch)  # B, (N L), C
+
+        if 'r' in mode and self.use_ra:
+            condition_embed = condition_embed_dict[self.layer_name].unsqueeze(1).repeat(1, num_in_batch, 1,
+                                                                                        1)  # B N L C
+            condition_embed = rearrange(condition_embed, 'b n l c -> (b n) l c')
+
+            attn_output = self.attn_refview(
+                norm_hidden_states,
+                encoder_hidden_states=condition_embed,
+                attention_mask=None,
+                **cross_attention_kwargs
+            )
+            # Per-sample ref_scale tensors are broadcast across views and
+            # trailing dims before scaling the attention residual.
+            ref_scale_timing = ref_scale
+            if isinstance(ref_scale, torch.Tensor):
+                ref_scale_timing = ref_scale.unsqueeze(1).repeat(1, num_in_batch).view(-1)
+                for _ in range(attn_output.ndim - 1):
+                    ref_scale_timing = ref_scale_timing.unsqueeze(-1)
+            hidden_states = ref_scale_timing * attn_output + hidden_states
+            if hidden_states.ndim == 4:
+                hidden_states = hidden_states.squeeze(1)
+
+        # 1.3 Multiview Attention
+        # All views of a sample attend jointly over the concatenated tokens.
+        # (NOTE: 'multivew' spelling kept — it is the runtime identifier.)
+        if num_in_batch > 1 and self.use_ma:
+            multivew_hidden_states = rearrange(norm_hidden_states, '(b n) l c -> b (n l) c', n=num_in_batch)
+
+            attn_output = self.attn_multiview(
+                multivew_hidden_states,
+                encoder_hidden_states=multivew_hidden_states,
+                **cross_attention_kwargs
+            )
+
+            attn_output = rearrange(attn_output, 'b (n l) c -> (b n) l c', n=num_in_batch)
+
+            hidden_states = mva_scale * attn_output + hidden_states
+            if hidden_states.ndim == 4:
+                hidden_states = hidden_states.squeeze(1)
+
+        # 1.2 GLIGEN Control
+        if gligen_kwargs is not None:
+            hidden_states = self.fuser(hidden_states, gligen_kwargs["objs"])
+
+        # 3. Cross-Attention
+        if self.attn2 is not None:
+            if self.norm_type == "ada_norm":
+                norm_hidden_states = self.norm2(hidden_states, timestep)
+            elif self.norm_type in ["ada_norm_zero", "layer_norm", "layer_norm_i2vgen"]:
+                norm_hidden_states = self.norm2(hidden_states)
+            elif self.norm_type == "ada_norm_single":
+                # For PixArt norm2 isn't applied here:
+                # https://github.com/PixArt-alpha/PixArt-alpha/blob/0f55e922376d8b797edd44d25d0e7464b260dcab/diffusion/model/nets/PixArtMS.py#L70C1-L76C103
+                norm_hidden_states = hidden_states
+            elif self.norm_type == "ada_norm_continuous":
+                norm_hidden_states = self.norm2(hidden_states, added_cond_kwargs["pooled_text_emb"])
+            else:
+                raise ValueError("Incorrect norm")
+
+            if self.pos_embed is not None and self.norm_type != "ada_norm_single":
+                norm_hidden_states = self.pos_embed(norm_hidden_states)
+
+            attn_output = self.attn2(
+                norm_hidden_states,
+                encoder_hidden_states=encoder_hidden_states,
+                attention_mask=encoder_attention_mask,
+                **cross_attention_kwargs,
+            )
+
+            hidden_states = attn_output + hidden_states
+
+        # 4. Feed-forward
+        # i2vgen doesn't have this norm 🤷♂️
+        if self.norm_type == "ada_norm_continuous":
+            norm_hidden_states = self.norm3(hidden_states, added_cond_kwargs["pooled_text_emb"])
+        elif not self.norm_type == "ada_norm_single":
+            norm_hidden_states = self.norm3(hidden_states)
+
+        if self.norm_type == "ada_norm_zero":
+            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
+
+        if self.norm_type == "ada_norm_single":
+            norm_hidden_states = self.norm2(hidden_states)
+            norm_hidden_states = norm_hidden_states * (1 + scale_mlp) + shift_mlp
+
+        if self._chunk_size is not None:
+            # "feed_forward_chunk_size" can be used to save memory
+            ff_output = _chunked_feed_forward(self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size)
+        else:
+            ff_output = self.ff(norm_hidden_states)
+
+        if self.norm_type == "ada_norm_zero":
+            ff_output = gate_mlp.unsqueeze(1) * ff_output
+        elif self.norm_type == "ada_norm_single":
+            ff_output = gate_mlp * ff_output
+
+        hidden_states = ff_output + hidden_states
+        if hidden_states.ndim == 4:
+            hidden_states = hidden_states.squeeze(1)
+
+        return hidden_states
+
+
class UNet2p5DConditionModel(torch.nn.Module):
    """Multiview ("2.5D") wrapper around a diffusers ``UNet2DConditionModel``.

    The wrapped UNet's ``BasicTransformerBlock``s are replaced with
    ``Basic2p5DTransformerBlock``s so the denoiser can attend across the
    generated views (multiview attention) and to per-layer reference-image
    features cached in a ``condition_embed_dict`` (reference attention).
    Unknown attribute lookups fall through to the wrapped UNet, so instances
    are drop-in replacements for the original model.
    """

    def __init__(self, unet: UNet2DConditionModel) -> None:
        super().__init__()
        self.unet = unet

        # Feature switches: multiview attention, reference attention,
        # per-view camera (class) embedding, and a dedicated UNet copy
        # ("dual stream") used only to encode reference images.
        self.use_ma = True
        self.use_ra = True
        self.use_camera_embedding = True
        self.use_dual_stream = True

        if self.use_dual_stream:
            # Separate stream that only runs the reference latents ('w' mode)
            # to populate condition_embed_dict; it starts as a weight copy.
            self.unet_dual = copy.deepcopy(unet)
            self.init_attention(self.unet_dual)
        self.init_attention(self.unet, use_ma=self.use_ma, use_ra=self.use_ra)
        self.init_condition()
        self.init_camera_embedding()

    @staticmethod
    def from_pretrained(pretrained_model_name_or_path, **kwargs):
        """Build the wrapped model from ``config.json`` and
        ``diffusion_pytorch_model.bin`` found in *pretrained_model_name_or_path*.

        Only ``torch_dtype`` (default ``torch.float32``) is honoured from
        ``**kwargs``; everything else is discarded.
        """
        torch_dtype = kwargs.pop('torch_dtype', torch.float32)
        config_path = os.path.join(pretrained_model_name_or_path, 'config.json')
        unet_ckpt_path = os.path.join(pretrained_model_name_or_path, 'diffusion_pytorch_model.bin')
        with open(config_path, 'r', encoding='utf-8') as file:
            config = json.load(file)
        unet = UNet2DConditionModel(**config)
        unet = UNet2p5DConditionModel(unet)
        # weights_only=True: load plain tensors, refuse arbitrary pickled code.
        unet_ckpt = torch.load(unet_ckpt_path, map_location='cpu', weights_only=True)
        unet.load_state_dict(unet_ckpt, strict=True)
        unet = unet.to(torch_dtype)
        return unet

    def init_condition(self):
        """Widen ``conv_in`` to 12 input channels and add learned text embeddings.

        The 12 channels match the channel-axis concatenation done in
        ``forward`` (noisy latents + optional normal and position maps).
        """
        self.unet.conv_in = torch.nn.Conv2d(
            12,
            self.unet.conv_in.out_channels,
            kernel_size=self.unet.conv_in.kernel_size,
            stride=self.unet.conv_in.stride,
            padding=self.unet.conv_in.padding,
            dilation=self.unet.conv_in.dilation,
            groups=self.unet.conv_in.groups,
            bias=self.unet.conv_in.bias is not None)

        # Learned stand-ins shaped like a 77-token, 1024-dim CLIP text embedding;
        # separate parameters for the generation and reference branches.
        self.unet.learned_text_clip_gen = nn.Parameter(torch.randn(1, 77, 1024))
        self.unet.learned_text_clip_ref = nn.Parameter(torch.randn(1, 77, 1024))

    def init_camera_embedding(self):
        """Repurpose the UNet class embedding as a per-view camera-id embedding."""

        if self.use_camera_embedding:
            time_embed_dim = 1280
            self.max_num_ref_image = 5
            self.max_num_gen_image = 12 * 3 + 4 * 2  # 44 generated-view ids
            # Ids [0, max_num_ref_image) are reference views; generated-view ids
            # are offset by max_num_ref_image in forward().
            self.unet.class_embedding = nn.Embedding(self.max_num_ref_image + self.max_num_gen_image, time_embed_dim)

    def init_attention(self, unet, use_ma=False, use_ra=False):
        """Replace every ``BasicTransformerBlock`` in *unet* with a
        ``Basic2p5DTransformerBlock``, naming each block by its position
        (down/mid/up indices) so reference features can be cached and looked
        up per layer via that name."""

        for down_block_i, down_block in enumerate(unet.down_blocks):
            if hasattr(down_block, "has_cross_attention") and down_block.has_cross_attention:
                for attn_i, attn in enumerate(down_block.attentions):
                    for transformer_i, transformer in enumerate(attn.transformer_blocks):
                        if isinstance(transformer, BasicTransformerBlock):
                            attn.transformer_blocks[transformer_i] = Basic2p5DTransformerBlock(transformer,
                                                                                              f'down_{down_block_i}_{attn_i}_{transformer_i}',
                                                                                              use_ma, use_ra)

        if hasattr(unet.mid_block, "has_cross_attention") and unet.mid_block.has_cross_attention:
            for attn_i, attn in enumerate(unet.mid_block.attentions):
                for transformer_i, transformer in enumerate(attn.transformer_blocks):
                    if isinstance(transformer, BasicTransformerBlock):
                        attn.transformer_blocks[transformer_i] = Basic2p5DTransformerBlock(transformer,
                                                                                           f'mid_{attn_i}_{transformer_i}',
                                                                                           use_ma, use_ra)

        for up_block_i, up_block in enumerate(unet.up_blocks):
            if hasattr(up_block, "has_cross_attention") and up_block.has_cross_attention:
                for attn_i, attn in enumerate(up_block.attentions):
                    for transformer_i, transformer in enumerate(attn.transformer_blocks):
                        if isinstance(transformer, BasicTransformerBlock):
                            attn.transformer_blocks[transformer_i] = Basic2p5DTransformerBlock(transformer,
                                                                                              f'up_{up_block_i}_{attn_i}_{transformer_i}',
                                                                                              use_ma, use_ra)

    def __getattr__(self, name: str):
        # Fall through to the wrapped UNet for anything not defined on the
        # wrapper itself (e.g. .config, .dtype), keeping it a drop-in
        # replacement. __getattr__ is only called after normal lookup fails.
        try:
            return super().__getattr__(name)
        except AttributeError:
            return getattr(self.unet, name)

    def forward(
            self, sample, timestep, encoder_hidden_states,
            *args, down_intrablock_additional_residuals=None,
            down_block_res_samples=None, mid_block_res_sample=None,
            **cached_condition,
    ):
        """Denoise a batch of multiview latents.

        ``sample`` is (B, N_gen, C, H, W); the view axis is folded into the
        batch axis before calling the wrapped UNet. ``cached_condition`` may
        carry normal/position control maps, reference latents, camera ids,
        scale factors, and a reusable ``condition_embed_dict``.
        """
        B, N_gen, _, H, W = sample.shape
        assert H == W

        if self.use_camera_embedding:
            # Shift generated-view ids past the reference-view id range
            # (see init_camera_embedding).
            camera_info_gen = cached_condition['camera_info_gen'] + self.max_num_ref_image
            camera_info_gen = rearrange(camera_info_gen, 'b n -> (b n)')
        else:
            camera_info_gen = None

        # Concatenate optional normal/position maps on the channel axis;
        # this is what requires the 12-channel conv_in from init_condition.
        sample = [sample]
        if 'normal_imgs' in cached_condition:
            sample.append(cached_condition["normal_imgs"])
        if 'position_imgs' in cached_condition:
            sample.append(cached_condition["position_imgs"])
        sample = torch.cat(sample, dim=2)

        # Fold views into the batch axis.
        sample = rearrange(sample, 'b n c h w -> (b n) c h w')

        # Repeat the text conditioning once per generated view.
        encoder_hidden_states_gen = encoder_hidden_states.unsqueeze(1).repeat(1, N_gen, 1, 1)
        encoder_hidden_states_gen = rearrange(encoder_hidden_states_gen, 'b n l c -> (b n) l c')

        if self.use_ra:
            if 'condition_embed_dict' in cached_condition:
                # Reference features already computed (e.g. cached across
                # denoising steps) -- reuse them.
                condition_embed_dict = cached_condition['condition_embed_dict']
            else:
                condition_embed_dict = {}
                ref_latents = cached_condition['ref_latents']
                N_ref = ref_latents.shape[1]
                if self.use_camera_embedding:
                    camera_info_ref = cached_condition['camera_info_ref']
                    camera_info_ref = rearrange(camera_info_ref, 'b n -> (b n)')
                else:
                    camera_info_ref = None

                ref_latents = rearrange(ref_latents, 'b n c h w -> (b n) c h w')

                encoder_hidden_states_ref = self.unet.learned_text_clip_ref.unsqueeze(1).repeat(B, N_ref, 1, 1)
                encoder_hidden_states_ref = rearrange(encoder_hidden_states_ref, 'b n l c -> (b n) l c')

                noisy_ref_latents = ref_latents
                timestep_ref = 0  # reference pass runs at timestep 0

                if self.use_dual_stream:
                    unet_ref = self.unet_dual
                else:
                    unet_ref = self.unet
                # 'w' (write) mode: run the reference views once so each
                # Basic2p5DTransformerBlock caches its features into
                # condition_embed_dict; the UNet output itself is discarded.
                unet_ref(
                    noisy_ref_latents, timestep_ref,
                    encoder_hidden_states=encoder_hidden_states_ref,
                    class_labels=camera_info_ref,
                    # **kwargs
                    return_dict=False,
                    cross_attention_kwargs={
                        'mode': 'w', 'num_in_batch': N_ref,
                        'condition_embed_dict': condition_embed_dict},
                )
                # Expose the cache to the caller so later steps can reuse it.
                cached_condition['condition_embed_dict'] = condition_embed_dict
        else:
            condition_embed_dict = None

        mva_scale = cached_condition.get('mva_scale', 1.0)
        ref_scale = cached_condition.get('ref_scale', 1.0)

        # 'r' (read) mode: main denoising pass consuming the cached reference
        # features; additional residuals are cast to the UNet's dtype.
        return self.unet(
            sample, timestep,
            encoder_hidden_states_gen, *args,
            class_labels=camera_info_gen,
            down_intrablock_additional_residuals=[
                sample.to(dtype=self.unet.dtype) for sample in down_intrablock_additional_residuals
            ] if down_intrablock_additional_residuals is not None else None,
            down_block_additional_residuals=[
                sample.to(dtype=self.unet.dtype) for sample in down_block_res_samples
            ] if down_block_res_samples is not None else None,
            mid_block_additional_residual=(
                mid_block_res_sample.to(dtype=self.unet.dtype)
                if mid_block_res_sample is not None else None
            ),
            return_dict=False,
            cross_attention_kwargs={
                'mode': 'r', 'num_in_batch': N_gen,
                'condition_embed_dict': condition_embed_dict,
                'mva_scale': mva_scale,
                'ref_scale': ref_scale,
            },
        )
diff --git a/hy3dgen/texgen/pipelines.py b/hy3dgen/texgen/pipelines.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e41bc03f388402bfaf2542a4b64cdd3866e3bad
--- /dev/null
+++ b/hy3dgen/texgen/pipelines.py
@@ -0,0 +1,227 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the respective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+
+import logging
+import numpy as np
+import os
+import torch
+from PIL import Image
+from typing import Union, Optional
+
+from .differentiable_renderer.mesh_render import MeshRender
+from .utils.dehighlight_utils import Light_Shadow_Remover
+from .utils.multiview_utils import Multiview_Diffusion_Net
+from .utils.imagesuper_utils import Image_Super_Net
+from .utils.uv_warp_utils import mesh_uv_wrap
+
+logger = logging.getLogger(__name__)
+
+
class Hunyuan3DTexGenConfig:
    """Settings bag for the texture-generation pipeline.

    Holds checkpoint locations, the fixed candidate camera setup, and the
    render/bake parameters consumed by ``Hunyuan3DPaintPipeline``.
    """

    def __init__(self, light_remover_ckpt_path, multiview_ckpt_path):
        self.device = 'cuda'
        self.light_remover_ckpt_path = light_remover_ckpt_path
        self.multiview_ckpt_path = multiview_ckpt_path

        # Candidate viewpoints as (azimuth, elevation, bake weight) triples,
        # in degrees: four side views plus top and bottom.
        views = [
            (0, 0, 1),
            (90, 0, 0.1),
            (180, 0, 0.5),
            (270, 0, 0.1),
            (0, 90, 0.05),
            (180, -90, 0.05),
        ]
        self.candidate_camera_azims = [azim for azim, _, _ in views]
        self.candidate_camera_elevs = [elev for _, elev, _ in views]
        self.candidate_view_weights = [weight for _, _, weight in views]

        # Rendering / baking parameters.
        self.render_size = 2048
        self.texture_size = 2048
        self.bake_exp = 4          # exponent sharpening per-view cosine weights
        self.merge_method = 'fast'
+
+
+class Hunyuan3DPaintPipeline:
+ @classmethod
+ def from_pretrained(cls, model_path):
+ original_model_path = model_path
+ if not os.path.exists(model_path):
+ # try local path
+ base_dir = os.environ.get('HY3DGEN_MODELS', '~/.cache/hy3dgen')
+ model_path = os.path.expanduser(os.path.join(base_dir, model_path))
+
+ delight_model_path = os.path.join(model_path, 'hunyuan3d-delight-v2-0')
+ multiview_model_path = os.path.join(model_path, 'hunyuan3d-paint-v2-0')
+
+ if not os.path.exists(delight_model_path) or not os.path.exists(multiview_model_path):
+ try:
+ import huggingface_hub
+ # download from huggingface
+ model_path = huggingface_hub.snapshot_download(repo_id=original_model_path,
+ allow_patterns=["hunyuan3d-delight-v2-0/*"])
+ model_path = huggingface_hub.snapshot_download(repo_id=original_model_path,
+ allow_patterns=["hunyuan3d-paint-v2-0/*"])
+ delight_model_path = os.path.join(model_path, 'hunyuan3d-delight-v2-0')
+ multiview_model_path = os.path.join(model_path, 'hunyuan3d-paint-v2-0')
+ return cls(Hunyuan3DTexGenConfig(delight_model_path, multiview_model_path))
+ except ImportError:
+ logger.warning(
+ "You need to install HuggingFace Hub to load models from the hub."
+ )
+ raise RuntimeError(f"Model path {model_path} not found")
+ else:
+ return cls(Hunyuan3DTexGenConfig(delight_model_path, multiview_model_path))
+
+ raise FileNotFoundError(f"Model path {original_model_path} not found and we could not find it at huggingface")
+
+ def __init__(self, config):
+ self.config = config
+ self.models = {}
+ self.render = MeshRender(
+ default_resolution=self.config.render_size,
+ texture_size=self.config.texture_size)
+
+ self.load_models()
+
+ def load_models(self):
+ # empty cude cache
+ torch.cuda.empty_cache()
+ # Load model
+ self.models['delight_model'] = Light_Shadow_Remover(self.config)
+ self.models['multiview_model'] = Multiview_Diffusion_Net(self.config)
+ # self.models['super_model'] = Image_Super_Net(self.config)
+
+ def enable_model_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[torch.device, str] = "cuda"):
+ self.models['delight_model'].pipeline.enable_model_cpu_offload(gpu_id=gpu_id, device=device)
+ self.models['multiview_model'].pipeline.enable_model_cpu_offload(gpu_id=gpu_id, device=device)
+
+ def render_normal_multiview(self, camera_elevs, camera_azims, use_abs_coor=True):
+ normal_maps = []
+ for elev, azim in zip(camera_elevs, camera_azims):
+ normal_map = self.render.render_normal(
+ elev, azim, use_abs_coor=use_abs_coor, return_type='pl')
+ normal_maps.append(normal_map)
+
+ return normal_maps
+
+ def render_position_multiview(self, camera_elevs, camera_azims):
+ position_maps = []
+ for elev, azim in zip(camera_elevs, camera_azims):
+ position_map = self.render.render_position(
+ elev, azim, return_type='pl')
+ position_maps.append(position_map)
+
+ return position_maps
+
+ def bake_from_multiview(self, views, camera_elevs,
+ camera_azims, view_weights, method='graphcut'):
+ project_textures, project_weighted_cos_maps = [], []
+ project_boundary_maps = []
+ for view, camera_elev, camera_azim, weight in zip(
+ views, camera_elevs, camera_azims, view_weights):
+ project_texture, project_cos_map, project_boundary_map = self.render.back_project(
+ view, camera_elev, camera_azim)
+ project_cos_map = weight * (project_cos_map ** self.config.bake_exp)
+ project_textures.append(project_texture)
+ project_weighted_cos_maps.append(project_cos_map)
+ project_boundary_maps.append(project_boundary_map)
+
+ if method == 'fast':
+ texture, ori_trust_map = self.render.fast_bake_texture(
+ project_textures, project_weighted_cos_maps)
+ else:
+ raise f'no method {method}'
+ return texture, ori_trust_map > 1E-8
+
+ def texture_inpaint(self, texture, mask):
+
+ texture_np = self.render.uv_inpaint(texture, mask)
+ texture = torch.tensor(texture_np / 255).float().to(texture.device)
+
+ return texture
+
+ def recenter_image(self, image, border_ratio=0.2):
+ if image.mode == 'RGB':
+ return image
+ elif image.mode == 'L':
+ image = image.convert('RGB')
+ return image
+
+ alpha_channel = np.array(image)[:, :, 3]
+ non_zero_indices = np.argwhere(alpha_channel > 0)
+ if non_zero_indices.size == 0:
+ raise ValueError("Image is fully transparent")
+
+ min_row, min_col = non_zero_indices.min(axis=0)
+ max_row, max_col = non_zero_indices.max(axis=0)
+
+ cropped_image = image.crop((min_col, min_row, max_col + 1, max_row + 1))
+
+ width, height = cropped_image.size
+ border_width = int(width * border_ratio)
+ border_height = int(height * border_ratio)
+
+ new_width = width + 2 * border_width
+ new_height = height + 2 * border_height
+
+ square_size = max(new_width, new_height)
+
+ new_image = Image.new('RGBA', (square_size, square_size), (255, 255, 255, 0))
+
+ paste_x = (square_size - new_width) // 2 + border_width
+ paste_y = (square_size - new_height) // 2 + border_height
+
+ new_image.paste(cropped_image, (paste_x, paste_y))
+ return new_image
+
+ @torch.no_grad()
+ def __call__(self, mesh, image):
+
+ if isinstance(image, str):
+ image_prompt = Image.open(image)
+ else:
+ image_prompt = image
+
+ image_prompt = self.recenter_image(image_prompt)
+
+ image_prompt = self.models['delight_model'](image_prompt)
+
+ mesh = mesh_uv_wrap(mesh)
+
+ self.render.load_mesh(mesh)
+
+ selected_camera_elevs, selected_camera_azims, selected_view_weights = \
+ self.config.candidate_camera_elevs, self.config.candidate_camera_azims, self.config.candidate_view_weights
+
+ normal_maps = self.render_normal_multiview(
+ selected_camera_elevs, selected_camera_azims, use_abs_coor=True)
+ position_maps = self.render_position_multiview(
+ selected_camera_elevs, selected_camera_azims)
+
+ camera_info = [(((azim // 30) + 9) % 12) // {-20: 1, 0: 1, 20: 1, -90: 3, 90: 3}[
+ elev] + {-20: 0, 0: 12, 20: 24, -90: 36, 90: 40}[elev] for azim, elev in
+ zip(selected_camera_azims, selected_camera_elevs)]
+ multiviews = self.models['multiview_model'](image_prompt, normal_maps + position_maps, camera_info)
+
+ for i in range(len(multiviews)):
+ # multiviews[i] = self.models['super_model'](multiviews[i])
+ multiviews[i] = multiviews[i].resize(
+ (self.config.render_size, self.config.render_size))
+
+ texture, mask = self.bake_from_multiview(multiviews,
+ selected_camera_elevs, selected_camera_azims, selected_view_weights,
+ method=self.config.merge_method)
+
+ mask_np = (mask.squeeze(-1).cpu().numpy() * 255).astype(np.uint8)
+
+ texture = self.texture_inpaint(texture, mask_np)
+
+ self.render.set_texture(texture)
+ textured_mesh = self.render.save_mesh()
+
+ return textured_mesh
diff --git a/hy3dgen/texgen/utils/__init__.py b/hy3dgen/texgen/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3cde306b834c182f04339203fb5a78d846ac613b
--- /dev/null
+++ b/hy3dgen/texgen/utils/__init__.py
@@ -0,0 +1,13 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the respective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
\ No newline at end of file
diff --git a/hy3dgen/texgen/utils/alignImg4Tex_utils.py b/hy3dgen/texgen/utils/alignImg4Tex_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a15787764d32b7470b6f3a44c876e4cdcb7f46a
--- /dev/null
+++ b/hy3dgen/texgen/utils/alignImg4Tex_utils.py
@@ -0,0 +1,121 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the respective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+import torch
+from diffusers import EulerAncestralDiscreteScheduler
+from diffusers import StableDiffusionControlNetPipeline, StableDiffusionXLControlNetImg2ImgPipeline, ControlNetModel, \
+ AutoencoderKL
+
+
class Img2img_Control_Ip_adapter:
    """SD-1.5 text-to-image with a depth ControlNet plus IP-Adapter image
    prompting, running in fp16 on *device*."""

    def __init__(self, device):
        # Depth-conditioned ControlNet for Stable Diffusion 1.5.
        controlnet = ControlNetModel.from_pretrained('lllyasviel/control_v11f1p_sd15_depth', torch_dtype=torch.float16,
                                                     variant="fp16", use_safetensors=True)
        pipe = StableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5', controlnet=controlnet, torch_dtype=torch.float16, use_safetensors=True
        )
        # IP-Adapter injects the reference image as an extra image prompt.
        pipe.load_ip_adapter('h94/IP-Adapter', subfolder="models", weight_name="ip-adapter-plus_sd15.safetensors")
        pipe.set_ip_adapter_scale(0.7)

        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        # pipe.enable_model_cpu_offload()
        self.pipe = pipe.to(device)

    def __call__(
            self,
            prompt,
            control_image,
            ip_adapter_image,
            negative_prompt,
            height=512,
            width=512,
            num_inference_steps=20,
            guidance_scale=8.0,
            controlnet_conditioning_scale=1.0,
            output_type="pil",
            **kwargs,
    ):
        """Run the pipeline and return the first generated image.

        The RNG is fixed via a seeded generator, so output is deterministic
        for identical inputs.
        """
        results = self.pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            image=control_image,
            ip_adapter_image=ip_adapter_image,
            generator=torch.manual_seed(42),  # fixed seed -> deterministic output
            # NOTE(review): diffusers pipelines take `generator`, not `seed`;
            # this kwarg is probably swallowed/ignored — confirm against the
            # installed diffusers version.
            seed=42,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            controlnet_conditioning_scale=controlnet_conditioning_scale,
            strength=1,
            # clip_skip=2,
            height=height,
            width=width,
            output_type=output_type,
            **kwargs,
        ).images[0]
        return results
+
+
+################################################################
+
class HesModel:
    """SDXL img2img with a depth ControlNet and IP-Adapter, fp16 on CUDA."""

    def __init__(self, ):
        controlnet_depth = ControlNetModel.from_pretrained(
            'diffusers/controlnet-depth-sdxl-1.0',
            torch_dtype=torch.float16,
            variant="fp16",
            use_safetensors=True
        )
        self.pipe = StableDiffusionXLControlNetImg2ImgPipeline.from_pretrained(
            'stabilityai/stable-diffusion-xl-base-1.0',
            torch_dtype=torch.float16,
            variant="fp16",
            controlnet=controlnet_depth,
            use_safetensors=True,
        )
        # Swap in the fp16-fix VAE (per the repo name, safe for half precision).
        self.pipe.vae = AutoencoderKL.from_pretrained(
            'madebyollin/sdxl-vae-fp16-fix',
            torch_dtype=torch.float16
        )

        # IP-Adapter adds the reference image as an additional prompt.
        self.pipe.load_ip_adapter('h94/IP-Adapter', subfolder="sdxl_models", weight_name="ip-adapter_sdxl.safetensors")
        self.pipe.set_ip_adapter_scale(0.7)
        self.pipe.to("cuda")

    def __call__(self,
                 init_image,
                 control_image,
                 ip_adapter_image=None,
                 prompt='3D image',
                 negative_prompt='2D image',
                 seed=42,
                 strength=0.8,
                 num_inference_steps=40,
                 guidance_scale=7.5,
                 controlnet_conditioning_scale=0.5,
                 **kwargs
                 ):
        """Run depth-controlled SDXL img2img; returns the first generated image."""
        image = self.pipe(
            prompt=prompt,
            image=init_image,
            control_image=control_image,
            ip_adapter_image=ip_adapter_image,
            negative_prompt=negative_prompt,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            strength=strength,
            controlnet_conditioning_scale=controlnet_conditioning_scale,
            # NOTE(review): diffusers pipelines take `generator`, not `seed`;
            # this kwarg likely has no effect on the RNG — confirm.
            seed=seed,
            **kwargs
        ).images[0]
        return image
diff --git a/hy3dgen/texgen/utils/counter_utils.py b/hy3dgen/texgen/utils/counter_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..52c71f87a7e4839913047ed050500311ce15afe2
--- /dev/null
+++ b/hy3dgen/texgen/utils/counter_utils.py
@@ -0,0 +1,48 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the respective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+
class RunningStats():
    """Incrementally tracks count, sum, mean, min and max of a value stream."""

    def __init__(self) -> None:
        self.count = 0
        self.sum = 0
        self.mean = 0
        # min/max stay None until the first value arrives.
        self.min = None
        self.max = None

    def add_value(self, value):
        """Fold one observation into the running statistics."""
        self.count += 1
        self.sum += value
        self.mean = self.sum / self.count
        self.min = value if self.min is None else min(self.min, value)
        self.max = value if self.max is None else max(self.max, value)

    def get_count(self):
        """Number of observations seen so far."""
        return self.count

    def get_sum(self):
        """Sum of all observations."""
        return self.sum

    def get_mean(self):
        """Arithmetic mean (0 before any value is added)."""
        return self.mean

    def get_min(self):
        """Smallest observation, or None if empty."""
        return self.min

    def get_max(self):
        """Largest observation, or None if empty."""
        return self.max
diff --git a/hy3dgen/texgen/utils/dehighlight_utils.py b/hy3dgen/texgen/utils/dehighlight_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..ae7b8197bcd055ade66f4057fe6b4e675f6967f5
--- /dev/null
+++ b/hy3dgen/texgen/utils/dehighlight_utils.py
@@ -0,0 +1,107 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the respective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+import cv2
+import numpy as np
+import torch
+from PIL import Image
+from diffusers import StableDiffusionInstructPix2PixPipeline, EulerAncestralDiscreteScheduler
+
+
class Light_Shadow_Remover():
    """Removes baked-in lighting/shadows from a prompt image using an
    InstructPix2Pix model, then color-corrects the result back toward the
    original image statistics."""

    def __init__(self, config):
        self.device = config.device
        # Classifier-free guidance weights for the image and text conditions.
        self.cfg_image = 1.5
        self.cfg_text = 1.0

        pipeline = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            config.light_remover_ckpt_path,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipeline.scheduler = EulerAncestralDiscreteScheduler.from_config(pipeline.scheduler.config)
        pipeline.set_progress_bar_config(disable=True)

        self.pipeline = pipeline.to(self.device, torch.float16)

    def recorrect_rgb(self, src_image, target_image, alpha_channel, scale=0.95):
        """Channel-wise mean/std color transfer of *src_image* toward
        *target_image*, computed over pixels where alpha > 0.5.

        Keeps the uncorrected source if the correction increases MSE against
        the target. Returns an HxWx4 tensor (RGB + original alpha).
        NOTE(review): divides by the per-channel source std — assumes nonzero
        variance in the masked region; confirm inputs can't be constant.
        """

        def flat_and_mask(bgr, a):
            # Flatten to (num_pixels, C) and keep only sufficiently
            # opaque pixels for the statistics.
            mask = torch.where(a > 0.5, True, False)
            bgr_flat = bgr.reshape(-1, bgr.shape[-1])
            mask_flat = mask.reshape(-1)
            bgr_flat_masked = bgr_flat[mask_flat, :]
            return bgr_flat_masked

        src_flat = flat_and_mask(src_image, alpha_channel)
        target_flat = flat_and_mask(target_image, alpha_channel)
        corrected_bgr = torch.zeros_like(src_image)

        for i in range(3):
            src_mean, src_stddev = torch.mean(src_flat[:, i]), torch.std(src_flat[:, i])
            target_mean, target_stddev = torch.mean(target_flat[:, i]), torch.std(target_flat[:, i])
            # Standard mean/std matching, damped by `scale`, clamped to [0, 1].
            corrected_bgr[:, :, i] = torch.clamp((src_image[:, :, i] - scale * src_mean) * (target_stddev / src_stddev) + scale * target_mean, 0, 1)

        # Fall back to the uncorrected source if the correction made things worse.
        src_mse = torch.mean((src_image - target_image) ** 2)
        modify_mse = torch.mean((corrected_bgr - target_image) ** 2)
        if src_mse < modify_mse:
            corrected_bgr = torch.cat([src_image, alpha_channel], dim=-1)
        else:
            corrected_bgr = torch.cat([corrected_bgr, alpha_channel], dim=-1)

        return corrected_bgr

    @torch.no_grad()
    def __call__(self, image):
        """Delight *image* (PIL); returns a 512x512 RGB PIL image composited
        on a white background.

        NOTE(review): the non-RGBA branch assumes a 3-channel input
        (e.g. 'RGB'); other modes (such as 'P') would need conversion first.
        """

        image = image.resize((512, 512))

        if image.mode == 'RGBA':
            image_array = np.array(image)
            alpha_channel = image_array[:, :, 3]
            erosion_size = 3
            kernel = np.ones((erosion_size, erosion_size), np.uint8)
            # Erode the alpha so soft edge pixels don't bleed into the
            # white background fill below.
            alpha_channel = cv2.erode(alpha_channel, kernel, iterations=1)
            image_array[alpha_channel == 0, :3] = 255
            image_array[:, :, 3] = alpha_channel
            image = Image.fromarray(image_array)

            image_tensor = torch.tensor(np.array(image) / 255.0).to(self.device)
            alpha = image_tensor[:, :, 3:]
            rgb_target = image_tensor[:, :, :3]
        else:
            image_tensor = torch.tensor(np.array(image) / 255.0).to(self.device)
            # No alpha available: treat every pixel as fully opaque.
            alpha = torch.ones_like(image_tensor)[:, :, :1]
            rgb_target = image_tensor[:, :, :3]

        image = image.convert('RGB')

        # InstructPix2Pix edit with an empty prompt and a fixed seed so the
        # delighting is deterministic.
        image = self.pipeline(
            prompt="",
            image=image,
            generator=torch.manual_seed(42),
            height=512,
            width=512,
            num_inference_steps=50,
            image_guidance_scale=self.cfg_image,
            guidance_scale=self.cfg_text,
        ).images[0]

        image_tensor = torch.tensor(np.array(image) / 255.0).to(self.device)
        rgb_src = image_tensor[:, :, :3]
        # Pull the model output back toward the original color statistics,
        # then composite onto white using the (eroded) alpha.
        image = self.recorrect_rgb(rgb_src, rgb_target, alpha)
        image = image[:, :, :3] * image[:, :, 3:] + torch.ones_like(image[:, :, :3]) * (1.0 - image[:, :, 3:])
        image = Image.fromarray((image.cpu().numpy() * 255).astype(np.uint8))

        return image
diff --git a/hy3dgen/texgen/utils/imagesuper_utils.py b/hy3dgen/texgen/utils/imagesuper_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..3d2d230f34e226eb8e899efc6ddd8b1ed5415eb0
--- /dev/null
+++ b/hy3dgen/texgen/utils/imagesuper_utils.py
@@ -0,0 +1,34 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the respective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+import torch
+from diffusers import StableDiffusionUpscalePipeline
+
class Image_Super_Net():
    """x4 image super-resolution via Stable Diffusion's upscaler pipeline."""

    def __init__(self, config):
        pipeline = StableDiffusionUpscalePipeline.from_pretrained(
            'stabilityai/stable-diffusion-x4-upscaler',
            torch_dtype=torch.float16,
        )
        pipeline.set_progress_bar_config(disable=True)
        self.up_pipeline_x4 = pipeline.to(config.device)

    @torch.no_grad()
    def __call__(self, image, prompt=''):
        """Return *image* upscaled 4x (optionally guided by *prompt*)."""
        result = self.up_pipeline_x4(
            prompt=[prompt],
            image=image,
            num_inference_steps=5,
        )
        return result.images[0]
diff --git a/hy3dgen/texgen/utils/multiview_utils.py b/hy3dgen/texgen/utils/multiview_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..cd76ff93568147bc80831bd2f8cc1b131a02a389
--- /dev/null
+++ b/hy3dgen/texgen/utils/multiview_utils.py
@@ -0,0 +1,76 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the respective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+import os
+import random
+
+import numpy as np
+import torch
+from diffusers import DiffusionPipeline
+from diffusers import EulerAncestralDiscreteScheduler
+
+
+class Multiview_Diffusion_Net():
+ def __init__(self, config) -> None:
+ self.device = config.device
+ self.view_size = 512
+ multiview_ckpt_path = config.multiview_ckpt_path
+
+ current_file_path = os.path.abspath(__file__)
+ custom_pipeline_path = os.path.join(os.path.dirname(current_file_path), '..', 'hunyuanpaint')
+
+ pipeline = DiffusionPipeline.from_pretrained(
+ multiview_ckpt_path,
+ custom_pipeline=custom_pipeline_path, torch_dtype=torch.float16)
+
+ pipeline.scheduler = EulerAncestralDiscreteScheduler.from_config(pipeline.scheduler.config,
+ timestep_spacing='trailing')
+
+ pipeline.set_progress_bar_config(disable=True)
+ self.pipeline = pipeline.to(self.device)
+
+ def seed_everything(self, seed):
+ random.seed(seed)
+ np.random.seed(seed)
+ torch.manual_seed(seed)
+ os.environ["PL_GLOBAL_SEED"] = str(seed)
+
+ def __call__(self, input_image, control_images, camera_info):
+
+ self.seed_everything(0)
+
+ input_image = input_image.resize((self.view_size, self.view_size))
+ for i in range(len(control_images)):
+ control_images[i] = control_images[i].resize((self.view_size, self.view_size))
+ if control_images[i].mode == 'L':
+ control_images[i] = control_images[i].point(lambda x: 255 if x > 1 else 0, mode='1')
+
+ kwargs = dict(generator=torch.Generator(device=self.pipeline.device).manual_seed(0))
+
+ num_view = len(control_images) // 2
+ normal_image = [[control_images[i] for i in range(num_view)]]
+ position_image = [[control_images[i + num_view] for i in range(num_view)]]
+
+ camera_info_gen = [camera_info]
+ camera_info_ref = [[0]]
+ kwargs['width'] = self.view_size
+ kwargs['height'] = self.view_size
+ kwargs['num_in_batch'] = num_view
+ kwargs['camera_info_gen'] = camera_info_gen
+ kwargs['camera_info_ref'] = camera_info_ref
+ kwargs["normal_imgs"] = normal_image
+ kwargs["position_imgs"] = position_image
+
+ mvd_image = self.pipeline(input_image, num_inference_steps=30, **kwargs).images
+ return mvd_image
diff --git a/hy3dgen/texgen/utils/simplify_mesh_utils.py b/hy3dgen/texgen/utils/simplify_mesh_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb82b1fcc44f51497d09bf1dc1dd83ba175fc940
--- /dev/null
+++ b/hy3dgen/texgen/utils/simplify_mesh_utils.py
@@ -0,0 +1,36 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the respective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+import trimesh
+
+
+def remesh_mesh(mesh_path, remesh_path, method='trimesh'):
+ if method == 'trimesh':
+ mesh_simplify_trimesh(mesh_path, remesh_path)
+ else:
+        raise NotImplementedError(f'Method {method} has not been implemented.')
+
+
+def mesh_simplify_trimesh(inputpath, outputpath):
+ import pymeshlab
+ ms = pymeshlab.MeshSet()
+ ms.load_new_mesh(inputpath, load_in_a_single_layer=True)
+ ms.save_current_mesh(outputpath.replace('.glb', '.obj'), save_textures=False)
+
+ courent = trimesh.load(outputpath.replace('.glb', '.obj'), force='mesh')
+ face_num = courent.faces.shape[0]
+
+ if face_num > 100000:
+ courent = courent.simplify_quadric_decimation(40000)
+ courent.export(outputpath)
diff --git a/hy3dgen/texgen/utils/uv_warp_utils.py b/hy3dgen/texgen/utils/uv_warp_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..83628342365f2060fd5077ef1b45a6b460f572f0
--- /dev/null
+++ b/hy3dgen/texgen/utils/uv_warp_utils.py
@@ -0,0 +1,32 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the respective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+import trimesh
+import xatlas
+
+
+def mesh_uv_wrap(mesh):
+ if isinstance(mesh, trimesh.Scene):
+ mesh = mesh.dump(concatenate=True)
+
+ if len(mesh.faces) > 500000000:
+ raise ValueError("The mesh has more than 500,000,000 faces, which is not supported.")
+
+ vmapping, indices, uvs = xatlas.parametrize(mesh.vertices, mesh.faces)
+
+ mesh.vertices = mesh.vertices[vmapping]
+ mesh.faces = indices
+ mesh.visual.uv = uvs
+
+ return mesh
diff --git a/hy3dgen/text2image.py b/hy3dgen/text2image.py
new file mode 100644
index 0000000000000000000000000000000000000000..156949c79291f1e2c039d95ac41039646f2f6b55
--- /dev/null
+++ b/hy3dgen/text2image.py
@@ -0,0 +1,81 @@
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the repsective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+import os
+import random
+
+import numpy as np
+import torch
+from diffusers import AutoPipelineForText2Image
+
+
+def seed_everything(seed):
+ random.seed(seed)
+ np.random.seed(seed)
+ torch.manual_seed(seed)
+ os.environ["PL_GLOBAL_SEED"] = str(seed)
+
+
+class HunyuanDiTPipeline:
+ def __init__(
+ self,
+ model_path="Tencent-Hunyuan/HunyuanDiT-v1.1-Diffusers-Distilled",
+ device='cuda'
+ ):
+ self.device = device
+ self.pipe = AutoPipelineForText2Image.from_pretrained(
+ model_path,
+ torch_dtype=torch.float16,
+ enable_pag=True,
+ pag_applied_layers=["blocks.(16|17|18|19)"]
+ ).to(device)
+ self.pos_txt = ",白色背景,3D风格,最佳质量"
+ self.neg_txt = "文本,特写,裁剪,出框,最差质量,低质量,JPEG伪影,PGLY,重复,病态," \
+ "残缺,多余的手指,变异的手,画得不好的手,画得不好的脸,变异,畸形,模糊,脱水,糟糕的解剖学," \
+ "糟糕的比例,多余的肢体,克隆的脸,毁容,恶心的比例,畸形的肢体,缺失的手臂,缺失的腿," \
+ "额外的手臂,额外的腿,融合的手指,手指太多,长脖子"
+
+ def compile(self):
+        # accelerate the hunyuan-dit transformer; the first inference will take a long time
+ torch.set_float32_matmul_precision('high')
+ self.pipe.transformer = torch.compile(self.pipe.transformer, fullgraph=True)
+ # self.pipe.vae.decode = torch.compile(self.pipe.vae.decode, fullgraph=True)
+ generator = torch.Generator(device=self.pipe.device) # infer once for hot-start
+ out_img = self.pipe(
+ prompt='美少女战士',
+ negative_prompt='模糊',
+ num_inference_steps=25,
+ pag_scale=1.3,
+ width=1024,
+ height=1024,
+ generator=generator,
+ return_dict=False
+ )[0][0]
+
+ @torch.no_grad()
+ def __call__(self, prompt, seed=0):
+ seed_everything(seed)
+ generator = torch.Generator(device=self.pipe.device)
+ generator = generator.manual_seed(int(seed))
+ out_img = self.pipe(
+ prompt=prompt[:60] + self.pos_txt,
+ negative_prompt=self.neg_txt,
+ num_inference_steps=25,
+ pag_scale=1.3,
+ width=1024,
+ height=1024,
+ generator=generator,
+ return_dict=False
+ )[0][0]
+ return out_img
diff --git a/minimal_demo.py b/minimal_demo.py
new file mode 100644
index 0000000000000000000000000000000000000000..1f6ff96471a75b99e324f27bc58dc5502669336f
--- /dev/null
+++ b/minimal_demo.py
@@ -0,0 +1,79 @@
+# Open Source Model Licensed under the Apache License Version 2.0
+# and Other Licenses of the Third-Party Components therein:
+# The below Model in this distribution may have been modified by THL A29 Limited
+# ("Tencent Modifications"). All Tencent Modifications are Copyright (C) 2024 THL A29 Limited.
+
+# Copyright (C) 2024 THL A29 Limited, a Tencent company. All rights reserved.
+# The below software and/or models in this distribution may have been
+# modified by THL A29 Limited ("Tencent Modifications").
+# All Tencent Modifications are Copyright (C) THL A29 Limited.
+
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the respective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+import torch
+from PIL import Image
+
+from hy3dgen.rembg import BackgroundRemover
+from hy3dgen.shapegen import Hunyuan3DDiTFlowMatchingPipeline, FaceReducer, FloaterRemover, DegenerateFaceRemover
+from hy3dgen.text2image import HunyuanDiTPipeline
+
+
+def image_to_3d(image_path='assets/demo.png'):
+ rembg = BackgroundRemover()
+ model_path = 'Hunyuan3D-2'
+
+ image = Image.open(image_path)
+ image = image.resize((1024, 1024))
+
+ if image.mode == 'RGB':
+ image = rembg(image)
+
+ pipeline = Hunyuan3DDiTFlowMatchingPipeline.from_pretrained(model_path)
+
+ mesh = pipeline(image=image, num_inference_steps=30, mc_algo='mc',
+ generator=torch.manual_seed(2025))[0]
+ mesh = FloaterRemover()(mesh)
+ mesh = DegenerateFaceRemover()(mesh)
+ mesh = FaceReducer()(mesh)
+ mesh.export('mesh.glb')
+
+ try:
+ from hy3dgen.texgen import Hunyuan3DPaintPipeline
+ pipeline = Hunyuan3DPaintPipeline.from_pretrained(model_path)
+ mesh = pipeline(mesh, image=image)
+ mesh.export('texture.glb')
+ except Exception as e:
+ print(e)
+ print('Please try to install requirements by following README.md')
+
+
+def text_to_3d(prompt='a car'):
+ rembg = BackgroundRemover()
+ t2i = HunyuanDiTPipeline('Tencent-Hunyuan--HunyuanDiT-v1.1-Diffusers-Distilled')
+ model_path = 'Hunyuan3D-2'
+ i23d = Hunyuan3DDiTFlowMatchingPipeline.from_pretrained(model_path)
+
+ image = t2i(prompt)
+ image = rembg(image)
+ mesh = i23d(image, num_inference_steps=30, mc_algo='mc')[0]
+ mesh = FloaterRemover()(mesh)
+ mesh = DegenerateFaceRemover()(mesh)
+ mesh = FaceReducer()(mesh)
+ mesh.export('t2i_demo.glb')
+
+
+if __name__ == '__main__':
+ image_to_3d()
+ # text_to_3d()
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..868e3da1267bbec95d769ef0dd0e3a2ade85b551
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,40 @@
+ninja
+pybind11
+
+diffusers
+einops
+opencv-python
+numpy
+torch
+transformers
+torchvision
+#taming-transformers-rom1504
+#ConfigArgParse
+#ipdb
+omegaconf
+
+#sentencepiece
+tqdm
+
+# Mesh Processing
+trimesh
+pymeshlab
+pygltflib
+xatlas
+#kornia
+#facexlib
+
+# Training
+accelerate
+#pytorch_lightning
+#scikit-learn
+#scikit-image
+
+# Demo only
+gradio
+fastapi
+uvicorn
+rembg
+onnxruntime
+#gevent
+#geventhttpclient
\ No newline at end of file
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..d8fe695f55083bf6c72079871bffa1dc6c660eaf
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,32 @@
+# Open Source Model Licensed under the Apache License Version 2.0
+# and Other Licenses of the Third-Party Components therein:
+# The below Model in this distribution may have been modified by THL A29 Limited
+# ("Tencent Modifications"). All Tencent Modifications are Copyright (C) 2024 THL A29 Limited.
+
+# Copyright (C) 2024 THL A29 Limited, a Tencent company. All rights reserved.
+# The below software and/or models in this distribution may have been
+# modified by THL A29 Limited ("Tencent Modifications").
+# All Tencent Modifications are Copyright (C) THL A29 Limited.
+
+# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
+# except for the third-party components listed below.
+# Hunyuan 3D does not impose any additional limitations beyond what is outlined
+# in the respective licenses of these third-party components.
+# Users must comply with all terms and conditions of original licenses of these third-party
+# components and must ensure that the usage of the third party components adheres to
+# all relevant laws and regulations.
+
+# For avoidance of doubts, Hunyuan 3D means the large language models and
+# their software and algorithms, including trained model weights, parameters (including
+# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
+# fine-tuning enabling code and other elements of the foregoing made publicly available
+# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
+
+from setuptools import setup, find_packages
+
+setup(
+ name="hy3dgen",
+ version="2.0.0",
+ packages=find_packages(),
+
+)