diff --git a/.gitattributes b/.gitattributes index 5129ae3b8b68f5d0a2db8a5b4bd32b6d6ee780e9..bf07816c74bac9b682df196e02c6482e474e9b52 100644 --- a/.gitattributes +++ b/.gitattributes @@ -29,9 +29,3 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text *.zstandard filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text -*tfevents* filter=lfs diff=lfs merge=lfs -text -*.obj filter=lfs diff=lfs merge=lfs -text -*.mp4 filter=lfs diff=lfs merge=lfs -text -*.glb filter=lfs diff=lfs merge=lfs -text -*.png filter=lfs diff=lfs merge=lfs -text -*.gif filter=lfs diff=lfs merge=lfs -text diff --git a/.gitignore b/.gitignore index 22c0e039111aa2f4ebb86a7da0ba71b10771600a..00f45fce85d23a168874aa20d503c5605ce156a1 100644 --- a/.gitignore +++ b/.gitignore @@ -4,15 +4,14 @@ data/thuman* __pycache__ debug/ log/ +results/* .vscode !.gitignore force_push.sh .idea +smplx/ human_det/ kaolin/ neural_voxelization_layer/ pytorch3d/ force_push.sh -results/ -gradio_cached_examples/ -gradio_queue.db diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index b3a34c7a7def8bc7e3b5b39dda7d3e28a17e96ac..0000000000000000000000000000000000000000 --- a/Dockerfile +++ /dev/null @@ -1,96 +0,0 @@ -FROM nvidia/cuda:11.3.1-cudnn8-devel-ubuntu20.04 - -ARG DEBIAN_FRONTEND=noninteractive - -RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ - curl \ - git \ - wget \ - libgl1 \ - freeglut3-dev \ - unzip \ - ffmpeg \ - libsm6 \ - libxext6 \ - libgomp1 \ - libfontconfig1 \ - libegl1-mesa-dev \ - libgl1-mesa-glx \ - libglapi-mesa \ - libc6 \ - libxdamage1 \ - libxfixes3 \ - libxcb-glx0 \ - libxcb-dri2-0 \ - libxcb-dri3-0 \ - libxcb-present0 \ - libxcb-sync1 \ - libxshmfence1 \ - libxxf86vm1 \ - libgl1-mesa-dev \ - libgles2-mesa-dev \ - libglib2.0-dev \ - libxrender1 \ - libgbm1 \ - build-essential \ - libeigen3-dev \ - python3.8 \ - python3-pip \ - python-is-python3 \ - python3-opencv \ - nvidia-cuda-toolkit \ - && rm -rf /var/lib/apt/lists/* - - -# Set up a new user named "user" with user ID 1000 -RUN useradd -m -u 1000 user - -# Switch to the "user" user -USER user - -FROM python:3.8 - -ENV PYTHONUNBUFFERED=1 - -ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0 7.5 8.0 8.6" -ENV TCNN_CUDA_ARCHITECTURES=86;80;75;70;61;60 -ENV FORCE_CUDA=1 - -ENV CUDA_HOME=/usr/local/cuda -ENV PATH=${CUDA_HOME}/bin:/home/${USER_NAME}/.local/bin:/usr/bin:${PATH} -ENV LD_LIBRARY_PATH=${CUDA_HOME}/lib64:/usr/local/lib/python3.8/site-packages/open3d:/usr/lib:/usr/lib64:${LD_LIBRARY_PATH} -ENV LIBRARY_PATH=${CUDA_HOME}/lib64/stubs:/usr/local/lib/python3.8/site-packages/open3d:/usr/lib:/usr/lib64:${LIBRARY_PATH} - -# Set home to the user's home directory -ENV HOME=/home/user \ - PATH=/home/user/.local/bin:$PATH \ - PYTHONPATH=$HOME/app:$PYTHONPATH \ - PYTHONUNBUFFERED=1 \ - GRADIO_ALLOW_FLAGGING=never \ - GRADIO_NUM_PORTS=1 \ - GRADIO_SERVER_NAME=0.0.0.0 \ - GRADIO_THEME=huggingface \ - SYSTEM=spaces - -RUN pip install --upgrade pip ninja -RUN pip install setuptools==69.5.1 -RUN pip install torch==1.11.0+cu113 torchvision==0.12.0+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html - -RUN python -c "import torch; print(torch.version.cuda)" -COPY requirements.txt /tmp -RUN cd /tmp && pip install -r requirements.txt - -RUN pip install https://download.is.tue.mpg.de/icon/HF/kaolin-0.11.0-cp38-cp38-linux_x86_64.whl -RUN pip install https://download.is.tue.mpg.de/icon/HF/pytorch3d-0.7.0-cp38-cp38-linux_x86_64.whl - -RUN chmod 777 $HOME 
-ENV TRANSFORMERS_CACHE=/tmp -ENV MPLCONFIGDIR=/tmp - -# Copy the current directory contents into the container at $HOME/app setting the owner to the user -COPY --chown=user . $HOME/app - -# Set the working directory to the user's home directory -WORKDIR $HOME/app - -CMD ["python", "app.py"] diff --git a/README.md b/README.md index 0d6e014b9e38e17c146e88db476787f9bcf32a3d..9952b781a60a4a971d81bc68963c05b285bcc519 100644 --- a/README.md +++ b/README.md @@ -1,42 +1,12 @@ --- -title: ICON - Clothed Human Digitization -metaTitle: ICON-Avatarify from Photo +title: ICON +metaTitle: "Image2Human by Yuliang Xiu" emoji: 🤼 colorFrom: indigo colorTo: yellow -sdk: docker +sdk: gradio +sdk_version: 3.1.1 +app_file: ./apps/app.py pinned: true ---- - -# ICON Clothed Human Digitization -### ICON: Implicit Clothed humans Obtained from Normals (CVPR 2022) - - - - -
- -Twitter Follow - -YouTube Video Views - - -
- -#### Citation -``` -@inproceedings{xiu2022icon, - title = {{ICON}: {I}mplicit {C}lothed humans {O}btained from {N}ormals}, - author = {Xiu, Yuliang and Yang, Jinlong and Tzionas, Dimitrios and Black, Michael J.}, - booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, - month = {June}, - year = {2022}, - pages = {13296-13306} -} -``` \ No newline at end of file +python_version: 3.8.13 +--- \ No newline at end of file diff --git a/app.py b/app.py deleted file mode 100644 index 98a07e18d42b1bf3250dda987cdfe7910abf4e0f..0000000000000000000000000000000000000000 --- a/app.py +++ /dev/null @@ -1,153 +0,0 @@ -# install - - -import glob -import gradio as gr -import numpy as np -import os -import subprocess - -from apps.infer import generate_model - -if os.getenv('SYSTEM') == 'spaces': - subprocess.run('pip install pyembree'.split()) - subprocess.run( - 'pip install git+https://github.com/YuliangXiu/neural_voxelization_layer.git'.split()) - -# running - -description = ''' -# ICON Clothed Human Digitization -### ICON: Implicit Clothed humans Obtained from Normals (CVPR 2022) - - - - -
- -Twitter Follow - -YouTube Video Views - - -
- -
- -

-The reconstruction + refinement + video takes about 200 seconds for a single image.

-

-ICON is only suitable for humanoid images and will not work well on cartoons with non-human shapes.

-
- -
- -More - -#### Citation -``` -@inproceedings{xiu2022icon, - title = {{ICON}: {I}mplicit {C}lothed humans {O}btained from {N}ormals}, - author = {Xiu, Yuliang and Yang, Jinlong and Tzionas, Dimitrios and Black, Michael J.}, - booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, - month = {June}, - year = {2022}, - pages = {13296-13306} -} -``` - -#### Acknowledgments: - -- [StyleGAN-Human, ECCV 2022](https://stylegan-human.github.io/) -- [nagolinc/styleGanHuman_and_PIFu](https://huggingface.co/spaces/nagolinc/styleGanHuman_and_PIFu) -- [radames/PIFu-Clothed-Human-Digitization](https://huggingface.co/spaces/radames/PIFu-Clothed-Human-Digitization) - -#### Image Credits - -* [Pinterest](https://www.pinterest.com/search/pins/?q=parkour&rs=sitelinks_searchbox) - -#### Related works - -* [ICON @ MPI](https://icon.is.tue.mpg.de/) -* [MonoPort @ USC](https://xiuyuliang.cn/monoport) -* [Phorhum @ Google](https://phorhum.github.io/) -* [PIFuHD @ Meta](https://shunsukesaito.github.io/PIFuHD/) -* [PaMIR @ Tsinghua](http://www.liuyebin.com/pamir/pamir.html) - -
-''' - - -def generate_image(seed, psi): - iface = gr.Interface.load("spaces/hysts/StyleGAN-Human") - img = iface(seed, psi) - return img - - -model_types = ['ICON', 'PIFu', 'PaMIR'] -examples_names = glob.glob('examples/*.png') -examples_types = np.random.choice( - model_types, len(examples_names), p=[0.6, 0.2, 0.2]) - -examples = [list(item) for item in zip(examples_names, examples_types)] - -with gr.Blocks() as demo: - gr.Markdown(description) - - out_lst = [] - with gr.Row(): - with gr.Column(): - with gr.Row(): - with gr.Column(): - seed = gr.inputs.Slider( - 0, 1000, step=1, default=0, label='Seed (For Image Generation)') - psi = gr.inputs.Slider( - 0, 2, step=0.05, default=0.7, label='Truncation psi (For Image Generation)') - radio_choice = gr.Radio( - model_types, label='Method (For Reconstruction)', value='icon-filter') - inp = gr.Image(type="filepath", label="Input Image") - with gr.Row(): - btn_sample = gr.Button("Generate Image") - btn_submit = gr.Button("Submit Image") - - gr.Examples(examples=examples, - inputs=[inp, radio_choice], - cache_examples=False, - fn=generate_model, - outputs=out_lst) - - out_vid = gr.Video( - label="Image + Normal + SMPL Body + Clothed Human") - out_vid_download = gr.File( - label="Download Video, welcome share on Twitter with #ICON") - - with gr.Column(): - overlap_inp = gr.Image( - type="filepath", label="Image Normal Overlap") - out_final = gr.Model3D( - clear_color=[0.0, 0.0, 0.0, 0.0], label="Clothed human") - out_final_download = gr.File( - label="Download clothed human mesh") - out_smpl = gr.Model3D( - clear_color=[0.0, 0.0, 0.0, 0.0], label="SMPL body") - out_smpl_download = gr.File(label="Download SMPL body mesh") - out_smpl_npy_download = gr.File(label="Download SMPL params") - - out_lst = [out_smpl, out_smpl_download, out_smpl_npy_download, - out_final, out_final_download, out_vid, out_vid_download, overlap_inp] - - btn_submit.click(fn=generate_model, inputs=[ - inp, radio_choice], outputs=out_lst) - btn_sample.click(fn=generate_image, inputs=[seed, psi], outputs=inp) - -if __name__ == "__main__": - - # demo.launch(debug=False, enable_queue=False, - # auth=(os.environ['USER'], os.environ['PASSWORD']), - # auth_message="Register at icon.is.tue.mpg.de to get HuggingFace username and password.") - - demo.launch(debug=True, enable_queue=True) diff --git a/apps/ICON.py b/apps/ICON.py index d82a10d795e121e9a5614dcd5ae96b81ccdf1df5..ae3d7e2c93fe971af6afdffd374e78ad4acc6287 100644 --- a/apps/ICON.py +++ b/apps/ICON.py @@ -14,26 +14,21 @@ # # Contact: ps-license@tuebingen.mpg.de - -import os - from lib.common.seg3d_lossless import Seg3dLossless from lib.dataset.Evaluator import Evaluator from lib.net import HGPIFuNet from lib.common.train_util import * +from lib.renderer.gl.init_gl import initialize_GL_context from lib.common.render import Render from lib.dataset.mesh_util import SMPLX, update_mesh_shape_prior_losses, get_visibility import warnings import logging import torch -import lib.smplx as smplx +import smplx import numpy as np from torch import nn -import os.path as osp - from skimage.transform import resize import pytorch_lightning as pl -from huggingface_hub import cached_download torch.backends.cudnn.benchmark = True @@ -102,8 +97,10 @@ class ICON(pl.LightningModule): self.get_smpl_model = lambda smpl_type, gender, age, v_template: smplx.create( self.smpl_data.model_dir, - kid_template_path=cached_download(osp.join(self.smpl_data.model_dir, - f"{smpl_type}/{smpl_type}_kid_template.npy"), use_auth_token=os.environ['ICON']), + 
kid_template_path=osp.join( + osp.realpath(self.smpl_data.model_dir), + f"{smpl_type}/{smpl_type}_kid_template.npy", + ), model_type=smpl_type, gender=gender, age=age, diff --git a/apps/__pycache__/app.cpython-38.pyc b/apps/__pycache__/app.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cdbe6b9262dbf510f32e2596fd209c0457b938d0 Binary files /dev/null and b/apps/__pycache__/app.cpython-38.pyc differ diff --git a/apps/app.py b/apps/app.py new file mode 100644 index 0000000000000000000000000000000000000000..15dc741ea1cc0c4723ca202bd99c370a4d4a9af0 --- /dev/null +++ b/apps/app.py @@ -0,0 +1,21 @@ +# install + +import os +os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" +os.environ["CUDA_VISIBLE_DEVICES"]="0" +try: + os.system("bash install.sh") +except Exception as e: + print(e) + + +# running + +import gradio as gr + +def image_classifier(inp): + return {'cat': 0.3, 'dog': 0.7} + +demo = gr.Interface(fn=image_classifier, inputs="image", outputs="label") +demo.launch(auth=("icon@tue.mpg.de", "icon_2022"), + auth_message="Register at icon.is.tue.mpg.de/download to get the username and password.") diff --git a/apps/infer.py b/apps/infer.py index ce8e4e21179bfff43e2dfcb429c9602fb5e55802..ab34478cbfbdbb24cd47ddb77c35facb760d61f1 100644 --- a/apps/infer.py +++ b/apps/infer.py @@ -14,33 +14,34 @@ # # Contact: ps-license@tuebingen.mpg.de -import os -import gc - import logging +from lib.common.render import query_color, image2vid from lib.common.config import cfg +from lib.common.cloth_extraction import extract_cloth from lib.dataset.mesh_util import ( load_checkpoint, update_mesh_shape_prior_losses, + get_optim_grid_image, blend_rgb_norm, unwrap, remesh, tensor2variable, - rot6d_to_rotmat + normal_loss ) from lib.dataset.TestDataset import TestDataset -from lib.common.render import query_color from lib.net.local_affine import LocalAffine from pytorch3d.structures import Meshes from apps.ICON import ICON +import os from termcolor import colored +import argparse import numpy as np from PIL import Image import trimesh +import pickle import numpy as np -from tqdm import tqdm import torch torch.backends.cudnn.benchmark = True @@ -48,31 +49,36 @@ torch.backends.cudnn.benchmark = True logging.getLogger("trimesh").setLevel(logging.ERROR) -def generate_model(in_path, model_type): +if __name__ == "__main__": - torch.cuda.empty_cache() - - if model_type == 'ICON': - model_type = 'icon-filter' - else: - model_type = model_type.lower() + # loading cfg file + parser = argparse.ArgumentParser() + + parser.add_argument("-gpu", "--gpu_device", type=int, default=0) + parser.add_argument("-colab", action="store_true") + parser.add_argument("-loop_smpl", "--loop_smpl", type=int, default=100) + parser.add_argument("-patience", "--patience", type=int, default=5) + parser.add_argument("-vis_freq", "--vis_freq", type=int, default=10) + parser.add_argument("-loop_cloth", "--loop_cloth", type=int, default=200) + parser.add_argument("-hps_type", "--hps_type", type=str, default="pymaf") + parser.add_argument("-export_video", action="store_true") + parser.add_argument("-in_dir", "--in_dir", type=str, default="./examples") + parser.add_argument("-out_dir", "--out_dir", + type=str, default="./results") + parser.add_argument('-seg_dir', '--seg_dir', type=str, default=None) + parser.add_argument( + "-cfg", "--config", type=str, default="./configs/icon-filter.yaml" + ) - config_dict = {'loop_smpl': 100, - 'loop_cloth': 200, - 'patience': 5, - 'out_dir': './results', - 'hps_type': 'pymaf', - 'config': 
f"./configs/{model_type}.yaml"} + args = parser.parse_args() # cfg read and merge - cfg.merge_from_file(config_dict['config']) + cfg.merge_from_file(args.config) cfg.merge_from_file("./lib/pymaf/configs/pymaf_config.yaml") - os.makedirs(config_dict['out_dir'], exist_ok=True) - cfg_show_list = [ "test_gpus", - [0], + [args.gpu_device], "mcube_res", 256, "clean_mesh", @@ -82,21 +88,28 @@ def generate_model(in_path, model_type): cfg.merge_from_list(cfg_show_list) cfg.freeze() - os.environ["CUDA_VISIBLE_DEVICES"] = "0" - device = torch.device(f"cuda:0") + os.environ["CUDA_VISIBLE_DEVICES"] = "0,1" + device = torch.device(f"cuda:{args.gpu_device}") + + if args.colab: + print(colored("colab environment...", "red")) + from tqdm.notebook import tqdm + else: + print(colored("normal environment...", "red")) + from tqdm import tqdm # load model and dataloader model = ICON(cfg) model = load_checkpoint(model, cfg) dataset_param = { - 'image_path': in_path, - 'seg_dir': None, + 'image_dir': args.in_dir, + 'seg_dir': args.seg_dir, 'has_det': True, # w/ or w/o detection - 'hps_type': 'pymaf' # pymaf/pare/pixie + 'hps_type': args.hps_type # pymaf/pare/pixie } - if config_dict['hps_type'] == "pixie" and "pamir" in config_dict['config']: + if args.hps_type == "pixie" and "pamir" in args.config: print(colored("PIXIE isn't compatible with PaMIR, thus switch to PyMAF", "red")) dataset_param["hps_type"] = "pymaf" @@ -126,10 +139,10 @@ def generate_model(in_path, model_type): data["global_orient"], device=device, requires_grad=True ) # [1,1,3,3] - optimizer_smpl = torch.optim.Adam( + optimizer_smpl = torch.optim.SGD( [optimed_pose, optimed_trans, optimed_betas, optimed_orient], lr=1e-3, - amsgrad=True, + momentum=0.9, ) scheduler_smpl = torch.optim.lr_scheduler.ReduceLROnPlateau( optimizer_smpl, @@ -137,45 +150,38 @@ def generate_model(in_path, model_type): factor=0.5, verbose=0, min_lr=1e-5, - patience=config_dict['patience'], + patience=args.patience, ) losses = { - # Cloth: Normal_recon - Normal_pred - "cloth": {"weight": 1e1, "value": 0.0}, - # Cloth: [RT]_v1 - [RT]_v2 (v1-edge-v2) - "stiffness": {"weight": 1e5, "value": 0.0}, - # Cloth: det(R) = 1 - "rigid": {"weight": 1e5, "value": 0.0}, - # Cloth: edge length - "edge": {"weight": 0, "value": 0.0}, - # Cloth: normal consistency - "nc": {"weight": 0, "value": 0.0}, - # Cloth: laplacian smoonth - "laplacian": {"weight": 1e2, "value": 0.0}, - # Body: Normal_pred - Normal_smpl - "normal": {"weight": 1e0, "value": 0.0}, - # Body: Silhouette_pred - Silhouette_smpl - "silhouette": {"weight": 1e0, "value": 0.0}, + "cloth": {"weight": 1e1, "value": 0.0}, # Cloth: Normal_recon - Normal_pred + "stiffness": {"weight": 1e5, "value": 0.0}, # Cloth: [RT]_v1 - [RT]_v2 (v1-edge-v2) + "rigid": {"weight": 1e5, "value": 0.0}, # Cloth: det(R) = 1 + "edge": {"weight": 0, "value": 0.0}, # Cloth: edge length + "nc": {"weight": 0, "value": 0.0}, # Cloth: normal consistency + "laplacian": {"weight": 1e2, "value": 0.0}, # Cloth: laplacian smoonth + "normal": {"weight": 1e0, "value": 0.0}, # Body: Normal_pred - Normal_smpl + "silhouette": {"weight": 1e1, "value": 0.0}, # Body: Silhouette_pred - Silhouette_smpl } # smpl optimization - loop_smpl = tqdm(range(config_dict['loop_smpl'])) + loop_smpl = tqdm( + range(args.loop_smpl if cfg.net.prior_type != "pifu" else 1)) + + per_data_lst = [] - for _ in loop_smpl: + for i in loop_smpl: + + per_loop_lst = [] optimizer_smpl.zero_grad() - - # 6d_rot to rot_mat - optimed_orient_mat = rot6d_to_rotmat(optimed_orient.view(-1,6)).unsqueeze(0) - 
optimed_pose_mat = rot6d_to_rotmat(optimed_pose.view(-1,6)).unsqueeze(0) if dataset_param["hps_type"] != "pixie": smpl_out = dataset.smpl_model( betas=optimed_betas, - body_pose=optimed_pose_mat, - global_orient=optimed_orient_mat, + body_pose=optimed_pose, + global_orient=optimed_orient, pose2rot=False, ) @@ -185,8 +191,8 @@ def generate_model(in_path, model_type): smpl_verts, _, _ = dataset.smpl_model( shape_params=optimed_betas, expression_params=tensor2variable(data["exp"], device), - body_pose=optimed_pose_mat, - global_pose=optimed_orient_mat, + body_pose=optimed_pose, + global_pose=optimed_orient, jaw_pose=tensor2variable(data["jaw_pose"], device), left_hand_pose=tensor2variable( data["left_hand_pose"], device), @@ -214,7 +220,12 @@ def generate_model(in_path, model_type): diff_B_smpl = torch.abs( in_tensor["T_normal_B"] - in_tensor["normal_B"]) - losses["normal"]["value"] = (diff_F_smpl + diff_B_smpl).mean() + loss_F_smpl = normal_loss( + in_tensor["T_normal_F"], in_tensor["normal_F"]) + loss_B_smpl = normal_loss( + in_tensor["T_normal_B"], in_tensor["normal_B"]) + + losses["normal"]["value"] = (loss_F_smpl + loss_B_smpl).mean() # silhouette loss smpl_arr = torch.cat([T_mask_F, T_mask_B], dim=-1)[0] @@ -239,6 +250,33 @@ def generate_model(in_path, model_type): pbar_desc += f"Total: {smpl_loss:.3f}" loop_smpl.set_description(pbar_desc) + if i % args.vis_freq == 0: + + per_loop_lst.extend( + [ + in_tensor["image"], + in_tensor["T_normal_F"], + in_tensor["normal_F"], + diff_F_smpl / 2.0, + diff_S[:, :512].unsqueeze( + 0).unsqueeze(0).repeat(1, 3, 1, 1), + ] + ) + per_loop_lst.extend( + [ + in_tensor["image"], + in_tensor["T_normal_B"], + in_tensor["normal_B"], + diff_B_smpl / 2.0, + diff_S[:, 512:].unsqueeze( + 0).unsqueeze(0).repeat(1, 3, 1, 1), + ] + ) + per_data_lst.append( + get_optim_grid_image( + per_loop_lst, None, nrow=5, type="smpl") + ) + smpl_loss.backward() optimizer_smpl.step() scheduler_smpl.step(smpl_loss) @@ -249,20 +287,18 @@ def generate_model(in_path, model_type): # 1. SMPL Fitting # 2. Clothes Refinement - os.makedirs(os.path.join(config_dict['out_dir'], cfg.name, + os.makedirs(os.path.join(args.out_dir, cfg.name, "refinement"), exist_ok=True) # visualize the final results in self-rotation mode - os.makedirs(os.path.join(config_dict['out_dir'], - cfg.name, "vid"), exist_ok=True) + os.makedirs(os.path.join(args.out_dir, cfg.name, "vid"), exist_ok=True) # final results rendered as image # 1. Render the final fitted SMPL (xxx_smpl.png) # 2. Render the final reconstructed clothed human (xxx_cloth.png) # 3. Blend the original image with predicted cloth normal (xxx_overlap.png) - os.makedirs(os.path.join(config_dict['out_dir'], - cfg.name, "png"), exist_ok=True) + os.makedirs(os.path.join(args.out_dir, cfg.name, "png"), exist_ok=True) # final reconstruction meshes # 1. SMPL mesh (xxx_smpl.obj) @@ -271,41 +307,54 @@ def generate_model(in_path, model_type): # 4. remeshed clothed mesh (xxx_remesh.obj) # 5. 
refined clothed mesh (xxx_refine.obj) - os.makedirs(os.path.join(config_dict['out_dir'], - cfg.name, "obj"), exist_ok=True) + os.makedirs(os.path.join(args.out_dir, cfg.name, "obj"), exist_ok=True) + + if cfg.net.prior_type != "pifu": + + per_data_lst[0].save( + os.path.join( + args.out_dir, cfg.name, f"refinement/{data['name']}_smpl.gif" + ), + save_all=True, + append_images=per_data_lst[1:], + duration=500, + loop=0, + ) + + if args.vis_freq == 1: + image2vid( + per_data_lst, + os.path.join( + args.out_dir, cfg.name, f"refinement/{data['name']}_smpl.avi" + ), + ) + + per_data_lst[-1].save( + os.path.join(args.out_dir, cfg.name, + f"png/{data['name']}_smpl.png") + ) - norm_pred_F = ( + norm_pred = ( ((in_tensor["normal_F"][0].permute(1, 2, 0) + 1.0) * 255.0 / 2.0) .detach() .cpu() .numpy() .astype(np.uint8) ) - - norm_pred_B = ( - ((in_tensor["normal_B"][0].permute(1, 2, 0) + 1.0) * 255.0 / 2.0) - .detach() - .cpu() - .numpy() - .astype(np.uint8) - ) - norm_orig_F = unwrap(norm_pred_F, data) - norm_orig_B = unwrap(norm_pred_B, data) - + norm_orig = unwrap(norm_pred, data) mask_orig = unwrap( np.repeat( data["mask"].permute(1, 2, 0).detach().cpu().numpy(), 3, axis=2 ).astype(np.uint8), data, ) - rgb_norm_F = blend_rgb_norm(data["ori_image"], norm_orig_F, mask_orig) - rgb_norm_B = blend_rgb_norm(data["ori_image"], norm_orig_B, mask_orig) + rgb_norm = blend_rgb_norm(data["ori_image"], norm_orig, mask_orig) Image.fromarray( np.concatenate( - [data["ori_image"].astype(np.uint8), rgb_norm_F, rgb_norm_B], axis=1) - ).save(os.path.join(config_dict['out_dir'], cfg.name, f"png/{data['name']}_overlap.png")) + [data["ori_image"].astype(np.uint8), rgb_norm], axis=1) + ).save(os.path.join(args.out_dir, cfg.name, f"png/{data['name']}_overlap.png")) smpl_obj = trimesh.Trimesh( in_tensor["smpl_verts"].detach().cpu()[0] * @@ -314,24 +363,23 @@ def generate_model(in_path, model_type): process=False, maintains_order=True ) - smpl_obj.visual.vertex_colors = (smpl_obj.vertex_normals+1.0)*255.0*0.5 - smpl_obj.export( - f"{config_dict['out_dir']}/{cfg.name}/obj/{data['name']}_smpl.obj") smpl_obj.export( - f"{config_dict['out_dir']}/{cfg.name}/obj/{data['name']}_smpl.glb") + f"{args.out_dir}/{cfg.name}/obj/{data['name']}_smpl.obj") smpl_info = {'betas': optimed_betas, - 'pose': optimed_pose_mat, - 'orient': optimed_orient_mat, + 'pose': optimed_pose, + 'orient': optimed_orient, 'trans': optimed_trans} np.save( - f"{config_dict['out_dir']}/{cfg.name}/obj/{data['name']}_smpl.npy", smpl_info, allow_pickle=True) + f"{args.out_dir}/{cfg.name}/obj/{data['name']}_smpl.npy", smpl_info, allow_pickle=True) # ------------------------------------------------------------------------------------------------------------------ # cloth optimization + per_data_lst = [] + # cloth recon in_tensor.update( dataset.compute_vis_cmap( @@ -356,15 +404,13 @@ def generate_model(in_path, model_type): recon_obj = trimesh.Trimesh( verts_pr, faces_pr, process=False, maintains_order=True ) - recon_obj.visual.vertex_colors = ( - recon_obj.vertex_normals+1.0)*255.0*0.5 recon_obj.export( - os.path.join(config_dict['out_dir'], cfg.name, + os.path.join(args.out_dir, cfg.name, f"obj/{data['name']}_recon.obj") ) # Isotropic Explicit Remeshing for better geometry topology - verts_refine, faces_refine = remesh(os.path.join(config_dict['out_dir'], cfg.name, + verts_refine, faces_refine = remesh(os.path.join(args.out_dir, cfg.name, f"obj/{data['name']}_recon.obj"), 0.5, device) # define local_affine deform verts @@ -380,16 +426,26 @@ def 
generate_model(in_path, model_type): factor=0.1, verbose=0, min_lr=1e-5, - patience=config_dict['patience'], + patience=args.patience, ) + with torch.no_grad(): + per_loop_lst = [] + rotate_recon_lst = dataset.render.get_rgb_image(cam_ids=[ + 0, 1, 2, 3]) + per_loop_lst.extend(rotate_recon_lst) + per_data_lst.append(get_optim_grid_image( + per_loop_lst, None, type="cloth")) + final = None - if config_dict['loop_cloth'] > 0: + if args.loop_cloth > 0: - loop_cloth = tqdm(range(config_dict['loop_cloth'])) + loop_cloth = tqdm(range(args.loop_cloth)) - for _ in loop_cloth: + for i in loop_cloth: + + per_loop_lst = [] optimizer_cloth.zero_grad() @@ -426,67 +482,135 @@ def generate_model(in_path, model_type): loop_cloth.set_description(pbar_desc) # update params - cloth_loss.backward() + cloth_loss.backward(retain_graph=True) optimizer_cloth.step() scheduler_cloth.step(cloth_loss) + # for vis + with torch.no_grad(): + if i % args.vis_freq == 0: + + rotate_recon_lst = dataset.render.get_rgb_image(cam_ids=[ + 0, 1, 2, 3]) + + per_loop_lst.extend( + [ + in_tensor["image"], + in_tensor["P_normal_F"], + in_tensor["normal_F"], + diff_F_cloth / 2.0, + ] + ) + per_loop_lst.extend( + [ + in_tensor["image"], + in_tensor["P_normal_B"], + in_tensor["normal_B"], + diff_B_cloth / 2.0, + ] + ) + per_loop_lst.extend(rotate_recon_lst) + per_data_lst.append( + get_optim_grid_image( + per_loop_lst, None, type="cloth") + ) + + # gif for optimization + per_data_lst[1].save( + os.path.join( + args.out_dir, cfg.name, f"refinement/{data['name']}_cloth.gif" + ), + save_all=True, + append_images=per_data_lst[2:], + duration=500, + loop=0, + ) + + if args.vis_freq == 1: + image2vid( + per_data_lst, + os.path.join( + args.out_dir, cfg.name, f"refinement/{data['name']}_cloth.avi" + ), + ) + final = trimesh.Trimesh( mesh_pr.verts_packed().detach().squeeze(0).cpu(), mesh_pr.faces_packed().detach().squeeze(0).cpu(), process=False, maintains_order=True ) - - # only with front texture - tex_colors = query_color( + final_colors = query_color( mesh_pr.verts_packed().detach().squeeze(0).cpu(), mesh_pr.faces_packed().detach().squeeze(0).cpu(), in_tensor["image"], device=device, ) - - # full normal textures - norm_colors = (mesh_pr.verts_normals_padded().squeeze( - 0).detach().cpu() + 1.0) * 0.5 * 255.0 - - final.visual.vertex_colors = tex_colors - final.export( - f"{config_dict['out_dir']}/{cfg.name}/obj/{data['name']}_refine.obj") - - final.visual.vertex_colors = norm_colors + final.visual.vertex_colors = final_colors final.export( - f"{config_dict['out_dir']}/{cfg.name}/obj/{data['name']}_refine.glb") + f"{args.out_dir}/{cfg.name}/obj/{data['name']}_refine.obj") - # always export visualized video regardless of the cloth refinment - verts_lst = [smpl_obj.vertices, final.vertices] - faces_lst = [smpl_obj.faces, final.faces] - - # self-rotated video - dataset.render.load_meshes( - verts_lst, faces_lst) - dataset.render.get_rendered_video( - [data["ori_image"], rgb_norm_F, rgb_norm_B], - os.path.join(config_dict['out_dir'], cfg.name, - f"vid/{data['name']}_cloth.mp4"), + # always export visualized png regardless of the cloth refinement + per_data_lst[-1].save( + os.path.join(args.out_dir, cfg.name, + f"png/{data['name']}_cloth.png") ) - smpl_obj_path = f"{config_dict['out_dir']}/{cfg.name}/obj/{data['name']}_smpl.obj" - smpl_glb_path = f"{config_dict['out_dir']}/{cfg.name}/obj/{data['name']}_smpl.glb" - smpl_npy_path = f"{config_dict['out_dir']}/{cfg.name}/obj/{data['name']}_smpl.npy" - refine_obj_path = 
f"{config_dict['out_dir']}/{cfg.name}/obj/{data['name']}_refine.obj" - refine_glb_path = f"{config_dict['out_dir']}/{cfg.name}/obj/{data['name']}_refine.glb" - - video_path = os.path.join( - config_dict['out_dir'], cfg.name, f"vid/{data['name']}_cloth.mp4") - overlap_path = os.path.join( - config_dict['out_dir'], cfg.name, f"png/{data['name']}_overlap.png") - - # clean all the variables - for element in dir(): - if 'path' not in element: - del locals()[element] - gc.collect() - torch.cuda.empty_cache() - - return [smpl_glb_path, smpl_obj_path,smpl_npy_path, - refine_glb_path, refine_obj_path, - video_path, video_path, overlap_path] + # always export visualized video regardless of the cloth refinment + if args.export_video: + if final is not None: + verts_lst = [verts_pr, final.vertices] + faces_lst = [faces_pr, final.faces] + else: + verts_lst = [verts_pr] + faces_lst = [faces_pr] + + # self-rotated video + dataset.render.load_meshes( + verts_lst, faces_lst) + dataset.render.get_rendered_video( + [data["ori_image"], rgb_norm], + os.path.join(args.out_dir, cfg.name, + f"vid/{data['name']}_cloth.mp4"), + ) + + # garment extraction from deepfashion images + if not (args.seg_dir is None): + if final is not None: + recon_obj = final.copy() + + os.makedirs(os.path.join( + args.out_dir, cfg.name, "clothes"), exist_ok=True) + os.makedirs(os.path.join(args.out_dir, cfg.name, + "clothes", "info"), exist_ok=True) + for seg in data['segmentations']: + # These matrices work for PyMaf, not sure about the other hps type + K = np.array([[1.0000, 0.0000, 0.0000, 0.0000], + [0.0000, 1.0000, 0.0000, 0.0000], + [0.0000, 0.0000, -0.5000, 0.0000], + [-0.0000, -0.0000, 0.5000, 1.0000]]).T + + R = np.array([[-1., 0., 0.], + [0., 1., 0.], + [0., 0., -1.]]) + + t = np.array([[-0., -0., 100.]]) + clothing_obj = extract_cloth(recon_obj, seg, K, R, t, smpl_obj) + if clothing_obj is not None: + cloth_type = seg['type'].replace(' ', '_') + cloth_info = { + 'betas': optimed_betas, + 'body_pose': optimed_pose, + 'global_orient': optimed_orient, + 'pose2rot': False, + 'clothing_type': cloth_type, + } + + file_id = f"{data['name']}_{cloth_type}" + with open(os.path.join(args.out_dir, cfg.name, "clothes", "info", f"{file_id}_info.pkl"), 'wb') as fp: + pickle.dump(cloth_info, fp) + + clothing_obj.export(os.path.join( + args.out_dir, cfg.name, "clothes", f"{file_id}.obj")) + else: + print( + f"Unable to extract clothing of type {seg['type']} from image {data['name']}") diff --git a/assets/garment_teaser.png b/assets/garment_teaser.png index 15f249d06f6ae0f8f69badad21189d3221122ae5..95200eae30f68d1a16797bf9f95065d0a885d2b5 100644 Binary files a/assets/garment_teaser.png and b/assets/garment_teaser.png differ diff --git a/assets/intermediate_results.png b/assets/intermediate_results.png index a10ee888465f36e04bf3f54ef2a99ab5056f54c0..e1ca1303d711b5f5c728fc45aa33fca3994be4bc 100644 Binary files a/assets/intermediate_results.png and b/assets/intermediate_results.png differ diff --git a/assets/teaser.gif b/assets/teaser.gif index e7d00f4bd21c4bde181111b863231c6e8e32963d..30fb76214a17d73fd0760f43430e4fb933c39176 100644 Binary files a/assets/teaser.gif and b/assets/teaser.gif differ diff --git a/assets/thumbnail.png b/assets/thumbnail.png deleted file mode 100644 index e7db2a9dac3b1bf4d2e0e57e30faea862df6916f..0000000000000000000000000000000000000000 --- a/assets/thumbnail.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:5259d6e413242c63afe88027122eed783612ff9a9e48b9a9c51313f6bf66fb94 -size 
51470 diff --git a/configs/icon-filter.yaml b/configs/icon-filter.yaml index f2ac13b840233de1fa6f980ca9bb6359787acb83..a8131461ddd0ebd6cb8ab4e3e2d2301517c88bf1 100644 --- a/configs/icon-filter.yaml +++ b/configs/icon-filter.yaml @@ -1,7 +1,7 @@ name: icon-filter ckpt_dir: "./data/ckpt/" -resume_path: "https://huggingface.co/Yuliang/ICON/resolve/main/icon-filter.ckpt" -normal_path: "https://huggingface.co/Yuliang/ICON/resolve/main/normal.ckpt" +resume_path: "./data/ckpt/icon-filter.ckpt" +normal_path: "./data/ckpt/normal.ckpt" test_mode: True batch_size: 1 diff --git a/configs/icon-nofilter.yaml b/configs/icon-nofilter.yaml index 756cd4f3e18b9dce2b2d14aaf1c9d7f10b41ed90..506b9b22a1e97a5bb99ce2ba10662de500dfb147 100644 --- a/configs/icon-nofilter.yaml +++ b/configs/icon-nofilter.yaml @@ -1,7 +1,7 @@ name: icon-nofilter ckpt_dir: "./data/ckpt/" -resume_path: "https://huggingface.co/Yuliang/ICON/resolve/main/icon-nofilter.ckpt" -normal_path: "https://huggingface.co/Yuliang/ICON/resolve/main/normal.ckpt" +resume_path: "./data/ckpt/icon-nofilter.ckpt" +normal_path: "./data/ckpt/normal.ckpt" test_mode: True batch_size: 1 diff --git a/configs/pamir.yaml b/configs/pamir.yaml index a59a7531d24216c48666cc939a281755a45edcfa..a06e96779a6b3bd1c0e0f1e53f75cc609c380c8b 100644 --- a/configs/pamir.yaml +++ b/configs/pamir.yaml @@ -1,7 +1,7 @@ name: pamir ckpt_dir: "./data/ckpt/" -resume_path: "https://huggingface.co/Yuliang/ICON/resolve/main/pamir.ckpt" -normal_path: "https://huggingface.co/Yuliang/ICON/resolve/main/normal.ckpt" +resume_path: "./data/ckpt/pamir.ckpt" +normal_path: "./data/ckpt/normal.ckpt" test_mode: True batch_size: 1 diff --git a/configs/pifu.yaml b/configs/pifu.yaml index 69438539d03913ac51c527517e3f33e19e1c9bb3..f1d0d23929c7d69dcc121dd87484eeb73d732364 100644 --- a/configs/pifu.yaml +++ b/configs/pifu.yaml @@ -1,7 +1,7 @@ name: pifu ckpt_dir: "./data/ckpt/" -resume_path: "https://huggingface.co/Yuliang/ICON/resolve/main/pifu.ckpt" -normal_path: "https://huggingface.co/Yuliang/ICON/resolve/main/normal.ckpt" +resume_path: "./data/ckpt/pifu.ckpt" +normal_path: "./data/ckpt/normal.ckpt" test_mode: True batch_size: 1 diff --git a/environment.yaml b/environment.yaml new file mode 100644 index 0000000000000000000000000000000000000000..23cb949f3c160bef04bccca04456384c3ded0256 --- /dev/null +++ b/environment.yaml @@ -0,0 +1,16 @@ +name: icon +channels: + - pytorch-lts + - nvidia + - conda-forge + - fvcore + - iopath + - bottler + - defaults +dependencies: + - pytorch + - torchvision + - fvcore + - iopath + - nvidiacub + - pyembree \ No newline at end of file diff --git a/examples/22097467bffc92d4a5c4246f7d4edb75.png b/examples/22097467bffc92d4a5c4246f7d4edb75.png index 2664dc2840b0019c5e3d0a5a002448821efe3600..7139dd8327b4cc0f2e152fb6c481baefd9b3feb0 100644 Binary files a/examples/22097467bffc92d4a5c4246f7d4edb75.png and b/examples/22097467bffc92d4a5c4246f7d4edb75.png differ diff --git a/examples/44c0f84c957b6b9bdf77662af5bb7078.png b/examples/44c0f84c957b6b9bdf77662af5bb7078.png index 10a7155d5ceeaf50ca94cadfc3fb87f6bf78f72e..c0b7006e8d022dd0b7e831123e00386ae7eabb6a 100644 Binary files a/examples/44c0f84c957b6b9bdf77662af5bb7078.png and b/examples/44c0f84c957b6b9bdf77662af5bb7078.png differ diff --git a/examples/5a6a25963db2f667441d5076972c207c.png b/examples/5a6a25963db2f667441d5076972c207c.png index f1a9a17e3e508d31f3839c0142ae925dc93a2ee2..035a0f74ba5f8377c3f44ed1d5f3bc16719e17d9 100644 Binary files a/examples/5a6a25963db2f667441d5076972c207c.png and 
b/examples/5a6a25963db2f667441d5076972c207c.png differ diff --git a/examples/8da7ceb94669c2f65cbd28022e1f9876.png b/examples/8da7ceb94669c2f65cbd28022e1f9876.png index 33af4a270f4a1a61b9173aa941b7fb77b073e134..ef8f0d8a7221334f87c28feb1af2cbdb27fb3c9a 100644 Binary files a/examples/8da7ceb94669c2f65cbd28022e1f9876.png and b/examples/8da7ceb94669c2f65cbd28022e1f9876.png differ diff --git a/examples/923d65f767c85a42212cae13fba3750b.png b/examples/923d65f767c85a42212cae13fba3750b.png index 48ee4bcbe3eea59fdaf4728feac55adb16e33493..60d12587cd7ee1a227dc6e8e4769e81efa21af26 100644 Binary files a/examples/923d65f767c85a42212cae13fba3750b.png and b/examples/923d65f767c85a42212cae13fba3750b.png differ diff --git a/examples/959c4c726a69901ce71b93a9242ed900.png b/examples/959c4c726a69901ce71b93a9242ed900.png new file mode 100644 index 0000000000000000000000000000000000000000..adfb527b5b1cf4a73f4cc75a64646ae7a3791705 Binary files /dev/null and b/examples/959c4c726a69901ce71b93a9242ed900.png differ diff --git a/examples/c9856a2bc31846d684cbb965457fad59.png b/examples/c9856a2bc31846d684cbb965457fad59.png index a78d43f3dd2a81f4d915d9292f1480ddd7dfc18e..ff8bc4d132348fb4d8961091300165673998e680 100644 Binary files a/examples/c9856a2bc31846d684cbb965457fad59.png and b/examples/c9856a2bc31846d684cbb965457fad59.png differ diff --git a/examples/e1e7622af7074a022f5d96dc16672517.png b/examples/e1e7622af7074a022f5d96dc16672517.png index 1e351d7818f5e7313827b6b88f10687a79010ad7..a38826046400c3c3cbe013e834ba2a37af1b4a3e 100644 Binary files a/examples/e1e7622af7074a022f5d96dc16672517.png and b/examples/e1e7622af7074a022f5d96dc16672517.png differ diff --git a/examples/fb9d20fdb93750584390599478ecf86e.png b/examples/fb9d20fdb93750584390599478ecf86e.png index f6771e483af896854f0e39f450026c3e966b7f9b..1150008a65b386c876410d7a0c7b2a655bebbe24 100644 Binary files a/examples/fb9d20fdb93750584390599478ecf86e.png and b/examples/fb9d20fdb93750584390599478ecf86e.png differ diff --git a/examples/segmentation/003883.jpg b/examples/segmentation/003883.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9e9d08a207946883f2ba92f608422d236b3cc51f Binary files /dev/null and b/examples/segmentation/003883.jpg differ diff --git a/examples/segmentation/003883.json b/examples/segmentation/003883.json new file mode 100644 index 0000000000000000000000000000000000000000..438947e9d91bb578bc8344769aea65195d0eeb2d --- /dev/null +++ b/examples/segmentation/003883.json @@ -0,0 +1,136 @@ +{ + "item2": { + "segmentation": [ + [ + 232.29572649572654, 34.447388414055126, 237.0364672364673, + 40.57084520417861, 244.9377018043686, 47.089363722697165, + 252.04881291547974, 49.65726495726508, 262.5179487179489, + 51.43504273504287, 269.233998100665, 50.447388414055204, + 277.5446343779678, 49.12725546058881, 285.64339981006657, + 46.16429249762584, 294.9273504273506, 41.22602089268754, + 299.9377967711301, 36.514245014245084, 304.67853751187084, + 30.588319088319132, 306.0612535612536, 25.65004748338083, + 307.64150047483383, 23.477207977207982, 311.19705603038943, + 24.859924026590704, 317.12298195631536, 28.020417853751216, + 323.04890788224134, 29.008072174738874, 331.34520417853764, + 30.193257359924065, 339.4439696106365, 34.7364672364673, + 346.75261158594515, 39.279677113010536, 350.11063627730323, + 44.61301044634389, 355.00541310541314, 61.422317188983875, + 358.9560303893638, 77.6198480531815, 362.1165242165243, + 90.26182336182353, 364.88195631528976, 103.29886039886063, + 367.6473884140552, 118.11367521367552, 
369.42516619183294, + 129.37293447293484, 369.2324786324788, 132.60550807217476, + 365.6769230769232, 134.77834757834762, 359.15840455840464, + 138.3339031339032, 353.43000949667623, 140.70427350427357, + 351.4547008547009, 141.4943969610637, 351.25716999050337, + 138.5314339981007, 351.05963912630585, 136.75365622032294, + 345.7263057929725, 137.34624881291552, 337.8250712250712, + 139.51908831908838, 331.5040835707502, 141.09933523266864, + 324.7880341880341, 143.66723646723653, 322.2201329534662, + 146.43266856600198, 322.2201329534662, 151.5684710351378, + 323.0102564102563, 160.6548907882243, 324.95185185185176, + 173.44615384615395, 325.34691358024685, 190.23627730294416, + 325.93950617283946, 205.64368471035164, 325.93950617283946, + 215.71775878442577, 325.93950617283946, 220.06343779677147, + 322.7790123456789, 223.22393162393197, 315.0753086419752, + 228.55726495726532, 309.34691358024673, 230.53257359924066, + 290.1866096866098, 230.87929724596398, 263.91500474833805, + 229.6941120607788, 236.45821462488112, 229.29905033238373, + 218.48290598290572, 226.73114909781583, 202.65650522317188, + 224.82811016144353, 197.71823361823357, 221.07502374169044, + 195.15033238366567, 214.55650522317188, 195.74292497625825, + 200.53181386514711, 197.125641025641, 180.5811965811964, + 197.33285849952523, 164.68736942070285, 198.51804368471042, + 154.21823361823365, 198.51804368471042, 138.61329534662863, + 193.5797720797721, 136.4404558404558, 185.08594491927823, + 133.08243114909774, 177.77730294396957, 128.73675213675205, + 174.41927825261152, 128.53922127255453, 173.82668566001894, + 133.2799620132953, 174.02421652421646, 136.24292497625825, + 172.83903133903127, 137.03304843304838, 167.11063627730283, + 134.86020892687554, 159.9995251661917, 130.51452991452985, + 159.01187084520404, 129.1318138651471, 159.60446343779662, + 123.60094966761622, 162.6012345679013, 111.57578347578357, + 165.95925925925934, 98.53874643874646, 170.30493827160504, + 82.7362773029439, 173.92307692307693, 70.05584045584048, + 177.08357075023744, 54.84596391263053, 180.58129154795822, + 41.73190883190885, 183.14919278252614, 34.423266856600165, + 188.51623931623936, 30.279962013295354, 195.6273504273505, + 25.539221272554588, 201.75080721747398, 22.971320037986676, + 211.23228869895553, 22.37872744539408, 221.10883190883212, + 20.996011396011355, 224.8619183285852, 20.996011396011355, + 226.04710351377042, 23.56391263057927, 229.01006647673339, + 30.279962013295354 + ] + ], + "category_id": 1, + "category_name": "short sleeve top" + }, + "item1": { + "segmentation": [ + [ + 201.51804815682925, 224.7401022799914, 218.41555508203712, + 227.23317707223518, 236.42109524824218, 228.89522693373104, + 256.91971020669104, 229.44924355422967, 280.188408267633, + 230.2802684849776, 296.53189857234224, 230.2802684849776, + 313.7064138077994, 229.72625186447897, 315.32667803111013, + 236.8076070743661, 317.8197528233539, 240.96273172810572, + 318.65077775410185, 246.2258896228426, 321.4208608565949, + 253.15109737907534, 322.8059024078415, 265.0624547197956, + 324.74496057958663, 273.6497123375242, 325.9612827615598, + 284.4076070743661, 325.40726614106114, 299.9200724483274, + 324.29923290006394, 316.8175793735353, 322.0831664180694, + 325.9588536117625, 320.16803750266354, 336.5366716386107, + 316.0129128489239, 344.01589601534204, 315.18188791817596, + 357.86631152780745, 312.4118048156829, 368.1156190070319, + 308.5336884721926, 378.64193479650567, 306.31762199019806, + 385.29013424248905, 305.76360536969946, 
398.3095248242066, + 305.48659705945016, 409.6668655444283, 304.94393777967184, + 419.3418708715109, 302.7278712976774, 427.0981035584915, + 301.3428297464308, 433.74630300447495, 301.3428297464308, + 445.3806520349459, 300.5118048156829, 461.72414233965515, + 299.89735776688684, 467.352311953974, 297.9582995951417, + 477.60161943319844, 295.1882164926486, 491.7290432559132, + 293.52616663115276, 497.2692094608994, 291.8641167696569, + 503.36339228638417, 291.3101001491583, 510.8426166631155, + 289.37104197741314, 513.8897080758579, 287.4433411463882, + 519.2043682079693, 283.0112081823993, 519.7583848284679, + 275.5319838056679, 519.4813765182186, 270.26882591093107, + 518.096334966972, 265.8366929469421, 513.6642020029831, + 263.62062646494763, 509.78608565949276, 264.7286597059449, + 498.9827615597697, 265.2826763264435, 478.76115491157015, + 266.1137012571914, 467.1268058810992, 266.1137012571914, + 454.6614319198803, 264.17464308544623, 441.64204133816276, + 263.06660984444903, 424.19051779245626, 261.5834221180482, + 407.2581504368212, 259.92137225655233, 396.45482633709815, + 257.1512891540592, 380.1113360323889, 257.42829746430857, + 359.05870445344146, 256.8742808438099, 338.56008949499255, + 256.8742808438099, 321.3855742595354, 254.10419774131685, + 320.5545493287875, 251.05710632857443, 326.6487321542723, + 249.39505646707858, 339.1141061154912, 249.11804815682927, + 356.28862135094835, 248.28702322608135, 372.3551033454083, + 245.23993181333896, 387.59056040912026, 243.5766673769444, + 409.1404219049649, 241.91461751544855, 424.92989558917554, + 240.52957596420202, 440.4423609631369, 238.86752610270617, + 455.40080971659955, 238.86752610270617, 470.91327509056083, + 238.31350948220754, 486.42574046452216, 238.81966759002768, + 501.19639889196685, 239.6506925207756, 511.168698060942, + 236.0495844875346, 515.6008310249309, 229.40138504155118, + 519.4789473684212, 221.6451523545705, 520.3099722991692, + 216.65900277008296, 517.2628808864267, 213.33490304709125, + 509.50664819944615, 208.3487534626037, 491.50110803324105, + 205.8556786703599, 475.1576177285318, 203.63961218836545, + 460.75318559556774, 203.63961218836545, 443.3016620498613, + 203.63961218836545, 421.9720221606645, 200.59252077562303, + 415.60083102493036, 197.5052844662264, 406.9847858512679, + 195.28921798423193, 392.0263370978052, 193.35015981248677, + 370.97370551885774, 190.857085020243, 343.82689111442545, + 187.8099936075006, 322.77425953547794, 187.0028979330919, + 309.89237161730256, 186.17187300234397, 291.33281483059886, + 188.11093117408916, 266.67907521841033, 191.15802258683155, + 250.3355849137011, 196.69818879181773, 234.82311953973982 + ] + ], + "category_id": 8, + "category_name": "trousers" + } +} diff --git a/examples/segmentation/028009.jpg b/examples/segmentation/028009.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d0d6453e012c2b1cb9dacb0e712b7723d8b5971b Binary files /dev/null and b/examples/segmentation/028009.jpg differ diff --git a/examples/segmentation/028009.json b/examples/segmentation/028009.json new file mode 100644 index 0000000000000000000000000000000000000000..705031ff1b0d6d043bed95605a94dc51bc4cdf96 --- /dev/null +++ b/examples/segmentation/028009.json @@ -0,0 +1,191 @@ +{ + "item2": { + "segmentation": [ + [ + 314.7474747474744, 204.84848484848482, 328.9696969696967, + 209.7373737373737, 342.74747474747454, 211.95959595959593, + 360.0808080808079, 211.07070707070704, 375.19191919191906, + 210.18181818181816, 384.5252525252524, 207.07070707070704, + 
390.30303030303025, 204.84848484848482, 396.080808080808, + 201.29292929292924, 402.3030303030303, 204.40404040404036, + 412.969696969697, 203.9595959595959, 425.8585858585859, + 206.18181818181813, 434.3030303030304, 211.95959595959593, + 439.63636363636374, 223.0707070707071, 444.0808080808082, + 234.18181818181824, 448.52525252525265, 250.62626262626276, + 449.41414141414157, 260.848484848485, 452.08080808080825, + 279.0707070707073, 456.08080808080825, 300.84848484848516, + 457.858585858586, 308.40404040404076, 460.5252525252526, + 315.7575757575756, 460.96969696969705, 329.97979797979787, + 460.5252525252526, 345.9797979797979, 456.969696969697, + 363.75757575757575, 453.41414141414145, 373.5353535353536, + 450.3030303030303, 385.97979797979804, 447.1919191919192, + 393.9797979797981, 443.6363636363636, 401.9797979797981, + 438.3030303030303, 403.7575757575759, 433.85858585858585, + 401.09090909090924, 430.7474747474747, 393.0909090909092, + 426.7474747474747, 383.3131313131314, 424.9696969696969, + 374.8686868686869, 424.9696969696969, 369.0909090909091, + 423.63636363636357, 363.3131313131313, 423.63636363636357, + 359.3131313131313, 423.63636363636357, 352.6464646464646, + 420.9696969696969, 350.86868686868684, 422.74747474747466, + 345.53535353535347, 422.74747474747466, 340.64646464646455, + 422.74747474747466, 332.2020202020201, 421.8585858585858, + 321.53535353535335, 418.74747474747466, 313.0909090909089, + 416.5252525252524, 306.4242424242422, 412.9696969696969, + 314.8686868686867, 410.3030303030302, 320.20202020202004, + 411.6363636363635, 327.3131313131312, 414.74747474747466, + 336.2020202020201, 418.74747474747466, 351.7575757575757, + 420.9696969696969, 365.0909090909091, 423.1919191919191, + 377.0909090909091, 423.1919191919191, 385.0909090909092, + 424.5252525252525, 398.42424242424255, 396.0808080808079, + 398.42424242424255, 374.7474747474745, 400.6464646464648, + 354.7474747474744, 400.6464646464648, 331.6363636363632, + 400.6464646464648, 313.41414141414094, 400.6464646464648, + 305.4141414141409, 399.3131313131314, 297.4141414141409, + 396.6464646464648, 284.525252525252, 396.2020202020203, + 282.8686868686866, 391.59595959595964, 282.42424242424215, + 373.81818181818176, 282.42424242424215, 358.26262626262616, + 281.09090909090884, 334.70707070707056, 281.5353535353533, + 313.37373737373713, 283.31313131313107, 297.3737373737371, + 282.8686868686866, 283.1515151515148, 280.6464646464644, + 266.7070707070703, 271.313131313131, 253.3737373737369, + 264.6464646464643, 246.70707070707022, 257.5353535353532, + 239.59595959595907, 249.9797979797976, 228.9292929292924, + 242.42424242424204, 220.92929292929236, 233.17171717171723, + 209.01010101010093, 225.1717171717172, 194.78787878787867, + 222.06060606060606, 185.4545454545453, 224.2828282828283, + 179.6767676767675, 230.0606060606061, 171.67676767676747, + 232.72727272727278, 169.89898989898967, 243.83838383838392, + 167.67676767676744, 256.2828282828284, 165.4545454545452, + 274.06060606060623, 165.4545454545452, 291.8383838383841, + 167.67676767676744, 302.5050505050508, 168.1212121212119, + 310.94949494949526, 177.0101010101008, 314.0606060606064, + 181.45454545454527, 314.94949494949526, 187.2323232323231, + 312.7272727272731, 193.01010101010087, 307.8383838383842, + 191.2323232323231, 302.94949494949526, 193.45454545454533, + 292.727272727273, 193.45454545454533, 290.50505050505075, + 195.67676767676755, 287.39393939393966, 197.45454545454533, + 285.61616161616183, 197.45454545454533, 283.3939393939396, + 
193.89898989898978, 278.94949494949515, 197.45454545454533, + 274.94949494949515, 199.67676767676755, 279.83838383838406, + 201.45454545454535, 286.50505050505075, 201.45454545454535, + 291.8383838383841, 201.8989898989898, 296.2828282828286, + 202.7878787878787, 303.3939393939397, 202.34343434343424 + ] + ], + "category_id": 2, + "category_name": "long sleeve top" + }, + "item1": { + "segmentation": [ + [ + 346.9494949494949, 660.6868686868687, 397.6161616161618, + 661.5757575757576, 398.06060606060623, 674.0202020202021, + 398.94949494949515, 691.3535353535356, 397.6161616161618, + 710.0202020202022, 395.838383838384, 726.0202020202023, + 393.1717171717173, 742.0202020202023, 346.9494949494949, + 738.9090909090912, 346.50505050505046, 724.2424242424245, + 347.3939393939394, 713.5757575757578, 348.72727272727275, + 706.0202020202022, 349.17171717171715, 686.0202020202022, + 348.72727272727275, 675.7979797979799, 347.3939393939394, + 667.7979797979799 + ], + [ + 283.71717171717165, 396.68686868686876, 289.9393939393939, + 396.68686868686876, 303.27272727272725, 397.1313131313132, + 312.16161616161617, 399.7979797979799, 334.3838383838385, + 400.68686868686876, 351.7171717171719, 400.68686868686876, + 361.93939393939417, 401.5757575757577, 376.60606060606085, + 401.5757575757577, 390.82828282828314, 398.46464646464653, + 410.3838383838388, 397.5757575757577, 425.0505050505055, + 394.46464646464653, 431.71717171717216, 422.9090909090911, + 434.38383838383885, 447.79797979798, 430.38383838383885, + 478.0202020202024, 423.2727272727277, 507.79797979798025, + 418.3838383838388, 530.0202020202025, 411.8787878787878, + 557.3333333333333, 403.43434343434336, 590.6666666666666, + 400.7676767676767, 611.5555555555557, 399.8787878787878, + 619.1111111111112, 399.8787878787878, 630.6666666666669, + 398.10101010101, 635.1111111111113, 399.43434343434336, + 641.7777777777779, 399.43434343434336, 656.4444444444447, + 398.10101010101, 662.666666666667, 347.4343434343432, 662.666666666667, + 346.1010101010098, 637.7777777777779, 347.4343434343432, + 610.6666666666667, 349.21212121212096, 576.4444444444445, + 350.98989898989873, 556.4444444444443, 349.6565656565654, + 541.3333333333331, 348.32323232323205, 535.9999999999998, + 348.32323232323205, 523.5555555555553, 349.21212121212096, + 505.33333333333303, 342.5454545454543, 511.5555555555553, + 338.9898989898987, 516.8888888888887, 334.5454545454542, + 523.5555555555553, 325.6565656565653, 543.111111111111, + 319.87878787878753, 556.4444444444443, 314.1010101010097, + 568.4444444444443, 307.8787878787875, 583.1111111111111, + 300.3232323232319, 608.0000000000001, 298.10101010100965, + 617.7777777777778, 298.5454545454541, 624.0000000000001, + 295.43434343434296, 628.0000000000001, 293.2121212121208, + 628.0000000000001, 293.6565656565652, 632.4444444444446, + 291.43434343434296, 638.6666666666669, 290.54545454545405, + 644.4444444444447, 292.3232323232319, 648.8888888888891, + 303.8787878787875, 667.1111111111114, 313.65656565656525, + 684.0000000000003, 319.87878787878753, 700.8888888888893, + 322.54545454545416, 712.8888888888894, 324.323232323232, + 720.0000000000005, 327.87878787878753, 731.5555555555561, + 330.9898989898987, 738.6666666666672, 331.87878787878753, + 743.1111111111117, 334.5454545454542, 745.7777777777783, + 336.3232323232325, 749.1313131313133, 338.54545454545473, + 754.0202020202022, 338.54545454545473, 757.5757575757577, + 341.6565656565658, 760.6868686868688, 344.76767676767696, + 767.3535353535356, 345.2121212121214, 
770.9090909090911, + 346.9898989898992, 754.0202020202022, 347.43434343434365, + 738.909090909091, 393.2121212121216, 740.6868686868687, + 389.65656565656604, 764.6868686868688, 386.5454545454549, + 784.2424242424245, 384.3232323232327, 806.9090909090912, + 382.54545454545485, 812.686868686869, 381.13131313131316, + 818.7070707070708, 378.020202020202, 828.4848484848485, + 375.35353535353534, 839.5959595959597, 374.9090909090909, + 854.2626262626264, 373.1313131313131, 856.9292929292931, + 376.24242424242425, 864.9292929292931, 372.24242424242425, + 874.2626262626264, 366.4646464646464, 880.9292929292932, + 357.13131313131305, 872.9292929292932, 345.13131313131305, + 868.0404040404043, 337.131313131313, 867.1515151515154, + 337.131313131313, 856.0404040404042, 338.4646464646463, + 850.7070707070709, 336.2424242424241, 846.2626262626264, + 335.3535353535352, 841.3737373737375, 338.4646464646463, + 827.5959595959597, 342.0202020202019, 815.5959595959596, + 344.6868686868686, 809.3737373737374, 344.6868686868686, + 796.4848484848484, 344.6868686868686, 786.7070707070707, + 346.0202020202019, 779.151515151515, 344.24242424242414, + 776.0404040404039, 343.3535353535352, 786.2626262626262, + 342.0202020202019, 796.0404040404039, 338.90909090909076, + 801.8181818181818, 333.57575757575745, 809.3737373737374, + 326.02020202020185, 813.8181818181819, 320.242424242424, + 812.4848484848485, 318.02020202020185, 810.7070707070707, + 317.13131313131294, 807.1515151515151, 315.79797979797956, + 803.5959595959596, 313.57575757575734, 799.5959595959596, + 311.3535353535351, 793.8181818181818, 306.90909090909065, + 791.1515151515151, 305.57575757575734, 787.5959595959595, + 304.242424242424, 782.7070707070706, 302.02020202020174, + 776.4848484848484, 298.90909090909065, 773.8181818181816, + 294.90909090909065, 771.151515151515, 290.34343434343435, + 758.909090909091, 284.5656565656566, 742.020202020202, + 278.78787878787875, 729.5757575757575, 270.3434343434343, + 713.131313131313, 257.8989898989898, 689.1313131313129, + 247.2323232323231, 669.1313131313128, 239.23232323232307, + 657.5757575757573, 233.89898989898973, 642.9090909090905, + 233.0101010101008, 634.0202020202016, 233.45454545454527, + 630.0202020202016, 235.23232323232304, 611.7979797979793, + 241.93939393939402, 583.0707070707073, 245.93939393939405, + 567.5151515151516, 251.2727272727274, 540.4040404040404, + 256.1616161616163, 518.6262626262626, 260.60606060606074, + 501.2929292929292, 263.7171717171719, 493.7373737373736, + 268.16161616161634, 481.73737373737356, 270.38383838383857, + 469.73737373737356, 272.6060606060608, 462.18181818181796, + 276.1616161616164, 457.7373737373735, 276.1616161616164, + 454.1818181818179, 277.05050505050525, 450.1818181818179, + 278.828282828283, 433.292929292929, 278.3838383838386, + 419.0707070707067, 278.828282828283, 417.29292929292893, + 281.0505050505053, 414.1818181818178, 281.93939393939417, + 404.8484848484844, 283.71717171717194, 401.2929292929289 + ] + ], + "category_id": 8, + "category_name": "trousers" + } +} diff --git a/examples/slack_trial2-000150.png b/examples/slack_trial2-000150.png new file mode 100644 index 0000000000000000000000000000000000000000..e2f13ebb881e0bef4d57971cf0744ccfb831fda0 Binary files /dev/null and b/examples/slack_trial2-000150.png differ diff --git a/fetch_data.sh b/fetch_data.sh new file mode 100644 index 0000000000000000000000000000000000000000..ca7ea81c569a34f287fa589653ff40312ce617eb --- /dev/null +++ b/fetch_data.sh @@ -0,0 +1,60 @@ +#!/bin/bash +urle () { [[ 
"${1}" ]] || return 1; local LANG=C i x; for (( i = 0; i < ${#1}; i++ )); do x="${1:i:1}"; [[ "${x}" == [a-zA-Z0-9.~-] ]] && echo -n "${x}" || printf '%%%02X' "'${x}"; done; echo; } + +mkdir -p data/smpl_related/models + +# username and password input +echo -e "\nYou need to register at https://icon.is.tue.mpg.de/, according to Installation Instruction." +read -p "Username (ICON):" username +read -p "Password (ICON):" password +username=$(urle $username) +password=$(urle $password) + +# SMPL (Male, Female) +echo -e "\nDownloading SMPL..." +wget --post-data "username=$username&password=$password" 'https://download.is.tue.mpg.de/download.php?domain=smpl&sfile=SMPL_python_v.1.0.0.zip&resume=1' -O './data/smpl_related/models/SMPL_python_v.1.0.0.zip' --no-check-certificate --continue +unzip data/smpl_related/models/SMPL_python_v.1.0.0.zip -d data/smpl_related/models +mv data/smpl_related/models/smpl/models/basicModel_f_lbs_10_207_0_v1.0.0.pkl data/smpl_related/models/smpl/SMPL_FEMALE.pkl +mv data/smpl_related/models/smpl/models/basicmodel_m_lbs_10_207_0_v1.0.0.pkl data/smpl_related/models/smpl/SMPL_MALE.pkl +cd data/smpl_related/models +rm -rf *.zip __MACOSX smpl/models smpl/smpl_webuser +cd ../../.. + +# SMPL (Neutral, from SMPLIFY) +echo -e "\nDownloading SMPLify..." +wget --post-data "username=$username&password=$password" 'https://download.is.tue.mpg.de/download.php?domain=smplify&sfile=mpips_smplify_public_v2.zip&resume=1' -O './data/smpl_related/models/mpips_smplify_public_v2.zip' --no-check-certificate --continue +unzip data/smpl_related/models/mpips_smplify_public_v2.zip -d data/smpl_related/models +mv data/smpl_related/models/smplify_public/code/models/basicModel_neutral_lbs_10_207_0_v1.0.0.pkl data/smpl_related/models/smpl/SMPL_NEUTRAL.pkl +cd data/smpl_related/models +rm -rf *.zip smplify_public +cd ../../.. + +# ICON +echo -e "\nDownloading ICON..." +wget --post-data "username=$username&password=$password" 'https://download.is.tue.mpg.de/download.php?domain=icon&sfile=icon_data.zip&resume=1' -O './data/icon_data.zip' --no-check-certificate --continue +cd data && unzip icon_data.zip +mv smpl_data smpl_related/ +rm -f icon_data.zip +cd .. + +function download_for_training () { + + # SMPL-X (optional) + echo -e "\nDownloading SMPL-X..." + wget --post-data "username=$1&password=$2" 'https://download.is.tue.mpg.de/download.php?domain=smplx&sfile=models_smplx_v1_1.zip&resume=1' -O './data/smpl_related/models/models_smplx_v1_1.zip' --no-check-certificate --continue + unzip data/smpl_related/models/models_smplx_v1_1.zip -d data/smpl_related + rm -f data/smpl_related/models/models_smplx_v1_1.zip + + # SMIL (optional) + echo -e "\nDownloading SMIL..." + wget --post-data "username=$1&password=$2" 'https://download.is.tue.mpg.de/download.php?domain=agora&sfile=smpl_kid_template.npy&resume=1' -O './data/smpl_related/models/smpl/smpl_kid_template.npy' --no-check-certificate --continue + wget --post-data "username=$1&password=$2" 'https://download.is.tue.mpg.de/download.php?domain=agora&sfile=smplx_kid_template.npy&resume=1' -O './data/smpl_related/models/smplx/smplx_kid_template.npy' --no-check-certificate --continue +} + + +read -p "(optional) Download models used for training (y/n)?" choice +case "$choice" in + y|Y ) download_for_training $username $password;; + n|N ) echo "Great job! Try the demo for now!";; + * ) echo "Invalid input! 
Please use y|Y or n|N";; +esac \ No newline at end of file diff --git a/install.sh b/install.sh new file mode 100644 index 0000000000000000000000000000000000000000..64b3a3ef17bf2a09007e672f34f92c2691289477 --- /dev/null +++ b/install.sh @@ -0,0 +1,16 @@ +# # conda installation +# wget https://repo.anaconda.com/miniconda/Miniconda3-py38_4.10.3-Linux-x86_64.sh +# chmod +x Miniconda3-py38_4.10.3-Linux-x86_64.sh +# bash Miniconda3-py38_4.10.3-Linux-x86_64.sh -b -f -p /home/user/.local +# rm Miniconda3-py38_4.10.3-Linux-x86_64.sh +# conda config --env --set always_yes true +# conda update -n base -c defaults conda -y + +# # conda environment setup +# conda env create -f environment.yaml +# conda init bash +# source /home/user/.bashrc +# source activate icon +nvidia-smi +pip install torch==1.8.2 torchvision==0.9.2 --extra-index-url https://download.pytorch.org/whl/lts/1.8/cu111 +pip install -r requirement.txt \ No newline at end of file diff --git a/lib/common/render.py b/lib/common/render.py index 1874a77301e9de606f5234a02dc5cfd981874642..fce2d1658a24a8af3b393a92aa7354d06ad79952 100644 --- a/lib/common/render.py +++ b/lib/common/render.py @@ -33,15 +33,14 @@ from pytorch3d.renderer import ( ) from pytorch3d.renderer.mesh import TexturesVertex from pytorch3d.structures import Meshes - -import os, subprocess - from lib.dataset.mesh_util import SMPLX, get_visibility + import lib.common.render_utils as util import torch import numpy as np from PIL import Image from tqdm import tqdm +import os import cv2 import math from termcolor import colored @@ -327,10 +326,8 @@ class Render: def get_rendered_video(self, images, save_path): - tmp_path = save_path.replace('cloth', 'tmp') - self.cam_pos = [] - for angle in range(0, 360, 3): + for angle in range(360): self.cam_pos.append( ( 100.0 * math.cos(np.pi / 180 * angle), @@ -345,7 +342,7 @@ class Render: fourcc = cv2.VideoWriter_fourcc(*"mp4v") video = cv2.VideoWriter( - tmp_path, fourcc, 30, (self.size * len(self.meshes) + + save_path, fourcc, 30, (self.size * len(self.meshes) + new_shape[1] * len(images), self.size) ) @@ -375,8 +372,6 @@ class Render: video.write(final_img) video.release() - - os.system(f'ffmpeg -y -loglevel quiet -stats -i {tmp_path} -c:v libx264 {save_path}') def get_silhouette_image(self, cam_ids=[0, 2]): diff --git a/lib/common/train_util.py b/lib/common/train_util.py index 36184ec91e7534ba63f09e2e1f4b1ff6271fc905..eb1b690c3abdfef9e0190a2d99109ac54ca2bc16 100644 --- a/lib/common/train_util.py +++ b/lib/common/train_util.py @@ -32,8 +32,6 @@ import os from termcolor import colored - - def reshape_sample_tensor(sample_tensor, num_views): if num_views == 1: return sample_tensor diff --git a/lib/dataloader_demo.py b/lib/dataloader_demo.py new file mode 100644 index 0000000000000000000000000000000000000000..d9856826485b582e3932f66a9b1afe0c3ddefbbe --- /dev/null +++ b/lib/dataloader_demo.py @@ -0,0 +1,58 @@ +import argparse +from lib.common.config import get_cfg_defaults +from lib.dataset.PIFuDataset import PIFuDataset + +if __name__ == '__main__': + + parser = argparse.ArgumentParser() + parser.add_argument('-v', + '--show', + action='store_true', + help='vis sampler 3D') + parser.add_argument('-s', + '--speed', + action='store_true', + help='vis sampler 3D') + parser.add_argument('-l', + '--list', + action='store_true', + help='vis sampler 3D') + parser.add_argument('-c', + '--config', + default='./configs/train/icon-filter.yaml', + help='vis sampler 3D') + args_c = parser.parse_args() + + args = get_cfg_defaults() + 
args.merge_from_file(args_c.config) + + dataset = PIFuDataset(args, split='train', vis=args_c.show) + print(f"Number of subjects: {len(dataset.subject_list)}") + data_dict = dataset[0] + + if args_c.list: + for k in data_dict.keys(): + if not hasattr(data_dict[k], "shape"): + print(f"{k}: {data_dict[k]}") + else: + print(f"{k}: {data_dict[k].shape}") + + if args_c.show: + # for item in dataset: + item = dataset[0] + dataset.visualize_sampling3D(item, mode='occ') + + if args_c.speed: + # original: 2 it/s + # smpl online compute: 2 it/s + # normal online compute: 1.5 it/s + from tqdm import tqdm + for item in tqdm(dataset): + for k in item.keys(): + if 'voxel' in k: + if not hasattr(item[k], "shape"): + print(f"{k}: {item[k]}") + else: + print(f"{k}: {item[k].shape}") + print("--------------------") diff --git a/lib/dataset/Evaluator.py b/lib/dataset/Evaluator.py index 83d25a4426741f49fd7666719249ff4c9ee7393c..42aff71b4161c028e64aec2388f515dcc7c916cf 100644 --- a/lib/dataset/Evaluator.py +++ b/lib/dataset/Evaluator.py @@ -15,11 +15,11 @@ # # Contact: ps-license@tuebingen.mpg.de - from lib.renderer.gl.normal_render import NormalRender from lib.dataset.mesh_util import projection from lib.common.render import Render from PIL import Image +import os import numpy as np import torch from torch import nn diff --git a/lib/dataset/PIFuDataset.py b/lib/dataset/PIFuDataset.py index 9bf240c1f1a35b45c4bd38aed6c1827bdd09e56d..f9709d3586f431b9fd33b9d45e8d8f8c32e7f4eb 100644 --- a/lib/dataset/PIFuDataset.py +++ b/lib/dataset/PIFuDataset.py @@ -9,12 +9,11 @@ import os.path as osp import numpy as np from PIL import Image import random -import os import trimesh import torch +import vedo from kaolin.ops.mesh import check_sign import torchvision.transforms as transforms -from huggingface_hub import hf_hub_download, cached_download class PIFuDataset(): @@ -343,9 +342,9 @@ class PIFuDataset(): torch.as_tensor(smpl_param['full_pose'][0])).numpy() smpl_betas = smpl_param["betas"] - smpl_path = cached_download(osp.join(self.smplx.model_dir, "smpl/SMPL_MALE.pkl"), use_auth_token=os.environ['ICON']) - tetra_path = cached_download(osp.join(self.smplx.tedra_dir, - "tetra_male_adult_smpl.npz"), use_auth_token=os.environ['ICON']) + smpl_path = osp.join(self.smplx.model_dir, "smpl/SMPL_MALE.pkl") + tetra_path = osp.join(self.smplx.tedra_dir, + "tetra_male_adult_smpl.npz") smpl_model = TetraSMPLModel(smpl_path, tetra_path, 'adult') @@ -365,7 +364,7 @@ class PIFuDataset(): verts = (np.concatenate([smpl_model.verts, smpl_model.verts_added], axis=0) * smplx_param["scale"] + smplx_param["translation"] ) * self.datasets_dict[data_dict['dataset']]['scale'] - faces = np.loadtxt(cached_download(osp.join(self.smplx.tedra_dir, "tetrahedrons_male_adult.txt"), use_auth_token=os.environ['ICON']), + faces = np.loadtxt(osp.join(self.smplx.tedra_dir, "tetrahedrons_male_adult.txt"), dtype=np.int32) - 1 pad_v_num = int(8000 - verts.shape[0]) @@ -586,4 +585,77 @@ class PIFuDataset(): labels = torch.from_numpy(labels).float() normals = torch.from_numpy(normals).float() - return {'samples_geo': samples, 'labels_geo': labels} \ No newline at end of file + return {'samples_geo': samples, 'labels_geo': labels} + + def visualize_sampling3D(self, data_dict, mode='vis'): + + # create plot + vp = vedo.Plotter(title="", size=(1500, 1500), axes=0, bg='white') + vis_list = [] + + assert mode in ['vis', 'sdf', 'normal', 'cmap', 'occ'] + + # sdf-1 cmap-3 norm-3 vis-1 + if mode == 'vis': + labels = 
data_dict['smpl_feat'][:, [-1]] # visibility + colors = np.concatenate([labels, labels, labels], axis=1) + elif mode == 'occ': + labels = data_dict['labels_geo'][..., None] # occupancy + colors = np.concatenate([labels, labels, labels], axis=1) + elif mode == 'sdf': + labels = data_dict['smpl_feat'][:, [0]] # sdf + labels -= labels.min() + labels /= labels.max() + colors = np.concatenate([labels, labels, labels], axis=1) + elif mode == 'normal': + labels = data_dict['smpl_feat'][:, -4:-1] # normal + colors = (labels + 1.0) * 0.5 + elif mode == 'cmap': + labels = data_dict['smpl_feat'][:, -7:-4] # colormap + colors = np.array(labels) + + points = projection(data_dict['samples_geo'], data_dict['calib']) + verts = projection(data_dict['verts'], data_dict['calib']) + points[:, 1] *= -1 + verts[:, 1] *= -1 + + # create a mesh + mesh = trimesh.Trimesh(verts, data_dict['faces'], process=True) + mesh.visual.vertex_colors = [128.0, 128.0, 128.0, 255.0] + vis_list.append(mesh) + + if 'voxel_verts' in data_dict.keys(): + print(colored("voxel verts", "green")) + voxel_verts = data_dict['voxel_verts'] * 2.0 + voxel_faces = data_dict['voxel_faces'] + voxel_verts[:, 1] *= -1 + voxel = trimesh.Trimesh( + voxel_verts, voxel_faces[:, [0, 2, 1]], process=False, maintain_order=True) + voxel.visual.vertex_colors = [0.0, 128.0, 0.0, 255.0] + vis_list.append(voxel) + + if 'smpl_verts' in data_dict.keys(): + print(colored("smpl verts", "green")) + smplx_verts = data_dict['smpl_verts'] + smplx_faces = data_dict['smpl_faces'] + smplx_verts[:, 1] *= -1 + smplx = trimesh.Trimesh( + smplx_verts, smplx_faces[:, [0, 2, 1]], process=False, maintain_order=True) + smplx.visual.vertex_colors = [128.0, 128.0, 0.0, 255.0] + vis_list.append(smplx) + + # create a picture + img_pos = [1.0, 0.0, -1.0] + for img_id, img_key in enumerate(['normal_F', 'image', 'T_normal_B']): + image_arr = (data_dict[img_key].detach().cpu().permute( + 1, 2, 0).numpy() + 1.0) * 0.5 * 255.0 + image_dim = image_arr.shape[0] + image = vedo.Picture(image_arr).scale( + 2.0 / image_dim).pos(-1.0, -1.0, img_pos[img_id]) + vis_list.append(image) + + # create a pointcloud + pc = vedo.Points(points, r=15, c=np.float32(colors)) + vis_list.append(pc) + + vp.show(*vis_list, bg="white", axes=1, interactive=True) diff --git a/lib/dataset/TestDataset.py b/lib/dataset/TestDataset.py index 6872a4ca1a83cf380bc2864e62cd5ac15a59cbdb..716eb455a9dabea227dfc707f453dbcfbef56b8e 100644 --- a/lib/dataset/TestDataset.py +++ b/lib/dataset/TestDataset.py @@ -15,9 +15,7 @@ # # Contact: ps-license@tuebingen.mpg.de -import os - -import lib.smplx as smplx +import smplx from lib.pymaf.utils.geometry import rotation_matrix_to_angle_axis, batch_rodrigues from lib.pymaf.utils.imutils import process_image from lib.pymaf.core import path_config @@ -27,12 +25,14 @@ from lib.common.render import Render from lib.dataset.body_model import TetraSMPLModel from lib.dataset.mesh_util import get_visibility, SMPLX import os.path as osp +import os import torch +import glob import numpy as np import random +import human_det from termcolor import colored from PIL import ImageFile -from huggingface_hub import cached_download ImageFile.LOAD_TRUNCATED_IMAGES = True @@ -42,7 +42,7 @@ class TestDataset(): random.seed(1993) - self.image_path = cfg['image_path'] + self.image_dir = cfg['image_dir'] self.seg_dir = cfg['seg_dir'] self.has_det = cfg['has_det'] self.hps_type = cfg['hps_type'] @@ -51,7 +51,19 @@ class TestDataset(): self.device = device - self.subject_list = [self.image_path] + if 
self.has_det: + self.det = human_det.Detection() + else: + self.det = None + + keep_lst = sorted(glob.glob(f"{self.image_dir}/*")) + img_fmts = ['jpg', 'png', 'jpeg', "JPG", 'bmp'] + keep_lst = [ + item for item in keep_lst if item.split(".")[-1] in img_fmts + ] + + self.subject_list = sorted( + [item for item in keep_lst if item.split(".")[-1] in img_fmts]) # smpl related self.smpl_data = SMPLX() @@ -100,9 +112,9 @@ class TestDataset(): def compute_voxel_verts(self, body_pose, global_orient, betas, trans, scale): - smpl_path = cached_download(osp.join(self.smpl_data.model_dir, "smpl/SMPL_NEUTRAL.pkl"), use_auth_token=os.environ['ICON']) - tetra_path = cached_download(osp.join(self.smpl_data.tedra_dir, - 'tetra_neutral_adult_smpl.npz'), use_auth_token=os.environ['ICON']) + smpl_path = osp.join(self.smpl_data.model_dir, "smpl/SMPL_NEUTRAL.pkl") + tetra_path = osp.join(self.smpl_data.tedra_dir, + 'tetra_neutral_adult_smpl.npz') smpl_model = TetraSMPLModel(smpl_path, tetra_path, 'adult') pose = torch.cat([global_orient[0], body_pose[0]], dim=0) @@ -112,8 +124,8 @@ class TestDataset(): verts = np.concatenate( [smpl_model.verts, smpl_model.verts_added], axis=0) * scale.item() + trans.detach().cpu().numpy() - faces = np.loadtxt(cached_download(osp.join(self.smpl_data.tedra_dir, - 'tetrahedrons_neutral_adult.txt'), use_auth_token=os.environ['ICON']), + faces = np.loadtxt(osp.join(self.smpl_data.tedra_dir, + 'tetrahedrons_neutral_adult.txt'), dtype=np.int32) - 1 pad_v_num = int(8000 - verts.shape[0]) @@ -148,7 +160,7 @@ class TestDataset(): if self.seg_dir is None: img_icon, img_hps, img_ori, img_mask, uncrop_param = process_image( - img_path, self.hps_type, 512, self.device) + img_path, self.det, self.hps_type, 512, self.device) data_dict = { 'name': img_name, @@ -160,7 +172,7 @@ class TestDataset(): else: img_icon, img_hps, img_ori, img_mask, uncrop_param, segmentations = process_image( - img_path, self.hps_type, 512, self.device, + img_path, self.det, self.hps_type, 512, self.device, seg_path=os.path.join(self.seg_dir, f'{img_name}.json')) data_dict = { 'name': img_name, @@ -233,11 +245,6 @@ class TestDataset(): # body_pose - [1, 23, 3, 3] / [1, 21, 3, 3] # global_orient - [1, 1, 3, 3] # smpl_verts - [1, 6890, 3] / [1, 10475, 3] - - # from rot_mat to rot_6d for better optimization - N_body = data_dict["body_pose"].shape[1] - data_dict["body_pose"] = data_dict["body_pose"][:, :, :, :2].reshape(1, N_body,-1) - data_dict["global_orient"] = data_dict["global_orient"][:, :, :, :2].reshape(1, 1,-1) return data_dict @@ -252,3 +259,84 @@ class TestDataset(): # render optimized mesh (normal, T_normal, image [-1,1]) self.render.load_meshes(verts, faces) return self.render.get_depth_map(cam_ids=[0, 2]) + + def visualize_alignment(self, data): + + import vedo + import trimesh + + if self.hps_type != 'pixie': + smpl_out = self.smpl_model(betas=data['betas'], + body_pose=data['body_pose'], + global_orient=data['global_orient'], + pose2rot=False) + smpl_verts = ( + (smpl_out.vertices + data['trans']) * data['scale']).detach().cpu().numpy()[0] + else: + smpl_verts, _, _ = self.smpl_model(shape_params=data['betas'], + expression_params=data['exp'], + body_pose=data['body_pose'], + global_pose=data['global_orient'], + jaw_pose=data['jaw_pose'], + left_hand_pose=data['left_hand_pose'], + right_hand_pose=data['right_hand_pose']) + + smpl_verts = ( + (smpl_verts + data['trans']) * data['scale']).detach().cpu().numpy()[0] + + smpl_verts *= np.array([1.0, -1.0, -1.0]) + faces = 
data['smpl_faces'][0].detach().cpu().numpy() + + image_P = data['image'] + image_F, image_B = self.render_normal(smpl_verts, faces) + + # create plot + vp = vedo.Plotter(title="", size=(1500, 1500)) + vis_list = [] + + image_F = ( + 0.5 * (1.0 + image_F[0].permute(1, 2, 0).detach().cpu().numpy()) * 255.0) + image_B = ( + 0.5 * (1.0 + image_B[0].permute(1, 2, 0).detach().cpu().numpy()) * 255.0) + image_P = ( + 0.5 * (1.0 + image_P[0].permute(1, 2, 0).detach().cpu().numpy()) * 255.0) + + vis_list.append(vedo.Picture(image_P*0.5+image_F * + 0.5).scale(2.0/image_P.shape[0]).pos(-1.0, -1.0, 1.0)) + vis_list.append(vedo.Picture(image_F).scale( + 2.0/image_F.shape[0]).pos(-1.0, -1.0, -0.5)) + vis_list.append(vedo.Picture(image_B).scale( + 2.0/image_B.shape[0]).pos(-1.0, -1.0, -1.0)) + + # create a mesh + mesh = trimesh.Trimesh(smpl_verts, faces, process=False) + mesh.visual.vertex_colors = [200, 200, 0] + vis_list.append(mesh) + + vp.show(*vis_list, bg="white", axes=1, interactive=True) + + +if __name__ == '__main__': + + cfg.merge_from_file("./configs/icon-filter.yaml") + cfg.merge_from_file('./lib/pymaf/configs/pymaf_config.yaml') + + cfg_show_list = [ + 'test_gpus', ['0'], 'mcube_res', 512, 'clean_mesh', False + ] + + cfg.merge_from_list(cfg_show_list) + cfg.freeze() + + os.environ['CUDA_VISIBLE_DEVICES'] = "0" + device = torch.device('cuda:0') + + dataset = TestDataset( + { + 'image_dir': "./examples", + 'has_det': True, # w/ or w/o detection + 'hps_type': 'bev' # pymaf/pare/pixie/hybrik/bev + }, device) + + for i in range(len(dataset)): + dataset.visualize_alignment(dataset[i]) diff --git a/lib/dataset/mesh_util.py b/lib/dataset/mesh_util.py index 2fe5ce5de07eddf08bd44e2f086994a137def3b1..1e2ec31b52a1ebe41061ad2b5b7f5c6ee0c0e817 100644 --- a/lib/dataset/mesh_util.py +++ b/lib/dataset/mesh_util.py @@ -22,13 +22,13 @@ import torch import torchvision import trimesh from pytorch3d.io import load_obj +import os from termcolor import colored +import os.path as osp from scipy.spatial import cKDTree from pytorch3d.structures import Meshes import torch.nn.functional as F - -import os from lib.pymaf.utils.imutils import uncrop from lib.common.render_utils import Pytorch3dRasterizer, face_vertices @@ -42,24 +42,6 @@ from pytorch3d.loss import ( mesh_normal_consistency ) -from huggingface_hub import hf_hub_download, hf_hub_url, cached_download - -def rot6d_to_rotmat(x): - """Convert 6D rotation representation to 3x3 rotation matrix. 
- Based on Zhou et al., "On the Continuity of Rotation Representations in Neural Networks", CVPR 2019 - Input: - (B,6) Batch of 6-D rotation representations - Output: - (B,3,3) Batch of corresponding rotation matrices - """ - x = x.view(-1, 3, 2) - a1 = x[:, :, 0] - a2 = x[:, :, 1] - b1 = F.normalize(a1) - b2 = F.normalize(a2 - torch.einsum("bi,bi->b", b1, a2).unsqueeze(-1) * b1) - b3 = torch.cross(b1, b2) - return torch.stack((b1, b2, b3), dim=-1) - def tensor2variable(tensor, device): # [1,23,3,3] @@ -138,14 +120,16 @@ def mesh_edge_loss(meshes, target_length: float = 0.0): def remesh(obj_path, perc, device): - mesh = trimesh.load(obj_path) - mesh = mesh.simplify_quadratic_decimation(50000) - mesh = trimesh.smoothing.filter_humphrey( - mesh, alpha=0.1, beta=0.5, iterations=10, laplacian_operator=None - ) - mesh.export(obj_path.replace("recon", "remesh")) - verts_pr = torch.tensor(mesh.vertices).float().unsqueeze(0).to(device) - faces_pr = torch.tensor(mesh.faces).long().unsqueeze(0).to(device) + + ms = pymeshlab.MeshSet() + ms.load_new_mesh(obj_path) + ms.laplacian_smooth() + ms.remeshing_isotropic_explicit_remeshing( + targetlen=pymeshlab.Percentage(perc), adaptive=True) + ms.save_current_mesh(obj_path.replace("recon", "remesh")) + polished_mesh = trimesh.load_mesh(obj_path.replace("recon", "remesh")) + verts_pr = torch.tensor(polished_mesh.vertices).float().unsqueeze(0).to(device) + faces_pr = torch.tensor(polished_mesh.faces).long().unsqueeze(0).to(device) return verts_pr, faces_pr @@ -221,31 +205,33 @@ def load_checkpoint(model, cfg): device = torch.device(f"cuda:{cfg['test_gpus'][0]}") - main_dict = torch.load(cached_download(cfg.resume_path, use_auth_token=os.environ['ICON']), - map_location=device)['state_dict'] - - main_dict = { - k: v - for k, v in main_dict.items() - if k in model_dict and v.shape == model_dict[k].shape and ( - 'reconEngine' not in k) and ("normal_filter" not in k) and ( - 'voxelization' not in k) - } - print(colored(f"Resume MLP weights from {cfg.resume_path}", 'green')) - - normal_dict = torch.load(cached_download(cfg.normal_path, use_auth_token=os.environ['ICON']), - map_location=device)['state_dict'] - - for key in normal_dict.keys(): - normal_dict = rename(normal_dict, key, - key.replace("netG", "netG.normal_filter")) - - normal_dict = { - k: v - for k, v in normal_dict.items() - if k in model_dict and v.shape == model_dict[k].shape - } - print(colored(f"Resume normal model from {cfg.normal_path}", 'green')) + if os.path.exists(cfg.resume_path) and cfg.resume_path.endswith("ckpt"): + main_dict = torch.load(cfg.resume_path, + map_location=device)['state_dict'] + + main_dict = { + k: v + for k, v in main_dict.items() + if k in model_dict and v.shape == model_dict[k].shape and ( + 'reconEngine' not in k) and ("normal_filter" not in k) and ( + 'voxelization' not in k) + } + print(colored(f"Resume MLP weights from {cfg.resume_path}", 'green')) + + if os.path.exists(cfg.normal_path) and cfg.normal_path.endswith("ckpt"): + normal_dict = torch.load(cfg.normal_path, + map_location=device)['state_dict'] + + for key in normal_dict.keys(): + normal_dict = rename(normal_dict, key, + key.replace("netG", "netG.normal_filter")) + + normal_dict = { + k: v + for k, v in normal_dict.items() + if k in model_dict and v.shape == model_dict[k].shape + } + print(colored(f"Resume normal model from {cfg.normal_path}", 'green')) model_dict.update(main_dict) model_dict.update(normal_dict) @@ -266,7 +252,7 @@ def load_checkpoint(model, cfg): def read_smpl_constants(folder): """Load 
smpl vertex code""" - smpl_vtx_std = np.loadtxt(cached_download(os.path.join(folder, 'vertices.txt'), use_auth_token=os.environ['ICON'])) + smpl_vtx_std = np.loadtxt(os.path.join(folder, 'vertices.txt')) min_x = np.min(smpl_vtx_std[:, 0]) max_x = np.max(smpl_vtx_std[:, 0]) min_y = np.min(smpl_vtx_std[:, 1]) @@ -279,12 +265,12 @@ def read_smpl_constants(folder): smpl_vtx_std[:, 2] = (smpl_vtx_std[:, 2] - min_z) / (max_z - min_z) smpl_vertex_code = np.float32(np.copy(smpl_vtx_std)) """Load smpl faces & tetrahedrons""" - smpl_faces = np.loadtxt(cached_download(os.path.join(folder, 'faces.txt'), use_auth_token=os.environ['ICON']), + smpl_faces = np.loadtxt(os.path.join(folder, 'faces.txt'), dtype=np.int32) - 1 smpl_face_code = (smpl_vertex_code[smpl_faces[:, 0]] + smpl_vertex_code[smpl_faces[:, 1]] + smpl_vertex_code[smpl_faces[:, 2]]) / 3.0 - smpl_tetras = np.loadtxt(cached_download(os.path.join(folder, 'tetrahedrons.txt'), use_auth_token=os.environ['ICON']), + smpl_tetras = np.loadtxt(os.path.join(folder, 'tetrahedrons.txt'), dtype=np.int32) - 1 return smpl_vertex_code, smpl_face_code, smpl_faces, smpl_tetras @@ -410,12 +396,11 @@ def cal_sdf_batch(verts, faces, cmaps, vis, points): bary_weights = barycentric_coordinates_of_projection( points.view(-1, 3), closest_triangles) - pts_cmap = (closest_cmaps*bary_weights[:, :, None]).sum(1).unsqueeze(0).clamp_(min=0.0, max=1.0) + pts_cmap = (closest_cmaps*bary_weights[:, :, None]).sum(1).unsqueeze(0) pts_vis = (closest_vis*bary_weights[:, :, None]).sum(1).unsqueeze(0).ge(1e-1) pts_norm = (closest_normals*bary_weights[:, :, None]).sum( 1).unsqueeze(0) * torch.tensor([-1.0, 1.0, -1.0]).type_as(normals) - pts_norm = F.normalize(pts_norm, dim=2) pts_dist = torch.sqrt(residues) / torch.sqrt(torch.tensor(3)) pts_signs = 2.0 * (check_sign(verts, faces[0], points).float() - 0.5) @@ -857,21 +842,26 @@ def mesh_move(mesh_lst, step, scale=1.0): class SMPLX(): def __init__(self): - - REPO_ID = "Yuliang/SMPL" - self.smpl_verts_path = hf_hub_download(REPO_ID, filename='smpl_data/smpl_verts.npy', use_auth_token=os.environ['ICON']) - self.smplx_verts_path = hf_hub_download(REPO_ID, filename='smpl_data/smplx_verts.npy', use_auth_token=os.environ['ICON']) - self.faces_path = hf_hub_download(REPO_ID, filename='smpl_data/smplx_faces.npy', use_auth_token=os.environ['ICON']) - self.cmap_vert_path = hf_hub_download(REPO_ID, filename='smpl_data/smplx_cmap.npy', use_auth_token=os.environ['ICON']) + self.current_dir = osp.join(osp.dirname(__file__), + "../../data/smpl_related") + + self.smpl_verts_path = osp.join(self.current_dir, + "smpl_data/smpl_verts.npy") + self.smplx_verts_path = osp.join(self.current_dir, + "smpl_data/smplx_verts.npy") + self.faces_path = osp.join(self.current_dir, + "smpl_data/smplx_faces.npy") + self.cmap_vert_path = osp.join(self.current_dir, + "smpl_data/smplx_cmap.npy") self.faces = np.load(self.faces_path) self.verts = np.load(self.smplx_verts_path) self.smpl_verts = np.load(self.smpl_verts_path) - self.model_dir = hf_hub_url(REPO_ID, filename='models') - self.tedra_dir = hf_hub_url(REPO_ID, filename='tedra_data') - + self.model_dir = osp.join(self.current_dir, "models") + self.tedra_dir = osp.join(self.current_dir, "../tedra_data") + def get_smpl_mat(self, vert_ids): mat = torch.as_tensor(np.load(self.cmap_vert_path)).float() diff --git a/lib/net/FBNet.py b/lib/net/FBNet.py index a4392c0544e259b3407559effad9174723590584..23541cc6559b7ae68c8a1f606f0637bb712542f1 100644 --- a/lib/net/FBNet.py +++ b/lib/net/FBNet.py @@ -81,8 +81,7 @@ def 
define_G(input_nc, # print(netG) if len(gpu_ids) > 0: assert (torch.cuda.is_available()) - device=torch.device(f"cuda:{gpu_ids[0]}") - netG = netG.to(device) + netG.cuda(gpu_ids[0]) netG.apply(weights_init) return netG diff --git a/lib/net/HGPIFuNet.py b/lib/net/HGPIFuNet.py index ab60048d6da45cf49ede7f65d44e7aadf9653814..e80f553768177d59caec8d569712fd0dcf461174 100644 --- a/lib/net/HGPIFuNet.py +++ b/lib/net/HGPIFuNet.py @@ -26,6 +26,7 @@ from termcolor import colored from lib.net.BasePIFuNet import BasePIFuNet import torch.nn as nn import torch +import os maskout = False @@ -293,8 +294,14 @@ class HGPIFuNet(BasePIFuNet): # smpl_cmap [B, N, 3] # smpl_vis [B, N, 1] + # set outlier point features (|sdf| >= sdf_clip) to uniform values + smpl_outlier = torch.abs(smpl_sdf).ge(self.sdf_clip) + smpl_sdf[smpl_outlier] = torch.sign(smpl_sdf[smpl_outlier]) + feat_lst = [smpl_sdf] if 'cmap' in self.smpl_feats: + smpl_cmap[smpl_outlier.repeat( + 1, 1, 3)] = smpl_sdf[smpl_outlier].repeat(1, 1, 3) feat_lst.append(smpl_cmap) if 'norm' in self.smpl_feats: feat_lst.append(smpl_norm) diff --git a/lib/net/net_util.py b/lib/net/net_util.py index 2a5028754ca35a69853edd6b9c9f87c4c8c9dda0..5eea98f5c4ce0960aa123b7c529aeac31917d45f 100644 --- a/lib/net/net_util.py +++ b/lib/net/net_util.py @@ -316,7 +316,7 @@ class Vgg19(torch.nn.Module): class VGGLoss(nn.Module): def __init__(self): super(VGGLoss, self).__init__() - self.vgg = Vgg19() + self.vgg = Vgg19().cuda() self.criterion = nn.L1Loss() self.weights = [1.0 / 32, 1.0 / 16, 1.0 / 8, 1.0 / 4, 1.0] diff --git a/lib/pymaf/core/path_config.py b/lib/pymaf/core/path_config.py index 3af447b2bec0e5fb3ba54b9a8336f955e2d5839a..0623abf2552c75688aa4e7c0ad2e03ae74c5f698 100644 --- a/lib/pymaf/core/path_config.py +++ b/lib/pymaf/core/path_config.py @@ -6,18 +6,33 @@ for the datasets and data files necessary to run the code. 
Things you need to change: *_ROOT that indicate the path to each dataset """ import os -from huggingface_hub import hf_hub_url, cached_download # pymaf -pymaf_data_dir = hf_hub_url('Yuliang/PyMAF', '') -smpl_data_dir = hf_hub_url('Yuliang/SMPL', '') -SMPL_MODEL_DIR = os.path.join(smpl_data_dir, 'models/smpl') +pymaf_data_dir = os.path.join(os.path.dirname(__file__), + "../../../data/pymaf_data") -SMPL_MEAN_PARAMS = cached_download(os.path.join(pymaf_data_dir, 'smpl_mean_params.npz'), use_auth_token=os.environ['ICON']) -MESH_DOWNSAMPLEING = cached_download(os.path.join(pymaf_data_dir, 'mesh_downsampling.npz'), use_auth_token=os.environ['ICON']) -CUBE_PARTS_FILE = cached_download(os.path.join(pymaf_data_dir, 'cube_parts.npy'), use_auth_token=os.environ['ICON']) -JOINT_REGRESSOR_TRAIN_EXTRA = cached_download(os.path.join(pymaf_data_dir, 'J_regressor_extra.npy'), use_auth_token=os.environ['ICON']) -JOINT_REGRESSOR_H36M = cached_download(os.path.join(pymaf_data_dir, 'J_regressor_h36m.npy'), use_auth_token=os.environ['ICON']) -VERTEX_TEXTURE_FILE = cached_download(os.path.join(pymaf_data_dir, 'vertex_texture.npy'), use_auth_token=os.environ['ICON']) -SMPL_MEAN_PARAMS = cached_download(os.path.join(pymaf_data_dir, 'smpl_mean_params.npz'), use_auth_token=os.environ['ICON']) -CHECKPOINT_FILE = cached_download(os.path.join(pymaf_data_dir, 'pretrained_model/PyMAF_model_checkpoint.pt'), use_auth_token=os.environ['ICON']) +SMPL_MEAN_PARAMS = os.path.join(pymaf_data_dir, 'smpl_mean_params.npz') +SMPL_MODEL_DIR = os.path.join(pymaf_data_dir, '../smpl_related/models/smpl') + +CUBE_PARTS_FILE = os.path.join(pymaf_data_dir, 'cube_parts.npy') +JOINT_REGRESSOR_TRAIN_EXTRA = os.path.join(pymaf_data_dir, + 'J_regressor_extra.npy') +JOINT_REGRESSOR_H36M = os.path.join(pymaf_data_dir, 'J_regressor_h36m.npy') +VERTEX_TEXTURE_FILE = os.path.join(pymaf_data_dir, 'vertex_texture.npy') +SMPL_MEAN_PARAMS = os.path.join(pymaf_data_dir, 'smpl_mean_params.npz') +SMPL_MODEL_DIR = os.path.join(pymaf_data_dir, '../smpl_related/models/smpl') +CHECKPOINT_FILE = os.path.join(pymaf_data_dir, + 'pretrained_model/PyMAF_model_checkpoint.pt') + +# pare +pare_data_dir = os.path.join(os.path.dirname(__file__), + "../../../data/pare_data") +CFG = os.path.join(pare_data_dir, 'pare/checkpoints/pare_w_3dpw_config.yaml') +CKPT = os.path.join(pare_data_dir, + 'pare/checkpoints/pare_w_3dpw_checkpoint.ckpt') + +# hybrik +hybrik_data_dir = os.path.join(os.path.dirname(__file__), + "../../../data/hybrik_data") +HYBRIK_CFG = os.path.join(hybrik_data_dir, 'hybrik_config.yaml') +HYBRIK_CKPT = os.path.join(hybrik_data_dir, 'pretrained_w_cam.pth') diff --git a/lib/pymaf/models/maf_extractor.py b/lib/pymaf/models/maf_extractor.py index b5ca2279b5ca470b5abc8b3c477951ffcac323a8..8a4a1b2ef69ee16f0fd8a5cf15642ff32766ca97 100644 --- a/lib/pymaf/models/maf_extractor.py +++ b/lib/pymaf/models/maf_extractor.py @@ -3,13 +3,13 @@ from packaging import version import torch import scipy +import os import numpy as np import torch.nn as nn import torch.nn.functional as F from lib.common.config import cfg from lib.pymaf.utils.geometry import projection -from lib.pymaf.core.path_config import MESH_DOWNSAMPLEING import logging @@ -48,7 +48,10 @@ class MAF_Extractor(nn.Module): # downsample SMPL mesh and assign part labels # from https://github.com/nkolot/GraphCMR/blob/master/data/mesh_downsampling.npz - smpl_mesh_graph = np.load(MESH_DOWNSAMPLEING, + mesh_downsampling_path = os.path.join( + os.path.dirname(__file__), + 
"../../../data/pymaf_data/mesh_downsampling.npz") + smpl_mesh_graph = np.load(mesh_downsampling_path, allow_pickle=True, encoding='latin1') diff --git a/lib/pymaf/models/res_module.py b/lib/pymaf/models/res_module.py index dc283f8f5ca946e5edc6e0c17a91763d95ddda75..35d23f4388be7eab70203ff3b6002eba561525b7 100644 --- a/lib/pymaf/models/res_module.py +++ b/lib/pymaf/models/res_module.py @@ -4,11 +4,11 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function +import os import torch import torch.nn as nn import torch.nn.functional as F from collections import OrderedDict -import os from lib.pymaf.core.cfgs import cfg import logging diff --git a/lib/pymaf/models/smpl.py b/lib/pymaf/models/smpl.py index ad0059acc3d88d7bf13d6bca25ed9da1b82bb5fe..1037f2e048ce9a102a2281e3e865b5d24fe60fe6 100644 --- a/lib/pymaf/models/smpl.py +++ b/lib/pymaf/models/smpl.py @@ -2,9 +2,9 @@ import torch import numpy as np -from lib.smplx import SMPL as _SMPL -from lib.smplx.body_models import ModelOutput -from lib.smplx.lbs import vertices2joints +from smplx import SMPL as _SMPL +from smplx.body_models import ModelOutput +from smplx.lbs import vertices2joints from collections import namedtuple from lib.pymaf.core import path_config, constants diff --git a/lib/pymaf/utils/imutils.py b/lib/pymaf/utils/imutils.py index a5068cc078d4c45df3eb76a076f343a33906bb90..268822521a49dd3d4913f92b9f14667499354932 100644 --- a/lib/pymaf/utils/imutils.py +++ b/lib/pymaf/utils/imutils.py @@ -1,14 +1,15 @@ """ This file contains functions that are used to perform data augmentation. """ +from turtle import reset import cv2 import io import torch import numpy as np +import scipy.misc from PIL import Image -from rembg import remove -from rembg.session_factory import new_session -from torchvision.models import detection +from rembg.bg import remove +import human_det from lib.pymaf.core import constants from lib.pymaf.utils.streamer import aug_matrix @@ -44,7 +45,6 @@ def get_bbox(img, det): bbox = bboxes[0, 0, 0].cpu().numpy() return bbox -# Michael Black is def get_transformer(input_res): @@ -86,7 +86,7 @@ def get_transformer(input_res): return [image_to_tensor, mask_to_tensor, image_to_pymaf_tensor, image_to_pixie_tensor, image_to_hybrik_tensor] -def process_image(img_file, hps_type, input_res=512, device=None, seg_path=None): +def process_image(img_file, det, hps_type, input_res=512, device=None, seg_path=None): """Read image, do preprocessing and possibly crop it according to the bounding box. If there are bounding box annotations, use them to crop the image. If no bounding box is specified but openpose detections are available, use them to get the bounding box. 
@@ -104,19 +103,21 @@ def process_image(img_file, det, hps_type, input_res=512, device=None, seg_path=None) img_for_crop = cv2.warpAffine(img_ori, M[0:2, :], (input_res*2, input_res*2), flags=cv2.INTER_CUBIC) - # detection for bbox - detector = detection.maskrcnn_resnet50_fpn(pretrained=True) - detector.eval() - predictions = detector( - [torch.from_numpy(img_for_crop).permute(2, 0, 1) / 255.])[0] - human_ids = torch.where( - predictions["scores"] == predictions["scores"][predictions['labels'] == 1].max()) - bbox = predictions["boxes"][human_ids, :].flatten().detach().cpu().numpy() - - width = bbox[2] - bbox[0] - height = bbox[3] - bbox[1] - center = np.array([(bbox[0] + bbox[2]) / 2.0, - (bbox[1] + bbox[3]) / 2.0]) + if det is not None: + + # detection for bbox + bbox = get_bbox(img_for_crop, det) + + width = bbox[2] - bbox[0] + height = bbox[3] - bbox[1] + center = np.array([(bbox[0] + bbox[2]) / 2.0, + (bbox[1] + bbox[3]) / 2.0]) + + else: + # Assume that the person is centered in the image + height = img_for_crop.shape[0] + width = img_for_crop.shape[1] + center = np.array([width // 2, height // 2]) scale = max(height, width) / 180 @@ -127,8 +128,12 @@ def process_image(img_file, det, hps_type, input_res=512, device=None, seg_path=None) img_np, cropping_parameters = crop( img_for_crop, center, scale, (input_res, input_res)) - img_pil = Image.fromarray(remove(img_np, post_process_mask=True, session=new_session("u2net"))) - + with torch.no_grad(): + buf = io.BytesIO() + Image.fromarray(img_np).save(buf, format='png') + img_pil = Image.open( + io.BytesIO(remove(buf.getvalue()))).convert("RGBA") + # for icon img_rgb = image_to_tensor(img_pil.convert("RGB")) img_mask = torch.tensor(1.0) - (mask_to_tensor(img_pil.split()[-1]) < diff --git a/lib/renderer/mesh.py b/lib/renderer/mesh.py index 1bba90625694abd908c86089914956b63afe0ed6..437467f4e0ebe93bb18b4bf5d91df73db59328b1 100755 --- a/lib/renderer/mesh.py +++ b/lib/renderer/mesh.py @@ -18,7 +18,7 @@ from lib.dataset.mesh_util import SMPLX from lib.common.render_utils import face_vertices import numpy as np -import lib.smplx as smplx +import smplx import trimesh import torch import torch.nn.functional as F diff --git a/lib/renderer/opengl_util.py b/lib/renderer/opengl_util.py index 32fcc949800792f800244ff71037fbe262854314..a9f8801d8e6e47ee5591f1a860dbc9ced79ba4c1 100644 --- a/lib/renderer/opengl_util.py +++ b/lib/renderer/opengl_util.py @@ -15,10 +15,11 @@ # # Contact: ps-license@tuebingen.mpg.de -import os - -from lib.renderer.mesh import load_scan, compute_tangent +from lib.renderer.mesh import load_scan, compute_tangent, compute_normal, load_obj_mesh_mtl +from lib.dataset.mesh_util import projection +from lib.renderer.gl.prt_render import PRTRender +from lib.renderer.camera import Camera +import os import cv2 import math import random diff --git a/lib/smplx/.gitignore b/lib/smplx/.gitignore deleted file mode 100644 index bc56b5dee27e9be17481f64707571bad46f75446..0000000000000000000000000000000000000000 --- a/lib/smplx/.gitignore +++ /dev/null @@ -1,114 +0,0 @@ -#### joe made this: http://goel.io/joe - -#####=== Python ===##### - -# Byte-compiled / optimized / DLL files -__pycache__/ -*.py[cod] -*$py.class - -# C extensions -*.so - -# Distribution / packaging -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -*.egg-info/ -.installed.cfg -*.egg -MANIFEST - -# PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to 
inject date/other infos into it. -*.manifest -*.spec - -# Installer logs -pip-log.txt -pip-delete-this-directory.txt - -# Unit test / coverage reports -htmlcov/ -.tox/ -.coverage -.coverage.* -.cache -nosetests.xml -coverage.xml -*.cover -.hypothesis/ -.pytest_cache/ - -# Translations -*.mo -*.pot - -# Django stuff: -*.log -local_settings.py -db.sqlite3 - -# Flask stuff: -instance/ -.webassets-cache - -# Scrapy stuff: -.scrapy - -# Sphinx documentation -docs/_build/ - -# PyBuilder -target/ - -# Jupyter Notebook -.ipynb_checkpoints - -# pyenv -.python-version - -# celery beat schedule file -celerybeat-schedule - -# SageMath parsed files -*.sage.py - -# Environments -.env -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# Spyder project settings -.spyderproject -.spyproject - -# Rope project settings -.ropeproject - -# mkdocs documentation -/site - -# mypy -.mypy_cache/ -models/ -output/ -outputs/ -transfer_data/ -torch-trust-ncg/ -build/ diff --git a/lib/smplx/LICENSE b/lib/smplx/LICENSE deleted file mode 100644 index 3034a97b164d6e006655493e950314ec58e200cd..0000000000000000000000000000000000000000 --- a/lib/smplx/LICENSE +++ /dev/null @@ -1,58 +0,0 @@ -License - -Software Copyright License for non-commercial scientific research purposes -Please read carefully the following terms and conditions and any accompanying documentation before you download and/or use the SMPL-X/SMPLify-X model, data and software, (the "Model & Software"), including 3D meshes, blend weights, blend shapes, textures, software, scripts, and animations. By downloading and/or using the Model & Software (including downloading, cloning, installing, and any other use of this github repository), you acknowledge that you have read these terms and conditions, understand them, and agree to be bound by them. If you do not agree with these terms and conditions, you must not download and/or use the Model & Software. Any infringement of the terms of this agreement will automatically terminate your rights under this License - -Ownership / Licensees -The Software and the associated materials has been developed at the - -Max Planck Institute for Intelligent Systems (hereinafter "MPI"). - -Any copyright or patent right is owned by and proprietary material of the - -Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (hereinafter “MPG”; MPI and MPG hereinafter collectively “Max-Planck”) - -hereinafter the “Licensor”. - -License Grant -Licensor grants you (Licensee) personally a single-user, non-exclusive, non-transferable, free of charge right: - -To install the Model & Software on computers owned, leased or otherwise controlled by you and/or your organization; -To use the Model & Software for the sole purpose of performing non-commercial scientific research, non-commercial education, or non-commercial artistic projects; -Any other use, in particular any use for commercial purposes, is prohibited. This includes, without limitation, incorporation in a commercial product, use in a commercial service, or production of other artifacts for commercial purposes. The Model & Software may not be reproduced, modified and/or made available in any form to any third party without Max-Planck’s prior written permission. - -The Model & Software may not be used for pornographic purposes or to generate pornographic material whether commercial or not. This license also prohibits the use of the Model & Software to train methods/algorithms/neural networks/etc. for commercial use of any kind. 
By downloading the Model & Software, you agree not to reverse engineer it. - -No Distribution -The Model & Software and the license herein granted shall not be copied, shared, distributed, re-sold, offered for re-sale, transferred or sub-licensed in whole or in part except that you may make one copy for archive purposes only. - -Disclaimer of Representations and Warranties -You expressly acknowledge and agree that the Model & Software results from basic research, is provided “AS IS”, may contain errors, and that any use of the Model & Software is at your sole risk. LICENSOR MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE MODEL & SOFTWARE, NEITHER EXPRESS NOR IMPLIED, AND THE ABSENCE OF ANY LEGAL OR ACTUAL DEFECTS, WHETHER DISCOVERABLE OR NOT. Specifically, and not to limit the foregoing, licensor makes no representations or warranties (i) regarding the merchantability or fitness for a particular purpose of the Model & Software, (ii) that the use of the Model & Software will not infringe any patents, copyrights or other intellectual property rights of a third party, and (iii) that the use of the Model & Software will not cause any damage of any kind to you or a third party. - -Limitation of Liability -Because this Model & Software License Agreement qualifies as a donation, according to Section 521 of the German Civil Code (Bürgerliches Gesetzbuch – BGB) Licensor as a donor is liable for intent and gross negligence only. If the Licensor fraudulently conceals a legal or material defect, they are obliged to compensate the Licensee for the resulting damage. -Licensor shall be liable for loss of data only up to the amount of typical recovery costs which would have arisen had proper and regular data backup measures been taken. For the avoidance of doubt Licensor shall be liable in accordance with the German Product Liability Act in the event of product liability. The foregoing applies also to Licensor’s legal representatives or assistants in performance. Any further liability shall be excluded. -Patent claims generated through the usage of the Model & Software cannot be directed towards the copyright holders. -The Model & Software is provided in the state of development the licensor defines. If modified or extended by Licensee, the Licensor makes no claims about the fitness of the Model & Software and is not responsible for any problems such modifications cause. - -No Maintenance Services -You understand and agree that Licensor is under no obligation to provide either maintenance services, update services, notices of latent defects, or corrections of defects with regard to the Model & Software. Licensor nevertheless reserves the right to update, modify, or discontinue the Model & Software at any time. - -Defects of the Model & Software must be notified in writing to the Licensor with a comprehensible description of the error symptoms. The notification of the defect should enable the reproduction of the error. The Licensee is encouraged to communicate any use, results, modification or publication. - -Publications using the Model & Software -You acknowledge that the Model & Software is a valuable scientific resource and agree to appropriately reference the following paper in any publication making use of the Model & Software. - -Citation: - - -@inproceedings{SMPL-X:2019, - title = {Expressive Body Capture: 3D Hands, Face, and Body from a Single Image}, - author = {Pavlakos, Georgios and Choutas, Vasileios and Ghorbani, Nima and Bolkart, Timo and Osman, Ahmed A. A. 
and Tzionas, Dimitrios and Black, Michael J.}, - booktitle = {Proceedings IEEE Conf. on Computer Vision and Pattern Recognition (CVPR)}, - year = {2019} -} -Commercial licensing opportunities -For commercial uses of the Software, please send email to ps-license@tue.mpg.de - -This Agreement shall be governed by the laws of the Federal Republic of Germany except for the UN Sales Convention. diff --git a/lib/smplx/README.md b/lib/smplx/README.md deleted file mode 100644 index e000e63af4569d8fae38346be370ba815662674d..0000000000000000000000000000000000000000 --- a/lib/smplx/README.md +++ /dev/null @@ -1,207 +0,0 @@ -## SMPL-X: A new joint 3D model of the human body, face and hands together - -[[Paper Page](https://smpl-x.is.tue.mpg.de)] [[Paper](https://ps.is.tuebingen.mpg.de/uploads_file/attachment/attachment/497/SMPL-X.pdf)] -[[Supp. Mat.](https://ps.is.tuebingen.mpg.de/uploads_file/attachment/attachment/498/SMPL-X-supp.pdf)] - -![SMPL-X Examples](./images/teaser_fig.png) - -## Table of Contents - * [License](#license) - * [Description](#description) - * [News](#news) - * [Installation](#installation) - * [Downloading the model](#downloading-the-model) - * [Loading SMPL-X, SMPL+H and SMPL](#loading-smpl-x-smplh-and-smpl) - * [SMPL and SMPL+H setup](#smpl-and-smplh-setup) - * [Model loading](https://github.com/vchoutas/smplx#model-loading) - * [MANO and FLAME correspondences](#mano-and-flame-correspondences) - * [Example](#example) - * [Modifying the global pose of the model](#modifying-the-global-pose-of-the-model) - * [Citation](#citation) - * [Acknowledgments](#acknowledgments) - * [Contact](#contact) - -## License - -Software Copyright License for **non-commercial scientific research purposes**. -Please read carefully the [terms and conditions](https://github.com/vchoutas/smplx/blob/master/LICENSE) and any accompanying documentation before you download and/or use the SMPL-X/SMPLify-X model, data and software, (the "Model & Software"), including 3D meshes, blend weights, blend shapes, textures, software, scripts, and animations. By downloading and/or using the Model & Software (including downloading, cloning, installing, and any other use of this github repository), you acknowledge that you have read these terms and conditions, understand them, and agree to be bound by them. If you do not agree with these terms and conditions, you must not download and/or use the Model & Software. Any infringement of the terms of this agreement will automatically terminate your rights under this [License](./LICENSE). - -## Disclaimer - -The original images used for the figures 1 and 2 of the paper can be found in this link. -The images in the paper are used under license from gettyimages.com. -We have acquired the right to use them in the publication, but redistribution is not allowed. -Please follow the instructions on the given link to acquire right of usage. -Our results are obtained on the 483 × 724 pixels resolution of the original images. - -## Description - -*SMPL-X* (SMPL eXpressive) is a unified body model with shape parameters trained jointly for the -face, hands and body. *SMPL-X* uses standard vertex based linear blend skinning with learned corrective blend -shapes, has N = 10, 475 vertices and K = 54 joints, -which include joints for the neck, jaw, eyeballs and fingers. -SMPL-X is defined by a function M(θ, β, ψ), where θ is the pose parameters, β the shape parameters and -ψ the facial expression parameters. 
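-
-A minimal sketch of this function in code (assuming the `smplx` package is installed and the model archives are unpacked under `models/`, as described below):
-
-```Python
-import torch
-import smplx
-
-# build M(θ, β, ψ) and evaluate it at the mean shape and pose
-model = smplx.create('models', model_type='smplx', gender='neutral')
-output = model(betas=torch.zeros([1, model.num_betas]), return_verts=True)
-print(output.vertices.shape)  # torch.Size([1, 10475, 3]), i.e. N = 10,475 vertices
-```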
- -## News - -- 3 November 2020: We release the code to transfer between the models in the - SMPL family. For more details on the code, go to this [readme - file](./transfer_model/README.md). A detailed explanation on how the mappings - were extracted can be found [here](./transfer_model/docs/transfer.md). -- 23 September 2020: A UV map is now available for SMPL-X, please check the - Downloads section of the website. -- 20 August 2020: The full shape and expression space of SMPL-X are now available. - -## Installation - -To install the model please follow the next steps in the specified order: -1. To install from PyPi simply run: - ```Shell - pip install smplx[all] - ``` -2. Clone this repository and install it using the *setup.py* script: -```Shell -git clone https://github.com/vchoutas/smplx -python setup.py install -``` - -## Downloading the model - -To download the *SMPL-X* model go to [this project website](https://smpl-x.is.tue.mpg.de) and register to get access to the downloads section. - -To download the *SMPL+H* model go to [this project website](http://mano.is.tue.mpg.de) and register to get access to the downloads section. - -To download the *SMPL* model go to [this](http://smpl.is.tue.mpg.de) (male and female models) and [this](http://smplify.is.tue.mpg.de) (gender neutral model) project website and register to get access to the downloads section. - -## Loading SMPL-X, SMPL+H and SMPL - -### SMPL and SMPL+H setup - -The loader gives the option to use any of the SMPL-X, SMPL+H, SMPL, and MANO models. Depending on the model you want to use, please follow the respective download instructions. To switch between MANO, SMPL, SMPL+H and SMPL-X just change the *model_path* or *model_type* parameters. For more details please check the docs of the model classes. -Before using SMPL and SMPL+H you should follow the instructions in [tools/README.md](./tools/README.md) to remove the -Chumpy objects from both model pkls, as well as merge the MANO parameters with SMPL+H. - -### Model loading - -You can either use the [create](https://github.com/vchoutas/smplx/blob/c63c02b478c5c6f696491ed9167e3af6b08d89b1/smplx/body_models.py#L54) -function from [body_models](./smplx/body_models.py) or directly call the constructor for the -[SMPL](https://github.com/vchoutas/smplx/blob/c63c02b478c5c6f696491ed9167e3af6b08d89b1/smplx/body_models.py#L106), -[SMPL+H](https://github.com/vchoutas/smplx/blob/c63c02b478c5c6f696491ed9167e3af6b08d89b1/smplx/body_models.py#L395) and -[SMPL-X](https://github.com/vchoutas/smplx/blob/c63c02b478c5c6f696491ed9167e3af6b08d89b1/smplx/body_models.py#L628) model. The path to the model can either be the path to the file with the parameters or a directory with the following structure: -```bash -models -├── smpl -│   ├── SMPL_FEMALE.pkl -│   └── SMPL_MALE.pkl -│   └── SMPL_NEUTRAL.pkl -├── smplh -│   ├── SMPLH_FEMALE.pkl -│   └── SMPLH_MALE.pkl -├── mano -| ├── MANO_RIGHT.pkl -| └── MANO_LEFT.pkl -└── smplx - ├── SMPLX_FEMALE.npz - ├── SMPLX_FEMALE.pkl - ├── SMPLX_MALE.npz - ├── SMPLX_MALE.pkl - ├── SMPLX_NEUTRAL.npz - └── SMPLX_NEUTRAL.pkl -``` - - -## MANO and FLAME correspondences - -The vertex correspondences between SMPL-X and MANO, FLAME can be downloaded -from [the project website](https://smpl-x.is.tue.mpg.de). If you have extracted -the correspondence data in the folder *correspondences*, then use the following -scripts to visualize them: - -1. 
To view MANO correspondences run the following command: - -``` -python examples/vis_mano_vertices.py --model-folder $SMPLX_FOLDER --corr-fname correspondences/MANO_SMPLX_vertex_ids.pkl -``` - -2. To view FLAME correspondences run the following command: - -``` -python examples/vis_flame_vertices.py --model-folder $SMPLX_FOLDER --corr-fname correspondences/SMPL-X__FLAME_vertex_ids.npy -``` - -## Example - -After installing the *smplx* package and downloading the model parameters you should be able to run the *demo.py* -script to visualize the results. For this step you have to install the [pyrender](https://pyrender.readthedocs.io/en/latest/index.html) and [trimesh](https://trimsh.org/) packages. - -`python examples/demo.py --model-folder $SMPLX_FOLDER --plot-joints=True --gender="neutral"` - -![SMPL-X Examples](./images/example.png) - -## Modifying the global pose of the model - -If you want to modify the global pose of the model, i.e. the root rotation and -translation, to a new coordinate system for example, you need to take into -account that the model rotation uses the pelvis as the center of rotation. A -more detailed description can be found in the following -[link](https://www.dropbox.com/scl/fi/zkatuv5shs8d4tlwr8ecc/Change-parameters-to-new-coordinate-system.paper?dl=0&rlkey=lotq1sh6wzkmyttisc05h0in0). -If something is not clear, please let me know so that I can update the -description. - -## Citation - -Depending on which model is loaded for your project, i.e. SMPL-X or SMPL+H or SMPL, please cite the most relevant work below, listed in the same order: - -``` -@inproceedings{SMPL-X:2019, - title = {Expressive Body Capture: 3D Hands, Face, and Body from a Single Image}, - author = {Pavlakos, Georgios and Choutas, Vasileios and Ghorbani, Nima and Bolkart, Timo and Osman, Ahmed A. A. and Tzionas, Dimitrios and Black, Michael J.}, - booktitle = {Proceedings IEEE Conf. on Computer Vision and Pattern Recognition (CVPR)}, - year = {2019} -} -``` - -``` -@article{MANO:SIGGRAPHASIA:2017, - title = {Embodied Hands: Modeling and Capturing Hands and Bodies Together}, - author = {Romero, Javier and Tzionas, Dimitrios and Black, Michael J.}, - journal = {ACM Transactions on Graphics, (Proc. SIGGRAPH Asia)}, - volume = {36}, - number = {6}, - series = {245:1--245:17}, - month = nov, - year = {2017}, - month_numeric = {11} - } -``` - -``` -@article{SMPL:2015, - author = {Loper, Matthew and Mahmood, Naureen and Romero, Javier and Pons-Moll, Gerard and Black, Michael J.}, - title = {{SMPL}: A Skinned Multi-Person Linear Model}, - journal = {ACM Transactions on Graphics, (Proc. SIGGRAPH Asia)}, - month = oct, - number = {6}, - pages = {248:1--248:16}, - publisher = {ACM}, - volume = {34}, - year = {2015} -} -``` - -This repository was originally developed for SMPL-X / SMPLify-X (CVPR 2019), you might be interested in having a look: [https://smpl-x.is.tue.mpg.de](https://smpl-x.is.tue.mpg.de). - -## Acknowledgments - -### Facial Contour - -Special thanks to [Soubhik Sanyal](https://github.com/soubhiksanyal) for sharing the Tensorflow code used for the facial -landmarks. - -## Contact -The code of this repository was implemented by [Vassilis Choutas](vassilis.choutas@tuebingen.mpg.de). - -For questions, please contact [smplx@tue.mpg.de](smplx@tue.mpg.de). - -For commercial licensing (and all related questions for business applications), please contact [ps-licensing@tue.mpg.de](ps-licensing@tue.mpg.de). 
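-
-A small sketch of the pelvis-centered rotation described in the "Modifying the global pose" section above (assuming the same `smplx` setup as in the earlier examples; `R_new` stands in for the desired world rotation):
-
-```Python
-import torch
-import smplx
-
-model = smplx.create('models', model_type='smplx')
-out = model(return_verts=True)
-
-# global_orient rotates about the pelvis (joint 0), not the world origin,
-# so a change of world frame must pivot the vertices around that joint
-R_new = torch.eye(3)                 # stand-in rotation matrix
-pelvis = out.joints[:, [0], :]       # (1, 1, 3)
-verts = (out.vertices - pelvis) @ R_new.T + pelvis
-```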
diff --git a/lib/smplx/__init__.py b/lib/smplx/__init__.py deleted file mode 100644 index 886949df670691d1ef5995737cafa285224826c4..0000000000000000000000000000000000000000 --- a/lib/smplx/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -# -*- coding: utf-8 -*- - -# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is -# holder of all proprietary rights on this computer program. -# You can only use this computer program if you have closed -# a license agreement with MPG or you get the right to use the computer -# program from someone who is authorized to grant you that right. -# Any use of the computer program without a valid license is prohibited and -# liable to prosecution. -# -# Copyright©2019 Max-Planck-Gesellschaft zur Förderung -# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute -# for Intelligent Systems. All rights reserved. -# -# Contact: ps-license@tuebingen.mpg.de - -from .body_models import ( - create, - SMPL, - SMPLH, - SMPLX, - MANO, - FLAME, - build_layer, - SMPLLayer, - SMPLHLayer, - SMPLXLayer, - MANOLayer, - FLAMELayer, -) diff --git a/lib/smplx/body_models.py b/lib/smplx/body_models.py deleted file mode 100644 index 9311a731e8385530b0395e5307f7c46919b4b932..0000000000000000000000000000000000000000 --- a/lib/smplx/body_models.py +++ /dev/null @@ -1,2416 +0,0 @@ -# -*- coding: utf-8 -*- - -# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is -# holder of all proprietary rights on this computer program. -# You can only use this computer program if you have closed -# a license agreement with MPG or you get the right to use the computer -# program from someone who is authorized to grant you that right. -# Any use of the computer program without a valid license is prohibited and -# liable to prosecution. -# -# Copyright©2019 Max-Planck-Gesellschaft zur Förderung -# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute -# for Intelligent Systems. All rights reserved. 
-# -# Contact: ps-license@tuebingen.mpg.de - -from typing import Optional, Dict, Union -import os -import os.path as osp -import pickle - -import numpy as np -from termcolor import colored - -import torch -import torch.nn as nn -from collections import namedtuple -from huggingface_hub import cached_download - -import logging -logging.getLogger("smplx").setLevel(logging.ERROR) - -from .lbs import ( - lbs, vertices2landmarks, find_dynamic_lmk_idx_and_bcoords) - -from .vertex_ids import vertex_ids as VERTEX_IDS -from .utils import ( - Struct, to_np, to_tensor, Tensor, Array, - SMPLOutput, - SMPLHOutput, - SMPLXOutput, - MANOOutput, - FLAMEOutput, - find_joint_kin_chain) -from .vertex_joint_selector import VertexJointSelector - -ModelOutput = namedtuple('ModelOutput', - ['vertices', 'joints', 'full_pose', 'betas', - 'global_orient', - 'body_pose', 'expression', - 'left_hand_pose', 'right_hand_pose', - 'jaw_pose']) -ModelOutput.__new__.__defaults__ = (None,) * len(ModelOutput._fields) - -class SMPL(nn.Module): - - NUM_JOINTS = 23 - NUM_BODY_JOINTS = 23 - SHAPE_SPACE_DIM = 300 - - def __init__( - self, model_path: str, - kid_template_path: str = '', - data_struct: Optional[Struct] = None, - create_betas: bool = True, - betas: Optional[Tensor] = None, - num_betas: int = 10, - create_global_orient: bool = True, - global_orient: Optional[Tensor] = None, - create_body_pose: bool = True, - body_pose: Optional[Tensor] = None, - create_transl: bool = True, - transl: Optional[Tensor] = None, - dtype=torch.float32, - batch_size: int = 1, - joint_mapper=None, - gender: str = 'neutral', - age: str = 'adult', - vertex_ids: Dict[str, int] = None, - v_template: Optional[Union[Tensor, Array]] = None, - v_personal: Optional[Union[Tensor, Array]] = None, - **kwargs - ) -> None: - ''' SMPL model constructor - - Parameters - ---------- - model_path: str - The path to the folder or to the file where the model - parameters are stored - data_struct: Strct - A struct object. If given, then the parameters of the model are - read from the object. Otherwise, the model tries to read the - parameters from the given `model_path`. (default = None) - create_global_orient: bool, optional - Flag for creating a member variable for the global orientation - of the body. (default = True) - global_orient: torch.tensor, optional, Bx3 - The default value for the global orientation variable. - (default = None) - create_body_pose: bool, optional - Flag for creating a member variable for the pose of the body. - (default = True) - body_pose: torch.tensor, optional, Bx(Body Joints * 3) - The default value for the body pose variable. - (default = None) - num_betas: int, optional - Number of shape components to use - (default = 10). - create_betas: bool, optional - Flag for creating a member variable for the shape space - (default = True). - betas: torch.tensor, optional, Bx10 - The default value for the shape member variable. - (default = None) - create_transl: bool, optional - Flag for creating a member variable for the translation - of the body. (default = True) - transl: torch.tensor, optional, Bx3 - The default value for the transl variable. - (default = None) - dtype: torch.dtype, optional - The data type for the created variables - batch_size: int, optional - The batch size used for creating the member variables - joint_mapper: object, optional - An object that re-maps the joints. Useful if one wants to - re-order the SMPL joints to some other convention (e.g. 
MSCOCO) - (default = None) - gender: str, optional - Which gender to load - vertex_ids: dict, optional - A dictionary containing the indices of the extra vertices that - will be selected - ''' - - self.gender = gender - self.age = age - - if data_struct is None: - model_fn = 'SMPL_{}.{ext}'.format(gender.upper(), ext='pkl') - smpl_path = cached_download(os.path.join(model_path, model_fn), use_auth_token=os.environ['ICON']) - - with open(smpl_path, 'rb') as smpl_file: - data_struct = Struct(**pickle.load(smpl_file, - encoding='latin1')) - - super(SMPL, self).__init__() - self.batch_size = batch_size - shapedirs = data_struct.shapedirs - if (shapedirs.shape[-1] < self.SHAPE_SPACE_DIM): - # print(f'WARNING: You are using a {self.name()} model, with only' - # ' 10 shape coefficients.') - num_betas = min(num_betas, 10) - else: - num_betas = min(num_betas, self.SHAPE_SPACE_DIM) - - if self.age=='kid': - v_template_smil = np.load(kid_template_path) - v_template_smil -= np.mean(v_template_smil, axis=0) - v_template_diff = np.expand_dims(v_template_smil - data_struct.v_template, axis=2) - shapedirs = np.concatenate((shapedirs[:, :, :num_betas], v_template_diff), axis=2) - num_betas = num_betas + 1 - - self._num_betas = num_betas - shapedirs = shapedirs[:, :, :num_betas] - # The shape components - self.register_buffer( - 'shapedirs', - to_tensor(to_np(shapedirs), dtype=dtype)) - - if vertex_ids is None: - # SMPL and SMPL-H share the same topology, so any extra joints can - # be drawn from the same place - vertex_ids = VERTEX_IDS['smplh'] - - self.dtype = dtype - - self.joint_mapper = joint_mapper - - self.vertex_joint_selector = VertexJointSelector( - vertex_ids=vertex_ids, **kwargs) - - self.faces = data_struct.f - self.register_buffer('faces_tensor', - to_tensor(to_np(self.faces, dtype=np.int64), - dtype=torch.long)) - - if create_betas: - if betas is None: - default_betas = torch.zeros( - [batch_size, self.num_betas], dtype=dtype) - else: - if torch.is_tensor(betas): - default_betas = betas.clone().detach() - else: - default_betas = torch.tensor(betas, dtype=dtype) - - self.register_parameter( - 'betas', nn.Parameter(default_betas, requires_grad=True)) - - # The tensor that contains the global rotation of the model - # It is separated from the pose of the joints in case we wish to - # optimize only over one of them - if create_global_orient: - if global_orient is None: - default_global_orient = torch.zeros( - [batch_size, 3], dtype=dtype) - else: - if torch.is_tensor(global_orient): - default_global_orient = global_orient.clone().detach() - else: - default_global_orient = torch.tensor( - global_orient, dtype=dtype) - - global_orient = nn.Parameter(default_global_orient, - requires_grad=True) - self.register_parameter('global_orient', global_orient) - - if create_body_pose: - if body_pose is None: - default_body_pose = torch.zeros( - [batch_size, self.NUM_BODY_JOINTS * 3], dtype=dtype) - else: - if torch.is_tensor(body_pose): - default_body_pose = body_pose.clone().detach() - else: - default_body_pose = torch.tensor(body_pose, - dtype=dtype) - self.register_parameter( - 'body_pose', - nn.Parameter(default_body_pose, requires_grad=True)) - - if create_transl: - if transl is None: - default_transl = torch.zeros([batch_size, 3], - dtype=dtype, - requires_grad=True) - else: - default_transl = torch.tensor(transl, dtype=dtype) - self.register_parameter( - 'transl', nn.Parameter(default_transl, requires_grad=True)) - - if v_template is None: - v_template = data_struct.v_template - - if not 
torch.is_tensor(v_template): - v_template = to_tensor(to_np(v_template), dtype=dtype) - - if v_personal is not None: - v_personal = to_tensor(to_np(v_personal), dtype=dtype) - v_template += v_personal - - # The vertices of the template model - self.register_buffer('v_template', v_template) - - j_regressor = to_tensor(to_np( - data_struct.J_regressor), dtype=dtype) - self.register_buffer('J_regressor', j_regressor) - - # Pose blend shape basis: 6890 x 3 x 207, reshaped to 6890*3 x 207 - num_pose_basis = data_struct.posedirs.shape[-1] - # 207 x 20670 - posedirs = np.reshape(data_struct.posedirs, [-1, num_pose_basis]).T - self.register_buffer('posedirs', - to_tensor(to_np(posedirs), dtype=dtype)) - - # indices of parents for each joints - parents = to_tensor(to_np(data_struct.kintree_table[0])).long() - parents[0] = -1 - self.register_buffer('parents', parents) - - self.register_buffer( - 'lbs_weights', to_tensor(to_np(data_struct.weights), dtype=dtype)) - - @property - def num_betas(self): - return self._num_betas - - @property - def num_expression_coeffs(self): - return 0 - - def create_mean_pose(self, data_struct) -> Tensor: - pass - - def name(self) -> str: - return 'SMPL' - - @torch.no_grad() - def reset_params(self, **params_dict) -> None: - for param_name, param in self.named_parameters(): - if param_name in params_dict: - param[:] = torch.tensor(params_dict[param_name]) - else: - param.fill_(0) - - def get_num_verts(self) -> int: - return self.v_template.shape[0] - - def get_num_faces(self) -> int: - return self.faces.shape[0] - - def extra_repr(self) -> str: - msg = [ - f'Gender: {self.gender.upper()}', - f'Number of joints: {self.J_regressor.shape[0]}', - f'Betas: {self.num_betas}', - ] - return '\n'.join(msg) - - def forward( - self, - betas: Optional[Tensor] = None, - body_pose: Optional[Tensor] = None, - global_orient: Optional[Tensor] = None, - transl: Optional[Tensor] = None, - return_verts=True, - return_full_pose: bool = False, - pose2rot: bool = True, - **kwargs - ) -> SMPLOutput: - ''' Forward pass for the SMPL model - - Parameters - ---------- - global_orient: torch.tensor, optional, shape Bx3 - If given, ignore the member variable and use it as the global - rotation of the body. Useful if someone wishes to predicts this - with an external model. (default=None) - betas: torch.tensor, optional, shape BxN_b - If given, ignore the member variable `betas` and use it - instead. For example, it can used if shape parameters - `betas` are predicted from some external model. - (default=None) - body_pose: torch.tensor, optional, shape Bx(J*3) - If given, ignore the member variable `body_pose` and use it - instead. For example, it can used if someone predicts the - pose of the body joints are predicted from some external model. - It should be a tensor that contains joint rotations in - axis-angle format. (default=None) - transl: torch.tensor, optional, shape Bx3 - If given, ignore the member variable `transl` and use it - instead. For example, it can used if the translation - `transl` is predicted from some external model. - (default=None) - return_verts: bool, optional - Return the vertices. 
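A note on the transposed pose basis registered above: the 6890 x 3 x 207 pose blend-shape tensor is flattened and transposed to 207 x 20670 so that, inside lbs(), applying it to a batch of rotation features is a single matrix product. The equivalence, sketched with random stand-ins for the learned basis:

import numpy as np

V, P = 6890, 207                            # SMPL vertices; 23 joints * 9 rotation entries
posedirs_raw = np.random.randn(V, 3, P)     # layout as stored in the model file
posedirs = posedirs_raw.reshape(-1, P).T    # what the constructor registers: (207, 20670)

pose_feature = np.random.randn(1, P)        # flattened (R - I) per joint, per batch item
offsets = (pose_feature @ posedirs).reshape(-1, V, 3)
assert offsets.shape == (1, V, 3)           # per-vertex pose-corrective offsets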
(default=True) - return_full_pose: bool, optional - Returns the full axis-angle pose vector (default=False) - - Returns - ------- - ''' - # If no shape and pose parameters are passed along, then use the - # ones from the module - global_orient = (global_orient if global_orient is not None else - self.global_orient) - body_pose = body_pose if body_pose is not None else self.body_pose - betas = betas if betas is not None else self.betas - - apply_trans = transl is not None or hasattr(self, 'transl') - if transl is None and hasattr(self, 'transl'): - transl = self.transl - - full_pose = torch.cat([global_orient, body_pose], dim=1) - - batch_size = max(betas.shape[0], global_orient.shape[0], - body_pose.shape[0]) - - if betas.shape[0] != batch_size: - num_repeats = int(batch_size / betas.shape[0]) - betas = betas.expand(num_repeats, -1) - - vertices, joints = lbs(betas, full_pose, self.v_template, - self.shapedirs, self.posedirs, - self.J_regressor, self.parents, - self.lbs_weights, pose2rot=pose2rot) - - joints = self.vertex_joint_selector(vertices, joints) - # Map the joints to the current dataset - if self.joint_mapper is not None: - joints = self.joint_mapper(joints) - - if apply_trans: - joints += transl.unsqueeze(dim=1) - vertices += transl.unsqueeze(dim=1) - - output = SMPLOutput(vertices=vertices if return_verts else None, - global_orient=global_orient, - body_pose=body_pose, - joints=joints, - betas=betas, - full_pose=full_pose if return_full_pose else None) - - return output - - -class SMPLLayer(SMPL): - def __init__( - self, - *args, - **kwargs - ) -> None: - # Just create a SMPL module without any member variables - super(SMPLLayer, self).__init__( - create_body_pose=False, - create_betas=False, - create_global_orient=False, - create_transl=False, - *args, - **kwargs, - ) - - def forward( - self, - betas: Optional[Tensor] = None, - body_pose: Optional[Tensor] = None, - global_orient: Optional[Tensor] = None, - transl: Optional[Tensor] = None, - return_verts=True, - return_full_pose: bool = False, - pose2rot: bool = True, - **kwargs - ) -> SMPLOutput: - ''' Forward pass for the SMPL model - - Parameters - ---------- - global_orient: torch.tensor, optional, shape Bx3x3 - Global rotation of the body. Useful if someone wishes to - predicts this with an external model. It is expected to be in - rotation matrix format. (default=None) - betas: torch.tensor, optional, shape BxN_b - Shape parameters. For example, it can used if shape parameters - `betas` are predicted from some external model. - (default=None) - body_pose: torch.tensor, optional, shape BxJx3x3 - Body pose. For example, it can used if someone predicts the - pose of the body joints are predicted from some external model. - It should be a tensor that contains joint rotations in - rotation matrix format. (default=None) - transl: torch.tensor, optional, shape Bx3 - Translation vector of the body. - For example, it can used if the translation - `transl` is predicted from some external model. - (default=None) - return_verts: bool, optional - Return the vertices. 
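As the docstring above spells out, each argument to SMPL.forward() overrides the registered parameter for that call only; omitted arguments fall back to the module's own betas, body_pose, global_orient and transl. A hedged usage sketch, again assuming the upstream smplx package and a locally available SMPL_NEUTRAL.pkl (the vendored constructor above instead fetched it via cached_download):

import torch
from smplx import SMPL

model = SMPL(model_path='models/smpl', gender='neutral')    # illustrative path
betas = 0.5 * torch.randn(1, 10)                            # per-call shape override
body_pose = torch.zeros(1, model.NUM_BODY_JOINTS * 3)       # axis-angle, 23 * 3
out = model(betas=betas, body_pose=body_pose, return_full_pose=True)
print(out.vertices.shape, out.full_pose.shape)              # (1, 6890, 3), (1, 72)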
(default=True) - return_full_pose: bool, optional - Returns the full axis-angle pose vector (default=False) - - Returns - ------- - ''' - model_vars = [betas, global_orient, body_pose, transl] - batch_size = 1 - for var in model_vars: - if var is None: - continue - batch_size = max(batch_size, len(var)) - device, dtype = self.shapedirs.device, self.shapedirs.dtype - if global_orient is None: - global_orient = torch.eye(3, device=device, dtype=dtype).view( - 1, 1, 3, 3).expand(batch_size, -1, -1, -1).contiguous() - if body_pose is None: - body_pose = torch.eye(3, device=device, dtype=dtype).view( - 1, 1, 3, 3).expand( - batch_size, self.NUM_BODY_JOINTS, -1, -1).contiguous() - if betas is None: - betas = torch.zeros([batch_size, self.num_betas], - dtype=dtype, device=device) - if transl is None: - transl = torch.zeros([batch_size, 3], dtype=dtype, device=device) - full_pose = torch.cat( - [global_orient.reshape(-1, 1, 3, 3), - body_pose.reshape(-1, self.NUM_BODY_JOINTS, 3, 3)], - dim=1) - - vertices, joints = lbs(betas, full_pose, self.v_template, - self.shapedirs, self.posedirs, - self.J_regressor, self.parents, - self.lbs_weights, - pose2rot=False) - - joints = self.vertex_joint_selector(vertices, joints) - # Map the joints to the current dataset - if self.joint_mapper is not None: - joints = self.joint_mapper(joints) - - if transl is not None: - joints += transl.unsqueeze(dim=1) - vertices += transl.unsqueeze(dim=1) - - output = SMPLOutput(vertices=vertices if return_verts else None, - global_orient=global_orient, - body_pose=body_pose, - joints=joints, - betas=betas, - full_pose=full_pose if return_full_pose else None) - - return output - - -class SMPLH(SMPL): - - # The hand joints are replaced by MANO - NUM_BODY_JOINTS = SMPL.NUM_JOINTS - 2 - NUM_HAND_JOINTS = 15 - NUM_JOINTS = NUM_BODY_JOINTS + 2 * NUM_HAND_JOINTS - - def __init__( - self, model_path, - kid_template_path: str = '', - data_struct: Optional[Struct] = None, - create_left_hand_pose: bool = True, - left_hand_pose: Optional[Tensor] = None, - create_right_hand_pose: bool = True, - right_hand_pose: Optional[Tensor] = None, - use_pca: bool = True, - num_pca_comps: int = 6, - flat_hand_mean: bool = False, - batch_size: int = 1, - gender: str = 'neutral', - age: str = 'adult', - dtype=torch.float32, - vertex_ids=None, - use_compressed: bool = True, - ext: str = 'pkl', - **kwargs - ) -> None: - ''' SMPLH model constructor - - Parameters - ---------- - model_path: str - The path to the folder or to the file where the model - parameters are stored - data_struct: Strct - A struct object. If given, then the parameters of the model are - read from the object. Otherwise, the model tries to read the - parameters from the given `model_path`. (default = None) - create_left_hand_pose: bool, optional - Flag for creating a member variable for the pose of the left - hand. (default = True) - left_hand_pose: torch.tensor, optional, BxP - The default value for the left hand pose member variable. - (default = None) - create_right_hand_pose: bool, optional - Flag for creating a member variable for the pose of the right - hand. (default = True) - right_hand_pose: torch.tensor, optional, BxP - The default value for the right hand pose member variable. - (default = None) - num_pca_comps: int, optional - The number of PCA components to use for each hand. - (default = 6) - flat_hand_mean: bool, optional - If False, then the pose of the hand is initialized to False. 
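The Layer variant just shown is the stateless twin: it registers no nn.Parameters, expects poses as rotation matrices rather than axis-angle, and fills any omitted input with identity rotations or zeros at the inferred batch size. A sketch under the same model-file assumptions:

import torch
from smplx import SMPLLayer

layer = SMPLLayer(model_path='models/smpl')         # illustrative path
B = 4
body_pose = torch.eye(3).expand(B, layer.NUM_BODY_JOINTS, 3, 3)  # identity rotations
out = layer(body_pose=body_pose, betas=torch.zeros(B, layer.num_betas))
print(out.vertices.shape)   # (4, 6890, 3); global_orient/transl defaulted internally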
- batch_size: int, optional - The batch size used for creating the member variables - gender: str, optional - Which gender to load - dtype: torch.dtype, optional - The data type for the created variables - vertex_ids: dict, optional - A dictionary containing the indices of the extra vertices that - will be selected - ''' - - self.num_pca_comps = num_pca_comps - # If no data structure is passed, then load the data from the given - # model folder - if data_struct is None: - # Load the model - if osp.isdir(model_path): - model_fn = 'SMPLH_{}.{ext}'.format(gender.upper(), ext=ext) - smplh_path = os.path.join(model_path, model_fn) - else: - smplh_path = model_path - assert osp.exists(smplh_path), 'Path {} does not exist!'.format( - smplh_path) - - if ext == 'pkl': - with open(smplh_path, 'rb') as smplh_file: - model_data = pickle.load(smplh_file, encoding='latin1') - elif ext == 'npz': - model_data = np.load(smplh_path, allow_pickle=True) - else: - raise ValueError('Unknown extension: {}'.format(ext)) - data_struct = Struct(**model_data) - - if vertex_ids is None: - vertex_ids = VERTEX_IDS['smplh'] - - super(SMPLH, self).__init__( - model_path=model_path, - kid_template_path=kid_template_path, - data_struct=data_struct, - batch_size=batch_size, vertex_ids=vertex_ids, gender=gender, age=age, - use_compressed=use_compressed, dtype=dtype, ext=ext, **kwargs) - - self.use_pca = use_pca - self.num_pca_comps = num_pca_comps - self.flat_hand_mean = flat_hand_mean - - left_hand_components = data_struct.hands_componentsl[:num_pca_comps] - right_hand_components = data_struct.hands_componentsr[:num_pca_comps] - - self.np_left_hand_components = left_hand_components - self.np_right_hand_components = right_hand_components - if self.use_pca: - self.register_buffer( - 'left_hand_components', - torch.tensor(left_hand_components, dtype=dtype)) - self.register_buffer( - 'right_hand_components', - torch.tensor(right_hand_components, dtype=dtype)) - - if self.flat_hand_mean: - left_hand_mean = np.zeros_like(data_struct.hands_meanl) - else: - left_hand_mean = data_struct.hands_meanl - - if self.flat_hand_mean: - right_hand_mean = np.zeros_like(data_struct.hands_meanr) - else: - right_hand_mean = data_struct.hands_meanr - - self.register_buffer('left_hand_mean', - to_tensor(left_hand_mean, dtype=self.dtype)) - self.register_buffer('right_hand_mean', - to_tensor(right_hand_mean, dtype=self.dtype)) - - # Create the buffers for the pose of the left hand - hand_pose_dim = num_pca_comps if use_pca else 3 * self.NUM_HAND_JOINTS - if create_left_hand_pose: - if left_hand_pose is None: - default_lhand_pose = torch.zeros([batch_size, hand_pose_dim], - dtype=dtype) - else: - default_lhand_pose = torch.tensor(left_hand_pose, dtype=dtype) - - left_hand_pose_param = nn.Parameter(default_lhand_pose, - requires_grad=True) - self.register_parameter('left_hand_pose', - left_hand_pose_param) - - if create_right_hand_pose: - if right_hand_pose is None: - default_rhand_pose = torch.zeros([batch_size, hand_pose_dim], - dtype=dtype) - else: - default_rhand_pose = torch.tensor(right_hand_pose, dtype=dtype) - - right_hand_pose_param = nn.Parameter(default_rhand_pose, - requires_grad=True) - self.register_parameter('right_hand_pose', - right_hand_pose_param) - - # Create the buffer for the mean pose. 
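The buffers registered above implement the MANO PCA hand parameterization: when use_pca is True, the user-facing BxP coefficients are projected back to the full 45-dimensional axis-angle hand pose through the top num_pca_comps principal components. The projection in forward() is just this einsum, shown with random stand-ins for the learned components:

import torch

num_pca_comps, B = 6, 2
left_hand_components = torch.randn(num_pca_comps, 45)  # stand-in for hands_componentsl
left_hand_pose = torch.randn(B, num_pca_comps)         # the optimizable coefficients

full_aa = torch.einsum('bi,ij->bj', left_hand_pose, left_hand_components)
assert full_aa.shape == (B, 45)                        # 15 hand joints * 3 axis-angle dims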
- pose_mean_tensor = self.create_mean_pose( - data_struct, flat_hand_mean=flat_hand_mean) - if not torch.is_tensor(pose_mean_tensor): - pose_mean_tensor = torch.tensor(pose_mean_tensor, dtype=dtype) - self.register_buffer('pose_mean', pose_mean_tensor) - - def create_mean_pose(self, data_struct, flat_hand_mean=False): - # Create the array for the mean pose. If flat_hand is false, then use - # the mean that is given by the data, rather than the flat open hand - global_orient_mean = torch.zeros([3], dtype=self.dtype) - body_pose_mean = torch.zeros([self.NUM_BODY_JOINTS * 3], - dtype=self.dtype) - - pose_mean = torch.cat([global_orient_mean, body_pose_mean, - self.left_hand_mean, - self.right_hand_mean], dim=0) - return pose_mean - - def name(self) -> str: - return 'SMPL+H' - - def extra_repr(self): - msg = super(SMPLH, self).extra_repr() - msg = [msg] - if self.use_pca: - msg.append(f'Number of PCA components: {self.num_pca_comps}') - msg.append(f'Flat hand mean: {self.flat_hand_mean}') - return '\n'.join(msg) - - def forward( - self, - betas: Optional[Tensor] = None, - global_orient: Optional[Tensor] = None, - body_pose: Optional[Tensor] = None, - left_hand_pose: Optional[Tensor] = None, - right_hand_pose: Optional[Tensor] = None, - transl: Optional[Tensor] = None, - return_verts: bool = True, - return_full_pose: bool = False, - pose2rot: bool = True, - **kwargs - ) -> SMPLHOutput: - ''' - ''' - - # If no shape and pose parameters are passed along, then use the - # ones from the module - global_orient = (global_orient if global_orient is not None else - self.global_orient) - body_pose = body_pose if body_pose is not None else self.body_pose - betas = betas if betas is not None else self.betas - left_hand_pose = (left_hand_pose if left_hand_pose is not None else - self.left_hand_pose) - right_hand_pose = (right_hand_pose if right_hand_pose is not None else - self.right_hand_pose) - - apply_trans = transl is not None or hasattr(self, 'transl') - if transl is None: - if hasattr(self, 'transl'): - transl = self.transl - - if self.use_pca: - left_hand_pose = torch.einsum( - 'bi,ij->bj', [left_hand_pose, self.left_hand_components]) - right_hand_pose = torch.einsum( - 'bi,ij->bj', [right_hand_pose, self.right_hand_components]) - - full_pose = torch.cat([global_orient, body_pose, - left_hand_pose, - right_hand_pose], dim=1) - - full_pose += self.pose_mean - - vertices, joints = lbs(betas, full_pose, self.v_template, - self.shapedirs, self.posedirs, - self.J_regressor, self.parents, - self.lbs_weights, pose2rot=pose2rot) - - # Add any extra joints that might be needed - joints = self.vertex_joint_selector(vertices, joints) - if self.joint_mapper is not None: - joints = self.joint_mapper(joints) - - if apply_trans: - joints += transl.unsqueeze(dim=1) - vertices += transl.unsqueeze(dim=1) - - output = SMPLHOutput(vertices=vertices if return_verts else None, - joints=joints, - betas=betas, - global_orient=global_orient, - body_pose=body_pose, - left_hand_pose=left_hand_pose, - right_hand_pose=right_hand_pose, - full_pose=full_pose if return_full_pose else None) - - return output - - -class SMPLHLayer(SMPLH): - - def __init__( - self, *args, **kwargs - ) -> None: - ''' SMPL+H as a layer model constructor - ''' - super(SMPLHLayer, self).__init__( - create_global_orient=False, - create_body_pose=False, - create_left_hand_pose=False, - create_right_hand_pose=False, - create_betas=False, - create_transl=False, - *args, - **kwargs) - - def forward( - self, - betas: Optional[Tensor] = None, - global_orient: 
Optional[Tensor] = None, - body_pose: Optional[Tensor] = None, - left_hand_pose: Optional[Tensor] = None, - right_hand_pose: Optional[Tensor] = None, - transl: Optional[Tensor] = None, - return_verts: bool = True, - return_full_pose: bool = False, - pose2rot: bool = True, - **kwargs - ) -> SMPLHOutput: - ''' Forward pass for the SMPL+H model - - Parameters - ---------- - global_orient: torch.tensor, optional, shape Bx3x3 - Global rotation of the body. Useful if someone wishes to - predicts this with an external model. It is expected to be in - rotation matrix format. (default=None) - betas: torch.tensor, optional, shape BxN_b - Shape parameters. For example, it can used if shape parameters - `betas` are predicted from some external model. - (default=None) - body_pose: torch.tensor, optional, shape BxJx3x3 - If given, ignore the member variable `body_pose` and use it - instead. For example, it can used if someone predicts the - pose of the body joints are predicted from some external model. - It should be a tensor that contains joint rotations in - rotation matrix format. (default=None) - left_hand_pose: torch.tensor, optional, shape Bx15x3x3 - If given, contains the pose of the left hand. - It should be a tensor that contains joint rotations in - rotation matrix format. (default=None) - right_hand_pose: torch.tensor, optional, shape Bx15x3x3 - If given, contains the pose of the right hand. - It should be a tensor that contains joint rotations in - rotation matrix format. (default=None) - transl: torch.tensor, optional, shape Bx3 - Translation vector of the body. - For example, it can used if the translation - `transl` is predicted from some external model. - (default=None) - return_verts: bool, optional - Return the vertices. (default=True) - return_full_pose: bool, optional - Returns the full axis-angle pose vector (default=False) - - Returns - ------- - ''' - model_vars = [betas, global_orient, body_pose, transl, left_hand_pose, - right_hand_pose] - batch_size = 1 - for var in model_vars: - if var is None: - continue - batch_size = max(batch_size, len(var)) - device, dtype = self.shapedirs.device, self.shapedirs.dtype - if global_orient is None: - global_orient = torch.eye(3, device=device, dtype=dtype).view( - 1, 1, 3, 3).expand(batch_size, -1, -1, -1).contiguous() - if body_pose is None: - body_pose = torch.eye(3, device=device, dtype=dtype).view( - 1, 1, 3, 3).expand(batch_size, 21, -1, -1).contiguous() - if left_hand_pose is None: - left_hand_pose = torch.eye(3, device=device, dtype=dtype).view( - 1, 1, 3, 3).expand(batch_size, 15, -1, -1).contiguous() - if right_hand_pose is None: - right_hand_pose = torch.eye(3, device=device, dtype=dtype).view( - 1, 1, 3, 3).expand(batch_size, 15, -1, -1).contiguous() - if betas is None: - betas = torch.zeros([batch_size, self.num_betas], - dtype=dtype, device=device) - if transl is None: - transl = torch.zeros([batch_size, 3], dtype=dtype, device=device) - - # Concatenate all pose vectors - full_pose = torch.cat( - [global_orient.reshape(-1, 1, 3, 3), - body_pose.reshape(-1, self.NUM_BODY_JOINTS, 3, 3), - left_hand_pose.reshape(-1, self.NUM_HAND_JOINTS, 3, 3), - right_hand_pose.reshape(-1, self.NUM_HAND_JOINTS, 3, 3)], - dim=1) - - vertices, joints = lbs(betas, full_pose, self.v_template, - self.shapedirs, self.posedirs, - self.J_regressor, self.parents, - self.lbs_weights, pose2rot=False) - - # Add any extra joints that might be needed - joints = self.vertex_joint_selector(vertices, joints) - if self.joint_mapper is not None: - joints = 
self.joint_mapper(joints) - - if transl is not None: - joints += transl.unsqueeze(dim=1) - vertices += transl.unsqueeze(dim=1) - - output = SMPLHOutput(vertices=vertices if return_verts else None, - joints=joints, - betas=betas, - global_orient=global_orient, - body_pose=body_pose, - left_hand_pose=left_hand_pose, - right_hand_pose=right_hand_pose, - full_pose=full_pose if return_full_pose else None) - - return output - - -class SMPLX(SMPLH): - ''' - SMPL-X (SMPL eXpressive) is a unified body model, with shape parameters - trained jointly for the face, hands and body. - SMPL-X uses standard vertex based linear blend skinning with learned - corrective blend shapes, has N=10475 vertices and K=54 joints, - which includes joints for the neck, jaw, eyeballs and fingers. - ''' - - NUM_BODY_JOINTS = SMPLH.NUM_BODY_JOINTS # 21 - NUM_HAND_JOINTS = 15 - NUM_FACE_JOINTS = 3 - NUM_JOINTS = NUM_BODY_JOINTS + 2 * NUM_HAND_JOINTS + NUM_FACE_JOINTS - EXPRESSION_SPACE_DIM = 100 - NECK_IDX = 12 - - def __init__( - self, model_path: str, - kid_template_path: str = '', - num_expression_coeffs: int = 10, - create_expression: bool = True, - expression: Optional[Tensor] = None, - create_jaw_pose: bool = True, - jaw_pose: Optional[Tensor] = None, - create_leye_pose: bool = True, - leye_pose: Optional[Tensor] = None, - create_reye_pose=True, - reye_pose: Optional[Tensor] = None, - use_face_contour: bool = False, - batch_size: int = 1, - gender: str = 'neutral', - age: str = 'adult', - dtype=torch.float32, - ext: str = 'npz', - **kwargs - ) -> None: - ''' SMPLX model constructor - - Parameters - ---------- - model_path: str - The path to the folder or to the file where the model - parameters are stored - num_expression_coeffs: int, optional - Number of expression components to use - (default = 10). - create_expression: bool, optional - Flag for creating a member variable for the expression space - (default = True). - expression: torch.tensor, optional, Bx10 - The default value for the expression member variable. - (default = None) - create_jaw_pose: bool, optional - Flag for creating a member variable for the jaw pose. - (default = False) - jaw_pose: torch.tensor, optional, Bx3 - The default value for the jaw pose variable. - (default = None) - create_leye_pose: bool, optional - Flag for creating a member variable for the left eye pose. - (default = False) - leye_pose: torch.tensor, optional, Bx10 - The default value for the left eye pose variable. - (default = None) - create_reye_pose: bool, optional - Flag for creating a member variable for the right eye pose. - (default = False) - reye_pose: torch.tensor, optional, Bx10 - The default value for the right eye pose variable. 
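One behavioral detail of SMPLHLayer.forward() above that is easy to miss: every pose left as None is replaced by identity rotation matrices of the right multiplicity (1 global, 21 body, 15 per hand), so calling the layer with no arguments should reproduce the zero-shape rest pose. A quick self-check, hedged on model availability and upstream parity:

import torch
from smplx import SMPLHLayer

layer = SMPLHLayer(model_path='models/smplh', ext='npz')    # illustrative
out_default = layer()                                       # everything defaulted
out_explicit = layer(body_pose=torch.eye(3).expand(1, 21, 3, 3))
assert torch.allclose(out_default.vertices, out_explicit.vertices)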
- (default = None) - use_face_contour: bool, optional - Whether to compute the keypoints that form the facial contour - batch_size: int, optional - The batch size used for creating the member variables - gender: str, optional - Which gender to load - dtype: torch.dtype - The data type for the created variables - ''' - - # Load the model - if osp.isdir(model_path): - model_fn = 'SMPLX_{}.{ext}'.format(gender.upper(), ext=ext) - smplx_path = os.path.join(model_path, model_fn) - else: - smplx_path = model_path - assert osp.exists(smplx_path), 'Path {} does not exist!'.format( - smplx_path) - - if ext == 'pkl': - with open(smplx_path, 'rb') as smplx_file: - model_data = pickle.load(smplx_file, encoding='latin1') - elif ext == 'npz': - model_data = np.load(smplx_path, allow_pickle=True) - else: - raise ValueError('Unknown extension: {}'.format(ext)) - - # print(colored(f"Use SMPL-X: {smplx_path}", "green")) - - data_struct = Struct(**model_data) - - super(SMPLX, self).__init__( - model_path=model_path, - kid_template_path=kid_template_path, - data_struct=data_struct, - dtype=dtype, - batch_size=batch_size, - vertex_ids=VERTEX_IDS['smplx'], - gender=gender, age=age, ext=ext, - **kwargs) - - lmk_faces_idx = data_struct.lmk_faces_idx - self.register_buffer('lmk_faces_idx', - torch.tensor(lmk_faces_idx, dtype=torch.long)) - lmk_bary_coords = data_struct.lmk_bary_coords - self.register_buffer('lmk_bary_coords', - torch.tensor(lmk_bary_coords, dtype=dtype)) - - self.use_face_contour = use_face_contour - if self.use_face_contour: - dynamic_lmk_faces_idx = data_struct.dynamic_lmk_faces_idx - dynamic_lmk_faces_idx = torch.tensor( - dynamic_lmk_faces_idx, - dtype=torch.long) - self.register_buffer('dynamic_lmk_faces_idx', - dynamic_lmk_faces_idx) - - dynamic_lmk_bary_coords = data_struct.dynamic_lmk_bary_coords - dynamic_lmk_bary_coords = torch.tensor( - dynamic_lmk_bary_coords, dtype=dtype) - self.register_buffer('dynamic_lmk_bary_coords', - dynamic_lmk_bary_coords) - - neck_kin_chain = find_joint_kin_chain(self.NECK_IDX, self.parents) - self.register_buffer( - 'neck_kin_chain', - torch.tensor(neck_kin_chain, dtype=torch.long)) - - if create_jaw_pose: - if jaw_pose is None: - default_jaw_pose = torch.zeros([batch_size, 3], dtype=dtype) - else: - default_jaw_pose = torch.tensor(jaw_pose, dtype=dtype) - jaw_pose_param = nn.Parameter(default_jaw_pose, - requires_grad=True) - self.register_parameter('jaw_pose', jaw_pose_param) - - if create_leye_pose: - if leye_pose is None: - default_leye_pose = torch.zeros([batch_size, 3], dtype=dtype) - else: - default_leye_pose = torch.tensor(leye_pose, dtype=dtype) - leye_pose_param = nn.Parameter(default_leye_pose, - requires_grad=True) - self.register_parameter('leye_pose', leye_pose_param) - - if create_reye_pose: - if reye_pose is None: - default_reye_pose = torch.zeros([batch_size, 3], dtype=dtype) - else: - default_reye_pose = torch.tensor(reye_pose, dtype=dtype) - reye_pose_param = nn.Parameter(default_reye_pose, - requires_grad=True) - self.register_parameter('reye_pose', reye_pose_param) - - shapedirs = data_struct.shapedirs - if len(shapedirs.shape) < 3: - shapedirs = shapedirs[:, :, None] - if (shapedirs.shape[-1] < self.SHAPE_SPACE_DIM + - self.EXPRESSION_SPACE_DIM): - # print(f'WARNING: You are using a {self.name()} model, with only' - # ' 10 shape and 10 expression coefficients.') - expr_start_idx = 10 - expr_end_idx = 20 - num_expression_coeffs = min(num_expression_coeffs, 10) - else: - expr_start_idx = self.SHAPE_SPACE_DIM - expr_end_idx = 
self.SHAPE_SPACE_DIM + num_expression_coeffs - num_expression_coeffs = min( - num_expression_coeffs, self.EXPRESSION_SPACE_DIM) - - self._num_expression_coeffs = num_expression_coeffs - - expr_dirs = shapedirs[:, :, expr_start_idx:expr_end_idx] - self.register_buffer( - 'expr_dirs', to_tensor(to_np(expr_dirs), dtype=dtype)) - - if create_expression: - if expression is None: - default_expression = torch.zeros( - [batch_size, self.num_expression_coeffs], dtype=dtype) - else: - default_expression = torch.tensor(expression, dtype=dtype) - expression_param = nn.Parameter(default_expression, - requires_grad=True) - self.register_parameter('expression', expression_param) - - def name(self) -> str: - return 'SMPL-X' - - @property - def num_expression_coeffs(self): - return self._num_expression_coeffs - - def create_mean_pose(self, data_struct, flat_hand_mean=False): - # Create the array for the mean pose. If flat_hand is false, then use - # the mean that is given by the data, rather than the flat open hand - global_orient_mean = torch.zeros([3], dtype=self.dtype) - body_pose_mean = torch.zeros([self.NUM_BODY_JOINTS * 3], - dtype=self.dtype) - jaw_pose_mean = torch.zeros([3], dtype=self.dtype) - leye_pose_mean = torch.zeros([3], dtype=self.dtype) - reye_pose_mean = torch.zeros([3], dtype=self.dtype) - - pose_mean = np.concatenate([global_orient_mean, body_pose_mean, - jaw_pose_mean, - leye_pose_mean, reye_pose_mean, - self.left_hand_mean, self.right_hand_mean], - axis=0) - - return pose_mean - - def extra_repr(self): - msg = super(SMPLX, self).extra_repr() - msg = [ - msg, - f'Number of Expression Coefficients: {self.num_expression_coeffs}' - ] - return '\n'.join(msg) - - def forward( - self, - betas: Optional[Tensor] = None, - global_orient: Optional[Tensor] = None, - body_pose: Optional[Tensor] = None, - left_hand_pose: Optional[Tensor] = None, - right_hand_pose: Optional[Tensor] = None, - transl: Optional[Tensor] = None, - expression: Optional[Tensor] = None, - jaw_pose: Optional[Tensor] = None, - leye_pose: Optional[Tensor] = None, - reye_pose: Optional[Tensor] = None, - return_verts: bool = True, - return_full_pose: bool = False, - pose2rot: bool = True, - return_joint_transformation: bool = False, - return_vertex_transformation: bool = False, - **kwargs - ) -> SMPLXOutput: - ''' - Forward pass for the SMPLX model - - Parameters - ---------- - global_orient: torch.tensor, optional, shape Bx3 - If given, ignore the member variable and use it as the global - rotation of the body. Useful if someone wishes to predicts this - with an external model. (default=None) - betas: torch.tensor, optional, shape BxN_b - If given, ignore the member variable `betas` and use it - instead. For example, it can used if shape parameters - `betas` are predicted from some external model. - (default=None) - expression: torch.tensor, optional, shape BxN_e - If given, ignore the member variable `expression` and use it - instead. For example, it can used if expression parameters - `expression` are predicted from some external model. - body_pose: torch.tensor, optional, shape Bx(J*3) - If given, ignore the member variable `body_pose` and use it - instead. For example, it can used if someone predicts the - pose of the body joints are predicted from some external model. - It should be a tensor that contains joint rotations in - axis-angle format. (default=None) - left_hand_pose: torch.tensor, optional, shape BxP - If given, ignore the member variable `left_hand_pose` and - use this instead. 
It should either contain PCA coefficients or - joint rotations in axis-angle format. - right_hand_pose: torch.tensor, optional, shape BxP - If given, ignore the member variable `right_hand_pose` and - use this instead. It should either contain PCA coefficients or - joint rotations in axis-angle format. - jaw_pose: torch.tensor, optional, shape Bx3 - If given, ignore the member variable `jaw_pose` and - use this instead. It should either joint rotations in - axis-angle format. - transl: torch.tensor, optional, shape Bx3 - If given, ignore the member variable `transl` and use it - instead. For example, it can used if the translation - `transl` is predicted from some external model. - (default=None) - return_verts: bool, optional - Return the vertices. (default=True) - return_full_pose: bool, optional - Returns the full axis-angle pose vector (default=False) - - Returns - ------- - output: ModelOutput - A named tuple of type `ModelOutput` - ''' - - # If no shape and pose parameters are passed along, then use the - # ones from the module - global_orient = (global_orient if global_orient is not None else - self.global_orient) - body_pose = body_pose if body_pose is not None else self.body_pose - betas = betas if betas is not None else self.betas - - left_hand_pose = (left_hand_pose if left_hand_pose is not None else - self.left_hand_pose) - right_hand_pose = (right_hand_pose if right_hand_pose is not None else - self.right_hand_pose) - jaw_pose = jaw_pose if jaw_pose is not None else self.jaw_pose - leye_pose = leye_pose if leye_pose is not None else self.leye_pose - reye_pose = reye_pose if reye_pose is not None else self.reye_pose - expression = expression if expression is not None else self.expression - - apply_trans = transl is not None or hasattr(self, 'transl') - if transl is None: - if hasattr(self, 'transl'): - transl = self.transl - - if self.use_pca: - left_hand_pose = torch.einsum('bi,ij->bj', [left_hand_pose, self.left_hand_components]) - right_hand_pose = torch.einsum( - 'bi,ij->bj', [right_hand_pose, self.right_hand_components]) - - full_pose = torch.cat([global_orient, body_pose, - jaw_pose, leye_pose, reye_pose, - left_hand_pose, - right_hand_pose], dim=1) - - # Add the mean pose of the model. 
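To make the concatenation above concrete, the SMPL-X full_pose stacks 55 joints times 3 axis-angle values; pose_mean has the same 165-dim layout and, unless flat_hand_mean was set, is non-zero only in the hand segments:

# full_pose column layout (B x 165), in the order of the torch.cat above:
#   [  0:  3]  global_orient       1 joint
#   [  3: 66]  body_pose          21 joints
#   [ 66: 69]  jaw_pose            1 joint
#   [ 69: 72]  leye_pose           1 joint
#   [ 72: 75]  reye_pose           1 joint
#   [ 75:120]  left_hand_pose     15 joints
#   [120:165]  right_hand_pose    15 joints
# 55 joints * 3 = 165 values; adding pose_mean leaves the body block untouched.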
Does not affect the body, only the - # hands when flat_hand_mean == False - full_pose += self.pose_mean - - batch_size = max(betas.shape[0], global_orient.shape[0], - body_pose.shape[0]) - # Concatenate the shape and expression coefficients - scale = int(batch_size / betas.shape[0]) - if scale > 1: - betas = betas.expand(scale, -1) - shape_components = torch.cat([betas, expression], dim=-1) - - shapedirs = torch.cat([self.shapedirs, self.expr_dirs], dim=-1) - - if return_joint_transformation or return_vertex_transformation: - vertices, joints, joint_transformation, vertex_transformation = lbs(shape_components, full_pose, self.v_template, - shapedirs, self.posedirs, - self.J_regressor, self.parents, - self.lbs_weights, pose2rot=pose2rot, return_transformation=True - ) - else: - vertices, joints = lbs(shape_components, full_pose, self.v_template, - shapedirs, self.posedirs, - self.J_regressor, self.parents, - self.lbs_weights, pose2rot=pose2rot, - ) - - lmk_faces_idx = self.lmk_faces_idx.unsqueeze( - dim=0).expand(batch_size, -1).contiguous() - lmk_bary_coords = self.lmk_bary_coords.unsqueeze(dim=0).repeat( - self.batch_size, 1, 1) - if self.use_face_contour: - lmk_idx_and_bcoords = find_dynamic_lmk_idx_and_bcoords( - vertices, full_pose, self.dynamic_lmk_faces_idx, - self.dynamic_lmk_bary_coords, - self.neck_kin_chain, - pose2rot=True, - ) - dyn_lmk_faces_idx, dyn_lmk_bary_coords = lmk_idx_and_bcoords - - lmk_faces_idx = torch.cat([lmk_faces_idx, - dyn_lmk_faces_idx], 1) - lmk_bary_coords = torch.cat( - [lmk_bary_coords.expand(batch_size, -1, -1), - dyn_lmk_bary_coords], 1) - - landmarks = vertices2landmarks(vertices, self.faces_tensor, - lmk_faces_idx, - lmk_bary_coords) - - # Add any extra joints that might be needed - joints = self.vertex_joint_selector(vertices, joints) - # Add the landmarks to the joints - joints = torch.cat([joints, landmarks], dim=1) - # Map the joints to the current dataset - - if self.joint_mapper is not None: - joints = self.joint_mapper(joints=joints, vertices=vertices) - - if apply_trans: - joints += transl.unsqueeze(dim=1) - vertices += transl.unsqueeze(dim=1) - - output = SMPLXOutput(vertices=vertices if return_verts else None, - joints=joints, - betas=betas, - expression=expression, - global_orient=global_orient, - body_pose=body_pose, - left_hand_pose=left_hand_pose, - right_hand_pose=right_hand_pose, - jaw_pose=jaw_pose, - full_pose=full_pose if return_full_pose else None, - joint_transformation=joint_transformation if return_joint_transformation else None, - vertex_transformation=vertex_transformation if return_vertex_transformation else None) - return output - - -class SMPLXLayer(SMPLX): - def __init__( - self, - *args, - **kwargs - ) -> None: - # Just create a SMPLX module without any member variables - super(SMPLXLayer, self).__init__( - create_global_orient=False, - create_body_pose=False, - create_left_hand_pose=False, - create_right_hand_pose=False, - create_jaw_pose=False, - create_leye_pose=False, - create_reye_pose=False, - create_betas=False, - create_expression=False, - create_transl=False, - *args, **kwargs, - ) - - def forward( - self, - betas: Optional[Tensor] = None, - global_orient: Optional[Tensor] = None, - body_pose: Optional[Tensor] = None, - left_hand_pose: Optional[Tensor] = None, - right_hand_pose: Optional[Tensor] = None, - transl: Optional[Tensor] = None, - expression: Optional[Tensor] = None, - jaw_pose: Optional[Tensor] = None, - leye_pose: Optional[Tensor] = None, - reye_pose: Optional[Tensor] = None, - return_verts: bool = 
True, - return_full_pose: bool = False, - **kwargs - ) -> SMPLXOutput: - ''' - Forward pass for the SMPLX model - - Parameters - ---------- - global_orient: torch.tensor, optional, shape Bx3x3 - If given, ignore the member variable and use it as the global - rotation of the body. Useful if someone wishes to predicts this - with an external model. It is expected to be in rotation matrix - format. (default=None) - betas: torch.tensor, optional, shape BxN_b - If given, ignore the member variable `betas` and use it - instead. For example, it can used if shape parameters - `betas` are predicted from some external model. - (default=None) - expression: torch.tensor, optional, shape BxN_e - Expression coefficients. - For example, it can used if expression parameters - `expression` are predicted from some external model. - body_pose: torch.tensor, optional, shape BxJx3x3 - If given, ignore the member variable `body_pose` and use it - instead. For example, it can used if someone predicts the - pose of the body joints are predicted from some external model. - It should be a tensor that contains joint rotations in - rotation matrix format. (default=None) - left_hand_pose: torch.tensor, optional, shape Bx15x3x3 - If given, contains the pose of the left hand. - It should be a tensor that contains joint rotations in - rotation matrix format. (default=None) - right_hand_pose: torch.tensor, optional, shape Bx15x3x3 - If given, contains the pose of the right hand. - It should be a tensor that contains joint rotations in - rotation matrix format. (default=None) - jaw_pose: torch.tensor, optional, shape Bx3x3 - Jaw pose. It should either joint rotations in - rotation matrix format. - transl: torch.tensor, optional, shape Bx3 - Translation vector of the body. - For example, it can used if the translation - `transl` is predicted from some external model. - (default=None) - return_verts: bool, optional - Return the vertices. 
(default=True) - return_full_pose: bool, optional - Returns the full pose vector (default=False) - Returns - ------- - output: ModelOutput - A data class that contains the posed vertices and joints - ''' - device, dtype = self.shapedirs.device, self.shapedirs.dtype - - model_vars = [betas, global_orient, body_pose, transl, - expression, left_hand_pose, right_hand_pose, jaw_pose] - batch_size = 1 - for var in model_vars: - if var is None: - continue - batch_size = max(batch_size, len(var)) - - if global_orient is None: - global_orient = torch.eye(3, device=device, dtype=dtype).view( - 1, 1, 3, 3).expand(batch_size, -1, -1, -1).contiguous() - if body_pose is None: - body_pose = torch.eye(3, device=device, dtype=dtype).view( - 1, 1, 3, 3).expand( - batch_size, self.NUM_BODY_JOINTS, -1, -1).contiguous() - if left_hand_pose is None: - left_hand_pose = torch.eye(3, device=device, dtype=dtype).view( - 1, 1, 3, 3).expand(batch_size, 15, -1, -1).contiguous() - if right_hand_pose is None: - right_hand_pose = torch.eye(3, device=device, dtype=dtype).view( - 1, 1, 3, 3).expand(batch_size, 15, -1, -1).contiguous() - if jaw_pose is None: - jaw_pose = torch.eye(3, device=device, dtype=dtype).view( - 1, 1, 3, 3).expand(batch_size, -1, -1, -1).contiguous() - if leye_pose is None: - leye_pose = torch.eye(3, device=device, dtype=dtype).view( - 1, 1, 3, 3).expand(batch_size, -1, -1, -1).contiguous() - if reye_pose is None: - reye_pose = torch.eye(3, device=device, dtype=dtype).view( - 1, 1, 3, 3).expand(batch_size, -1, -1, -1).contiguous() - if expression is None: - expression = torch.zeros([batch_size, self.num_expression_coeffs], - dtype=dtype, device=device) - if betas is None: - betas = torch.zeros([batch_size, self.num_betas], - dtype=dtype, device=device) - if transl is None: - transl = torch.zeros([batch_size, 3], dtype=dtype, device=device) - - # Concatenate all pose vectors - full_pose = torch.cat( - [global_orient.reshape(-1, 1, 3, 3), - body_pose.reshape(-1, self.NUM_BODY_JOINTS, 3, 3), - jaw_pose.reshape(-1, 1, 3, 3), - leye_pose.reshape(-1, 1, 3, 3), - reye_pose.reshape(-1, 1, 3, 3), - left_hand_pose.reshape(-1, self.NUM_HAND_JOINTS, 3, 3), - right_hand_pose.reshape(-1, self.NUM_HAND_JOINTS, 3, 3)], - dim=1) - shape_components = torch.cat([betas, expression], dim=-1) - - shapedirs = torch.cat([self.shapedirs, self.expr_dirs], dim=-1) - - vertices, joints = lbs(shape_components, full_pose, self.v_template, - shapedirs, self.posedirs, - self.J_regressor, self.parents, - self.lbs_weights, - pose2rot=False, - ) - - lmk_faces_idx = self.lmk_faces_idx.unsqueeze( - dim=0).expand(batch_size, -1).contiguous() - lmk_bary_coords = self.lmk_bary_coords.unsqueeze(dim=0).repeat( - batch_size, 1, 1) - if self.use_face_contour: - lmk_idx_and_bcoords = find_dynamic_lmk_idx_and_bcoords( - vertices, full_pose, - self.dynamic_lmk_faces_idx, - self.dynamic_lmk_bary_coords, - self.neck_kin_chain, - pose2rot=False, - ) - dyn_lmk_faces_idx, dyn_lmk_bary_coords = lmk_idx_and_bcoords - - lmk_faces_idx = torch.cat([lmk_faces_idx, dyn_lmk_faces_idx], 1) - lmk_bary_coords = torch.cat( - [lmk_bary_coords.expand(batch_size, -1, -1), - dyn_lmk_bary_coords], 1) - - landmarks = vertices2landmarks(vertices, self.faces_tensor, - lmk_faces_idx, - lmk_bary_coords) - - # Add any extra joints that might be needed - joints = self.vertex_joint_selector(vertices, joints) - # Add the landmarks to the joints - joints = torch.cat([joints, landmarks], dim=1) - # Map the joints to the current dataset - - if self.joint_mapper is not None: - 
joints = self.joint_mapper(joints=joints, vertices=vertices) - - if transl is not None: - joints += transl.unsqueeze(dim=1) - vertices += transl.unsqueeze(dim=1) - - output = SMPLXOutput(vertices=vertices if return_verts else None, - joints=joints, - betas=betas, - expression=expression, - global_orient=global_orient, - body_pose=body_pose, - left_hand_pose=left_hand_pose, - right_hand_pose=right_hand_pose, - jaw_pose=jaw_pose, - transl=transl, - full_pose=full_pose if return_full_pose else None) - return output - - -class MANO(SMPL): - # The hand joints are replaced by MANO - NUM_BODY_JOINTS = 1 - NUM_HAND_JOINTS = 15 - NUM_JOINTS = NUM_BODY_JOINTS + NUM_HAND_JOINTS - - def __init__( - self, - model_path: str, - is_rhand: bool = True, - data_struct: Optional[Struct] = None, - create_hand_pose: bool = True, - hand_pose: Optional[Tensor] = None, - use_pca: bool = True, - num_pca_comps: int = 6, - flat_hand_mean: bool = False, - batch_size: int = 1, - dtype=torch.float32, - vertex_ids=None, - use_compressed: bool = True, - ext: str = 'pkl', - **kwargs - ) -> None: - ''' MANO model constructor - - Parameters - ---------- - model_path: str - The path to the folder or to the file where the model - parameters are stored - data_struct: Strct - A struct object. If given, then the parameters of the model are - read from the object. Otherwise, the model tries to read the - parameters from the given `model_path`. (default = None) - create_hand_pose: bool, optional - Flag for creating a member variable for the pose of the right - hand. (default = True) - hand_pose: torch.tensor, optional, BxP - The default value for the right hand pose member variable. - (default = None) - num_pca_comps: int, optional - The number of PCA components to use for each hand. - (default = 6) - flat_hand_mean: bool, optional - If False, then the pose of the hand is initialized to False. 
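SMPLXLayer.forward(), completed above, follows the same all-explicit convention, with expression coefficients joining betas in the shape vector and facial landmarks appended to the returned joints. A sketch, with the usual caveat that the paths and the upstream package are assumptions:

import torch
from smplx import SMPLXLayer

layer = SMPLXLayer(model_path='models/smplx', use_face_contour=False)  # illustrative
B = 2
out = layer(betas=torch.zeros(B, layer.num_betas),
            expression=torch.zeros(B, layer.num_expression_coeffs))
print(out.vertices.shape)   # (2, 10475, 3); out.joints includes the landmark rows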
- batch_size: int, optional - The batch size used for creating the member variables - dtype: torch.dtype, optional - The data type for the created variables - vertex_ids: dict, optional - A dictionary containing the indices of the extra vertices that - will be selected - ''' - - self.num_pca_comps = num_pca_comps - self.is_rhand = is_rhand - # If no data structure is passed, then load the data from the given - # model folder - if data_struct is None: - # Load the model - if osp.isdir(model_path): - model_fn = 'MANO_{}.{ext}'.format( - 'RIGHT' if is_rhand else 'LEFT', ext=ext) - mano_path = os.path.join(model_path, model_fn) - else: - mano_path = model_path - self.is_rhand = True if 'RIGHT' in os.path.basename( - model_path) else False - assert osp.exists(mano_path), 'Path {} does not exist!'.format( - mano_path) - - if ext == 'pkl': - with open(mano_path, 'rb') as mano_file: - model_data = pickle.load(mano_file, encoding='latin1') - elif ext == 'npz': - model_data = np.load(mano_path, allow_pickle=True) - else: - raise ValueError('Unknown extension: {}'.format(ext)) - data_struct = Struct(**model_data) - - if vertex_ids is None: - vertex_ids = VERTEX_IDS['smplh'] - - super(MANO, self).__init__( - model_path=model_path, data_struct=data_struct, - batch_size=batch_size, vertex_ids=vertex_ids, - use_compressed=use_compressed, dtype=dtype, ext=ext, **kwargs) - - # add only MANO tips to the extra joints - self.vertex_joint_selector.extra_joints_idxs = to_tensor( - list(VERTEX_IDS['mano'].values()), dtype=torch.long) - - self.use_pca = use_pca - self.num_pca_comps = num_pca_comps - if self.num_pca_comps == 45: - self.use_pca = False - self.flat_hand_mean = flat_hand_mean - - hand_components = data_struct.hands_components[:num_pca_comps] - - self.np_hand_components = hand_components - - if self.use_pca: - self.register_buffer( - 'hand_components', - torch.tensor(hand_components, dtype=dtype)) - - if self.flat_hand_mean: - hand_mean = np.zeros_like(data_struct.hands_mean) - else: - hand_mean = data_struct.hands_mean - - self.register_buffer('hand_mean', - to_tensor(hand_mean, dtype=self.dtype)) - - # Create the buffers for the pose of the left hand - hand_pose_dim = num_pca_comps if use_pca else 3 * self.NUM_HAND_JOINTS - if create_hand_pose: - if hand_pose is None: - default_hand_pose = torch.zeros([batch_size, hand_pose_dim], - dtype=dtype) - else: - default_hand_pose = torch.tensor(hand_pose, dtype=dtype) - - hand_pose_param = nn.Parameter(default_hand_pose, - requires_grad=True) - self.register_parameter('hand_pose', - hand_pose_param) - - # Create the buffer for the mean pose. - pose_mean = self.create_mean_pose( - data_struct, flat_hand_mean=flat_hand_mean) - pose_mean_tensor = pose_mean.clone().to(dtype) - # pose_mean_tensor = torch.tensor(pose_mean, dtype=dtype) - self.register_buffer('pose_mean', pose_mean_tensor) - - def name(self) -> str: - return 'MANO' - - def create_mean_pose(self, data_struct, flat_hand_mean=False): - # Create the array for the mean pose. 
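A subtlety in the MANO constructor above: passing num_pca_comps=45 silently disables PCA (self.use_pca is forced to False), after which hand_pose is consumed as raw 45-D axis-angle rather than PCA coefficients. Sketch, same assumptions about model files:

import torch
from smplx import MANO

mano = MANO(model_path='models/mano', is_rhand=True,
            num_pca_comps=45)               # 45 components => use_pca forced off
pose = 0.1 * torch.randn(1, 45)             # interpreted directly as axis-angle
out = mano(hand_pose=pose)
print(out.vertices.shape)                   # (1, 778, 3) for MANO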
If flat_hand is false, then use - # the mean that is given by the data, rather than the flat open hand - global_orient_mean = torch.zeros([3], dtype=self.dtype) - pose_mean = torch.cat([global_orient_mean, self.hand_mean], dim=0) - return pose_mean - - def extra_repr(self): - msg = [super(MANO, self).extra_repr()] - if self.use_pca: - msg.append(f'Number of PCA components: {self.num_pca_comps}') - msg.append(f'Flat hand mean: {self.flat_hand_mean}') - return '\n'.join(msg) - - def forward( - self, - betas: Optional[Tensor] = None, - global_orient: Optional[Tensor] = None, - hand_pose: Optional[Tensor] = None, - transl: Optional[Tensor] = None, - return_verts: bool = True, - return_full_pose: bool = False, - **kwargs - ) -> MANOOutput: - ''' Forward pass for the MANO model - ''' - # If no shape and pose parameters are passed along, then use the - # ones from the module - global_orient = (global_orient if global_orient is not None else - self.global_orient) - betas = betas if betas is not None else self.betas - hand_pose = (hand_pose if hand_pose is not None else - self.hand_pose) - - apply_trans = transl is not None or hasattr(self, 'transl') - if transl is None: - if hasattr(self, 'transl'): - transl = self.transl - - if self.use_pca: - hand_pose = torch.einsum( - 'bi,ij->bj', [hand_pose, self.hand_components]) - - full_pose = torch.cat([global_orient, hand_pose], dim=1) - full_pose += self.pose_mean - - vertices, joints = lbs(betas, full_pose, self.v_template, - self.shapedirs, self.posedirs, - self.J_regressor, self.parents, - self.lbs_weights, pose2rot=True, - ) - - # # Add pre-selected extra joints that might be needed - # joints = self.vertex_joint_selector(vertices, joints) - - if self.joint_mapper is not None: - joints = self.joint_mapper(joints) - - if apply_trans: - joints = joints + transl.unsqueeze(dim=1) - vertices = vertices + transl.unsqueeze(dim=1) - - output = MANOOutput(vertices=vertices if return_verts else None, - joints=joints if return_verts else None, - betas=betas, - global_orient=global_orient, - hand_pose=hand_pose, - full_pose=full_pose if return_full_pose else None) - - return output - - -class MANOLayer(MANO): - def __init__(self, *args, **kwargs) -> None: - ''' MANO as a layer model constructor - ''' - super(MANOLayer, self).__init__( - create_global_orient=False, - create_hand_pose=False, - create_betas=False, - create_transl=False, - *args, **kwargs) - - def name(self) -> str: - return 'MANO' - - def forward( - self, - betas: Optional[Tensor] = None, - global_orient: Optional[Tensor] = None, - hand_pose: Optional[Tensor] = None, - transl: Optional[Tensor] = None, - return_verts: bool = True, - return_full_pose: bool = False, - **kwargs - ) -> MANOOutput: - ''' Forward pass for the MANO model - ''' - device, dtype = self.shapedirs.device, self.shapedirs.dtype - if global_orient is None: - batch_size = 1 - global_orient = torch.eye(3, device=device, dtype=dtype).view( - 1, 1, 3, 3).expand(batch_size, -1, -1, -1).contiguous() - else: - batch_size = global_orient.shape[0] - if hand_pose is None: - hand_pose = torch.eye(3, device=device, dtype=dtype).view( - 1, 1, 3, 3).expand(batch_size, 15, -1, -1).contiguous() - if betas is None: - betas = torch.zeros( - [batch_size, self.num_betas], dtype=dtype, device=device) - if transl is None: - transl = torch.zeros([batch_size, 3], dtype=dtype, device=device) - - full_pose = torch.cat([global_orient, hand_pose], dim=1) - vertices, joints = lbs(betas, full_pose, self.v_template, - self.shapedirs, self.posedirs, - 
self.J_regressor, self.parents, - self.lbs_weights, pose2rot=False) - - if self.joint_mapper is not None: - joints = self.joint_mapper(joints) - - if transl is not None: - joints = joints + transl.unsqueeze(dim=1) - vertices = vertices + transl.unsqueeze(dim=1) - - output = MANOOutput( - vertices=vertices if return_verts else None, - joints=joints if return_verts else None, - betas=betas, - global_orient=global_orient, - hand_pose=hand_pose, - full_pose=full_pose if return_full_pose else None) - - return output - - -class FLAME(SMPL): - NUM_JOINTS = 5 - SHAPE_SPACE_DIM = 300 - EXPRESSION_SPACE_DIM = 100 - NECK_IDX = 0 - - def __init__( - self, - model_path: str, - data_struct=None, - num_expression_coeffs=10, - create_expression: bool = True, - expression: Optional[Tensor] = None, - create_neck_pose: bool = True, - neck_pose: Optional[Tensor] = None, - create_jaw_pose: bool = True, - jaw_pose: Optional[Tensor] = None, - create_leye_pose: bool = True, - leye_pose: Optional[Tensor] = None, - create_reye_pose=True, - reye_pose: Optional[Tensor] = None, - use_face_contour=False, - batch_size: int = 1, - gender: str = 'neutral', - dtype: torch.dtype = torch.float32, - ext='pkl', - **kwargs - ) -> None: - ''' FLAME model constructor - - Parameters - ---------- - model_path: str - The path to the folder or to the file where the model - parameters are stored - num_expression_coeffs: int, optional - Number of expression components to use - (default = 10). - create_expression: bool, optional - Flag for creating a member variable for the expression space - (default = True). - expression: torch.tensor, optional, Bx10 - The default value for the expression member variable. - (default = None) - create_neck_pose: bool, optional - Flag for creating a member variable for the neck pose. - (default = False) - neck_pose: torch.tensor, optional, Bx3 - The default value for the neck pose variable. - (default = None) - create_jaw_pose: bool, optional - Flag for creating a member variable for the jaw pose. - (default = False) - jaw_pose: torch.tensor, optional, Bx3 - The default value for the jaw pose variable. - (default = None) - create_leye_pose: bool, optional - Flag for creating a member variable for the left eye pose. - (default = False) - leye_pose: torch.tensor, optional, Bx10 - The default value for the left eye pose variable. - (default = None) - create_reye_pose: bool, optional - Flag for creating a member variable for the right eye pose. - (default = False) - reye_pose: torch.tensor, optional, Bx10 - The default value for the right eye pose variable. 
- (default = None) - use_face_contour: bool, optional - Whether to compute the keypoints that form the facial contour - batch_size: int, optional - The batch size used for creating the member variables - gender: str, optional - Which gender to load - dtype: torch.dtype - The data type for the created variables - ''' - model_fn = f'FLAME_{gender.upper()}.{ext}' - flame_path = os.path.join(model_path, model_fn) - assert osp.exists(flame_path), 'Path {} does not exist!'.format( - flame_path) - if ext == 'npz': - file_data = np.load(flame_path, allow_pickle=True) - elif ext == 'pkl': - with open(flame_path, 'rb') as smpl_file: - file_data = pickle.load(smpl_file, encoding='latin1') - else: - raise ValueError('Unknown extension: {}'.format(ext)) - data_struct = Struct(**file_data) - - super(FLAME, self).__init__( - model_path=model_path, - data_struct=data_struct, - dtype=dtype, - batch_size=batch_size, - gender=gender, - ext=ext, - **kwargs) - - self.use_face_contour = use_face_contour - - self.vertex_joint_selector.extra_joints_idxs = to_tensor( - [], dtype=torch.long) - - if create_neck_pose: - if neck_pose is None: - default_neck_pose = torch.zeros([batch_size, 3], dtype=dtype) - else: - default_neck_pose = torch.tensor(neck_pose, dtype=dtype) - neck_pose_param = nn.Parameter( - default_neck_pose, requires_grad=True) - self.register_parameter('neck_pose', neck_pose_param) - - if create_jaw_pose: - if jaw_pose is None: - default_jaw_pose = torch.zeros([batch_size, 3], dtype=dtype) - else: - default_jaw_pose = torch.tensor(jaw_pose, dtype=dtype) - jaw_pose_param = nn.Parameter(default_jaw_pose, - requires_grad=True) - self.register_parameter('jaw_pose', jaw_pose_param) - - if create_leye_pose: - if leye_pose is None: - default_leye_pose = torch.zeros([batch_size, 3], dtype=dtype) - else: - default_leye_pose = torch.tensor(leye_pose, dtype=dtype) - leye_pose_param = nn.Parameter(default_leye_pose, - requires_grad=True) - self.register_parameter('leye_pose', leye_pose_param) - - if create_reye_pose: - if reye_pose is None: - default_reye_pose = torch.zeros([batch_size, 3], dtype=dtype) - else: - default_reye_pose = torch.tensor(reye_pose, dtype=dtype) - reye_pose_param = nn.Parameter(default_reye_pose, - requires_grad=True) - self.register_parameter('reye_pose', reye_pose_param) - - shapedirs = data_struct.shapedirs - if len(shapedirs.shape) < 3: - shapedirs = shapedirs[:, :, None] - if (shapedirs.shape[-1] < self.SHAPE_SPACE_DIM + - self.EXPRESSION_SPACE_DIM): - # print(f'WARNING: You are using a {self.name()} model, with only' - # ' 10 shape and 10 expression coefficients.') - expr_start_idx = 10 - expr_end_idx = 20 - num_expression_coeffs = min(num_expression_coeffs, 10) - else: - expr_start_idx = self.SHAPE_SPACE_DIM - expr_end_idx = self.SHAPE_SPACE_DIM + num_expression_coeffs - num_expression_coeffs = min( - num_expression_coeffs, self.EXPRESSION_SPACE_DIM) - - self._num_expression_coeffs = num_expression_coeffs - - expr_dirs = shapedirs[:, :, expr_start_idx:expr_end_idx] - self.register_buffer( - 'expr_dirs', to_tensor(to_np(expr_dirs), dtype=dtype)) - - if create_expression: - if expression is None: - default_expression = torch.zeros( - [batch_size, self.num_expression_coeffs], dtype=dtype) - else: - default_expression = torch.tensor(expression, dtype=dtype) - expression_param = nn.Parameter(default_expression, - requires_grad=True) - self.register_parameter('expression', expression_param) - - # The pickle file that contains the barycentric coordinates for - # regressing the landmarks - 
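The embedding file loaded below defines each facial landmark as a triangle index plus barycentric weights; vertices2landmarks then recovers the 3-D landmark as the weighted sum of that triangle's vertices. The operation, sketched standalone with placeholder connectivity (FLAME's real topology has 5023 vertices and 9976 faces; the static embedding carries 51 landmarks):

import torch

def landmarks_from_embedding(vertices, faces, lmk_faces_idx, lmk_bary_coords):
    # vertices (B,V,3), faces (F,3), lmk_faces_idx (L,), lmk_bary_coords (L,3)
    tri_verts = vertices[:, faces[lmk_faces_idx]]          # (B, L, 3, 3)
    return torch.einsum('blvc,lv->blc', tri_verts, lmk_bary_coords)

B, V, F, L = 1, 5023, 9976, 51
verts = torch.randn(B, V, 3)
faces = torch.randint(0, V, (F, 3))                        # placeholder connectivity
idx = torch.randint(0, F, (L,))
bary = torch.softmax(torch.randn(L, 3), dim=-1)            # rows sum to 1
assert landmarks_from_embedding(verts, faces, idx, bary).shape == (B, L, 3)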
landmark_bcoord_filename = osp.join(
-            model_path, 'flame_static_embedding.pkl')
-
-        with open(landmark_bcoord_filename, 'rb') as fp:
-            landmarks_data = pickle.load(fp, encoding='latin1')
-
-        lmk_faces_idx = landmarks_data['lmk_face_idx'].astype(np.int64)
-        self.register_buffer('lmk_faces_idx',
-                             torch.tensor(lmk_faces_idx, dtype=torch.long))
-        lmk_bary_coords = landmarks_data['lmk_b_coords']
-        self.register_buffer('lmk_bary_coords',
-                             torch.tensor(lmk_bary_coords, dtype=dtype))
-        if self.use_face_contour:
-            face_contour_path = os.path.join(
-                model_path, 'flame_dynamic_embedding.npy')
-            contour_embeddings = np.load(face_contour_path,
-                                         allow_pickle=True,
-                                         encoding='latin1')[()]
-
-            dynamic_lmk_faces_idx = np.array(
-                contour_embeddings['lmk_face_idx'], dtype=np.int64)
-            dynamic_lmk_faces_idx = torch.tensor(
-                dynamic_lmk_faces_idx,
-                dtype=torch.long)
-            self.register_buffer('dynamic_lmk_faces_idx',
-                                 dynamic_lmk_faces_idx)
-
-            dynamic_lmk_b_coords = torch.tensor(
-                contour_embeddings['lmk_b_coords'], dtype=dtype)
-            self.register_buffer(
-                'dynamic_lmk_bary_coords', dynamic_lmk_b_coords)
-
-            neck_kin_chain = find_joint_kin_chain(self.NECK_IDX, self.parents)
-            self.register_buffer(
-                'neck_kin_chain',
-                torch.tensor(neck_kin_chain, dtype=torch.long))
-
-    @property
-    def num_expression_coeffs(self):
-        return self._num_expression_coeffs
-
-    def name(self) -> str:
-        return 'FLAME'
-
-    def extra_repr(self):
-        msg = [
-            super(FLAME, self).extra_repr(),
-            f'Number of Expression Coefficients: {self.num_expression_coeffs}',
-            f'Use face contour: {self.use_face_contour}',
-        ]
-        return '\n'.join(msg)
-
-    def forward(
-        self,
-        betas: Optional[Tensor] = None,
-        global_orient: Optional[Tensor] = None,
-        neck_pose: Optional[Tensor] = None,
-        transl: Optional[Tensor] = None,
-        expression: Optional[Tensor] = None,
-        jaw_pose: Optional[Tensor] = None,
-        leye_pose: Optional[Tensor] = None,
-        reye_pose: Optional[Tensor] = None,
-        return_verts: bool = True,
-        return_full_pose: bool = False,
-        pose2rot: bool = True,
-        **kwargs
-    ) -> FLAMEOutput:
-        '''
-        Forward pass for the FLAME model
-
-        Parameters
-        ----------
-        global_orient: torch.tensor, optional, shape Bx3
-            If given, ignore the member variable and use it as the global
-            rotation of the body. Useful if someone wishes to predict this
-            with an external model. (default=None)
-        betas: torch.tensor, optional, shape Bx10
-            If given, ignore the member variable `betas` and use it
-            instead. For example, it can be used if shape parameters
-            `betas` are predicted from some external model.
-            (default=None)
-        expression: torch.tensor, optional, shape Bx10
-            If given, ignore the member variable `expression` and use it
-            instead. For example, it can be used if expression parameters
-            `expression` are predicted from some external model.
-        jaw_pose: torch.tensor, optional, shape Bx3
-            If given, ignore the member variable `jaw_pose` and
-            use this instead. It should be joint rotations in
-            axis-angle format.
-        transl: torch.tensor, optional, shape Bx3
-            If given, ignore the member variable `transl` and use it
-            instead. For example, it can be used if the translation
-            `transl` is predicted from some external model.
-            (default=None)
-        return_verts: bool, optional
-            Return the vertices. (default=True)
-        return_full_pose: bool, optional
-            Returns the full axis-angle pose vector (default=False)
-
-        Returns
-        -------
-        output: FLAMEOutput
-            A data class of type `FLAMEOutput`
-        '''
-
-        # If no shape and pose parameters are passed along, then use the
-        # ones from the module
-        global_orient = (global_orient if global_orient is not None else
-                         self.global_orient)
-        jaw_pose = jaw_pose if jaw_pose is not None else self.jaw_pose
-        neck_pose = neck_pose if neck_pose is not None else self.neck_pose
-
-        leye_pose = leye_pose if leye_pose is not None else self.leye_pose
-        reye_pose = reye_pose if reye_pose is not None else self.reye_pose
-
-        betas = betas if betas is not None else self.betas
-        expression = expression if expression is not None else self.expression
-
-        apply_trans = transl is not None or hasattr(self, 'transl')
-        if transl is None:
-            if hasattr(self, 'transl'):
-                transl = self.transl
-
-        full_pose = torch.cat(
-            [global_orient, neck_pose, jaw_pose, leye_pose, reye_pose], dim=1)
-
-        batch_size = max(betas.shape[0], global_orient.shape[0],
-                         jaw_pose.shape[0])
-        # Concatenate the shape and expression coefficients
-        scale = int(batch_size / betas.shape[0])
-        if scale > 1:
-            betas = betas.expand(scale, -1)
-        shape_components = torch.cat([betas, expression], dim=-1)
-        shapedirs = torch.cat([self.shapedirs, self.expr_dirs], dim=-1)
-
-        vertices, joints = lbs(shape_components, full_pose, self.v_template,
-                               shapedirs, self.posedirs,
-                               self.J_regressor, self.parents,
-                               self.lbs_weights, pose2rot=pose2rot,
-                               )
-
-        lmk_faces_idx = self.lmk_faces_idx.unsqueeze(
-            dim=0).expand(batch_size, -1).contiguous()
-        # NOTE: the barycentric coordinates are tiled with the
-        # construction-time batch size, so inputs are expected to match the
-        # `batch_size` passed to the constructor
-        lmk_bary_coords = self.lmk_bary_coords.unsqueeze(dim=0).repeat(
-            self.batch_size, 1, 1)
-        if self.use_face_contour:
-            lmk_idx_and_bcoords = find_dynamic_lmk_idx_and_bcoords(
-                vertices, full_pose, self.dynamic_lmk_faces_idx,
-                self.dynamic_lmk_bary_coords,
-                self.neck_kin_chain,
-                pose2rot=True,
-            )
-            dyn_lmk_faces_idx, dyn_lmk_bary_coords = lmk_idx_and_bcoords
-            lmk_faces_idx = torch.cat([lmk_faces_idx,
-                                       dyn_lmk_faces_idx], 1)
-            lmk_bary_coords = torch.cat(
-                [lmk_bary_coords.expand(batch_size, -1, -1),
-                 dyn_lmk_bary_coords], 1)
-
-        landmarks = vertices2landmarks(vertices, self.faces_tensor,
-                                       lmk_faces_idx,
-                                       lmk_bary_coords)
-
-        # Add any extra joints that might be needed
-        joints = self.vertex_joint_selector(vertices, joints)
-        # Add the landmarks to the joints
-        joints = torch.cat([joints, landmarks], dim=1)
-
-        # Map the joints to the current dataset
-        if self.joint_mapper is not None:
-            joints = self.joint_mapper(joints=joints, vertices=vertices)
-
-        if apply_trans:
-            joints += transl.unsqueeze(dim=1)
-            vertices += transl.unsqueeze(dim=1)
-
-        output = FLAMEOutput(vertices=vertices if return_verts else None,
-                             joints=joints,
-                             betas=betas,
-                             expression=expression,
-                             global_orient=global_orient,
-                             neck_pose=neck_pose,
-                             jaw_pose=jaw_pose,
-                             full_pose=full_pose if return_full_pose else None)
-        return output
-
-
-class FLAMELayer(FLAME):
-    def __init__(self, *args, **kwargs) -> None:
-        ''' FLAME as a layer model constructor '''
-        super(FLAMELayer, self).__init__(
-            create_betas=False,
-            create_expression=False,
-            create_global_orient=False,
-            create_neck_pose=False,
-            create_jaw_pose=False,
-            create_leye_pose=False,
-            create_reye_pose=False,
-            *args,
-            **kwargs)
-
-    def forward(
-        self,
-        betas: Optional[Tensor] = None,
-        global_orient: Optional[Tensor] = None,
-        neck_pose: Optional[Tensor] = None,
-        transl: Optional[Tensor] = None,
-        expression: Optional[Tensor] = None,
-        jaw_pose: Optional[Tensor] = None,
-        leye_pose: Optional[Tensor] = None,
-        reye_pose: Optional[Tensor] = None,
-        return_verts: bool = True,
-        return_full_pose: bool = False,
-        pose2rot: bool = True,
-        **kwargs
-    ) -> FLAMEOutput:
-        '''
-        Forward pass for the FLAME model
-
-        Parameters
-        ----------
-        global_orient: torch.tensor, optional, shape Bx3x3
-            Global rotation of the body. Useful if someone wishes to
-            predict this with an external model. It is expected to be in
-            rotation matrix format. (default=None)
-        betas: torch.tensor, optional, shape BxN_b
-            Shape parameters. For example, it can be used if shape parameters
-            `betas` are predicted from some external model.
-            (default=None)
-        expression: torch.tensor, optional, shape BxN_e
-            If given, ignore the member variable `expression` and use it
-            instead. For example, it can be used if expression parameters
-            `expression` are predicted from some external model.
-        jaw_pose: torch.tensor, optional, shape Bx3x3
-            Jaw pose. It should be joint rotations in
-            rotation matrix format.
-        transl: torch.tensor, optional, shape Bx3
-            Translation vector of the body.
-            For example, it can be used if the translation
-            `transl` is predicted from some external model.
-            (default=None)
-        return_verts: bool, optional
-            Return the vertices. (default=True)
-        return_full_pose: bool, optional
-            Returns the full pose vector in rotation matrix format
-            (default=False)
-
-        Returns
-        -------
-        output: FLAMEOutput
-            A data class of type `FLAMEOutput`
-        '''
-        device, dtype = self.shapedirs.device, self.shapedirs.dtype
-        if global_orient is None:
-            batch_size = 1
-            global_orient = torch.eye(3, device=device, dtype=dtype).view(
-                1, 1, 3, 3).expand(batch_size, -1, -1, -1).contiguous()
-        else:
-            batch_size = global_orient.shape[0]
-        if neck_pose is None:
-            neck_pose = torch.eye(3, device=device, dtype=dtype).view(
-                1, 1, 3, 3).expand(batch_size, 1, -1, -1).contiguous()
-        if jaw_pose is None:
-            jaw_pose = torch.eye(3, device=device, dtype=dtype).view(
-                1, 1, 3, 3).expand(batch_size, -1, -1, -1).contiguous()
-        if leye_pose is None:
-            leye_pose = torch.eye(3, device=device, dtype=dtype).view(
-                1, 1, 3, 3).expand(batch_size, -1, -1, -1).contiguous()
-        if reye_pose is None:
-            reye_pose = torch.eye(3, device=device, dtype=dtype).view(
-                1, 1, 3, 3).expand(batch_size, -1, -1, -1).contiguous()
-        if betas is None:
-            betas = torch.zeros([batch_size, self.num_betas],
-                                dtype=dtype, device=device)
-        if expression is None:
-            expression = torch.zeros([batch_size, self.num_expression_coeffs],
-                                     dtype=dtype, device=device)
-        if transl is None:
-            transl = torch.zeros([batch_size, 3], dtype=dtype, device=device)
-
-        full_pose = torch.cat(
-            [global_orient, neck_pose, jaw_pose, leye_pose, reye_pose], dim=1)
-
-        shape_components = torch.cat([betas, expression], dim=-1)
-        shapedirs = torch.cat([self.shapedirs, self.expr_dirs], dim=-1)
-
-        vertices, joints = lbs(shape_components, full_pose, self.v_template,
-                               shapedirs, self.posedirs,
-                               self.J_regressor, self.parents,
-                               self.lbs_weights, pose2rot=False,
-                               )
-
-        lmk_faces_idx = self.lmk_faces_idx.unsqueeze(
-            dim=0).expand(batch_size, -1).contiguous()
-        lmk_bary_coords = self.lmk_bary_coords.unsqueeze(dim=0).repeat(
-            self.batch_size, 1, 1)
-        if self.use_face_contour:
-            lmk_idx_and_bcoords = find_dynamic_lmk_idx_and_bcoords(
-                vertices, full_pose, self.dynamic_lmk_faces_idx,
-                self.dynamic_lmk_bary_coords,
-                self.neck_kin_chain,
-                pose2rot=False,
-            )
-            dyn_lmk_faces_idx, dyn_lmk_bary_coords = lmk_idx_and_bcoords
-            lmk_faces_idx = torch.cat([lmk_faces_idx,
-                                       dyn_lmk_faces_idx], 1)
-            lmk_bary_coords = torch.cat(
-                [lmk_bary_coords.expand(batch_size, -1, -1),
-                 dyn_lmk_bary_coords], 1)
-
-        landmarks = vertices2landmarks(vertices, self.faces_tensor,
-                                       lmk_faces_idx,
-                                       lmk_bary_coords)
-
-        # Add any extra joints that might be needed
-        joints = self.vertex_joint_selector(vertices, joints)
-        # Add the landmarks to the joints
-        joints = torch.cat([joints, landmarks], dim=1)
-
-        # Map the joints to the current dataset
-        if self.joint_mapper is not None:
-            joints = self.joint_mapper(joints=joints, vertices=vertices)
-
-        joints += transl.unsqueeze(dim=1)
-        vertices += transl.unsqueeze(dim=1)
-
-        output = FLAMEOutput(vertices=vertices if return_verts else None,
-                             joints=joints,
-                             betas=betas,
-                             expression=expression,
-                             global_orient=global_orient,
-                             neck_pose=neck_pose,
-                             jaw_pose=jaw_pose,
-                             full_pose=full_pose if return_full_pose else None)
-        return output
-
-
-def build_layer(
-    model_path: str,
-    model_type: str = 'smpl',
-    **kwargs
-) -> Union[SMPLLayer, SMPLHLayer, SMPLXLayer, MANOLayer, FLAMELayer]:
-    ''' Method for creating a model from a path and a model type
-
-        Parameters
-        ----------
-        model_path: str
-            Either the path to the model you wish to load or a folder,
-            where each subfolder contains the different model types, i.e.:
-            model_path:
-            |
-            |-- smpl
-                |-- SMPL_FEMALE
-                |-- SMPL_NEUTRAL
-                |-- SMPL_MALE
-            |-- smplh
-                |-- SMPLH_FEMALE
-                |-- SMPLH_MALE
-            |-- smplx
-                |-- SMPLX_FEMALE
-                |-- SMPLX_NEUTRAL
-                |-- SMPLX_MALE
-            |-- mano
-                |-- MANO RIGHT
-                |-- MANO LEFT
-            |-- flame
-                |-- FLAME_FEMALE
-                |-- FLAME_MALE
-                |-- FLAME_NEUTRAL
-
-        model_type: str, optional
-            When model_path is a folder, then this parameter specifies the
-            type of model to be loaded
-        **kwargs: dict
-            Keyword arguments
-
-        Returns
-        -------
-        body_model: nn.Module
-            The PyTorch module that implements the corresponding body model
-        Raises
-        ------
-        ValueError: In case the model type is not one of SMPL, SMPLH,
-            SMPLX, MANO or FLAME
-    '''
-
-    if osp.isdir(model_path):
-        model_path = os.path.join(model_path, model_type)
-    else:
-        model_type = osp.basename(model_path).split('_')[0].lower()
-
-    if model_type.lower() == 'smpl':
-        return SMPLLayer(model_path, **kwargs)
-    elif model_type.lower() == 'smplh':
-        return SMPLHLayer(model_path, **kwargs)
-    elif model_type.lower() == 'smplx':
-        return SMPLXLayer(model_path, **kwargs)
-    elif 'mano' in model_type.lower():
-        return MANOLayer(model_path, **kwargs)
-    elif 'flame' in model_type.lower():
-        return FLAMELayer(model_path, **kwargs)
-    else:
-        raise ValueError(f'Unknown model type {model_type}, exiting!')
-
-
-def create(
-    model_path: str,
-    model_type: str = 'smpl',
-    **kwargs
-) -> Union[SMPL, SMPLH, SMPLX, MANO, FLAME]:
-    ''' Method for creating a model from a path and a model type
-
-        Parameters
-        ----------
-        model_path: str
-            Either the path to the model you wish to load or a folder,
-            where each subfolder contains the different model types, i.e.:
-            model_path:
-            |
-            |-- smpl
-                |-- SMPL_FEMALE
-                |-- SMPL_NEUTRAL
-                |-- SMPL_MALE
-            |-- smplh
-                |-- SMPLH_FEMALE
-                |-- SMPLH_MALE
-            |-- smplx
-                |-- SMPLX_FEMALE
-                |-- SMPLX_NEUTRAL
-                |-- SMPLX_MALE
-            |-- mano
-                |-- MANO RIGHT
-                |-- MANO LEFT
-
-        model_type: str, optional
-            When model_path is a folder, then this parameter specifies the
-            type of model to be loaded
-        **kwargs: dict
-            Keyword arguments
-
-        Returns
-        -------
-        body_model: nn.Module
-            The PyTorch module that implements the corresponding body model
-        Raises
-        ------
-        ValueError: In case the model type is not one of SMPL, SMPLH,
SMPLX, MANO or FLAME - ''' - - model_path = os.path.join(model_path, model_type) - - if model_type.lower() == 'smpl': - return SMPL(model_path, **kwargs) - elif model_type.lower() == 'smplh': - return SMPLH(model_path, **kwargs) - elif model_type.lower() == 'smplx': - return SMPLX(model_path, **kwargs) - elif 'mano' in model_type.lower(): - return MANO(model_path, **kwargs) - elif 'flame' in model_type.lower(): - return FLAME(model_path, **kwargs) - else: - raise ValueError(f'Unknown model type {model_type}, exiting!') diff --git a/lib/smplx/joint_names.py b/lib/smplx/joint_names.py deleted file mode 100644 index 0a3a10f8cef8b50075dc9f680459fc5d596a0013..0000000000000000000000000000000000000000 --- a/lib/smplx/joint_names.py +++ /dev/null @@ -1,163 +0,0 @@ -# -*- coding: utf-8 -*- - -# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is -# holder of all proprietary rights on this computer program. -# You can only use this computer program if you have closed -# a license agreement with MPG or you get the right to use the computer -# program from someone who is authorized to grant you that right. -# Any use of the computer program without a valid license is prohibited and -# liable to prosecution. -# -# Copyright©2019 Max-Planck-Gesellschaft zur Förderung -# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute -# for Intelligent Systems. All rights reserved. -# -# Contact: ps-license@tuebingen.mpg.de - -JOINT_NAMES = [ - 'pelvis', - 'left_hip', - 'right_hip', - 'spine1', - 'left_knee', - 'right_knee', - 'spine2', - 'left_ankle', - 'right_ankle', - 'spine3', - 'left_foot', - 'right_foot', - 'neck', - 'left_collar', - 'right_collar', - 'head', - 'left_shoulder', - 'right_shoulder', - 'left_elbow', - 'right_elbow', - 'left_wrist', - 'right_wrist', - 'jaw', - 'left_eye_smplhf', - 'right_eye_smplhf', - 'left_index1', - 'left_index2', - 'left_index3', - 'left_middle1', - 'left_middle2', - 'left_middle3', - 'left_pinky1', - 'left_pinky2', - 'left_pinky3', - 'left_ring1', - 'left_ring2', - 'left_ring3', - 'left_thumb1', - 'left_thumb2', - 'left_thumb3', - 'right_index1', - 'right_index2', - 'right_index3', - 'right_middle1', - 'right_middle2', - 'right_middle3', - 'right_pinky1', - 'right_pinky2', - 'right_pinky3', - 'right_ring1', - 'right_ring2', - 'right_ring3', - 'right_thumb1', - 'right_thumb2', - 'right_thumb3', - 'nose', - 'right_eye', - 'left_eye', - 'right_ear', - 'left_ear', - 'left_big_toe', - 'left_small_toe', - 'left_heel', - 'right_big_toe', - 'right_small_toe', - 'right_heel', - 'left_thumb', - 'left_index', - 'left_middle', - 'left_ring', - 'left_pinky', - 'right_thumb', - 'right_index', - 'right_middle', - 'right_ring', - 'right_pinky', - 'right_eye_brow1', - 'right_eye_brow2', - 'right_eye_brow3', - 'right_eye_brow4', - 'right_eye_brow5', - 'left_eye_brow5', - 'left_eye_brow4', - 'left_eye_brow3', - 'left_eye_brow2', - 'left_eye_brow1', - 'nose1', - 'nose2', - 'nose3', - 'nose4', - 'right_nose_2', - 'right_nose_1', - 'nose_middle', - 'left_nose_1', - 'left_nose_2', - 'right_eye1', - 'right_eye2', - 'right_eye3', - 'right_eye4', - 'right_eye5', - 'right_eye6', - 'left_eye4', - 'left_eye3', - 'left_eye2', - 'left_eye1', - 'left_eye6', - 'left_eye5', - 'right_mouth_1', - 'right_mouth_2', - 'right_mouth_3', - 'mouth_top', - 'left_mouth_3', - 'left_mouth_2', - 'left_mouth_1', - 'left_mouth_5', # 59 in OpenPose output - 'left_mouth_4', # 58 in OpenPose output - 'mouth_bottom', - 'right_mouth_4', - 'right_mouth_5', - 'right_lip_1', - 
'right_lip_2',
-    'lip_top',
-    'left_lip_2',
-    'left_lip_1',
-    'left_lip_3',
-    'lip_bottom',
-    'right_lip_3',
-    # Face contour
-    'right_contour_1',
-    'right_contour_2',
-    'right_contour_3',
-    'right_contour_4',
-    'right_contour_5',
-    'right_contour_6',
-    'right_contour_7',
-    'right_contour_8',
-    'contour_middle',
-    'left_contour_8',
-    'left_contour_7',
-    'left_contour_6',
-    'left_contour_5',
-    'left_contour_4',
-    'left_contour_3',
-    'left_contour_2',
-    'left_contour_1',
-]
diff --git a/lib/smplx/lbs.py b/lib/smplx/lbs.py
deleted file mode 100644
index a0c23f6c3595d763bf615a18a0a881c8b81e9e3b..0000000000000000000000000000000000000000
--- a/lib/smplx/lbs.py
+++ /dev/null
@@ -1,405 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
-# holder of all proprietary rights on this computer program.
-# You can only use this computer program if you have closed
-# a license agreement with MPG or you get the right to use the computer
-# program from someone who is authorized to grant you that right.
-# Any use of the computer program without a valid license is prohibited and
-# liable to prosecution.
-#
-# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
-# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
-# for Intelligent Systems. All rights reserved.
-#
-# Contact: ps-license@tuebingen.mpg.de
-
-from __future__ import absolute_import
-from __future__ import print_function
-from __future__ import division
-
-from typing import Tuple, List, Optional
-import numpy as np
-
-import torch
-import torch.nn.functional as F
-
-from .utils import rot_mat_to_euler, Tensor
-
-
-def find_dynamic_lmk_idx_and_bcoords(
-    vertices: Tensor,
-    pose: Tensor,
-    dynamic_lmk_faces_idx: Tensor,
-    dynamic_lmk_b_coords: Tensor,
-    neck_kin_chain: List[int],
-    pose2rot: bool = True,
-) -> Tuple[Tensor, Tensor]:
-    ''' Compute the faces and barycentric coordinates for the dynamic landmarks
-
-    To do so, we first compute the rotation of the neck around the y-axis
-    and then use a pre-computed look-up table to find the faces and the
-    barycentric coordinates that will be used.
-
-    Special thanks to Soubhik Sanyal (soubhik.sanyal@tuebingen.mpg.de)
-    for providing the original TensorFlow implementation and for the LUT.
-
-    Parameters
-    ----------
-    vertices: torch.tensor BxVx3, dtype = torch.float32
-        The tensor of input vertices
-    pose: torch.tensor Bx(Jx3), dtype = torch.float32
-        The current pose of the body model
-    dynamic_lmk_faces_idx: torch.tensor L, dtype = torch.long
-        The look-up table from neck rotation to faces
-    dynamic_lmk_b_coords: torch.tensor Lx3, dtype = torch.float32
-        The look-up table from neck rotation to barycentric coordinates
-    neck_kin_chain: list
-        A python list that contains the indices of the joints that form the
-        kinematic chain of the neck.
-
-    Returns
-    -------
-    dyn_lmk_faces_idx: torch.tensor, dtype = torch.long
-        A tensor of size BxL that contains the indices of the faces that
-        will be used to compute the current dynamic landmarks.
-    dyn_lmk_b_coords: torch.tensor, dtype = torch.float32
-        A tensor of size BxLx3 that contains the barycentric coordinates
-        that will be used to compute the current dynamic landmarks.
- ''' - - dtype = vertices.dtype - batch_size = vertices.shape[0] - - if pose2rot: - aa_pose = torch.index_select(pose.view(batch_size, -1, 3), 1, - neck_kin_chain) - rot_mats = batch_rodrigues( - aa_pose.view(-1, 3)).view(batch_size, -1, 3, 3) - else: - rot_mats = torch.index_select( - pose.view(batch_size, -1, 3, 3), 1, neck_kin_chain) - - rel_rot_mat = torch.eye( - 3, device=vertices.device, dtype=dtype).unsqueeze_(dim=0).repeat( - batch_size, 1, 1) - for idx in range(len(neck_kin_chain)): - rel_rot_mat = torch.bmm(rot_mats[:, idx], rel_rot_mat) - - y_rot_angle = torch.round( - torch.clamp(-rot_mat_to_euler(rel_rot_mat) * 180.0 / np.pi, - max=39)).to(dtype=torch.long) - neg_mask = y_rot_angle.lt(0).to(dtype=torch.long) - mask = y_rot_angle.lt(-39).to(dtype=torch.long) - neg_vals = mask * 78 + (1 - mask) * (39 - y_rot_angle) - y_rot_angle = (neg_mask * neg_vals + - (1 - neg_mask) * y_rot_angle) - - dyn_lmk_faces_idx = torch.index_select(dynamic_lmk_faces_idx, - 0, y_rot_angle) - dyn_lmk_b_coords = torch.index_select(dynamic_lmk_b_coords, - 0, y_rot_angle) - - return dyn_lmk_faces_idx, dyn_lmk_b_coords - - -def vertices2landmarks( - vertices: Tensor, - faces: Tensor, - lmk_faces_idx: Tensor, - lmk_bary_coords: Tensor -) -> Tensor: - ''' Calculates landmarks by barycentric interpolation - - Parameters - ---------- - vertices: torch.tensor BxVx3, dtype = torch.float32 - The tensor of input vertices - faces: torch.tensor Fx3, dtype = torch.long - The faces of the mesh - lmk_faces_idx: torch.tensor L, dtype = torch.long - The tensor with the indices of the faces used to calculate the - landmarks. - lmk_bary_coords: torch.tensor Lx3, dtype = torch.float32 - The tensor of barycentric coordinates that are used to interpolate - the landmarks - - Returns - ------- - landmarks: torch.tensor BxLx3, dtype = torch.float32 - The coordinates of the landmarks for each mesh in the batch - ''' - # Extract the indices of the vertices for each face - # BxLx3 - batch_size, num_verts = vertices.shape[:2] - device = vertices.device - - lmk_faces = torch.index_select(faces, 0, lmk_faces_idx.view(-1)).view( - batch_size, -1, 3) - - lmk_faces += torch.arange( - batch_size, dtype=torch.long, device=device).view(-1, 1, 1) * num_verts - - lmk_vertices = vertices.view(-1, 3)[lmk_faces].view( - batch_size, -1, 3, 3) - - landmarks = torch.einsum('blfi,blf->bli', [lmk_vertices, lmk_bary_coords]) - return landmarks - - -def lbs( - betas: Tensor, - pose: Tensor, - v_template: Tensor, - shapedirs: Tensor, - posedirs: Tensor, - J_regressor: Tensor, - parents: Tensor, - lbs_weights: Tensor, - pose2rot: bool = True, - return_transformation: bool = False, -) -> Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]]: - ''' Performs Linear Blend Skinning with the given shape and pose parameters - - Parameters - ---------- - betas : torch.tensor BxNB - The tensor of shape parameters - pose : torch.tensor Bx(J + 1) * 3 - The pose parameters in axis-angle format - v_template torch.tensor BxVx3 - The template mesh that will be deformed - shapedirs : torch.tensor 1xNB - The tensor of PCA shape displacements - posedirs : torch.tensor Px(V * 3) - The pose PCA coefficients - J_regressor : torch.tensor JxV - The regressor array that is used to calculate the joints from - the position of the vertices - parents: torch.tensor J - The array that describes the kinematic tree for the model - lbs_weights: torch.tensor N x V x (J + 1) - The linear blend skinning weights that represent how much the - rotation matrix of each part affects each 
vertex - pose2rot: bool, optional - Flag on whether to convert the input pose tensor to rotation - matrices. The default value is True. If False, then the pose tensor - should already contain rotation matrices and have a size of - Bx(J + 1)x9 - dtype: torch.dtype, optional - - Returns - ------- - verts: torch.tensor BxVx3 - The vertices of the mesh after applying the shape and pose - displacements. - joints: torch.tensor BxJx3 - The joints of the model - ''' - - batch_size = max(betas.shape[0], pose.shape[0]) - device, dtype = betas.device, betas.dtype - - # Add shape contribution - v_shaped = v_template + blend_shapes(betas, shapedirs) - - # Get the joints - # NxJx3 array - J = vertices2joints(J_regressor, v_shaped) - - # 3. Add pose blend shapes - # N x J x 3 x 3 - ident = torch.eye(3, dtype=dtype, device=device) - if pose2rot: - rot_mats = batch_rodrigues(pose.view(-1, 3)).view( - [batch_size, -1, 3, 3]) - - pose_feature = (rot_mats[:, 1:, :, :] - ident).view([batch_size, -1]) - # (N x P) x (P, V * 3) -> N x V x 3 - pose_offsets = torch.matmul( - pose_feature, posedirs).view(batch_size, -1, 3) - else: - pose_feature = pose[:, 1:].view(batch_size, -1, 3, 3) - ident - rot_mats = pose.view(batch_size, -1, 3, 3) - - pose_offsets = torch.matmul(pose_feature.view(batch_size, -1), - posedirs).view(batch_size, -1, 3) - - v_posed = pose_offsets + v_shaped - # 4. Get the global joint location - J_transformed, A = batch_rigid_transform(rot_mats, J, parents, dtype=dtype) - - # 5. Do skinning: - # W is N x V x (J + 1) - W = lbs_weights.unsqueeze(dim=0).expand([batch_size, -1, -1]) - # (N x V x (J + 1)) x (N x (J + 1) x 16) - num_joints = J_regressor.shape[0] - T = torch.matmul(W, A.view(batch_size, num_joints, 16)) \ - .view(batch_size, -1, 4, 4) - - homogen_coord = torch.ones([batch_size, v_posed.shape[1], 1], - dtype=dtype, device=device) - v_posed_homo = torch.cat([v_posed, homogen_coord], dim=2) - v_homo = torch.matmul(T, torch.unsqueeze(v_posed_homo, dim=-1)) - - verts = v_homo[:, :, :3, 0] - - if return_transformation: - return verts, J_transformed, A, T - - return verts, J_transformed - - -def vertices2joints(J_regressor: Tensor, vertices: Tensor) -> Tensor: - ''' Calculates the 3D joint locations from the vertices - - Parameters - ---------- - J_regressor : torch.tensor JxV - The regressor array that is used to calculate the joints from the - position of the vertices - vertices : torch.tensor BxVx3 - The tensor of mesh vertices - - Returns - ------- - torch.tensor BxJx3 - The location of the joints - ''' - - return torch.einsum('bik,ji->bjk', [vertices, J_regressor]) - - -def blend_shapes(betas: Tensor, shape_disps: Tensor) -> Tensor: - ''' Calculates the per vertex displacement due to the blend shapes - - - Parameters - ---------- - betas : torch.tensor Bx(num_betas) - Blend shape coefficients - shape_disps: torch.tensor Vx3x(num_betas) - Blend shapes - - Returns - ------- - torch.tensor BxVx3 - The per-vertex displacement due to shape deformation - ''' - - # Displacement[b, m, k] = sum_{l} betas[b, l] * shape_disps[m, k, l] - # i.e. Multiply each shape displacement by its corresponding beta and - # then sum them. 
- blend_shape = torch.einsum('bl,mkl->bmk', [betas, shape_disps]) - return blend_shape - - -def batch_rodrigues( - rot_vecs: Tensor, - epsilon: float = 1e-8, -) -> Tensor: - ''' Calculates the rotation matrices for a batch of rotation vectors - Parameters - ---------- - rot_vecs: torch.tensor Nx3 - array of N axis-angle vectors - Returns - ------- - R: torch.tensor Nx3x3 - The rotation matrices for the given axis-angle parameters - ''' - - batch_size = rot_vecs.shape[0] - device, dtype = rot_vecs.device, rot_vecs.dtype - - angle = torch.norm(rot_vecs + 1e-8, dim=1, keepdim=True) - rot_dir = rot_vecs / angle - - cos = torch.unsqueeze(torch.cos(angle), dim=1) - sin = torch.unsqueeze(torch.sin(angle), dim=1) - - # Bx1 arrays - rx, ry, rz = torch.split(rot_dir, 1, dim=1) - K = torch.zeros((batch_size, 3, 3), dtype=dtype, device=device) - - zeros = torch.zeros((batch_size, 1), dtype=dtype, device=device) - K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \ - .view((batch_size, 3, 3)) - - ident = torch.eye(3, dtype=dtype, device=device).unsqueeze(dim=0) - rot_mat = ident + sin * K + (1 - cos) * torch.bmm(K, K) - return rot_mat - - -def transform_mat(R: Tensor, t: Tensor) -> Tensor: - ''' Creates a batch of transformation matrices - Args: - - R: Bx3x3 array of a batch of rotation matrices - - t: Bx3x1 array of a batch of translation vectors - Returns: - - T: Bx4x4 Transformation matrix - ''' - # No padding left or right, only add an extra row - return torch.cat([F.pad(R, [0, 0, 0, 1]), - F.pad(t, [0, 0, 0, 1], value=1)], dim=2) - - -def batch_rigid_transform( - rot_mats: Tensor, - joints: Tensor, - parents: Tensor, - dtype=torch.float32 -) -> Tensor: - """ - Applies a batch of rigid transformations to the joints - - Parameters - ---------- - rot_mats : torch.tensor BxNx3x3 - Tensor of rotation matrices - joints : torch.tensor BxNx3 - Locations of joints - parents : torch.tensor BxN - The kinematic tree of each object - dtype : torch.dtype, optional: - The data type of the created tensors, the default is torch.float32 - - Returns - ------- - posed_joints : torch.tensor BxNx3 - The locations of the joints after applying the pose rotations - rel_transforms : torch.tensor BxNx4x4 - The relative (with respect to the root joint) rigid transformations - for all the joints - """ - - joints = torch.unsqueeze(joints, dim=-1) - - rel_joints = joints.clone() - rel_joints[:, 1:] -= joints[:, parents[1:]] - - transforms_mat = transform_mat( - rot_mats.reshape(-1, 3, 3), - rel_joints.reshape(-1, 3, 1)).reshape(-1, joints.shape[1], 4, 4) - - transform_chain = [transforms_mat[:, 0]] - for i in range(1, parents.shape[0]): - # Subtract the joint location at the rest pose - # No need for rotation, since it's identity when at rest - curr_res = torch.matmul(transform_chain[parents[i]], - transforms_mat[:, i]) - transform_chain.append(curr_res) - - transforms = torch.stack(transform_chain, dim=1) - - # The last column of the transformations contains the posed joints - posed_joints = transforms[:, :, :3, 3] - - joints_homogen = F.pad(joints, [0, 0, 0, 1]) - - rel_transforms = transforms - F.pad( - torch.matmul(transforms, joints_homogen), [3, 0, 0, 0, 0, 0, 0, 0]) - - return posed_joints, rel_transforms diff --git a/lib/smplx/utils.py b/lib/smplx/utils.py deleted file mode 100644 index 7af19ac23578743b5245f817e938a1cc16736410..0000000000000000000000000000000000000000 --- a/lib/smplx/utils.py +++ /dev/null @@ -1,127 +0,0 @@ -# -*- coding: utf-8 -*- - -# Max-Planck-Gesellschaft zur Förderung der 
Wissenschaften e.V. (MPG) is
-# holder of all proprietary rights on this computer program.
-# You can only use this computer program if you have closed
-# a license agreement with MPG or you get the right to use the computer
-# program from someone who is authorized to grant you that right.
-# Any use of the computer program without a valid license is prohibited and
-# liable to prosecution.
-#
-# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
-# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
-# for Intelligent Systems. All rights reserved.
-#
-# Contact: ps-license@tuebingen.mpg.de
-
-from typing import NewType, Union, Optional
-from dataclasses import dataclass, asdict, fields
-import numpy as np
-import torch
-
-Tensor = NewType('Tensor', torch.Tensor)
-Array = NewType('Array', np.ndarray)
-
-
-@dataclass
-class ModelOutput:
-    vertices: Optional[Tensor] = None
-    joints: Optional[Tensor] = None
-    full_pose: Optional[Tensor] = None
-    global_orient: Optional[Tensor] = None
-    transl: Optional[Tensor] = None
-
-    def __getitem__(self, key):
-        return getattr(self, key)
-
-    def get(self, key, default=None):
-        return getattr(self, key, default)
-
-    def __iter__(self):
-        return self.keys()
-
-    def keys(self):
-        keys = [t.name for t in fields(self)]
-        return iter(keys)
-
-    def values(self):
-        values = [getattr(self, t.name) for t in fields(self)]
-        return iter(values)
-
-    def items(self):
-        data = [(t.name, getattr(self, t.name)) for t in fields(self)]
-        return iter(data)
-
-
-@dataclass
-class SMPLOutput(ModelOutput):
-    betas: Optional[Tensor] = None
-    body_pose: Optional[Tensor] = None
-
-
-@dataclass
-class SMPLHOutput(SMPLOutput):
-    left_hand_pose: Optional[Tensor] = None
-    right_hand_pose: Optional[Tensor] = None
-    transl: Optional[Tensor] = None
-
-
-@dataclass
-class SMPLXOutput(SMPLHOutput):
-    expression: Optional[Tensor] = None
-    jaw_pose: Optional[Tensor] = None
-    joint_transformation: Optional[Tensor] = None
-    vertex_transformation: Optional[Tensor] = None
-
-
-@dataclass
-class MANOOutput(ModelOutput):
-    betas: Optional[Tensor] = None
-    hand_pose: Optional[Tensor] = None
-
-
-@dataclass
-class FLAMEOutput(ModelOutput):
-    betas: Optional[Tensor] = None
-    expression: Optional[Tensor] = None
-    jaw_pose: Optional[Tensor] = None
-    neck_pose: Optional[Tensor] = None
-
-
-def find_joint_kin_chain(joint_id, kinematic_tree):
-    kin_chain = []
-    curr_idx = joint_id
-    while curr_idx != -1:
-        kin_chain.append(curr_idx)
-        curr_idx = kinematic_tree[curr_idx]
-    return kin_chain
-
-
-def to_tensor(
-    array: Union[Array, Tensor], dtype=torch.float32
-) -> Tensor:
-    if torch.is_tensor(array):
-        return array
-    else:
-        return torch.tensor(array, dtype=dtype)
-
-
-class Struct(object):
-    def __init__(self, **kwargs):
-        for key, val in kwargs.items():
-            setattr(self, key, val)
-
-
-def to_np(array, dtype=np.float32):
-    if 'scipy.sparse' in str(type(array)):
-        array = array.todense()
-    return np.array(array, dtype=dtype)
-
-
-def rot_mat_to_euler(rot_mats):
-    # Calculates Euler angles from a rotation matrix
-    # Careful with extreme cases of Euler angles like [0.0, pi, 0.0]
-
-    sy = torch.sqrt(rot_mats[:, 0, 0] * rot_mats[:, 0, 0] +
-                    rot_mats[:, 1, 0] * rot_mats[:, 1, 0])
-    return torch.atan2(-rot_mats[:, 2, 0], sy)
diff --git a/lib/smplx/vertex_ids.py b/lib/smplx/vertex_ids.py
deleted file mode 100644
index 0e7a4c36700f002da54a9e181eabbd47af2a95bc..0000000000000000000000000000000000000000
--- a/lib/smplx/vertex_ids.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# -*- coding: utf-8 -*-
-
-#
Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is -# holder of all proprietary rights on this computer program. -# You can only use this computer program if you have closed -# a license agreement with MPG or you get the right to use the computer -# program from someone who is authorized to grant you that right. -# Any use of the computer program without a valid license is prohibited and -# liable to prosecution. -# -# Copyright©2019 Max-Planck-Gesellschaft zur Förderung -# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute -# for Intelligent Systems. All rights reserved. -# -# Contact: ps-license@tuebingen.mpg.de - -from __future__ import print_function -from __future__ import absolute_import -from __future__ import division - -# Joint name to vertex mapping. SMPL/SMPL-H/SMPL-X vertices that correspond to -# MSCOCO and OpenPose joints -vertex_ids = { - 'smplh': { - 'nose': 332, - 'reye': 6260, - 'leye': 2800, - 'rear': 4071, - 'lear': 583, - 'rthumb': 6191, - 'rindex': 5782, - 'rmiddle': 5905, - 'rring': 6016, - 'rpinky': 6133, - 'lthumb': 2746, - 'lindex': 2319, - 'lmiddle': 2445, - 'lring': 2556, - 'lpinky': 2673, - 'LBigToe': 3216, - 'LSmallToe': 3226, - 'LHeel': 3387, - 'RBigToe': 6617, - 'RSmallToe': 6624, - 'RHeel': 6787 - }, - 'smplx': { - 'nose': 9120, - 'reye': 9929, - 'leye': 9448, - 'rear': 616, - 'lear': 6, - 'rthumb': 8079, - 'rindex': 7669, - 'rmiddle': 7794, - 'rring': 7905, - 'rpinky': 8022, - 'lthumb': 5361, - 'lindex': 4933, - 'lmiddle': 5058, - 'lring': 5169, - 'lpinky': 5286, - 'LBigToe': 5770, - 'LSmallToe': 5780, - 'LHeel': 8846, - 'RBigToe': 8463, - 'RSmallToe': 8474, - 'RHeel': 8635 - }, - 'mano': { - 'thumb': 744, - 'index': 320, - 'middle': 443, - 'ring': 554, - 'pinky': 671, - } -} diff --git a/lib/smplx/vertex_joint_selector.py b/lib/smplx/vertex_joint_selector.py deleted file mode 100644 index 4b8298bd5e087731f86c1c699703b5219e046c5c..0000000000000000000000000000000000000000 --- a/lib/smplx/vertex_joint_selector.py +++ /dev/null @@ -1,77 +0,0 @@ -# -*- coding: utf-8 -*- - -# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is -# holder of all proprietary rights on this computer program. -# You can only use this computer program if you have closed -# a license agreement with MPG or you get the right to use the computer -# program from someone who is authorized to grant you that right. -# Any use of the computer program without a valid license is prohibited and -# liable to prosecution. -# -# Copyright©2019 Max-Planck-Gesellschaft zur Förderung -# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute -# for Intelligent Systems. All rights reserved. 
-# -# Contact: ps-license@tuebingen.mpg.de - -from __future__ import absolute_import -from __future__ import print_function -from __future__ import division - -import numpy as np - -import torch -import torch.nn as nn - -from .utils import to_tensor - - -class VertexJointSelector(nn.Module): - - def __init__(self, vertex_ids=None, - use_hands=True, - use_feet_keypoints=True, **kwargs): - super(VertexJointSelector, self).__init__() - - extra_joints_idxs = [] - - face_keyp_idxs = np.array([ - vertex_ids['nose'], - vertex_ids['reye'], - vertex_ids['leye'], - vertex_ids['rear'], - vertex_ids['lear']], dtype=np.int64) - - extra_joints_idxs = np.concatenate([extra_joints_idxs, - face_keyp_idxs]) - - if use_feet_keypoints: - feet_keyp_idxs = np.array([vertex_ids['LBigToe'], - vertex_ids['LSmallToe'], - vertex_ids['LHeel'], - vertex_ids['RBigToe'], - vertex_ids['RSmallToe'], - vertex_ids['RHeel']], dtype=np.int32) - - extra_joints_idxs = np.concatenate( - [extra_joints_idxs, feet_keyp_idxs]) - - if use_hands: - self.tip_names = ['thumb', 'index', 'middle', 'ring', 'pinky'] - - tips_idxs = [] - for hand_id in ['l', 'r']: - for tip_name in self.tip_names: - tips_idxs.append(vertex_ids[hand_id + tip_name]) - - extra_joints_idxs = np.concatenate( - [extra_joints_idxs, tips_idxs]) - - self.register_buffer('extra_joints_idxs', - to_tensor(extra_joints_idxs, dtype=torch.long)) - - def forward(self, vertices, joints): - extra_joints = torch.index_select(vertices, 1, self.extra_joints_idxs) - joints = torch.cat([joints, extra_joints], dim=1) - - return joints diff --git a/packages.txt b/packages.txt new file mode 100644 index 0000000000000000000000000000000000000000..496bac0380dffa74dc3416a0294380ced58f4d8f --- /dev/null +++ b/packages.txt @@ -0,0 +1,9 @@ +libgl1 +freeglut3-dev +unzip +ffmpeg +libsm6 +libxext6 +libgl1-mesa-dri +libegl1-mesa +libgbm1 \ No newline at end of file diff --git a/requirement.txt b/requirement.txt new file mode 100644 index 0000000000000000000000000000000000000000..ca34dfc68926c5a63d2e2de705de9b5ba4980ed4 --- /dev/null +++ b/requirement.txt @@ -0,0 +1,37 @@ +iopath +fvcore +pyembree +matplotlib==3.5.0 +Pillow==9.0.0 +PyOpenGL +PyOpenGL_accelerate +PyYAML>=6.0 +yacs>=0.1.8 +scikit-image==0.19.1 +termcolor +tqdm==4.62.3 +loguru==0.5.3 +trimesh==3.9.35 +flatten_dict==0.4.2 +ipykernel==5.3.4 +ipywidgets==7.6.5 +jpeg4py +shapely==1.7.1 +vedo==2020.4.2 +rtree==0.9.7 +pytorch_lightning==1.2.5 +PyMCubes +kornia +chumpy +cython==0.29.20 +rembg>=2.0.3 +opencv-python +opencv_contrib_python +scikit-learn +protobuf==3.20.0 +gdown==4.4.0 +git+https://github.com/Project-Splinter/human_det +git+https://github.com/YuliangXiu/smplx.git +git+https://github.com/facebookresearch/pytorch3d.git +git+https://github.com/YuliangXiu/neural_voxelization_layer.git +git+https://github.com/NVIDIAGameWorks/kaolin.git diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 7ae0d818bf35e79e75c3f066ec6b3ea37bccc46a..0000000000000000000000000000000000000000 --- a/requirements.txt +++ /dev/null @@ -1,29 +0,0 @@ -pip>=23.3.2 -numpy==1.22.4 -matplotlib==3.5.0 -Pillow==9.2.0 -PyOpenGL -PyOpenGL_accelerate -PyYAML>=6.0 -yacs>=0.1.8 -scikit-image==0.19.1 -termcolor -tqdm -trimesh==3.9.35 -flatten_dict==0.4.2 -jpeg4py -shapely==1.7.1 -rtree==0.9.7 -pytorch_lightning==1.2.5 -PyMCubes -scikit-learn -protobuf==3.20.0 -pymeshlab -iopath -fvcore -chumpy -open3d -gradio -git+https://github.com/YuliangXiu/rembg.git -opencv-python-headless==4.8.1.78 -opencv-contrib-python==4.8.1.78 \ No newline at end of 
file
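
The smplx modules removed in this patch are the same ones shipped by the PyPI `smplx` package, so the API can still be exercised after the switch. The sketches below are illustrative only: the `smplx` import path, the `./models` directory layout, and all parameter values are assumptions, and the FLAME model files (`FLAME_*.pkl` plus the static/dynamic landmark embeddings) must be downloaded separately.

A minimal forward pass through FLAME via the `create()` factory, posing the jaw and sampling an expression:

    import torch
    import smplx  # assumed import path; mirrors the deleted lib/smplx package

    # Assumes ./models/flame/FLAME_NEUTRAL.pkl and the landmark embedding
    # files are present (obtained separately).
    flame = smplx.create('./models', model_type='flame', gender='neutral',
                         ext='pkl', use_face_contour=True, batch_size=1)

    jaw_pose = torch.zeros(1, 3)       # axis-angle, radians
    jaw_pose[:, 0] = 0.3               # open the jaw slightly
    expression = 0.5 * torch.randn(1, flame.num_expression_coeffs)

    out = flame(jaw_pose=jaw_pose, expression=expression, return_verts=True)
    print(out.vertices.shape)  # (1, 5023, 3): FLAME meshes have 5023 vertices
    print(out.joints.shape)    # 5 joints + 51 static + 17 contour landmarks

`FLAMEOutput` is a dataclass with dict-style access, so `out['vertices']` and `out.get('expression')` work as well.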
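`FLAMELayer` is the stateless variant: it registers no default pose or shape parameters and expects rotations as 3x3 matrices rather than axis-angle vectors. A sketch under the same assumptions; the constructor's `batch_size` is matched to the input batch because, as noted in the forward pass above, the landmark barycentric coordinates are tiled with the construction-time batch size:

    import torch
    from smplx import FLAMELayer
    from smplx.lbs import batch_rodrigues

    B = 4
    layer = FLAMELayer('./models/flame', gender='neutral', ext='pkl',
                       batch_size=B)

    # Axis-angle -> rotation matrices, the format the layer expects.
    aa = torch.zeros(B, 3)
    aa[:, 1] = torch.linspace(0.0, 0.6, B)         # turn the head about +y
    global_orient = batch_rodrigues(aa).view(B, 1, 3, 3)

    # Unspecified inputs default to zeros (shape/expression/translation)
    # or identity rotations inside FLAMELayer.forward.
    out = layer(global_orient=global_orient)
    print(out.vertices.shape)                      # (4, 5023, 3)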
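The heart of lbs.py is the kinematic composition in `batch_rigid_transform`: relative joint offsets are rotated and chained from the root outwards, and the returned transforms are expressed relative to the rest pose. A toy check on a hypothetical two-joint chain (all names and values invented for illustration):

    import math
    import torch
    from smplx.lbs import batch_rodrigues, batch_rigid_transform

    # Two-joint chain: root at the origin, child one unit along +x.
    joints = torch.tensor([[[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]]])  # (1, 2, 3)
    parents = torch.tensor([-1, 0])                # -1 marks the root

    # Rotate the root 90 degrees about +z; keep the child at identity.
    aa = torch.tensor([[0.0, 0.0, math.pi / 2], [0.0, 0.0, 0.0]])
    rot_mats = batch_rodrigues(aa).view(1, 2, 3, 3)

    posed_joints, rel_transforms = batch_rigid_transform(rot_mats, joints,
                                                         parents)
    print(posed_joints)          # the child should land near (0, 1, 0)
    print(rel_transforms.shape)  # (1, 2, 4, 4), relative to the rest pose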
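`vertices2landmarks` is plain barycentric interpolation: for each landmark it gathers the three vertices of a chosen face and mixes them with the given weights. With weights of 1/3 each on a single hypothetical triangle, the landmark is the centroid:

    import torch
    from smplx.lbs import vertices2landmarks

    vertices = torch.tensor([[[0.0, 0.0, 0.0],
                              [1.0, 0.0, 0.0],
                              [0.0, 1.0, 0.0]]])           # (1, 3, 3)
    faces = torch.tensor([[0, 1, 2]])                      # (1, 3), long
    lmk_faces_idx = torch.tensor([[0]])                    # landmark on face 0
    lmk_bary_coords = torch.full((1, 1, 3), 1.0 / 3.0)

    print(vertices2landmarks(vertices, faces, lmk_faces_idx, lmk_bary_coords))
    # tensor([[[0.3333, 0.3333, 0.0000]]])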
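Finally, `VertexJointSelector` appends selected mesh vertices (nose, ears, toes, fingertips) to the regressed joints so that 2D keypoint sets such as OpenPose can be matched. With the `smplx` table it adds 5 face, 6 feet and 10 fingertip points; the joint count below is illustrative:

    import torch
    from smplx.vertex_ids import vertex_ids
    from smplx.vertex_joint_selector import VertexJointSelector

    selector = VertexJointSelector(vertex_ids['smplx'])
    vertices = torch.rand(2, 10475, 3)  # SMPL-X meshes have 10475 vertices
    joints = torch.rand(2, 55, 3)       # 55 skeleton joints

    print(selector(vertices, joints).shape)  # (2, 76, 3): 55 + 21 extra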