import os

# Pin CUDA device enumeration to the PCI bus order and expose only the first GPU.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# Install CUDA 11.3 builds of torch/torchvision at startup; failures are only logged.
try:
    os.system("pip install --upgrade torch==1.11.0+cu113 torchvision==0.12.0+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html")
except Exception as e:
    print(e)

from huggingface_hub import hf_hub_download
import gradio as gr
from datetime import datetime
from PIL import Image
import torch
import torchvision
import skimage
import paddlehub
import numpy as np
from lib.options import BaseOptions
from apps.crop_img import process_img
from apps.eval import Evaluator
from types import SimpleNamespace
import trimesh
import glob

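# Log library versions to make debugging the runtime environment easier.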
print(
    "torch: ", torch.__version__,
    "\ntorchvision: ", torchvision.__version__,
    "\nskimage:", skimage.__version__
)

print("Env", os.environ)

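# Download the pretrained PIFu checkpoints from the Hugging Face Hub:
# net_G (geometry network) and net_C (color network).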
net_C = hf_hub_download("radames/PIFu-upright-standing", filename="net_C")
net_G = hf_hub_download("radames/PIFu-upright-standing", filename="net_G")

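# Build the options expected by the PIFu Evaluator: single-image batches, the MLP layer
# sizes of the pretrained networks, the reconstruction resolution, and the paths to the
# downloaded checkpoints.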
opt = BaseOptions()
opts = opt.parse_to_dict()
opts['batch_size'] = 1
opts['mlp_dim'] = [257, 1024, 512, 256, 128, 1]
opts['mlp_dim_color'] = [513, 1024, 512, 256, 128, 3]
opts['num_stack'] = 4
opts['num_hourglass'] = 2
opts['resolution'] = 128
opts['hg_down'] = 'ave_pool'
opts['norm'] = 'group'
opts['norm_color'] = 'group'
opts['load_netG_checkpoint_path'] = net_G
opts['load_netC_checkpoint_path'] = net_C
opts['results_path'] = "./results"
opts['name'] = "spaces_demo"
opts = SimpleNamespace(**opts)
print("Params", opts)

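# Instantiate the PIFu evaluator and the PaddleHub U2Net model used for background removal.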
evaluator = Evaluator(opts)
bg_remover_model = paddlehub.Module(name="U2Net")


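# Full pipeline for one input image: resize, remove the background with U2Net,
# crop/align the image and mask, run PIFu reconstruction, and export the mesh as GLB.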
def process(img_path):
    base = os.path.basename(img_path)
    img_name = os.path.splitext(base)[0]
    print("\n\n\nStarting Process", datetime.now())
    print("image name", img_name)
    img_raw = Image.open(img_path).convert('RGB')

    # Resize to a width of 512 px while preserving the aspect ratio.
    img = img_raw.resize(
        (512, int(512 * img_raw.size[1] / img_raw.size[0])),
        Image.Resampling.LANCZOS)

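    # Segment the person from the background with U2Net; keep the mask and the
    # foreground composite returned by the model.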
    try:
        print("Removing Background")
        masks = bg_remover_model.Segmentation(
            images=[np.array(img)],
            paths=None,
            batch_size=1,
            input_size=320,
            output_dir='./PIFu/inputs',
            visualization=False)
        mask = masks[0]["mask"]
        front = masks[0]["front"]
    except Exception as e:
        print(e)

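    # Align the foreground image and mask with the input format used during PIFu training
    # (see apps.crop_img.process_img).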
    print("Aligning mask with input training image")
    print("Not aligned", front.shape, mask.shape)
    img_new, msk_new = process_img(front, mask)
    print("Aligned", img_new.shape, msk_new.shape)

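    # Run PIFu inference; the evaluator writes the reconstructed mesh as an OBJ file
    # under results_path/name.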
    try:
        time = datetime.now()
        data = evaluator.load_image_from_memory(img_new, msk_new, img_name)
        print("Evaluating via PIFu", time)
        evaluator.eval(data, True)
        print("Success Evaluating via PIFu", datetime.now() - time)
        result_path = f'./{opts.results_path}/{opts.name}/result_{img_name}'
    except Exception as e:
        print("Error evaluating via PIFu", e)

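    # Rotate the mesh 180 degrees about the Y axis (negate X and Z), then export it as
    # GLB for the Gradio Model3D viewer and the file download output.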
    try:
        mesh = trimesh.load(result_path + '.obj')
        mesh.apply_transform([[-1, 0, 0, 0],
                              [0, 1, 0, 0],
                              [0, 0, -1, 0],
                              [0, 0, 0, 1]])
        mesh.export(file_obj=result_path + '.glb')
        result_gltf = result_path + '.glb'
        return [result_gltf, result_gltf]
    except Exception as e:
        print("error generating MESH", e)


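# Example images bundled in the examples/ folder and the Markdown description for the demo page.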
examples = sorted(glob.glob('examples/*.png'))
description = '''
# PIFu Clothed Human Digitization
### PIFu: Pixel-Aligned Implicit Function for High-Resolution Clothed Human Digitization
<base target="_blank">

This is a demo of the <a href="https://github.com/shunsukesaito/PIFu" target="_blank">PIFu model</a>.
The pre-trained model comes with the following warning:
> Warning: The released model is trained with mostly upright standing scans with weak perspective projection and a pitch angle of 0 degrees. Reconstruction quality may degrade for images that deviate significantly from the training data.

**Inference takes about 180 seconds for a new image.**

<details>
<summary>More</summary>

#### Image Credits

* Julien and Clem
* [StyleGAN Humans](https://huggingface.co/spaces/hysts/StyleGAN-Human)
* [Renderpeople: Dennis](https://renderpeople.com)

#### More

* https://phorhum.github.io/
* https://github.com/yuliangxiu/icon
* https://shunsukesaito.github.io/PIFuHD/

</details>
'''

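# Gradio interface: one input image, two outputs (an interactive 3D viewer and a
# downloadable GLB file). Example outputs are cached at startup.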
iface = gr.Interface(
    fn=process,
    description=description,
    inputs=gr.Image(type="filepath", label="Input Image"),
    outputs=[
        gr.Model3D(
            clear_color=[0.0, 0.0, 0.0, 0.0], label="3D Model"),
        gr.File(label="Download 3D Model")
    ],
    examples=examples,
    allow_flagging="never",
    cache_examples=True
)

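# Launch the demo with queueing disabled.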
if __name__ == "__main__":
    iface.launch(debug=True, enable_queue=False)
|