# install
import gradio as gr
import os
import subprocess

if os.getenv('SYSTEM') == 'spaces':
    # subprocess.run('pip install pyembree'.split())
    try:
        import pytorch3d
    except ImportError:
        subprocess.run(
            'pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/py38_cu116_pyt1130/download.html'
            .split()
        )
    subprocess.run("python setup.py build_ext --inplace".split(), cwd="./lib/common/libmesh/")
    subprocess.run("python setup.py build_ext --inplace".split(), cwd="./lib/common/libvoxelize/")

from apps.infer import generate_model, generate_video

# running
title = '''
# Unconstrained & Detailed Clothed Human Digitization (ECON + ControlNet)
### ECON: Explicit Clothed humans Optimized via Normal integration (CVPR 2023, Highlight)
'''

bottom = '''
#### Citation
```
@inproceedings{xiu2023econ,
    title     = {{ECON: Explicit Clothed humans Optimized via Normal integration}},
    author    = {Xiu, Yuliang and Yang, Jinlong and Cao, Xu and Tzionas, Dimitrios and Black, Michael J.},
    booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
    month     = {June},
    year      = {2023},
}
```
#### Acknowledgments:
- [controlnet-openpose](https://huggingface.co/spaces/diffusers/controlnet-openpose)
- [TEXTure](https://huggingface.co/spaces/TEXTurePaper/TEXTure)

#### Image Credits
* [Pinterest](https://www.pinterest.com/search/pins/?q=parkour&rs=sitelinks_searchbox)

#### Related works
* [ICON @ MPI-IS](https://icon.is.tue.mpg.de/)
* [MonoPort @ USC](https://xiuyuliang.cn/monoport)
* [Phorhum @ Google](https://phorhum.github.io/)
* [PIFuHD @ Meta](https://shunsukesaito.github.io/PIFuHD/)
* [PaMIR @ Tsinghua](http://www.liuyebin.com/pamir/pamir.html)

Generate pose & prompt-guided images / Upload photos / Use examples → Submit Image (~3min) → Generate Video (~3min)

ECON is only suitable for humanoid images; it will not work well on cartoon characters with non-human body shapes.

'''

description = '''

  • Colab Notebook
  • Blender Plugin
  • Docker Image
  • Windows Setup

'''

from controlnet_aux import OpenposeDetector
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
from diffusers import UniPCMultistepScheduler
import gradio as gr
import torch
import base64
import sys
from io import BytesIO
from PIL import Image

# live conditioning: load_js pulls in a pose-drawing web component at page load;
# get_js_image reads its JSON payload (the drawn pose) back into Python.
canvas_html = ""
load_js = """
async () => {
    const url = "https://huggingface.co/datasets/radames/gradio-components/raw/main/pose-gradio.js"
    fetch(url)
        .then(res => res.text())
        .then(text => {
            const script = document.createElement('script');
            script.type = "module"
            script.src = URL.createObjectURL(new Blob([text], { type: 'application/javascript' }));
            document.head.appendChild(script);
        });
}
"""
get_js_image = """
async (image_in_img, prompt, image_file_live_opt, live_conditioning) => {
    const canvasEl = document.getElementById("canvas-root");
    const data = canvasEl ? canvasEl._data : null;
    return [image_in_img, prompt, image_file_live_opt, data]
}
"""

# Constants
cached = False

# Models
pose_model = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
controlnet = ControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-openpose", torch_dtype=torch.float16
)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    controlnet=controlnet,
    safety_checker=None,
    torch_dtype=torch.float16,
)
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)

# This call loads the individual model components onto the GPU on demand, so we
# don't need to call pipe.to("cuda") explicitly.
pipe.enable_model_cpu_offload()

# xformers memory-efficient attention
pipe.enable_xformers_memory_efficient_attention()

# Generator seed
generator = torch.manual_seed(0)
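# For reference, a minimal call to the pipeline above looks like the sketch
# below (hypothetical file name; `demo_pose` is an OpenPose skeleton map
# rendered by pose_model, mirroring what generate_images() does further down):
#
#   demo_pose = pose_model(Image.open("person.jpg"))
#   demo_out = pipe("best quality, extremely detailed", demo_pose,
#                   generator=generator, num_inference_steps=50).images[0]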
hint_prompts = '''
Hints:
best quality, extremely detailed, solid color background, super detail, high detail,
edge lighting, soft focus, light and dark contrast, 8k, 3d, c4d, blender, oc renderer,
ultra high definition, 3d rendering
'''


def get_pose(image):
    return pose_model(image)


def read_logs():
    sys.stdout.flush()
    with open("output.log", "r") as f:
        return f.read()


def generate_images(image, prompt, image_file_live_opt='file', live_conditioning=None):
    # Guard against a missing JSON payload from the JS bridge.
    live_conditioning = live_conditioning or {}
    if image is None and 'image' not in live_conditioning:
        raise gr.Error("Please provide an image")
    try:
        if image_file_live_opt == 'file':
            pose = get_pose(image)
        elif image_file_live_opt == 'webcam':
            # The canvas sends a base64-encoded data URL; decode it into a PIL image.
            base64_img = live_conditioning['image']
            image_data = base64.b64decode(base64_img.split(',')[1])
            pose = Image.open(BytesIO(image_data)).convert('RGB').resize((512, 512))
        output = pipe(
            prompt,
            pose,
            generator=generator,
            num_images_per_prompt=3,
            num_inference_steps=50,
        )
        # Return the pose map followed by the generated images, once for the
        # gallery and once for the selection cache.
        all_outputs = [pose]
        for img in output.images:
            all_outputs.append(img)
        return all_outputs, all_outputs
    except Exception as e:
        raise gr.Error(str(e))


def toggle(choice):
    if choice == "file":
        return gr.update(visible=True, value=None), gr.update(visible=False, value=None)
    elif choice == "webcam":
        return gr.update(visible=False, value=None), gr.update(visible=True, value=canvas_html)


examples_pose = 'examples/pose'
examples_cloth = 'examples/cloth'


def show_video():
    return gr.update(visible=True), gr.update(visible=True)


with gr.Blocks() as demo:
    gr.Markdown(title)
    gr.HTML(description)
    gr.Markdown(bottom)
    out_lst = []

    with gr.Row():
        with gr.Column():
            with gr.Row():
                live_conditioning = gr.JSON(value={}, visible=False)
                with gr.Column():
                    image_file_live_opt = gr.Radio(
                        ["file", "webcam"],
                        value="file",
                        label="How would you like to upload your image?",
                    )
                    with gr.Row():
                        image_in_img = gr.Image(visible=True, type="pil", label="Image for Pose")
                        canvas = gr.HTML(None, elem_id="canvas_html", visible=False)
                    image_file_live_opt.change(
                        fn=toggle,
                        inputs=[image_file_live_opt],
                        outputs=[image_in_img, canvas],
                        queue=False,
                    )
            prompt = gr.Textbox(
                label="Enter your prompt to synthesise the image",
                max_lines=10,
                placeholder="best quality, extremely detailed",
            )
            gr.Markdown(hint_prompts)

        with gr.Column():
            gallery = gr.Gallery(label="Generated Images", columns=[2], rows=[2])
            gallery_cache = gr.State()
            gr.Markdown(
                '''
Click the generated image you want to use for reconstruction.
                '''
            )
            inp = gr.Image(type="filepath", label="Input Image for Reconstruction")
            fitting_step = gr.Slider(
                10,
                100,
                step=10,
                label='Fitting steps (slower yet better-aligned SMPL-X)',
                value=50,
            )
            with gr.Row():
                btn_sample = gr.Button("Generate Image")
                btn_submit = gr.Button("Submit Image (~3min)")

            btn_sample.click(
                fn=generate_images,
                inputs=[image_in_img, prompt, image_file_live_opt, live_conditioning],
                outputs=[gallery, gallery_cache],
                js=get_js_image,
            )

            def get_select_index(cache, evt: gr.SelectData):
                return cache[evt.index]

            gallery.select(
                fn=get_select_index,
                inputs=[gallery_cache],
                outputs=[inp],
            )

            with gr.Row():
                gr.Examples(
                    examples=examples_pose,
                    inputs=[inp],
                    cache_examples=cached,
                    fn=generate_model,
                    outputs=out_lst,
                    label="Hard Pose Examples",
                )
                gr.Examples(
                    examples=examples_cloth,
                    inputs=[inp],
                    cache_examples=cached,
                    fn=generate_model,
                    outputs=out_lst,
                    label="Loose Cloth Examples",
                )

        with gr.Column():
            overlap_inp = gr.Image(type="filepath", label="Image Normal Overlap")
            out_final = gr.Model3D(
                clear_color=[0.0, 0.0, 0.0, 0.0], label="Clothed human", elem_id="avatar"
            )
            out_smpl = gr.Model3D(
                clear_color=[0.0, 0.0, 0.0, 0.0], label="SMPL-X body (via PIXIE)", elem_id="avatar"
            )
            vis_tensor_path = gr.State()
            # logs = gr.Textbox(max_lines=10, label="Logs")
            btn_video = gr.Button("Generate Video (~3min)", visible=False)
            out_vid = gr.Video(label="Shared on Twitter with #ECON", visible=False)

    # The order must match the return order of generate_model.
    out_lst = [out_smpl, out_final, overlap_inp, vis_tensor_path]

    btn_video.click(
        fn=generate_video,
        inputs=[vis_tensor_path],
        outputs=[out_vid],
    )
    btn_submit.click(fn=generate_model, inputs=[inp, fitting_step], outputs=out_lst)
    btn_submit.click(fn=show_video, outputs=[btn_video, out_vid])

    # demo.load(read_logs, None, logs, every=1, queue=True, scroll_to_output=True)
    demo.load(None, None, None, js=load_js)

if __name__ == "__main__":
    demo.queue()
    demo.launch(max_threads=4)
    # demo.launch(max_threads=2, debug=True, server_port=8888, server_name="0.0.0.0")
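# Local usage sketch (assumption: SYSTEM is unset outside HF Spaces, so the
# pytorch3d/pip bootstrap at the top is skipped):
#   python app.py   # Gradio serves the demo on http://127.0.0.1:7860 by default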