import os
import tempfile

import gradio as gr
import numpy as np
import spaces
import torch
from diffusers import DiffusionPipeline, EulerAncestralDiscreteScheduler
from einops import rearrange
from huggingface_hub import hf_hub_download
from omegaconf import OmegaConf
from PIL import Image
from pytorch_lightning import seed_everything
from torchvision.transforms import v2

from launch.utils import find_cuda
from src.utils.camera_util import (FOV_to_intrinsics, get_circular_camera_poses,
                                   get_zero123plus_input_cameras)
from src.utils.mesh_util import save_glb, save_obj
from src.utils.train_util import instantiate_from_config

# Configuration
cuda_path = find_cuda()
config_path = 'configs/instant-mesh-large.yaml'
config = OmegaConf.load(config_path)
config_name = os.path.basename(config_path).replace('.yaml', '')
model_config = config.model_config
infer_config = config.infer_config
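# Configs named 'instant-mesh-*' use FlexiCubes-based geometry extraction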
IS_FLEXICUBES = config_name.startswith('instant-mesh')
device = torch.device('cuda')

# Load diffusion model
print('Loading diffusion model ...')
pipeline = DiffusionPipeline.from_pretrained(
"sudo-ai/zero123plus-v1.2",
custom_pipeline="zero123plus",
torch_dtype=torch.float16,
)
pipeline.scheduler = EulerAncestralDiscreteScheduler.from_config(
pipeline.scheduler.config, timestep_spacing='trailing'
)
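# Swap in the InstantMesh-finetuned UNet weights for the Zero123++ pipeline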
unet_ckpt_path = hf_hub_download(
repo_id="TencentARC/InstantMesh", filename="diffusion_pytorch_model.bin", repo_type="model")
state_dict = torch.load(unet_ckpt_path, map_location='cpu')
pipeline.unet.load_state_dict(state_dict, strict=True)
pipeline = pipeline.to(device)

# Load reconstruction model
print('Loading reconstruction model ...')
model_ckpt_path = hf_hub_download(
repo_id="TencentARC/InstantMesh", filename="instant_mesh_large.ckpt", repo_type="model")
model = instantiate_from_config(model_config)
state_dict = torch.load(model_ckpt_path, map_location='cpu')['state_dict']
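# Keep only the LRM generator weights: strip the 'lrm_generator.' prefix and drop source-camera entries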
state_dict = {k[14:]: v for k, v in state_dict.items() if k.startswith(
'lrm_generator.') and 'source_camera' not in k}
model.load_state_dict(state_dict, strict=True)
model = model.to(device)


def get_render_cameras(batch_size=1, M=120, radius=2.5, elevation=10.0, is_flexicubes=False):
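    """Build a batch of cameras along a circular orbit around the object.

    FlexiCubes models take world-to-camera matrices; otherwise flattened
    extrinsics and intrinsics are concatenated per view.
    """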
c2ws = get_circular_camera_poses(M=M, radius=radius, elevation=elevation)
if is_flexicubes:
cameras = torch.linalg.inv(c2ws)
cameras = cameras.unsqueeze(0).repeat(batch_size, 1, 1, 1)
else:
extrinsics = c2ws.flatten(-2)
intrinsics = FOV_to_intrinsics(50.0).unsqueeze(
0).repeat(M, 1, 1).float().flatten(-2)
cameras = torch.cat([extrinsics, intrinsics], dim=-1)
cameras = cameras.unsqueeze(0).repeat(batch_size, 1, 1)
return cameras


@spaces.GPU
def generate_mvs(input_image):
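    """Generate six novel views of the input image with Zero123++.

    Returns the raw tiled output image and a copy reordered for display.
    """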
sample_seed = np.random.randint(0, 1000000)
seed_everything(sample_seed)
sample_steps = 75
z123_image = pipeline(
input_image, num_inference_steps=sample_steps).images[0]
show_image = np.asarray(z123_image, dtype=np.uint8)
show_image = torch.from_numpy(show_image)
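    # Reorder the 3x2 grid of generated views into a 2x3 grid for display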
show_image = rearrange(
show_image, '(n h) (m w) c -> (n m) h w c', n=3, m=2)
show_image = rearrange(
show_image, '(n m) h w c -> (n h) (m w) c', n=2, m=3)
show_image = Image.fromarray(show_image.numpy())
return z123_image, show_image


@spaces.GPU
def make3d(images):
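    """Reconstruct a mesh from the Zero123++ multi-view image and export it as OBJ and GLB."""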
global model
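    # FlexiCubes geometry must be initialized on the target device before mesh extraction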
if IS_FLEXICUBES:
model.init_flexicubes_geometry(device, use_renderer=False)
model = model.eval()
images = np.asarray(images, dtype=np.float32) / 255.0
images = torch.from_numpy(images).permute(2, 0, 1).contiguous().float()
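    # Split the 3x2 tiled image into six individual views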
images = rearrange(images, 'c (n h) (m w) -> (n m) c h w', n=3, m=2)
input_cameras = get_zero123plus_input_cameras(
batch_size=1, radius=4.0).to(device)
render_cameras = get_render_cameras(
batch_size=1, radius=2.5, is_flexicubes=IS_FLEXICUBES).to(device)
images = images.unsqueeze(0).to(device)
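    # Resize each view to the 320x320 resolution consumed by the reconstruction model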
images = v2.functional.resize(
images, (320, 320), interpolation=3, antialias=True).clamp(0, 1)
    mesh_fpath = tempfile.NamedTemporaryFile(suffix=".obj", delete=False).name
print(mesh_fpath)
mesh_basename = os.path.basename(mesh_fpath).split('.')[0]
mesh_dirname = os.path.dirname(mesh_fpath)
mesh_glb_fpath = os.path.join(mesh_dirname, f"{mesh_basename}.glb")
with torch.no_grad():
planes = model.forward_planes(images, input_cameras)
mesh_out = model.extract_mesh(
planes, use_texture_map=False, **infer_config)
vertices, faces, vertex_colors = mesh_out
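        # Reorder the vertex coordinate axes before export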
vertices = vertices[:, [1, 2, 0]]
save_glb(vertices, faces, vertex_colors, mesh_glb_fpath)
save_obj(vertices, faces, vertex_colors, mesh_fpath)
print(f"Mesh saved to {mesh_fpath}")
return mesh_fpath, mesh_glb_fpath


def model_generation_ui(processed_image):
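    """Build the Gradio UI that generates multi-views and then reconstructs the 3D model."""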
with gr.Column():
with gr.Row():
submit_mesh = gr.Button(
"Generate 3D Model", elem_id="generate", variant="primary")
with gr.Row():
with gr.Column():
mv_show_images = gr.Image(
label="Generated Multi-views",
type="pil",
interactive=False
)
with gr.Column():
with gr.Tab("OBJ"):
output_model_obj = gr.Model3D(
label="Output Model (OBJ Format)",
interactive=False,
)
with gr.Tab("GLB"):
output_model_glb = gr.Model3D(
label="Output Model (GLB Format)",
interactive=False,
)
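    # Holds the raw Zero123++ output image passed from generate_mvs to make3d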
mv_images = gr.State()
submit_mesh.click(fn=generate_mvs, inputs=[processed_image], outputs=[mv_images, mv_show_images]).success(
fn=make3d, inputs=[mv_images], outputs=[
output_model_obj, output_model_glb]
)
return output_model_obj, output_model_glb