import logging
import os
import tempfile
import time
import gradio as gr
import numpy as np
import rembg
import torch
from PIL import Image
from functools import partial
from tsr.system import TSR
from tsr.utils import remove_background, resize_foreground, to_gradio_3d_orientation
#HF_TOKEN = os.getenv("HF_TOKEN")
HEADER = """
**TripoSR** is a state-of-the-art open-source model for **fast** feedforward 3D reconstruction from a single image, developed in collaboration between [Tripo AI](https://www.tripo3d.ai/) and [Stability AI](https://stability.ai/).
**Tips:**
1. If the result is unsatisfactory, try changing the foreground ratio; it may improve the result.
2. Disable the "Remove Background" option only if your input image is RGBA with a transparent background, the content is centered, and it occupies more than 70% of the image width or height.
"""
if torch.cuda.is_available():
    device = "cuda:0"
else:
    device = "cpu"
d = os.environ.get("DEVICE", None)
if d is not None:
    device = d
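# Load TripoSR from the Hugging Face Hub. A larger renderer chunk size generally
# trades memory for speed during rendering and mesh extraction.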
model = TSR.from_pretrained(
    "stabilityai/TripoSR",
    config_name="config.yaml",
    weight_name="model.ckpt",
    # token=HF_TOKEN
)
model.renderer.set_chunk_size(131072)
model.to(device)
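# Create one rembg session up front so background removal does not have to
# reload its model on every request.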
rembg_session = rembg.new_session()
def check_input_image(input_image):
    if input_image is None:
        raise gr.Error("No image uploaded!")
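# Preprocessing: optionally remove the background, rescale the foreground to the
# requested ratio, and composite RGBA inputs onto a neutral 50% grey background.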
def preprocess(input_image, do_remove_background, foreground_ratio):
    def fill_background(image):
        image = np.array(image).astype(np.float32) / 255.0
        image = image[:, :, :3] * image[:, :, 3:4] + (1 - image[:, :, 3:4]) * 0.5
        image = Image.fromarray((image * 255.0).astype(np.uint8))
        return image

    if do_remove_background:
        image = input_image.convert("RGB")
        image = remove_background(image, rembg_session)
        image = resize_foreground(image, foreground_ratio)
        image = fill_background(image)
    else:
        image = input_image
        if image.mode == "RGBA":
            image = fill_background(image)
    return image
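# Inference: encode the image into scene codes, extract a mesh, rotate it into
# Gradio's 3D viewer orientation, and export it as both OBJ and GLB.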
def generate(image):
    scene_codes = model(image, device=device)
    mesh = model.extract_mesh(scene_codes)[0]
    mesh = to_gradio_3d_orientation(mesh)
    mesh_path = tempfile.NamedTemporaryFile(suffix=".obj", delete=False)
    mesh_path2 = tempfile.NamedTemporaryFile(suffix=".glb", delete=False)
    mesh.export(mesh_path.name)
    mesh.export(mesh_path2.name)
    return mesh_path.name, mesh_path2.name
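# Pipeline used by gr.Examples below: preprocess without background removal
# (foreground ratio 0.9), then generate both mesh formats.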
def run_example(image_pil):
    preprocessed = preprocess(image_pil, False, 0.9)
    mesh_name, mesh_name2 = generate(preprocessed)
    return preprocessed, mesh_name, mesh_name2
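# UI layout: input image and controls on the left, OBJ/GLB viewers on the right,
# and an Examples gallery underneath; the Generate button chains
# check_input_image -> preprocess -> generate.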
with gr.Blocks() as demo:
    gr.Markdown(HEADER)
    with gr.Row(variant="panel"):
        with gr.Column():
            with gr.Row():
                input_image = gr.Image(
                    label="Input Image",
                    image_mode="RGBA",
                    sources="upload",
                    type="pil",
                    elem_id="content_image",
                )
                processed_image = gr.Image(label="Processed Image", interactive=False)
            with gr.Row():
                with gr.Group():
                    do_remove_background = gr.Checkbox(
                        label="Remove Background", value=True
                    )
                    foreground_ratio = gr.Slider(
                        label="Foreground Ratio",
                        minimum=0.5,
                        maximum=1.0,
                        value=0.85,
                        step=0.05,
                    )
            with gr.Row():
                submit = gr.Button("Generate", elem_id="generate", variant="primary")
        with gr.Column():
            with gr.Tab("obj"):
                output_model = gr.Model3D(
                    label="Output Model",
                    interactive=False,
                )
            with gr.Tab("glb"):
                output_model2 = gr.Model3D(
                    label="Output Model",
                    interactive=False,
                )
    with gr.Row(variant="panel"):
        gr.Examples(
            examples=[
                os.path.join("examples", img_name)
                for img_name in sorted(os.listdir("examples"))
            ],
            inputs=[input_image],
            outputs=[processed_image, output_model, output_model2],
            # cache_examples=True,
            fn=partial(run_example),
            label="Examples",
            examples_per_page=20,
        )
    submit.click(fn=check_input_image, inputs=[input_image]).success(
        fn=preprocess,
        inputs=[input_image, do_remove_background, foreground_ratio],
        outputs=[processed_image],
    ).success(
        fn=generate,
        inputs=[processed_image],
        outputs=[output_model, output_model2],
    )
demo.queue(max_size=10)
demo.launch()