Spaces: Running on L40S
Upload app_hg.py with huggingface_hub
app_hg.py CHANGED
@@ -21,7 +21,7 @@
 # optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
 # fine-tuning enabling code and other elements of the foregoing made publicly available
 # by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
-
+
 import os
 import warnings
 import argparse
@@ -33,56 +33,35 @@ import numpy as np
 from PIL import Image
 from einops import rearrange
 import pandas as pd
-from huggingface_hub import snapshot_download
 
 import sys
+import spaces
 import subprocess
-from
+from huggingface_hub import snapshot_download
 
-
-#
-
-
-
-
-
-
-
-
-
-
-
-#
-
+def install_cuda_toolkit():
+    # CUDA_TOOLKIT_URL = "https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_520.61.05_linux.run"
+    CUDA_TOOLKIT_URL = "https://developer.download.nvidia.com/compute/cuda/12.2.0/local_installers/cuda_12.2.0_535.54.03_linux.run"
+    CUDA_TOOLKIT_FILE = "/tmp/%s" % os.path.basename(CUDA_TOOLKIT_URL)
+    subprocess.call(["wget", "-q", CUDA_TOOLKIT_URL, "-O", CUDA_TOOLKIT_FILE])
+    subprocess.call(["chmod", "+x", CUDA_TOOLKIT_FILE])
+    subprocess.call([CUDA_TOOLKIT_FILE, "--silent", "--toolkit"])
+
+    os.environ["CUDA_HOME"] = "/usr/local/cuda"
+    os.environ["PATH"] = "%s/bin:%s" % (os.environ["CUDA_HOME"], os.environ["PATH"])
+    os.environ["LD_LIBRARY_PATH"] = "%s/lib:%s" % (
+        os.environ["CUDA_HOME"],
+        "" if "LD_LIBRARY_PATH" not in os.environ else os.environ["LD_LIBRARY_PATH"],
+    )
+    # Fix: arch_list[-1] += '+PTX'; IndexError: list index out of range
+    os.environ["TORCH_CUDA_ARCH_LIST"] = "8.0;8.6"
 
-# install_cuda_toolkit()
-
 def install_requirements():
-    # Install the packages listed in requirements.txt
     subprocess.check_call([sys.executable, "-m", "pip", "install", "git+https://github.com/NVlabs/nvdiffrast"])
     subprocess.check_call([sys.executable, "-m", "pip", "install", "git+https://github.com/facebookresearch/pytorch3d@stable"])
-
-install_requirements()
 
-
-
-from infer import seed_everything, save_gif
-from infer import Text2Image, Removebg, Image2Views, Views2Mesh, GifRenderer
-from third_party.check import check_bake_available
-
-warnings.simplefilter('ignore', category=UserWarning)
-warnings.simplefilter('ignore', category=FutureWarning)
-warnings.simplefilter('ignore', category=DeprecationWarning)
-
-parser = argparse.ArgumentParser()
-parser.add_argument("--use_lite", default=False, action="store_true")
-parser.add_argument("--mv23d_cfg_path", default="./svrm/configs/svrm.yaml", type=str)
-parser.add_argument("--mv23d_ckt_path", default="weights/svrm/svrm.safetensors", type=str)
-parser.add_argument("--text2image_path", default="weights/hunyuanDiT", type=str)
-parser.add_argument("--save_memory", default=False)
-parser.add_argument("--device", default="cuda:0", type=str)
-args = parser.parse_args()
+# install_cuda_toolkit()
+install_requirements()
 
 def download_models():
     os.makedirs("weights", exist_ok=True)
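Note: the snippet below is an illustrative sketch, not part of the commit. It shows one way the startup pip installs in the hunk above could be made idempotent so that a Space restart skips work that is already done; the helper name `ensure_package` is hypothetical.

# Sketch only: install a pip spec just once, guarded by an import check.
import importlib.util
import subprocess
import sys

def ensure_package(module_name, pip_spec):
    """Install pip_spec only if module_name cannot already be imported."""
    if importlib.util.find_spec(module_name) is None:
        subprocess.check_call([sys.executable, "-m", "pip", "install", pip_spec])

# Example usage mirroring the requirements installed in app_hg.py:
# ensure_package("nvdiffrast", "git+https://github.com/NVlabs/nvdiffrast")
# ensure_package("pytorch3d", "git+https://github.com/facebookresearch/pytorch3d@stable")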
@@ -116,7 +95,10 @@ def download_models():
     except Exception as e:
         print(f"Error downloading DUSt3R: {e}")
 
-
+
+from infer import seed_everything, save_gif
+from infer import Text2Image, Removebg, Image2Views, Views2Mesh, GifRenderer
+from third_party.check import check_bake_available
 
 try:
     from third_party.mesh_baker import MeshBaker
@@ -124,9 +106,23 @@ try:
     BAKE_AVAILEBLE = True
 except Exception as err:
     print(err)
-    print("import baking related
+    print("import baking related fail, run without baking")
     BAKE_AVAILEBLE = False
 
+warnings.simplefilter('ignore', category=UserWarning)
+warnings.simplefilter('ignore', category=FutureWarning)
+warnings.simplefilter('ignore', category=DeprecationWarning)
+
+parser = argparse.ArgumentParser()
+parser.add_argument("--use_lite", default=False, action="store_true")
+parser.add_argument("--mv23d_cfg_path", default="./svrm/configs/svrm.yaml", type=str)
+parser.add_argument("--mv23d_ckt_path", default="weights/svrm/svrm.safetensors", type=str)
+parser.add_argument("--text2image_path", default="weights/hunyuanDiT", type=str)
+parser.add_argument("--save_memory", default=False)
+parser.add_argument("--device", default="cuda:0", type=str)
+args = parser.parse_args()
+
+download_models() ### download weights !!!!
 
 ################################################################
 # initial setting
@@ -170,13 +166,13 @@ example_ts = get_example_txt_list()
 # initial models
 ################################################################
 
+worker_xbg = Removebg()
 print(f"loading {args.text2image_path}")
 worker_t2i = Text2Image(
     pretrain = args.text2image_path,
     device = args.device,
     save_memory = args.save_memory
 )
-worker_xbg = Removebg()
 worker_i2v = Image2Views(
     use_lite = args.use_lite,
     device = args.device,
@@ -189,38 +185,38 @@ worker_v23 = Views2Mesh(
     device = args.device,
     save_memory = args.save_memory
 )
-worker_gif = GifRenderer(
-
+worker_gif = GifRenderer(args.device)
 
 if BAKE_AVAILEBLE:
-    worker_baker = MeshBaker(
+    worker_baker = MeshBaker()
 
 
 ### functional modules
-
-def
+
+def gen_save_folder(max_size=30):
     os.makedirs('./outputs/app_output', exist_ok=True)
     exists = set(int(_) for _ in os.listdir('./outputs/app_output') if not _.startswith("."))
-    if len(exists) ==
-
-
-
+    if len(exists) == max_size:
+        shutil.rmtree(f"./outputs/app_output/0")
+        cur_id = 0
+    else:
+        cur_id = min(set(range(max_size)) - exists)
+        if os.path.exists(f"./outputs/app_output/{(cur_id + 1) % max_size}"):
+            shutil.rmtree(f"./outputs/app_output/{(cur_id + 1) % max_size}")
     save_folder = f'./outputs/app_output/{cur_id}'
     os.makedirs(save_folder, exist_ok=True)
+    print(f"mkdir {save_folder} suceess !!!")
+    return save_folder
 
+@space.GPU(duration=120)
+def stage_0_t2i(text, seed, step, save_folder):
     dst = save_folder + '/img.png'
-
-    if not text:
-        if image is None:
-            return dst, save_folder
-        raise gr.Error("Upload image or provide text ...")
-        image.save(dst)
-        return dst, save_folder
-
     image = worker_t2i(text, seed, step)
     image.save(dst)
-
-
+    img_nobg = worker_xbg(image, force=True)
+    dst = save_folder + '/img_nobg.png'
+    img_nobg.save(dst)
+    return dst
 
 def stage_1_xbg(image, save_folder, force_remove):
     if isinstance(image, str):
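Note: the decorator introduced above is the Hugging Face ZeroGPU entry point. The commit imports the package as `spaces` while the decorator lines are written `@space.GPU`; the sketch below (not part of the commit) shows the typical usage of the `spaces` package's GPU decorator, with a placeholder function body.

# Illustrative sketch of ZeroGPU usage, assuming the standard `spaces` package API.
import spaces
import torch

@spaces.GPU(duration=120)  # request a GPU for up to 120 s while this function runs
def generate(prompt: str):
    device = "cuda" if torch.cuda.is_available() else "cpu"
    # ... run the text-to-image / mesh pipeline on `device` ...
    return prompt, device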
@@ -230,7 +226,7 @@ def stage_1_xbg(image, save_folder, force_remove):
     rgba.save(dst)
     return dst
 
-@
+@space.GPU
 def stage_2_i2v(image, seed, step, save_folder):
     if isinstance(image, str):
         image = Image.open(image)
@@ -245,7 +241,7 @@ def stage_2_i2v(image, seed, step, save_folder):
     show_img = Image.fromarray(show_img)
     return views_img, cond_img, show_img
 
-@
+@space.GPU
 def stage_3_v23(
     views_pil,
     cond_pil,
@@ -268,34 +264,32 @@ def stage_3_v23(
     obj_dst = save_folder + '/mesh_vertex_colors.obj' # gradio just only can show vertex shading
     return obj_dst, glb_dst
 
-@
-def stage_3p_baking(save_folder, color, bake):
+@space.GPU
+def stage_3p_baking(save_folder, color, bake, force, front, others, align_times):
     if color == "texture" and bake:
-        obj_dst = worker_baker(save_folder)
+        obj_dst = worker_baker(save_folder, force, front, others, align_times)
         glb_dst = obj_dst.replace(".obj", ".glb")
         return glb_dst
     else:
         return None
-
-@
+
+@space.GPU
 def stage_4_gif(save_folder, color, bake, render):
     if not render: return None
-
-
-
-        obj_dst = save_folder + '/view_0/bake/mesh.obj'
-    elif os.path.exists(save_folder + '/mesh.obj'):
-        obj_dst = save_folder + '/mesh.obj'
-    else:
-        print(save_folder)
-        raise FileNotFoundError("mesh obj file not found")
+    baked_fld_list = sorted(glob(save_folder + '/view_*/bake/mesh.obj'))
+    obj_dst = baked_fld_list[-1] if len(baked_fld_list)>=1 else save_folder+'/mesh.obj'
+    assert os.path.exists(obj_dst), f"{obj_dst} file not found"
     gif_dst = obj_dst.replace(".obj", ".gif")
     worker_gif(obj_dst, gif_dst_path=gif_dst)
     return gif_dst
 
 
 def check_image_available(image):
-    if image
+    if image is None:
+        return "Please upload image", gr.update()
+    elif not hasattr(image, 'mode'):
+        return "Not support, please upload other image", gr.update()
+    elif image.mode == "RGBA":
         data = np.array(image)
         alpha_channel = data[:, :, 3]
         unique_alpha_values = np.unique(alpha_channel)
@@ -311,11 +305,29 @@ def check_image_available(image):
     else:
         raise Exception("Image Error")
 
-
-
-
-
-
+
+def update_mode(mode):
+    color_change = {
+        'Quick': gr.update(value='vertex'),
+        'Moderate': gr.update(value='texture'),
+        'Appearance': gr.update(value='texture')
+    }[mode]
+    bake_change = {
+        'Quick': gr.update(value=False, interactive=False, visible=False),
+        'Moderate': gr.update(value=False),
+        'Appearance': gr.update(value=BAKE_AVAILEBLE)
+    }[mode]
+    face_change = {
+        'Quick': gr.update(value=120000, maximum=300000),
+        'Moderate': gr.update(value=60000, maximum=300000),
+        'Appearance': gr.update(value=10000, maximum=60000)
+    }[mode]
+    render_change = {
+        'Quick': gr.update(value=False, interactive=False, visible=False),
+        'Moderate': gr.update(value=True),
+        'Appearance': gr.update(value=True)
+    }[mode]
+    return color_change, bake_change, face_change, render_change
 
 # ===============================================================
 # gradio display
@@ -335,31 +347,113 @@ with gr.Blocks() as demo:
         with gr.Column():
             text = gr.TextArea('一只黑白相间的熊猫在白色背景上居中坐着,呈现出卡通风格和可爱氛围。',
                 lines=3, max_lines=20, label='Input text')
-            with gr.Row():
-                textgen_color = gr.Radio(choices=["vertex", "texture"], label="Color", value="vertex")
-            with gr.Row():
-                textgen_render = gr.Checkbox(label="Do Rendering", value=True, interactive=True)
-                if BAKE_AVAILEBLE:
-                    textgen_bake = gr.Checkbox(label="Do Baking", value=False, interactive=True)
-                else:
-                    textgen_bake = gr.Checkbox(label="Do Baking", value=False, interactive=False)
-
-            textgen_color.change(fn=update_bake_render, inputs=textgen_color, outputs=[textgen_bake, textgen_render])
 
-
-
-
-
-
-
-
-
+            textgen_mode = gr.Radio(
+                choices=['Quick', 'Moderate', 'Appearance'],
+                label="Simple settings",
+                value='Appearance',
+                interactive=True
+            )
+
+            with gr.Accordion("Custom settings", open=False):
+                textgen_color = gr.Radio(choices=["vertex", "texture"], label="Color", value="texture")
+
+                with gr.Row():
+                    textgen_render = gr.Checkbox(
+                        label="Do Rendering",
+                        value=True,
+                        interactive=True
+                    )
+                    textgen_bake = gr.Checkbox(
+                        label="Do Baking",
+                        value=True if BAKE_AVAILEBLE else False,
+                        interactive=True if BAKE_AVAILEBLE else False
+                    )
+
+                with gr.Row():
+                    textgen_seed = gr.Number(value=0, label="T2I seed", precision=0, interactive=True)
+                    textgen_SEED = gr.Number(value=0, label="Gen seed", precision=0, interactive=True)
+
+                textgen_step = gr.Slider(
+                    value=25,
+                    minimum=15,
+                    maximum=50,
+                    step=1,
+                    label="T2I steps",
+                    interactive=True
+                )
+                textgen_STEP = gr.Slider(
+                    value=50,
+                    minimum=20,
+                    maximum=80,
+                    step=1,
+                    label="Gen steps",
+                    interactive=True
+                )
+                textgen_max_faces =gr.Slider(
+                    value=10000,
+                    minimum=2000,
+                    maximum=60000,
+                    step=1000,
+                    label="Face number limit",
+                    interactive=True
+                )
+
+                with gr.Accordion("Baking Options", open=False):
+                    textgen_force_bake = gr.Checkbox(
+                        label="Force (Ignore the degree of matching)",
+                        value=False,
+                        interactive=True
+                    )
+                    textgen_front_baking = gr.Radio(
+                        choices=['input image', 'multi-view front view', 'auto'],
+                        label="Front view baking",
+                        value='auto',
+                        interactive=True,
+                        visible=True
+                    )
+                    textgen_other_views = gr.CheckboxGroup(
+                        choices=['60°', '120°', '180°', '240°', '300°'],
+                        label="Other views Baking",
+                        value=['180°'],
+                        interactive=True,
+                        visible=True
+                    )
+                    textgen_align_times =gr.Slider(
+                        value=3,
+                        minimum=1,
+                        maximum=5,
+                        step=1,
+                        label="Number of alignment attempts per view",
+                        interactive=True
+                    )
+
             with gr.Row():
                 textgen_submit = gr.Button("Generate", variant="primary")
 
             with gr.Row():
                 gr.Examples(examples=example_ts, inputs=[text], label="Text examples", examples_per_page=10)
+
+
+            textgen_mode.change(
+                fn=update_mode,
+                inputs=textgen_mode,
+                outputs=[textgen_color, textgen_bake, textgen_max_faces, textgen_render]
+            )
+            textgen_color.change(
+                fn=lambda x:[
+                    gr.update(value=(x=='texture'), interactive=(x=='texture'), visible=(x=='texture')),
+                    gr.update(value=(x=='texture'), interactive=(x=='texture'), visible=(x=='texture')),
+                ],
+                inputs=textgen_color,
+                outputs=[textgen_bake, textgen_render]
+            )
+            textgen_bake.change(
+                fn= lambda x:[gr.update(visible=x)]*4+[gr.update(value=10000, minimum=2000, maximum=60000 if x else 300000)],
+                inputs=textgen_bake,
+                outputs=[textgen_front_baking, textgen_other_views, textgen_align_times, textgen_force_bake, textgen_max_faces]
+            )
+
 
         ### Image iutput region
 
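Note: the hunk above drives several controls from one Radio through gr.update, via the new update_mode function and .change listeners. The snippet below is a self-contained sketch of that pattern, not part of the commit; the component and function names in it are made up for illustration.

# Illustrative sketch: one Radio updates two other components via gr.update.
import gradio as gr

def apply_mode(mode):
    # Return one gr.update per output component, in the same order as `outputs=` below.
    bake = gr.update(value=(mode == "Appearance"), interactive=(mode == "Appearance"))
    faces = gr.update(value=10000 if mode == "Appearance" else 120000)
    return bake, faces

with gr.Blocks() as demo_sketch:
    mode = gr.Radio(choices=["Quick", "Moderate", "Appearance"], value="Appearance", label="Mode")
    do_bake = gr.Checkbox(label="Do Baking", value=True)
    max_faces = gr.Slider(2000, 300000, value=10000, step=1000, label="Face number limit")
    mode.change(fn=apply_mode, inputs=mode, outputs=[do_bake, max_faces])

# demo_sketch.launch()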
@@ -369,31 +463,112 @@ with gr.Blocks() as demo:
                 image_mode="RGBA", sources="upload", interactive=True)
             with gr.Row():
                 alert_message = gr.Markdown("") # for warning
-            with gr.Row():
-                imggen_color = gr.Radio(choices=["vertex", "texture"], label="Color", value="texture")
-            with gr.Row():
-                imggen_removebg = gr.Checkbox(label="Remove Background", value=True, interactive=True)
-                imggen_render = gr.Checkbox(label="Do Rendering", value=True, interactive=True)
-                if BAKE_AVAILEBLE:
-                    imggen_bake = gr.Checkbox(label="Do Baking", value=False, interactive=True)
-                else:
-                    imggen_bake = gr.Checkbox(label="Do Baking", value=False, interactive=False)
 
-
-
-
-
+            imggen_mode = gr.Radio(
+                choices=['Quick', 'Moderate', 'Appearance'],
+                label="Simple settings",
+                value='Appearance',
+                interactive=True
+            )
+
+            with gr.Accordion("Custom settings", open=False):
+                imggen_color = gr.Radio(choices=["vertex", "texture"], label="Color", value="texture")
+
+                with gr.Row():
+                    imggen_removebg = gr.Checkbox(
+                        label="Remove Background",
+                        value=True,
+                        interactive=True
+                    )
+                    imggen_render = gr.Checkbox(
+                        label="Do Rendering",
+                        value=True,
+                        interactive=True
+                    )
+                    imggen_bake = gr.Checkbox(
+                        label="Do Baking",
+                        value=True if BAKE_AVAILEBLE else False,
+                        interactive=True if BAKE_AVAILEBLE else False
+                    )
                 imggen_SEED = gr.Number(value=0, label="Gen seed", precision=0, interactive=True)
-
-
-
-
+
+                imggen_STEP = gr.Slider(
+                    value=50,
+                    minimum=20,
+                    maximum=80,
+                    step=1,
+                    label="Gen steps",
+                    interactive=True
+                )
+                imggen_max_faces =gr.Slider(
+                    value=10000,
+                    minimum=2000,
+                    maximum=60000,
+                    step=1000,
+                    label="Face number limit",
+                    interactive=True
+                )
+
+                with gr.Accordion("Baking Options", open=False):
+                    imggen_force_bake = gr.Checkbox(
+                        label="Force (Ignore the degree of matching)",
+                        value=False,
+                        interactive=True
+                    )
+                    imggen_front_baking = gr.Radio(
+                        choices=['input image', 'multi-view front view', 'auto'],
+                        label="Front view baking",
+                        value='auto',
+                        interactive=True,
+                        visible=True
+                    )
+                    imggen_other_views = gr.CheckboxGroup(
+                        choices=['60°', '120°', '180°', '240°', '300°'],
+                        label="Other views Baking",
+                        value=['180°'],
+                        interactive=True,
+                        visible=True
+                    )
+                    imggen_align_times =gr.Slider(
+                        value=3,
+                        minimum=1,
+                        maximum=5,
+                        step=1,
+                        label="Number of alignment attempts per view",
+                        interactive=True
+                    )
+
+            input_image.change(
+                fn=check_image_available,
+                inputs=input_image,
+                outputs=[alert_message, imggen_removebg]
+            )
+
+            imggen_mode.change(
+                fn=update_mode,
+                inputs=imggen_mode,
+                outputs=[imggen_color, imggen_bake, imggen_max_faces, imggen_render]
+            )
+
+            imggen_color.change(
+                fn=lambda x:[gr.update(value=(x=='texture'), interactive=(x=='texture'), visible=(x=='texture'))]*2,
+                inputs=imggen_color,
+                outputs=[imggen_bake, imggen_render]
+            )
+
+            imggen_bake.change(
+                fn= lambda x:[gr.update(visible=x)]*4+[gr.update(value=120000, minimum=2000, maximum=60000 if x else 300000)],
+                inputs=imggen_bake,
+                outputs=[imggen_front_baking, imggen_other_views, imggen_align_times, imggen_force_bake, imggen_max_faces]
+            )
+
             with gr.Row():
                 imggen_submit = gr.Button("Generate", variant="primary")
 
             with gr.Row():
                 gr.Examples(examples=example_is, inputs=[input_image],
                     label="Img examples", examples_per_page=10)
+
 
         gr.Markdown(CONST_NOTE)
 
@@ -423,7 +598,7 @@
                 camera_position=[90, 90, None],
                 interactive=False
             )
-
+
             result_3dglb_texture = gr.Model3D(
                 clear_color=[0.0, 0.0, 0.0, 0.0],
                 label="GLB texture color",
@@ -454,17 +629,23 @@
     # gradio running code
     #===============================================================
 
-    none = gr.State(None)
     save_folder = gr.State()
     cond_image = gr.State()
     views_image = gr.State()
-    text_image = gr.State()
-
 
+    def handle_click(save_folder):
+        if save_folder is None:
+            save_folder = gen_save_folder()
+        return save_folder
+
     textgen_submit.click(
+        fn=handle_click,
+        inputs=[save_folder],
+        outputs=[save_folder]
+    ).success(
         fn=stage_0_t2i,
-        inputs=[text,
-        outputs=[rem_bg_image
+        inputs=[text, textgen_seed, textgen_step, save_folder],
+        outputs=[rem_bg_image],
     ).success(
         fn=stage_2_i2v,
         inputs=[rem_bg_image, textgen_SEED, textgen_STEP, save_folder],
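Note: the rewired submit handlers above chain steps with .click(...).success(...), so each stage only runs if the previous one finished without raising. The snippet below is a minimal self-contained sketch of that chaining pattern (not part of the commit); the functions and components in it are placeholders.

# Illustrative sketch: chaining Gradio events with .success().
import gradio as gr

def step_a(x):
    return x.strip()

def step_b(x):
    return f"processed: {x}"

with gr.Blocks() as chain_sketch:
    inp = gr.Textbox(label="Input")
    mid = gr.Textbox(label="Intermediate")
    out = gr.Textbox(label="Result")
    btn = gr.Button("Run")
    btn.click(fn=step_a, inputs=inp, outputs=mid).success(fn=step_b, inputs=mid, outputs=out)

# chain_sketch.launch()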
@@ -475,7 +656,8 @@
         outputs=[result_3dobj, result_3dglb_texture],
     ).success(
         fn=stage_3p_baking,
-        inputs=[save_folder, textgen_color, textgen_bake
+        inputs=[save_folder, textgen_color, textgen_bake,
+                textgen_force_bake, textgen_front_baking, textgen_other_views, textgen_align_times],
         outputs=[result_3dglb_baked],
     ).success(
         fn=stage_4_gif,
@@ -485,12 +667,12 @@
 
 
     imggen_submit.click(
-        fn=
-        inputs=[
-        outputs=[
+        fn=handle_click,
+        inputs=[save_folder],
+        outputs=[save_folder]
     ).success(
         fn=stage_1_xbg,
-        inputs=[
+        inputs=[input_image, save_folder, imggen_removebg],
         outputs=[rem_bg_image],
     ).success(
         fn=stage_2_i2v,
@@ -502,7 +684,8 @@
         outputs=[result_3dobj, result_3dglb_texture],
     ).success(
         fn=stage_3p_baking,
-        inputs=[save_folder, imggen_color, imggen_bake
+        inputs=[save_folder, imggen_color, imggen_bake,
+                imggen_force_bake, imggen_front_baking, imggen_other_views, imggen_align_times],
         outputs=[result_3dglb_baked],
     ).success(
         fn=stage_4_gif,
@@ -513,7 +696,10 @@
 #===============================================================
 # start gradio server
 #===============================================================
+CONST_PORT = 8080
+CONST_MAX_QUEUE = 1
+CONST_SERVER = '0.0.0.0'
 
 demo.queue()
 demo.launch()
-
+
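Note: the final hunk defines CONST_PORT, CONST_MAX_QUEUE, and CONST_SERVER but leaves the demo.queue()/demo.launch() calls with their defaults. The snippet below is an illustrative sketch (not part of the commit) of how such constants are typically passed to Gradio; the block name is a placeholder.

# Illustrative sketch: wiring launch constants into queue() and launch().
import gradio as gr

CONST_PORT = 8080
CONST_MAX_QUEUE = 1
CONST_SERVER = "0.0.0.0"

with gr.Blocks() as launch_sketch:
    gr.Markdown("placeholder UI")

launch_sketch.queue(max_size=CONST_MAX_QUEUE)
# launch_sketch.launch(server_name=CONST_SERVER, server_port=CONST_PORT)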