# Hugging Face Spaces page header (scrape artifact, was: "Spaces: Running")
import gradio as gr | |
import glob | |
import utils.utiles as ut | |
import numpy as np | |
from pywavefront import Wavefront | |
from pathlib import Path | |
# https://huggingface.co/spaces/radames/dpt-depth-estimation-3d-voxels/blob/main/app.py | |
def create_cube_around_point(point, cube_size, idx=0):
    """Build the geometry of an axis-aligned cube centred on *point*.

    Args:
        point: (x, y, z) centre of the cube.
        cube_size: edge length of the cube.
        idx: index of this cube among previously emitted cubes; face
            indices are shifted by 8 per cube because OBJ vertex
            references are global and 1-based.

    Returns:
        (vertices, faces): eight (x, y, z) corner tuples, and six
        4-vertex faces (as numpy rows) already offset by ``idx * 8``.
    """
    cx, cy, cz = point
    h = cube_size / 2.0
    # Corners: front face (z - h) first, then back face (z + h),
    # each traversed counter-clockwise starting at the lower-left.
    corners = []
    for dz in (-h, h):
        corners.append((cx - h, cy - h, cz + dz))
        corners.append((cx + h, cy - h, cz + dz))
        corners.append((cx + h, cy + h, cz + dz))
        corners.append((cx - h, cy + h, cz + dz))
    quads = np.array([
        (1, 2, 3, 4),  # front
        (5, 6, 7, 8),  # back
        (1, 5, 8, 4),  # left
        (2, 6, 7, 3),  # right
        (1, 2, 6, 5),  # bottom
        (4, 3, 7, 8),  # top
    ])
    return corners, list(quads + idx * 8)
def vox_to_obj(voxel_array, name_obj):
    """Export the occupied cells of a 3-D voxel grid as a Wavefront OBJ.

    Each voxel with a positive value becomes an axis-aligned cube of
    edge 0.7 centred on its (x, y, z) grid index (the sub-unit edge
    keeps neighbouring cubes visually separated).

    Args:
        voxel_array: 3-D array; cells > 0 are treated as occupied.
        name_obj: path of the .obj file to (over)write.
    """
    obj_vertices = []
    obj_faces = []
    cube_count = 0
    for x in range(voxel_array.shape[0]):
        for y in range(voxel_array.shape[1]):
            for z in range(voxel_array.shape[2]):
                if voxel_array[x, y, z] > 0:
                    v, f = create_cube_around_point((x, y, z), 0.7, cube_count)
                    obj_vertices += v
                    obj_faces += f
                    cube_count += 1
    # 'with' guarantees the handle is closed even if a write raises
    # (the original open()/close() pair leaked the handle on error).
    with open(name_obj, "w") as obj_file:
        for vertex in obj_vertices:
            obj_file.write(f"v {vertex[0]} {vertex[1]} {vertex[2]}\n")
        for face in obj_faces:
            obj_file.write(f"f {' '.join(map(str, face))}\n")
def obj_to_vox(obj_file, grid_shape=(64, 64, 64)):
    """Voxelize an OBJ mesh (as produced by ``vox_to_obj``) into a grid.

    Groups the mesh vertices into consecutive runs of 8 (one cube each,
    matching ``vox_to_obj``'s output layout) and marks the grid cell at
    each group's rounded centroid as occupied.

    Args:
        obj_file: path to the .obj file to read.
        grid_shape: shape of the output voxel grid; defaults to 64^3,
            the resolution the generator network expects.

    Returns:
        ``np.uint8`` array of shape *grid_shape* with 1 in occupied cells.

    NOTE(review): the reshape assumes the vertex count is a multiple of
    8 and raises otherwise — fine for meshes written by vox_to_obj,
    unverified for arbitrary OBJ input.
    """
    obj_mesh = Wavefront(obj_file)
    voxels = np.zeros(grid_shape, dtype=np.uint8)
    for cube_vertices in np.array(obj_mesh.vertices).reshape(-1, 8, 3):
        cx, cy, cz = np.mean(cube_vertices, 0)
        voxels[round(cx), round(cy), round(cz)] = 1
    return voxels
def process_image(v):
    """Gradio callback: complete a fragmented ceramic mesh.

    Voxelizes the uploaded fragment OBJ, runs the pretrained 3D AE-GAN
    generator to predict the full shape, post-processes the result and
    writes it next to the app as ``<stem>_result.obj``.

    Args:
        v: filesystem path of the uploaded .obj fragment.

    Returns:
        Path of the reconstructed .obj file (fed to the output Model3D).
    """
    fragment_voxels = obj_to_vox(v)
    generator = ut.load_generator('./G_checkpoint.pkl')
    completed = ut.generate(generator, fragment_voxels)
    _, completed_post = ut.posprocessing(completed, fragment_voxels)
    gltf_path = f'./{Path(v).stem}_result.obj'
    vox_to_obj(completed_post, gltf_path)
    print(gltf_path)
    return gltf_path
# --- Gradio app wiring -----------------------------------------------------
title = "IberianVoxel: Automatic Completion of Iberian Ceramics for Cultural Heritage Studies"
# Long-form abstract rendered under the title in the UI.
description = "Accurate completion of archaeological artifacts is a critical aspect in several archaeological studies, including documentation of variations in style, inference of chronological and ethnic groups, and trading routes trends, among many others. However, most available pottery is fragmented, leading to missing textural and morphological cues. Currently, the reassembly and completion of fragmented ceramics is a daunting and time-consuming task, done almost exclusively by hand, which requires the physical manipulation of the fragments. To overcome the challenges of manual reconstruction, reduce the materials' exposure and deterioration, and improve the quality of reconstructed samples, we present IberianVoxel, a novel 3D Autoencoder Generative Adversarial Network (3D AE-GAN) framework tested on an extensive database with complete and fragmented references. We generated a collection of 3D voxelized samples and their fragmented references from Iberian wheel-made pottery profiles. The fragments generated are stratified into different size groups and across multiple pottery classes. Lastly, we provide quantitative and qualitative assessments to measure the quality of the reconstructed voxelized samples by our proposed method and archaeologists' evaluation."
# Example fragment meshes shipped with the Space; glob.glob already
# returns a list, so no wrapping comprehension is needed.
examples = glob.glob("./examples/*.obj")
iface = gr.Interface(fn=process_image,
                     inputs=[
                         gr.Model3D(label="Fragment Input",
                                    elem_id="model-in", clear_color=['black']),
                     ],
                     outputs=[
                         gr.Model3D(label="3d mesh reconstruction",
                                    clear_color=['black'])
                     ],
                     title=title,
                     description=description,
                     examples=examples,
                     cache_examples=False,
                     )
iface.launch()