Jose Pablo Navarro committed on
Commit
7949150
1 Parent(s): f21d94d
G_checkpoint.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:423997d54f967d4ac949a2fdc2008a25a70e1c005954eaf18d7151728b974023
3
+ size 121792171
app.py ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import glob
3
+ import utils.utiles as ut
4
+ import numpy as np
5
+ from pywavefront import Wavefront
6
+ from pathlib import Path
7
+
8
+ # https://huggingface.co/spaces/radames/dpt-depth-estimation-3d-voxels/blob/main/app.py
9
+
10
+
11
def create_cube_around_point(point, cube_size, idx=0):
    """Build the 8 corner vertices and 6 quad faces of an axis-aligned cube
    centered at ``point``.

    Faces use 1-based Wavefront .obj vertex indices, shifted by ``idx * 8``
    so cubes from successive calls can be concatenated into one mesh.
    """
    cx, cy, cz = point
    h = cube_size / 2.0

    # Corner sign pattern: bottom ring (z-) first, then top ring (z+).
    corner_signs = [
        (-1, -1, -1), (1, -1, -1), (1, 1, -1), (-1, 1, -1),
        (-1, -1, 1), (1, -1, 1), (1, 1, 1), (-1, 1, 1),
    ]
    vertices = [(cx + sx * h, cy + sy * h, cz + sz * h)
                for sx, sy, sz in corner_signs]

    quad_faces = [
        (1, 2, 3, 4),  # front face
        (5, 6, 7, 8),  # back face
        (1, 5, 8, 4),  # left side
        (2, 6, 7, 3),  # right side
        (1, 2, 6, 5),  # bottom side
        (4, 3, 7, 8),  # top side
    ]

    return vertices, list(np.array(quad_faces) + (idx * 8))
38
+
39
+
40
def vox_to_obj(voxel_array, name_obj):
    """Export every occupied cell of a 3D voxel grid as a small cube in a
    Wavefront .obj file.

    voxel_array: 3D array; cells with value > 0 are written as cubes of
    side 0.7 centered on their (x, y, z) index.
    name_obj: output .obj path.
    """
    vertices_reales = []
    faces_reales = []

    # Running cube count: each cube contributes 8 vertices, so face indices
    # of cube idx are offset by idx * 8 (handled by create_cube_around_point).
    idx = 0

    for x in range(voxel_array.shape[0]):
        for y in range(voxel_array.shape[1]):
            for z in range(voxel_array.shape[2]):
                if voxel_array[x, y, z] > 0:
                    v, f = create_cube_around_point((x, y, z), 0.7, idx)
                    vertices_reales += v
                    faces_reales += f
                    idx += 1

    # Use a context manager so the handle is closed even if a write fails
    # (the original opened/closed by hand and leaked the handle on error).
    # Also dropped two locals (`vertices`, `faces`) that were filled but
    # never used for the output.
    with open(name_obj, "w") as obj_file:
        for vertex in vertices_reales:
            obj_file.write(f"v {vertex[0]} {vertex[1]} {vertex[2]}\n")

        for face in faces_reales:
            obj_file.write(f"f {' '.join(map(str, face))}\n")
69
+
70
+
71
def obj_to_vox(obj_file):
    """Rasterize a cube-per-voxel .obj mesh back into a 64^3 binary grid.

    Assumes the mesh was produced by ``vox_to_obj``: every 8 consecutive
    vertices form one axis-aligned cube whose centroid is the voxel center.
    """
    mesh = Wavefront(obj_file)

    grid = np.zeros((64, 64, 64), dtype=np.uint8)
    # Average each group of 8 cube corners to recover the voxel coordinate.
    for cube_corners in np.array(mesh.vertices).reshape(-1, 8, 3):
        cx, cy, cz = np.mean(cube_corners, 0)
        # NOTE(review): round() uses banker's rounding on exact .5 values —
        # assumed harmless because centroids land on (near-)integer coords.
        grid[round(cx), round(cy), round(cz)] = 1
    return grid
82
+
83
+
84
def process_image(v):
    """Gradio callback: voxelize an uploaded .obj fragment, run the generator,
    and export the completed vessel as a new .obj next to the app."""
    vox_frag = obj_to_vox(v)
    stem = Path(v).stem

    # Load the pretrained encoder/decoder and complete the fragment.
    generator = ut.load_generator('./G_checkpoint.pkl')
    fake = ut.generate(generator, vox_frag)
    _, fake_posprocess = ut.posprocessing(fake, vox_frag)

    gltf_path = f'./{stem}_result.obj'
    vox_to_obj(fake_posprocess, gltf_path)
    print(gltf_path)
    return gltf_path
96
+
97
+
98
# --- Gradio app wiring (runs at import time) ---
title = "IberianVoxel: Automatic Completion of Iberian Ceramics for Cultural Heritage Studies"
description = "Accurate completion of archaeological artifacts is a critical aspect in several archaeological studies, including documentation of variations in style, inference of chronological and ethnic groups, and trading routes trends, among many others. However, most available pottery is fragmented, leading to missing textural and morphological cues. Currently, the reassembly and completion of fragmented ceramics is a daunting and time-consuming task, done almost exclusively by hand, which requires the physical manipulation of the fragments. To overcome the challenges of manual reconstruction, reduce the materials' exposure and deterioration, and improve the quality of reconstructed samples, we present IberianVoxel, a novel 3D Autoencoder Generative Adversarial Network (3D AE-GAN) framework tested on an extensive database with complete and fragmented references. We generated a collection of 3D voxelized samples and their fragmented references from Iberian wheel-made pottery profiles. The fragments generated are stratified into different size groups and across multiple pottery classes. Lastly, we provide quantitative and qualitative assessments to measure the quality of the reconstructed voxelized samples by our proposed method and archaeologists' evaluation."
# glob.glob already returns a list; the original wrapped it in a redundant
# identity list comprehension.
examples = glob.glob("examples/*.obj")

# background-color: black;
#css = svelte-wn75i6


# 3D model in, 3D model out; examples are the bundled fragment .obj files.
iface = gr.Interface(fn=process_image,
                     inputs=[
                         gr.Model3D(label="Fragment Input",
                                    elem_id="model-in", clear_color=['black']),

                     ],
                     outputs=[
                         gr.Model3D(label="3d mesh reconstruction",
                                    clear_color=['black'])

                     ],
                     title=title,
                     description=description,
                     examples=examples,

                     )
iface.launch(share=False)
examples/AL_07K.obj ADDED
The diff for this file is too large to render. See raw diff
 
examples/AL_11D.obj ADDED
The diff for this file is too large to render. See raw diff
 
examples/BA_106_1.obj ADDED
The diff for this file is too large to render. See raw diff
 
examples/BA_18_3.obj ADDED
The diff for this file is too large to render. See raw diff
 
pyvox/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
# Package version string for the bundled pyvox copy.
__version__ = '0.1'
pyvox/defaultpalette.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
# MagicaVoxel's default 256-entry palette. Each 32-bit entry is decoded
# little-endian into (r, g, b, a) bytes by models.get_default_palette, so the
# hex literals read as 0xAABBGGRR. Entry 0 (0x00000000) is the empty/
# transparent slot.
default_palette = [
    0x00000000, 0xffffffff, 0xffccffff, 0xff99ffff, 0xff66ffff, 0xff33ffff, 0xff00ffff, 0xffffccff, 0xffccccff, 0xff99ccff, 0xff66ccff, 0xff33ccff, 0xff00ccff, 0xffff99ff, 0xffcc99ff, 0xff9999ff,
    0xff6699ff, 0xff3399ff, 0xff0099ff, 0xffff66ff, 0xffcc66ff, 0xff9966ff, 0xff6666ff, 0xff3366ff, 0xff0066ff, 0xffff33ff, 0xffcc33ff, 0xff9933ff, 0xff6633ff, 0xff3333ff, 0xff0033ff, 0xffff00ff,
    0xffcc00ff, 0xff9900ff, 0xff6600ff, 0xff3300ff, 0xff0000ff, 0xffffffcc, 0xffccffcc, 0xff99ffcc, 0xff66ffcc, 0xff33ffcc, 0xff00ffcc, 0xffffcccc, 0xffcccccc, 0xff99cccc, 0xff66cccc, 0xff33cccc,
    0xff00cccc, 0xffff99cc, 0xffcc99cc, 0xff9999cc, 0xff6699cc, 0xff3399cc, 0xff0099cc, 0xffff66cc, 0xffcc66cc, 0xff9966cc, 0xff6666cc, 0xff3366cc, 0xff0066cc, 0xffff33cc, 0xffcc33cc, 0xff9933cc,
    0xff6633cc, 0xff3333cc, 0xff0033cc, 0xffff00cc, 0xffcc00cc, 0xff9900cc, 0xff6600cc, 0xff3300cc, 0xff0000cc, 0xffffff99, 0xffccff99, 0xff99ff99, 0xff66ff99, 0xff33ff99, 0xff00ff99, 0xffffcc99,
    0xffcccc99, 0xff99cc99, 0xff66cc99, 0xff33cc99, 0xff00cc99, 0xffff9999, 0xffcc9999, 0xff999999, 0xff669999, 0xff339999, 0xff009999, 0xffff6699, 0xffcc6699, 0xff996699, 0xff666699, 0xff336699,
    0xff006699, 0xffff3399, 0xffcc3399, 0xff993399, 0xff663399, 0xff333399, 0xff003399, 0xffff0099, 0xffcc0099, 0xff990099, 0xff660099, 0xff330099, 0xff000099, 0xffffff66, 0xffccff66, 0xff99ff66,
    0xff66ff66, 0xff33ff66, 0xff00ff66, 0xffffcc66, 0xffcccc66, 0xff99cc66, 0xff66cc66, 0xff33cc66, 0xff00cc66, 0xffff9966, 0xffcc9966, 0xff999966, 0xff669966, 0xff339966, 0xff009966, 0xffff6666,
    0xffcc6666, 0xff996666, 0xff666666, 0xff336666, 0xff006666, 0xffff3366, 0xffcc3366, 0xff993366, 0xff663366, 0xff333366, 0xff003366, 0xffff0066, 0xffcc0066, 0xff990066, 0xff660066, 0xff330066,
    0xff000066, 0xffffff33, 0xffccff33, 0xff99ff33, 0xff66ff33, 0xff33ff33, 0xff00ff33, 0xffffcc33, 0xffcccc33, 0xff99cc33, 0xff66cc33, 0xff33cc33, 0xff00cc33, 0xffff9933, 0xffcc9933, 0xff999933,
    0xff669933, 0xff339933, 0xff009933, 0xffff6633, 0xffcc6633, 0xff996633, 0xff666633, 0xff336633, 0xff006633, 0xffff3333, 0xffcc3333, 0xff993333, 0xff663333, 0xff333333, 0xff003333, 0xffff0033,
    0xffcc0033, 0xff990033, 0xff660033, 0xff330033, 0xff000033, 0xffffff00, 0xffccff00, 0xff99ff00, 0xff66ff00, 0xff33ff00, 0xff00ff00, 0xffffcc00, 0xffcccc00, 0xff99cc00, 0xff66cc00, 0xff33cc00,
    0xff00cc00, 0xffff9900, 0xffcc9900, 0xff999900, 0xff669900, 0xff339900, 0xff009900, 0xffff6600, 0xffcc6600, 0xff996600, 0xff666600, 0xff336600, 0xff006600, 0xffff3300, 0xffcc3300, 0xff993300,
    0xff663300, 0xff333300, 0xff003300, 0xffff0000, 0xffcc0000, 0xff990000, 0xff660000, 0xff330000, 0xff0000ee, 0xff0000dd, 0xff0000bb, 0xff0000aa, 0xff000088, 0xff000077, 0xff000055, 0xff000044,
    0xff000022, 0xff000011, 0xff00ee00, 0xff00dd00, 0xff00bb00, 0xff00aa00, 0xff008800, 0xff007700, 0xff005500, 0xff004400, 0xff002200, 0xff001100, 0xffee0000, 0xffdd0000, 0xffbb0000, 0xffaa0000,
    0xff880000, 0xff770000, 0xff550000, 0xff440000, 0xff220000, 0xff110000, 0xffeeeeee, 0xffdddddd, 0xffbbbbbb, 0xffaaaaaa, 0xff888888, 0xff777777, 0xff555555, 0xff444444, 0xff222222, 0xff111111
]
pyvox/models.py ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from collections import namedtuple
2
+
3
+ from .defaultpalette import default_palette
4
+ from .utils import chunks
5
+
6
+ Size = namedtuple('Size', 'x y z')
7
+ Color = namedtuple('Color', 'r g b a')
8
+ Voxel = namedtuple('Voxel', 'x y z c')
9
+ Model = namedtuple('Model', 'size voxels')
10
+ Material = namedtuple('Material', 'id type weight props')
11
+
12
def get_default_palette():
    """Expand the packed 32-bit default palette into Color namedtuples.

    Each entry is split into its four little-endian bytes (r, g, b, a).
    """
    colors = []
    for packed in default_palette:
        r, g, b, a = packed.to_bytes(4, 'little')
        colors.append(Color(r, g, b, a))
    return colors
14
+
15
+
16
class Vox(object):
    """In-memory MagicaVoxel scene: a list of Model namedtuples plus an
    optional palette and material list."""

    def __init__(self, models, palette=None, materials=None):
        self.models = models
        # Remember whether the caller supplied a palette; the writer skips
        # emitting an RGBA chunk when the default palette is in use.
        self.default_palette = not palette
        self._palette = palette or get_default_palette()
        self.materials = materials or []

    @property
    def palette(self):
        return self._palette

    @palette.setter
    def palette(self, val):
        # Setting an explicit palette marks it as non-default.
        self._palette = val
        self.default_palette = False

    def to_dense_rgba(self, model_idx=0):
        """Return the model as a dense (y, z, x, 4) uint8 RGBA volume."""
        import numpy as np
        m = self.models[model_idx]
        res = np.zeros(( m.size.y, m.size.z, m.size.x, 4 ), dtype='B')

        # z axis is flipped relative to voxel storage order.
        # NOTE(review): v.c indexes the palette directly; the spec's
        # off-by-one palette mapping (see parser RGBA comment) may apply.
        for v in m.voxels:
            res[v.y, m.size.z-v.z-1, v.x] = self.palette[v.c]

        return res

    def to_dense(self, model_idx=0):
        """Return the model as a dense (y, z, x) uint8 volume of color indices."""
        import numpy as np
        m = self.models[model_idx]
        res = np.zeros(( m.size.y, m.size.z, m.size.x ), dtype='B')

        for v in m.voxels:
            res[v.y, m.size.z-v.z-1, v.x] = v.c

        return res

    def __str__(self):
        return 'Vox(%s)'%(self.models)

    @staticmethod
    def from_dense(a, black=[0,0,0]):
        """Build a Vox from a dense array.

        Accepts either a (y, z, x) index volume or a 4-dim color volume,
        which is quantized to a 255-color palette first. Voxels matching
        ``black`` are treated as empty.
        NOTE(review): mutable default argument `black` — never mutated here,
        so harmless, but worth cleaning up. The 4-dim branch reshapes with
        3 channels; a genuine RGBA (4-channel) input would fail — confirm
        expected input shape.
        """
        palette = None

        if len(a.shape) == 4:
            from PIL import Image
            import numpy as np

            # Mask of "empty" voxels, matched against the black color.
            mask = np.all(a == np.array([[black]]), axis=3)

            x,y,z,_ = a.shape

            # color index 0 is reserved for empty, so we get 255 colors
            img = Image.fromarray(a.reshape(x,y*z,3)).quantize(255)
            palette = img.getpalette()
            palette = [ Color(0,0,0,0) ] + [ Color(*c, 255) for c in chunks(palette, 3) ]
            # Shift indices up by one so 0 can mean empty, then blank out
            # the masked voxels.
            a = np.asarray(img, dtype='B').reshape(x,y,z).copy() + 1
            a[mask] = 0


        if len(a.shape) != 3: raise Exception("I expect a 4 or 3 dimensional matrix")

        y,z,x = a.shape

        nz = a.nonzero()

        # Re-flip z to match the on-disk voxel convention used by to_dense.
        voxels = [ Voxel( nz[2][i], nz[0][i], z-nz[1][i]-1, a[nz[0][i], nz[1][i], nz[2][i]] ) for i in range(nz[0].shape[0]) ]

        return Vox([ Model(Size(x,y,z), voxels)], palette)
pyvox/parser.py ADDED
@@ -0,0 +1,136 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from struct import unpack_from as unpack, calcsize
2
+ import logging
3
+
4
+ from .models import Vox, Size, Voxel, Color, Model, Material
5
+
6
+ log = logging.getLogger(__name__)
7
+
8
class ParsingException(Exception):
    """Raised when a .vox stream violates the expected file format."""
9
+
10
def bit(val, offset):
    """Return the masked bit at ``offset`` of ``val`` (0 when clear, the
    bit's value when set — truthy/falsy, not normalized to 0/1)."""
    return val & (1 << offset)
13
+
14
class Chunk(object):
    """One RIFF-style chunk of a .vox file, decoded eagerly by its 4-byte id.

    Depending on ``id`` this sets exactly one of: models (PACK), size (SIZE),
    voxels (XYZI), palette (RGBA) or material (MATT). Unknown ids raise
    ParsingException.
    """
    def __init__(self, id, content=None, chunks=None):
        self.id = id
        self.content = content or b''
        self.chunks = chunks or []

        if id == b'MAIN':
            # Container chunk: all payload must live in child chunks.
            if len(self.content): raise ParsingException('Non-empty content for main chunk')
        elif id == b'PACK':
            # Single int32: number of models in the file.
            self.models = unpack('i', content)[0]
        elif id == b'SIZE':
            self.size = Size(*unpack('iii', content))
        elif id == b'XYZI':
            # First int32 is the voxel count, then one (x, y, z, color) byte
            # quadruple per voxel starting at offset 4.
            n = unpack('i', content)[0]
            log.debug('xyzi block with %d voxels (len %d)', n, len(content))
            self.voxels = []
            self.voxels = [ Voxel(*unpack('BBBB', content, 4+4*i)) for i in range(n) ]
        elif id == b'RGBA':
            self.palette = [ Color(*unpack('BBBB', content, 4*i)) for i in range(255) ]
            # Docs say: color [0-254] are mapped to palette index [1-255]
            # hmm
            # self.palette = [ Color(0,0,0,0) ] + [ Color(*unpack('BBBB', content, 4*i)) for i in range(255) ]
        elif id == b'MATT':
            # Material: int32 id, int32 type, float weight, int32 flag bits,
            # then one float per set property flag (in bit order).
            _id, _type, weight, flags = unpack('iifi', content)
            props = {}
            offset = 16
            for b,field in [ (0, 'plastic'),
                             (1, 'roughness'),
                             (2, 'specular'),
                             (3, 'IOR'),
                             (4, 'attenuation'),
                             (5, 'power'),
                             (6, 'glow'),
                             (7, 'isTotalPower') ]:
                if bit(flags, b) and b<7: # no value for 7 / isTotalPower
                    props[field] = unpack('f', content, offset)
                    offset += 4

            self.material = Material(_id, _type, weight, props)

        else:
            raise ParsingException('Unknown chunk type: %s'%self.id)
56
+
57
class VoxParser(object):
    """Minimal MagicaVoxel .vox reader: walks the header and the MAIN chunk
    tree and assembles models, optional palette and materials into a Vox."""

    def __init__(self, filename):
        # Slurp the whole file; parsing then walks self.content via self.offset.
        with open(filename, 'rb') as f:
            self.content = f.read()

        self.offset = 0

    def unpack(self, fmt):
        # Read one struct-formatted group at the cursor and advance it.

        r = unpack(fmt, self.content, self.offset)
        self.offset += calcsize(fmt)

        return r

    def _parseChunk(self):
        # Chunk header: 4-byte id, N = payload byte count, M = total size of
        # the children's serialized chunks.

        _id, N, M = self.unpack('4sii')

        log.debug("Found chunk id %s / len %s / children %s", _id, N, M)

        content = self.unpack('%ds'%N)[0]

        # Recursively consume exactly M bytes of child chunks.
        start = self.offset
        chunks = [ ]
        while self.offset<start+M:
            chunks.append(self._parseChunk())

        return Chunk(_id, content, chunks)

    def parse(self):
        """Parse the whole file and return a Vox.

        Raises ParsingException on a bad magic number, an unsupported
        version, or a missing MAIN chunk.
        """

        header, version = self.unpack('4si')
        if header != b'VOX ': raise ParsingException("This doesn't look like a vox file to me")

        if version != 150: raise ParsingException("Unknown vox version: %s expected 150"%version)

        main = self._parseChunk()

        if main.id != b'MAIN': raise ParsingException("Missing MAIN Chunk")

        # Reversed so .pop() yields chunks in original file order.
        chunks = list(reversed(main.chunks))
        if chunks[-1].id == b'PACK':
            models = chunks.pop().models
        else:
            models = 1

        log.debug("file has %d models", models)

        # Each model is a SIZE chunk followed by its XYZI chunk; Python
        # evaluates arguments left-to-right, so the first pop() is SIZE.
        models = [ self._parseModel(chunks.pop(), chunks.pop()) for _ in range(models) ]

        # NOTE(review): chunks[0] is the LAST remaining chunk in file order
        # (the list is reversed) while pop() removes the EARLIEST; if MATT
        # chunks follow RGBA this check looks inconsistent — verify against
        # a file that actually contains materials.
        if chunks and chunks[0].id == b'RGBA':
            palette = chunks.pop().palette
        else:
            palette = None

        materials = [ c.material for c in chunks ]

        return Vox(models, palette, materials)



    def _parseModel(self, size, xyzi):
        # Pair one SIZE chunk with its XYZI chunk into a Model namedtuple.
        if size.id != b'SIZE': raise ParsingException('Expected SIZE chunk, got %s', size.id)
        if xyzi.id != b'XYZI': raise ParsingException('Expected XYZI chunk, got %s', xyzi.id)

        return Model(size.size, xyzi.voxels)
124
+
125
+
126
+
127
+
128
# CLI entry point: parse the .vox file given as the first argument with
# debug logging enabled (requires the third-party `coloredlogs` package).
if __name__ == '__main__':

    import sys
    import coloredlogs

    coloredlogs.install(level=logging.DEBUG)


    VoxParser(sys.argv[1]).parse()
pyvox/utils.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
def chunks(l, n):
    """Yield successive n-sized chunks from l (the final chunk may be shorter)."""
    start = 0
    while start < len(l):
        yield l[start:start + n]
        start += n
pyvox/writer.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from struct import pack
2
+
3
+
4
class VoxWriter(object):
    """Serialize a Vox scene back into the MagicaVoxel .vox binary format."""

    def __init__(self, filename, vox):
        self.filename = filename
        self.vox = vox

    def _chunk(self, chunk_id, content, chunks=()):
        """Pack one chunk: 4-byte id, content length, children length,
        payload, then the serialized children.

        Fixes vs. original: the child list default is an immutable tuple
        (was a shared mutable ``[]``) and the parameter no longer shadows
        the ``id`` builtin.
        """
        children = b''
        for c in chunks:
            children += self._chunk(*c)

        return pack('4sii', chunk_id, len(content), len(children)) + content + children

    def _matflags(self, props):
        """Encode the MATT flag bitfield followed by one float per set flag."""
        flags = 0
        values = b''
        for bit_pos, field in [ (0, 'plastic'),
                                (1, 'roughness'),
                                (2, 'specular'),
                                (3, 'IOR'),
                                (4, 'attenuation'),
                                (5, 'power'),
                                (6, 'glow'),
                                (7, 'isTotalPower') ]:
            if field in props:
                flags |= 1 << bit_pos
                values += pack('f', props[field])

        return pack('i', flags) + values

    def write(self):
        """Assemble the VOX header plus the MAIN chunk tree and write the file."""
        res = pack('4si', b'VOX ', 150)

        chunks = []

        if self.vox.models:
            chunks.append((b'PACK', pack('i', len(self.vox.models))))

        # One SIZE + XYZI pair per model; XYZI is count-prefixed voxel bytes.
        for m in self.vox.models:
            chunks.append((b'SIZE', pack('iii', *m.size)))
            chunks.append((b'XYZI', pack('i', len(m.voxels)) + b''.join(pack('BBBB', *v) for v in m.voxels)))

        # Only emit a palette when the caller supplied a non-default one.
        if not self.vox.default_palette:
            chunks.append((b'RGBA', b''.join(pack('BBBB', *c) for c in self.vox.palette)))

        for m in self.vox.materials:
            chunks.append((b'MATT', pack('iif', m.id, m.type, m.weight) + self._matflags(m.props)))

        res += self._chunk(b'MAIN', b'', chunks)

        with open(self.filename, 'wb') as f:
            f.write(res)
utils/FragmentDataset.py ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import glob
2
+ from torch.utils.data import Dataset
3
+ import numpy as np
4
+ import pyvox.parser
5
+
6
+
7
class FragmentDataset(Dataset):
    """Dataset of voxelized pottery (.vox) files for fragment completion.

    Each item is a (fragment_mask, full_vessel) pair of 64^3 arrays: one
    randomly chosen fragment binarized to {0, 1}, plus the full multi-label
    vessel volume.
    """
    def __init__(self, vox_path, vox_type, dim_size=64, transform=None):
        # vox_type selects a subdirectory under vox_path (presumably the
        # train/test split — confirm against the data layout).
        self.vox_type = vox_type
        self.vox_path = vox_path
        self.transform = transform
        self.dim_size = dim_size
        # Sorted for a deterministic index -> file mapping.
        self.vox_files = sorted(
            glob.glob('{}/{}/*/*.vox'.format(self.vox_path, self.vox_type)))

    def __len__(self):
        return len(self.vox_files)

    def __read_vox__(self, path):
        # Parse and zero-pad into a fixed 64^3 grid (corner-aligned, not centered).
        vox = pyvox.parser.VoxParser(path).parse()
        a = vox.to_dense()
        caja = np.zeros((64, 64, 64))
        caja[0:a.shape[0], 0:a.shape[1], 0:a.shape[2]] = a
        return caja

    def __select_fragment__(self, v):
        # Fragment ids are the non-zero unique voxel values. Pick ONE at
        # random and binarize in place: chosen fragment -> 1, others -> 0.
        frag_id = np.unique(v)[1:]
        #select_frag = np.random.choice(frag_id, np.random.choice(np.arange(1, len(frag_id)), 1)[0], replace=False)
        select_frag = np.random.choice(frag_id, 1, replace=False)
        for f in frag_id:
            if not(f in select_frag):
                v[v == f] = 0
            else:
                v[v == f] = 1
        return v, select_frag

    def __non_select_fragment__(self, v, select_frag):
        # Complement mask: everything EXCEPT the selected fragments -> 1.
        frag_id = np.unique(v)[1:]
        for f in frag_id:
            if not(f in select_frag):
                v[v == f] = 1
            else:
                v[v == f] = 0
        return v

    def __select_fragment_specific__(self, v, select_frag):
        # Binarize a caller-chosen fragment id set instead of a random one.
        frag_id = np.unique(v)[1:]
        for f in frag_id:
            if not(f in select_frag):
                v[v == f] = 0
            else:
                v[v == f] = 1
        return v, select_frag

    def __getitem__(self, idx):
        img_path = self.vox_files[idx]
        vox = self.__read_vox__(img_path)
        # NOTE(review): '/'-based split assumes POSIX path separators.
        label = img_path.replace(self.vox_path, '').split('/')[2]
        frag, select_frag = self.__select_fragment__(vox.copy())

        if self.transform:
            vox = self.transform(vox)
            frag = self.transform(frag)

        # Trailing comma keeps this a 2-tuple; extra outputs intentionally disabled.
        return frag, vox, # select_frag, int(label)-1#, img_path

    def __getitem_specific_frag__(self, idx, select_frag):
        # Like __getitem__ but with a fixed fragment selection (for evaluation).
        img_path = self.vox_files[idx]
        vox = self.__read_vox__(img_path)
        label = img_path.replace(self.vox_path, '').split('/')[2]
        frag, select_frag = self.__select_fragment_specific__(
            vox.copy(), select_frag)

        if self.transform:
            vox = self.transform(vox)
            frag = self.transform(frag)

        return frag, vox, # select_frag, int(label)-1, img_path

    def __getfractures__(self, idx):
        # All voxel values present in the file (0 plus the fragment ids).
        img_path = self.vox_files[idx]
        vox = self.__read_vox__(img_path)
        return np.unique(vox) # select_frag, int(label)-1, img_path
utils/network_vox.py ADDED
@@ -0,0 +1,155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+
3
+ class _D(torch.nn.Module):
4
+ def __init__(self, cube_len=64):
5
+ super(_D, self).__init__()
6
+ self.leak = 0.2
7
+ self.cube_len = cube_len
8
+
9
+ padd = (0,0,0)
10
+ if self.cube_len == 32:
11
+ padd = (1,1,1)
12
+
13
+ self.layer1 = torch.nn.Sequential(
14
+
15
+ torch.nn.Conv3d(1, self.cube_len, kernel_size=4, stride=2, bias=False, padding=(1, 1, 1)),
16
+ torch.nn.BatchNorm3d(self.cube_len),
17
+ torch.nn.LeakyReLU(self.leak)
18
+ )
19
+ self.layer2 = torch.nn.Sequential(
20
+ torch.nn.Conv3d(self.cube_len, self.cube_len*2, kernel_size=4, stride=2, bias=False, padding=(1, 1, 1)),
21
+ torch.nn.BatchNorm3d(self.cube_len*2),
22
+ torch.nn.LeakyReLU(self.leak)
23
+ )
24
+ self.layer3 = torch.nn.Sequential(
25
+ torch.nn.Conv3d(self.cube_len*2, self.cube_len*4, kernel_size=4, stride=2, bias=False, padding=(1, 1, 1)),
26
+ torch.nn.BatchNorm3d(self.cube_len*4),
27
+ torch.nn.LeakyReLU(self.leak)
28
+ )
29
+ self.layer4 = torch.nn.Sequential(
30
+ torch.nn.Conv3d(self.cube_len*4, self.cube_len*8, kernel_size=4, stride=2, bias=False, padding=(1, 1, 1)),
31
+ torch.nn.BatchNorm3d(self.cube_len*8),
32
+ torch.nn.LeakyReLU(self.leak)
33
+ )
34
+ self.layer5 = torch.nn.Sequential(
35
+ torch.nn.Conv3d(self.cube_len*8, 1, kernel_size=4, stride=2, bias=False, padding=padd),
36
+ )
37
+ self.layer6 = torch.nn.Sequential(
38
+ torch.nn.Sigmoid(),
39
+ #torch.nn.Linear(64, 1),
40
+ #torch.nn.LogSoftmax(dim=1)
41
+ )
42
+
43
+ def forward(self, x):
44
+ out = x.view((-1, 1, self.cube_len, self.cube_len, self.cube_len))
45
+ out = self.layer1(out)
46
+ out = self.layer2(out)
47
+ out = self.layer3(out)
48
+ out = self.layer4(out)
49
+ #print(out.shape)
50
+ out = self.layer5(out).view(-1, 1)
51
+ #print(out.shape)
52
+ out = self.layer6(out)
53
+ #print(out.shape)
54
+ return out
55
+
56
+ class _G_encode_decode(torch.nn.Module):
57
+ def __init__(self, cube_len=64, z_latent_space=64, z_intern_space=64):
58
+ super(_G_encode_decode, self).__init__()
59
+ self.leak = 0.01
60
+ self.cube_len = cube_len
61
+ self.z_latent_space = z_latent_space
62
+ self.z_intern_space = z_intern_space
63
+
64
+ padd = (0,0,0)
65
+ if self.cube_len == 32:
66
+ padd = (1,1,1)
67
+
68
+ self.layer1 = torch.nn.Sequential(
69
+ torch.nn.Conv3d(1, self.cube_len, kernel_size=4, stride=2, bias=False, padding=(1, 1, 1)),
70
+ torch.nn.BatchNorm3d(self.cube_len),
71
+ torch.nn.LeakyReLU(self.leak)
72
+ )
73
+ self.layer2 = torch.nn.Sequential(
74
+ torch.nn.Conv3d(self.cube_len, self.cube_len*2, kernel_size=4, stride=2, bias=False, padding=(1, 1, 1)),
75
+ torch.nn.BatchNorm3d(self.cube_len*2),
76
+ torch.nn.LeakyReLU(self.leak)
77
+ )
78
+ self.layer3 = torch.nn.Sequential(
79
+ torch.nn.Conv3d(self.cube_len*2, self.cube_len*4, kernel_size=4, stride=2, bias=False, padding=(1, 1, 1)),
80
+ torch.nn.BatchNorm3d(self.cube_len*4),
81
+ torch.nn.LeakyReLU(self.leak)
82
+ )
83
+ self.layer4 = torch.nn.Sequential(
84
+ torch.nn.Conv3d(self.cube_len*4, self.cube_len*8, kernel_size=4, stride=2, bias=False, padding=(1, 1, 1)),
85
+ torch.nn.BatchNorm3d(self.cube_len*8),
86
+ torch.nn.LeakyReLU(self.leak)
87
+ )
88
+ self.layer5 = torch.nn.Sequential(
89
+ torch.nn.Conv3d(self.cube_len*8, self.cube_len*2, kernel_size=4, stride=2, bias=False, padding=padd),
90
+ #torch.nn.BatchNorm3d(self.cube_len*8),
91
+
92
+ )
93
+
94
+ self.layer6 = torch.nn.Sequential(
95
+ torch.nn.ConvTranspose3d(self.z_latent_space, self.cube_len*8, kernel_size=4, stride=2, bias=False, padding=padd),
96
+ torch.nn.BatchNorm3d(self.cube_len*8),
97
+ torch.nn.ReLU()
98
+ )
99
+ self.layer7 = torch.nn.Sequential(
100
+ torch.nn.ConvTranspose3d(self.cube_len*8, self.cube_len*4, kernel_size=4, stride=2, bias=False, padding=(1, 1, 1)),
101
+ torch.nn.BatchNorm3d(self.cube_len*4),
102
+ torch.nn.ReLU()
103
+ )
104
+ self.layer8 = torch.nn.Sequential(
105
+ torch.nn.ConvTranspose3d(self.cube_len*4, self.cube_len*2, kernel_size=4, stride=2, bias=False, padding=(1, 1, 1)),
106
+ torch.nn.BatchNorm3d(self.cube_len*2),
107
+ torch.nn.ReLU()
108
+ )
109
+ self.layer9 = torch.nn.Sequential(
110
+ torch.nn.ConvTranspose3d(self.cube_len*2, self.cube_len, kernel_size=4, stride=2, bias=False, padding=(1, 1, 1)),
111
+ torch.nn.BatchNorm3d(self.cube_len),
112
+ torch.nn.ReLU()
113
+ )
114
+ self.layer10 = torch.nn.Sequential(
115
+ torch.nn.ConvTranspose3d(self.cube_len, 1, kernel_size=4, stride=2, bias=False, padding=(1, 1, 1)),
116
+
117
+
118
+ )
119
+ self.linear_layer = torch.nn.Sequential(
120
+ torch.nn.Linear(self.z_intern_space, self.z_latent_space),
121
+ torch.nn.BatchNorm1d(self.z_latent_space),
122
+ torch.nn.ReLU()
123
+ )
124
+ self.normalized_layer = torch.nn.Sequential(
125
+ torch.nn.BatchNorm2d(self.z_latent_space),
126
+ )
127
+
128
+ def normalized_vector(self, z0, z1):
129
+ x_encode = torch.concat((z0, z1), 1)
130
+ return self.linear_layer(x_encode)
131
+
132
+ def forward(self, x):
133
+ x_encode = forward_encode(x)
134
+ x_decode = forward_decode(x)
135
+
136
+ return x_decode
137
+
138
+ def forward_encode(self, x):
139
+ out_x = x.view((-1, 1, self.cube_len, self.cube_len, self.cube_len))
140
+ out = self.layer1(out_x)
141
+ out = self.layer2(out)
142
+ out = self.layer3(out)
143
+ out = self.layer4(out)
144
+ out = self.layer5(out)
145
+
146
+ return out.view(out_x.shape[0], -1)
147
+
148
+ def forward_decode(self, x):
149
+ out = x.view(-1, self.z_latent_space, 1, 1, 1)
150
+ out = self.layer6(out)
151
+ out = self.layer7(out)
152
+ out = self.layer8(out)
153
+ out = self.layer9(out)
154
+ out = self.layer10(out)
155
+ return out
utils/utiles.py ADDED
@@ -0,0 +1,141 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import plotly.graph_objects as go
2
+ import numpy as np
3
+ import pyvox.parser
4
+ import torch
5
+ import utils.network_vox as nv
6
+ from scipy import ndimage as ndi
7
+
8
# Run on GPU when available; every model/tensor in this module uses this device.
available_device = "cuda" if torch.cuda.is_available() else "cpu"
9
+
10
+
11
def __read_vox_frag__(path, fragment_idx):
    """Load a .vox pottery file and return a binary mask for one fragment.

    Returns a 64^3 array with 1 where voxels belong to ``fragment_idx`` and
    0 elsewhere, or None (after printing the valid ids) when the id is not
    present — the original's best-effort behavior is preserved.
    """
    vox_pottery = __read_vox__(path)
    # Explicit membership check instead of `assert`: asserts are stripped
    # under `python -O`, which would silently skip the validation.
    if fragment_idx in np.unique(vox_pottery):
        vox_frag = vox_pottery.copy()
        vox_frag[vox_pottery != fragment_idx] = 0
        vox_frag[vox_pottery == fragment_idx] = 1
        return vox_frag
    print('fragment_idx not found. Possible fragment_idx {}'.format(
        np.unique(vox_pottery)[1:]))
22
+
23
+
24
def __read_vox__(path):
    """Parse a MagicaVoxel file and zero-pad it into a fixed 64^3 dense grid
    (corner-aligned, not centered)."""
    dense = pyvox.parser.VoxParser(path).parse().to_dense()
    padded = np.zeros((64, 64, 64))
    sx, sy, sz = dense.shape
    padded[:sx, :sy, :sz] = dense
    return padded
30
+
31
+
32
def plot(voxel_matrix):
    """Show an interactive 3D scatter of all occupied voxels (side effect:
    opens a plotly figure; returns nothing)."""
    # np.where yields per-axis index arrays; transpose into (N, 3) coordinates.
    voxels = np.array(np.where(voxel_matrix)).T
    x, y, z = voxels[:, 0], voxels[:, 1], voxels[:, 2]
    fig = go.Figure(data=go.Scatter3d(x=x, y=y, z=z, mode='markers', marker=dict(size=5, symbol='square', color='#ceabb2', line=dict(width=2,
                                                                                                                                     color='DarkSlateGrey',))))
    fig.update_layout()

    fig.show()
40
+
41
+
42
def plot_frag(vox_pottery):
    """Show each labeled fragment of a pottery volume as its own colored
    trace in one interactive 3D scatter figure (side effect: fig.show())."""
    stts = []
    colors = ['#ceabb2', '#d05d86', '#7e1b2f', '#c1375b', '#cdc1c3',
              '#ceabb2', '#d05d86', '#7e1b2f', '#c1375b', '#cdc1c3']
    # Non-zero unique voxel values are the fragment ids; iterate in
    # descending id order ([::-1]).
    for i, frag in enumerate(np.unique(vox_pottery)[1:][::-1]):
        # Isolate this fragment's voxels.
        vox_frag = vox_pottery.copy()
        vox_frag[vox_pottery != frag] = 0
        voxels = np.array(np.where(vox_frag)).T
        x, y, z = voxels[:, 0], voxels[:, 1], voxels[:, 2]
        # ut.plot(vox_frag)
        scatter = go.Scatter3d(x=x, y=y, z=z,
                               mode='markers',
                               name='Fragment {} ({})'.format(i+1, int(frag)),
                               marker=dict(size=5, symbol='square', color=colors[i],
                                           line=dict(width=2, color='DarkSlateGrey',)))
        stts.append(scatter)

    fig = go.Figure(data=stts)
    fig.update_layout()

    fig.show()
63
+
64
+
65
def plot_join(vox_1, vox_2):
    """Render two voxel volumes together as two traces in one interactive
    3D scatter figure (side effect: fig.show())."""
    colors = ['#ceabb2', '#d05d86', '#7e1b2f', '#c1375b', '#cdc1c3',
              '#ceabb2', '#d05d86', '#7e1b2f', '#c1375b', '#cdc1c3']
    stts = []
    # One trace per volume, same styling as the per-fragment plot.
    for volume, trace_name, color in ((vox_1, 'Fragment 1', colors[0]),
                                      (vox_2, 'Fragment 2', colors[2])):
        occupied = np.array(np.where(volume)).T
        x, y, z = occupied[:, 0], occupied[:, 1], occupied[:, 2]
        stts.append(go.Scatter3d(x=x, y=y, z=z,
                                 mode='markers',
                                 name=trace_name,
                                 marker=dict(size=5, symbol='square', color=color,
                                             line=dict(width=2, color='DarkSlateGrey',))))

    fig = go.Figure(data=stts)
    fig.update_layout()

    fig.show()
93
+
94
+
95
def posprocessing(fake, mesh_frag):
    """Clean a generated voxel volume and merge it with the input fragment.

    fake: generator output; fake[0] is the predicted volume (assumes 64^3
    voxels after reshape — the sizes are hard-coded below).
    mesh_frag: the input fragment volume, thresholded at 0.5.

    Returns (a_fake, dilated): the union of the opened prediction with the
    eroded fragment, and a once-dilated copy of that union.

    Fixes vs. original: removed a dead `binary_erosion` whose result was
    immediately overwritten by the following `binary_dilation`, and a
    duplicate construction of the identical structuring element.
    """
    # Binarize: fragment by fixed 0.5 threshold, prediction by its own mean.
    a_p = (mesh_frag > 0.5)
    a_fake = (fake[0] > np.mean(fake[0]))
    a_fake = np.array(a_fake, dtype=np.int32).reshape(1, -1)

    # 6-connected 3D structuring element ("diamond").
    diamond = ndi.generate_binary_structure(rank=3, connectivity=1)

    # Morphological opening of the prediction (erode, then dilate) to drop
    # speckle noise; the fragment is eroded once for the union below.
    a_fake = ndi.binary_erosion(a_fake.reshape(
        64, 64, 64), diamond, iterations=1)
    _a_p = ndi.binary_erosion(a_p.reshape(64, 64, 64), diamond, iterations=1)

    a_fake = ndi.binary_dilation(
        a_fake.reshape(64, 64, 64), diamond, iterations=1)

    a_p = ndi.binary_dilation(a_p.reshape(64, 64, 64), diamond, iterations=1)
    # Boolean `+` acts as logical OR: union of prediction and eroded fragment.
    a_fake = a_fake + _a_p

    dilated = ndi.binary_dilation(
        a_fake.reshape(64, 64, 64), diamond, iterations=1)

    return a_fake, dilated
120
+
121
+
122
def load_generator(path_checkpoint):
    """Instantiate the AE-GAN generator, load trained weights, and return it
    in eval mode on the module's available_device.

    The sizes (cube_len=64, z_latent_space=128, z_intern_space=136) match
    the checkpoint shipped with the app; the checkpoint file is expected to
    contain a state_dict.
    """
    # NOTE: torch.load unpickles arbitrary objects — only load trusted files.
    G_encode_decode = nv._G_encode_decode(
        cube_len=64, z_latent_space=128, z_intern_space=136).to(available_device)
    checkpoint = torch.load(path_checkpoint, map_location=available_device)
    G_encode_decode.load_state_dict(checkpoint)
    G_encode_decode = G_encode_decode.eval()

    return G_encode_decode
131
+
132
+
133
def generate(model, vox_frag):
    """Complete a fragment: encode it, decode a full volume, and overlay the
    known fragment voxels onto the prediction.

    model: generator exposing forward_encode / forward_decode.
    vox_frag: 64^3 fragment occupancy grid.
    Returns (fake, mesh_frag) as numpy arrays (fake keeps the decoder's
    batch and channel dimensions).
    """
    mesh_frag = torch.Tensor(vox_frag).unsqueeze(
        0).float().to(available_device)
    # Inference only: skip autograd bookkeeping (the original built a graph
    # it immediately discarded via .detach()).
    with torch.no_grad():
        output_g_encode = model.forward_encode(mesh_frag)
        fake = model.forward_decode(output_g_encode)
        # Re-impose the known fragment voxels on the generated volume.
        fake = fake + (mesh_frag.unsqueeze(1))
    fake = fake.detach().cpu().numpy()
    mesh_frag = mesh_frag.detach().cpu().numpy()
    return fake, mesh_frag