Upload ./hy3dgen/texgen/custom_rasterizer/lib/custom_rasterizer_kernel/grid_neighbor.cpp with huggingface_hub
hy3dgen/texgen/custom_rasterizer/lib/custom_rasterizer_kernel/grid_neighbor.cpp
ADDED
@@ -0,0 +1,574 @@
#include "rasterizer.h"
#include <fstream>

inline int pos2key(float* p, int resolution) {
    int x = (p[0] * 0.5 + 0.5) * resolution;
    int y = (p[1] * 0.5 + 0.5) * resolution;
    int z = (p[2] * 0.5 + 0.5) * resolution;
    return (x * resolution + y) * resolution + z;
}

inline void key2pos(int key, int resolution, float* p) {
    int x = key / resolution / resolution;
    int y = key / resolution % resolution;
    int z = key % resolution;
    p[0] = ((x + 0.5) / resolution - 0.5) * 2;
    p[1] = ((y + 0.5) / resolution - 0.5) * 2;
    p[2] = ((z + 0.5) / resolution - 0.5) * 2;
}

inline void key2cornerpos(int key, int resolution, float* p) {
    int x = key / resolution / resolution;
    int y = key / resolution % resolution;
    int z = key % resolution;
    p[0] = ((x + 0.75) / resolution - 0.5) * 2;
    p[1] = ((y + 0.25) / resolution - 0.5) * 2;
    p[2] = ((z + 0.75) / resolution - 0.5) * 2;
}

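The three helpers above quantize a position in [-1, 1]^3 into a flat voxel key at a given resolution and map a key back to the voxel center (key2pos) or an offset sample inside the voxel (key2cornerpos). A minimal round-trip sketch, assuming it is compiled in the same translation unit as pos2key/key2pos; key_roundtrip_demo, the resolution value, and the tolerance are illustrative only and not part of the original file:

#include <cassert>
#include <cmath>

inline void key_roundtrip_demo() {
    const int resolution = 256;            // illustrative grid resolution
    float p[3] = {0.10f, -0.30f, 0.70f};   // a point inside [-1, 1]^3
    int key = pos2key(p, resolution);      // flat index (x * R + y) * R + z
    float center[3];
    key2pos(key, resolution, center);      // center of the voxel containing p
    for (int d = 0; d < 3; ++d)            // the center is within half a voxel (1/R) of p
        assert(std::abs(center[d] - p[d]) <= 1.0f / resolution + 1e-6f);
}
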
inline float* pos_ptr(int l, int i, int j, torch::Tensor t) {
    float* pdata = t.data_ptr<float>();
    int height = t.size(1);
    int width = t.size(2);
    return &pdata[((l * height + i) * width + j) * 4];
}

struct Grid
{
    std::vector<int> seq2oddcorner;
    std::vector<int> seq2evencorner;
    std::vector<int> seq2grid;
    std::vector<int> seq2normal;
    std::vector<int> seq2neighbor;
    std::unordered_map<int, int> grid2seq;
    std::vector<int> downsample_seq;
    int num_origin_seq;
    int resolution;
    int stride;
};

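pos_ptr addresses a contiguous (layers, height, width, 4) float tensor and returns a pointer to the 4-float pixel record, whose last channel is treated as an occupancy mask by fetch_seq below. A small sketch, assuming libtorch is available and a contiguous CPU tensor; pos_ptr_demo and the shapes are illustrative only:

#include <torch/torch.h>

inline void pos_ptr_demo() {
    // One view with 2 depth layers and 4x5 pixels; channel 3 is the occupancy mask.
    torch::Tensor t = torch::zeros({2, 4, 5, 4}, torch::kFloat32);
    auto a = t.accessor<float, 4>();
    a[1][2][3][0] = 0.25f;             // x coordinate at layer 1, pixel (2, 3)
    a[1][2][3][3] = 1.0f;              // mark the pixel as occupied
    float* p = pos_ptr(1, 2, 3, t);    // &data[((1 * 4 + 2) * 5 + 3) * 4]
    // Here p[0] == 0.25f and p[3] == 1.0f.
}
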
inline void pos_from_seq(Grid& grid, int seq, float* p) {
    auto k = grid.seq2grid[seq];
    key2pos(k, grid.resolution, p);
}

inline int fetch_seq(Grid& grid, int l, int i, int j, torch::Tensor pdata) {
    float* p = pos_ptr(l, i, j, pdata);
    if (p[3] == 0)
        return -1;
    auto key = pos2key(p, grid.resolution);
    int seq = grid.grid2seq[key];
    return seq;
}

inline int fetch_last_seq(Grid& grid, int i, int j, torch::Tensor pdata) {
    int num_layers = pdata.size(0);
    int l = 0;
    int idx = fetch_seq(grid, l, i, j, pdata);
    while (l < num_layers - 1) {
        l += 1;
        int new_idx = fetch_seq(grid, l, i, j, pdata);
        if (new_idx == -1)
            break;
        idx = new_idx;
    }
    return idx;
}

inline int fetch_nearest_seq(Grid& grid, int i, int j, int dim, float d, torch::Tensor pdata) {
    float p[3];
    float max_dist = 1e10;
    int best_idx = -1;
    int num_layers = pdata.size(0);
    for (int l = 0; l < num_layers; ++l) {
        int idx = fetch_seq(grid, l, i, j, pdata);
        if (idx == -1)
            break;
        pos_from_seq(grid, idx, p);
        float dist = std::abs(d - p[(dim + 2) % 3]);
        if (dist < max_dist) {
            max_dist = dist;
            best_idx = idx;
        }
    }
    return best_idx;
}

inline int fetch_nearest_seq_layer(Grid& grid, int i, int j, int dim, float d, torch::Tensor pdata) {
    float p[3];
    float max_dist = 1e10;
    int best_layer = -1;
    int num_layers = pdata.size(0);
    for (int l = 0; l < num_layers; ++l) {
        int idx = fetch_seq(grid, l, i, j, pdata);
        if (idx == -1)
            break;
        pos_from_seq(grid, idx, p);
        float dist = std::abs(d - p[(dim + 2) % 3]);
        if (dist < max_dist) {
            max_dist = dist;
            best_layer = l;
        }
    }
    return best_layer;
}

void FetchNeighbor(Grid& grid, int seq, float* pos, int dim, int boundary_info, std::vector<torch::Tensor>& view_layer_positions,
    int* output_indices)
{
    auto t = view_layer_positions[dim];
    int height = t.size(1);
    int width = t.size(2);
    int top = 0;
    int ci = 0;
    int cj = 0;
    if (dim == 0) {
        ci = (pos[1]/2+0.5)*height;
        cj = (pos[0]/2+0.5)*width;
    }
    else if (dim == 1) {
        ci = (pos[1]/2+0.5)*height;
        cj = (pos[2]/2+0.5)*width;
    }
    else {
        ci = (-pos[2]/2+0.5)*height;
        cj = (pos[0]/2+0.5)*width;
    }
    int stride = grid.stride;
    for (int ni = ci + stride; ni >= ci - stride; ni -= stride) {
        for (int nj = cj - stride; nj <= cj + stride; nj += stride) {
            int idx = -1;
            if (ni == ci && nj == cj)
                idx = seq;
            else if (!(ni < 0 || ni >= height || nj < 0 || nj >= width)) {
                if (boundary_info == -1)
                    idx = fetch_seq(grid, 0, ni, nj, t);
                else if (boundary_info == 1)
                    idx = fetch_last_seq(grid, ni, nj, t);
                else
                    idx = fetch_nearest_seq(grid, ni, nj, dim, pos[(dim + 2) % 3], t);
            }
            output_indices[top] = idx;
            top += 1;
        }
    }
}

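FetchNeighbor projects the cell center into the view whose axis matches the cell's dominant normal, walks a 3x3 pixel neighborhood spaced grid.stride apart, and writes the 9 sequence indices in a fixed slot order (-1 for empty or out-of-range pixels). A small sketch of that slot order, mirroring the loop direction above; neighbor_slot_order_demo is illustrative and not part of the original file:

#include <cstdio>

inline void neighbor_slot_order_demo(int stride) {
    int top = 0;
    for (int di = stride; di >= -stride; di -= stride)       // rows: +stride, 0, -stride
        for (int dj = -stride; dj <= stride; dj += stride)   // cols: -stride, 0, +stride
            std::printf("slot %d -> pixel offset (%+d, %+d)\n", top++, di, dj);
    // Slot 4 is always the center cell itself (seq).
}
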
void DownsampleGrid(Grid& src, Grid& tar)
{
    src.downsample_seq.resize(src.seq2grid.size(), -1);
    tar.resolution = src.resolution / 2;
    tar.stride = src.stride * 2;
    float pos[3];
    std::vector<int> seq2normal_count;
    for (int i = 0; i < src.seq2grid.size(); ++i) {
        key2pos(src.seq2grid[i], src.resolution, pos);
        int k = pos2key(pos, tar.resolution);
        int s = seq2normal_count.size();
        if (!tar.grid2seq.count(k)) {
            tar.grid2seq[k] = tar.seq2grid.size();
            tar.seq2grid.emplace_back(k);
            seq2normal_count.emplace_back(0);
            seq2normal_count.emplace_back(0);
            seq2normal_count.emplace_back(0);
            //tar.seq2normal.emplace_back(src.seq2normal[i]);
        } else {
            s = tar.grid2seq[k] * 3;
        }
        seq2normal_count[s + src.seq2normal[i]] += 1;
        src.downsample_seq[i] = tar.grid2seq[k];
    }
    tar.seq2normal.resize(seq2normal_count.size() / 3);
    for (int i = 0; i < seq2normal_count.size(); i += 3) {
        int t = 0;
        for (int j = 1; j < 3; ++j) {
            if (seq2normal_count[i + j] > seq2normal_count[i + t])
                t = j;
        }
        tar.seq2normal[i / 3] = t;
    }
}

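DownsampleGrid halves the resolution, maps every fine cell to its coarse parent, and gives each coarse cell the dominant normal axis by a per-axis vote over its fine cells. A minimal sketch of that vote, written as a free function for illustration only (dominant_axis is not part of the original file):

#include <array>

inline int dominant_axis(const std::array<int, 3>& counts) {
    int t = 0;
    for (int j = 1; j < 3; ++j)
        if (counts[j] > counts[t])
            t = j;
    return t;   // e.g. counts {2, 5, 1} -> axis 1
}
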
void NeighborGrid(Grid& grid, std::vector<torch::Tensor> view_layer_positions, int v)
{
    grid.seq2evencorner.resize(grid.seq2grid.size(), 0);
    grid.seq2oddcorner.resize(grid.seq2grid.size(), 0);
    std::unordered_set<int> visited_seq;
    for (int vd = 0; vd < 3; ++vd) {
        auto t = view_layer_positions[vd];
        auto t0 = view_layer_positions[v];
        int height = t.size(1);
        int width = t.size(2);
        int num_layers = t.size(0);
        int num_view_layers = t0.size(0);
        for (int i = 0; i < height; ++i) {
            for (int j = 0; j < width; ++j) {
                for (int l = 0; l < num_layers; ++l) {
                    int seq = fetch_seq(grid, l, i, j, t);
                    if (seq == -1)
                        break;
                    int dim = grid.seq2normal[seq];
                    if (dim != v)
                        continue;

                    float pos[3];
                    pos_from_seq(grid, seq, pos);

                    int ci = 0;
                    int cj = 0;
                    if (dim == 0) {
                        ci = (pos[1]/2+0.5)*height;
                        cj = (pos[0]/2+0.5)*width;
                    }
                    else if (dim == 1) {
                        ci = (pos[1]/2+0.5)*height;
                        cj = (pos[2]/2+0.5)*width;
                    }
                    else {
                        ci = (-pos[2]/2+0.5)*height;
                        cj = (pos[0]/2+0.5)*width;
                    }

                    if ((ci % (grid.stride * 2) < grid.stride) && (cj % (grid.stride * 2) >= grid.stride))
                        grid.seq2evencorner[seq] = 1;

                    if ((ci % (grid.stride * 2) >= grid.stride) && (cj % (grid.stride * 2) < grid.stride))
                        grid.seq2oddcorner[seq] = 1;

                    bool is_boundary = false;
                    if (vd == v) {
                        if (l == 0 || l == num_layers - 1)
                            is_boundary = true;
                        else {
                            int seq_new = fetch_seq(grid, l + 1, i, j, t);
                            if (seq_new == -1)
                                is_boundary = true;
                        }
                    }
                    int boundary_info = 0;
                    if (is_boundary && (l == 0))
                        boundary_info = -1;
                    else if (is_boundary)
                        boundary_info = 1;
                    if (visited_seq.count(seq))
                        continue;
                    visited_seq.insert(seq);

                    FetchNeighbor(grid, seq, pos, dim, boundary_info, view_layer_positions, &grid.seq2neighbor[seq * 9]);
                }
            }
        }
    }
}

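NeighborGrid classifies each cell as an "even" or "odd" corner from the parity of its projected pixel coordinates relative to twice the grid stride, then fills its 9-entry neighbor table. A tiny sketch of the parity test used above; corner_parity is an illustrative helper, not part of the original file:

// Returns 0 for an even corner, 1 for an odd corner, -1 otherwise,
// mirroring the two tests in NeighborGrid.
inline int corner_parity(int ci, int cj, int stride) {
    bool row_hi = (ci % (stride * 2)) >= stride;
    bool col_hi = (cj % (stride * 2)) >= stride;
    if (!row_hi && col_hi) return 0;   // even corner
    if (row_hi && !col_hi) return 1;   // odd corner
    return -1;
}
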
void PadGrid(Grid& src, Grid& tar, std::vector<torch::Tensor>& view_layer_positions) {
    auto& downsample_seq = src.downsample_seq;
    auto& seq2evencorner = src.seq2evencorner;
    auto& seq2oddcorner = src.seq2oddcorner;
    int indices[9];
    std::vector<int> mapped_even_corners(tar.seq2grid.size(), 0);
    std::vector<int> mapped_odd_corners(tar.seq2grid.size(), 0);
    for (int i = 0; i < downsample_seq.size(); ++i) {
        if (seq2evencorner[i] > 0) {
            mapped_even_corners[downsample_seq[i]] = 1;
        }
        if (seq2oddcorner[i] > 0) {
            mapped_odd_corners[downsample_seq[i]] = 1;
        }
    }
    auto& tar_seq2normal = tar.seq2normal;
    auto& tar_seq2grid = tar.seq2grid;
    for (int i = 0; i < tar_seq2grid.size(); ++i) {
        if (mapped_even_corners[i] == 1 && mapped_odd_corners[i] == 1)
            continue;
        auto k = tar_seq2grid[i];
        float p[3];
        key2cornerpos(k, tar.resolution, p);

        int src_key = pos2key(p, src.resolution);
        if (!src.grid2seq.count(src_key)) {
            int seq = src.seq2grid.size();
            src.grid2seq[src_key] = seq;
            src.seq2evencorner.emplace_back((mapped_even_corners[i] == 0));
            src.seq2oddcorner.emplace_back((mapped_odd_corners[i] == 0));
            src.seq2grid.emplace_back(src_key);
            src.seq2normal.emplace_back(tar_seq2normal[i]);
            FetchNeighbor(src, seq, p, tar_seq2normal[i], 0, view_layer_positions, indices);
            for (int j = 0; j < 9; ++j) {
                src.seq2neighbor.emplace_back(indices[j]);
            }
            src.downsample_seq.emplace_back(i);
        } else {
            int seq = src.grid2seq[src_key];
            if (mapped_even_corners[i] == 0)
                src.seq2evencorner[seq] = 1;
            if (mapped_odd_corners[i] == 0)
                src.seq2oddcorner[seq] = 1;
        }
    }
}

std::vector<std::vector<torch::Tensor>> build_hierarchy(std::vector<torch::Tensor> view_layer_positions,
    std::vector<torch::Tensor> view_layer_normals, int num_level, int resolution)
{
    if (view_layer_positions.size() != 3 || num_level < 1) {
        printf("Alert! We require 3 layers and at least 1 level! (%d %d)\n", (int)view_layer_positions.size(), num_level);
        return {{},{},{},{}};
    }

    std::vector<Grid> grids;
    grids.resize(num_level);

    std::vector<float> seq2pos;
    auto& seq2grid = grids[0].seq2grid;
    auto& seq2normal = grids[0].seq2normal;
    auto& grid2seq = grids[0].grid2seq;
    grids[0].resolution = resolution;
    grids[0].stride = 1;

    auto int64_options = torch::TensorOptions().dtype(torch::kInt64).requires_grad(false);
    auto float_options = torch::TensorOptions().dtype(torch::kFloat32).requires_grad(false);

    for (int v = 0; v < 3; ++v) {
        int num_layers = view_layer_positions[v].size(0);
        int height = view_layer_positions[v].size(1);
        int width = view_layer_positions[v].size(2);
        float* data = view_layer_positions[v].data_ptr<float>();
        float* data_normal = view_layer_normals[v].data_ptr<float>();
        for (int l = 0; l < num_layers; ++l) {
            for (int i = 0; i < height; ++i) {
                for (int j = 0; j < width; ++j) {
                    float* p = &data[(i * width + j) * 4];
                    float* n = &data_normal[(i * width + j) * 3];
                    if (p[3] == 0)
                        continue;
                    auto k = pos2key(p, resolution);
                    if (!grid2seq.count(k)) {
                        int dim = 0;
                        for (int d = 0; d < 3; ++d) {
                            if (std::abs(n[d]) > std::abs(n[dim]))
                                dim = d;
                        }
                        dim = (dim + 1) % 3;
                        grid2seq[k] = seq2grid.size();
                        seq2grid.emplace_back(k);
                        seq2pos.push_back(p[0]);
                        seq2pos.push_back(p[1]);
                        seq2pos.push_back(p[2]);
                        seq2normal.emplace_back(dim);
                    }
                }
            }
            data += (height * width * 4);
            data_normal += (height * width * 3);
        }
    }

    for (int i = 0; i < num_level - 1; ++i) {
        DownsampleGrid(grids[i], grids[i + 1]);
    }

    for (int l = 0; l < num_level; ++l) {
        grids[l].seq2neighbor.resize(grids[l].seq2grid.size() * 9, -1);
        grids[l].num_origin_seq = grids[l].seq2grid.size();
        for (int d = 0; d < 3; ++d) {
            NeighborGrid(grids[l], view_layer_positions, d);
        }
    }

    for (int i = num_level - 2; i >= 0; --i) {
        PadGrid(grids[i], grids[i + 1], view_layer_positions);
    }
    for (int i = grids[0].num_origin_seq; i < grids[0].seq2grid.size(); ++i) {
        int k = grids[0].seq2grid[i];
        float p[3];
        key2pos(k, grids[0].resolution, p);
        seq2pos.push_back(p[0]);
        seq2pos.push_back(p[1]);
        seq2pos.push_back(p[2]);
    }

    std::vector<torch::Tensor> texture_positions(2);
    std::vector<torch::Tensor> grid_neighbors(grids.size());
    std::vector<torch::Tensor> grid_downsamples(grids.size() - 1);
    std::vector<torch::Tensor> grid_evencorners(grids.size());
    std::vector<torch::Tensor> grid_oddcorners(grids.size());

    texture_positions[0] = torch::zeros({seq2pos.size() / 3, 3}, float_options);
    texture_positions[1] = torch::zeros({seq2pos.size() / 3}, float_options);
    float* positions_out_ptr = texture_positions[0].data_ptr<float>();
    memcpy(positions_out_ptr, seq2pos.data(), sizeof(float) * seq2pos.size());
    positions_out_ptr = texture_positions[1].data_ptr<float>();
    for (int i = 0; i < grids[0].seq2grid.size(); ++i) {
        positions_out_ptr[i] = (i < grids[0].num_origin_seq);
    }

    for (int i = 0; i < grids.size(); ++i) {
        grid_neighbors[i] = torch::zeros({grids[i].seq2grid.size(), 9}, int64_options);
        long* nptr = grid_neighbors[i].data_ptr<long>();
        for (int j = 0; j < grids[i].seq2neighbor.size(); ++j) {
            nptr[j] = grids[i].seq2neighbor[j];
        }

        grid_evencorners[i] = torch::zeros({grids[i].seq2evencorner.size()}, int64_options);
        grid_oddcorners[i] = torch::zeros({grids[i].seq2oddcorner.size()}, int64_options);
        long* dptr = grid_evencorners[i].data_ptr<long>();
        for (int j = 0; j < grids[i].seq2evencorner.size(); ++j) {
            dptr[j] = grids[i].seq2evencorner[j];
        }
        dptr = grid_oddcorners[i].data_ptr<long>();
        for (int j = 0; j < grids[i].seq2oddcorner.size(); ++j) {
            dptr[j] = grids[i].seq2oddcorner[j];
        }
        if (i + 1 < grids.size()) {
            grid_downsamples[i] = torch::zeros({grids[i].downsample_seq.size()}, int64_options);
            long* dptr = grid_downsamples[i].data_ptr<long>();
            for (int j = 0; j < grids[i].downsample_seq.size(); ++j) {
                dptr[j] = grids[i].downsample_seq[j];
            }
        }

    }
    return {texture_positions, grid_neighbors, grid_downsamples, grid_evencorners, grid_oddcorners};
}

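build_hierarchy takes the three orthographic view position maps (layers x H x W x 4, occupancy mask in channel 3) and matching normal maps (layers x H x W x 3), deduplicates the covered voxels at the given resolution, and returns per-level texture positions, neighbor tables, downsample maps, and corner flags. A minimal call sketch, assuming contiguous CPU float tensors and that this translation unit is linked in; build_hierarchy_demo, the shapes, and the level count are illustrative:

#include <torch/torch.h>
#include <vector>

inline void build_hierarchy_demo() {
    std::vector<torch::Tensor> positions, normals;
    for (int v = 0; v < 3; ++v) {
        // 2 depth layers, 64x64 pixels per view; all pixels empty (mask channel == 0).
        positions.push_back(torch::zeros({2, 64, 64, 4}, torch::kFloat32));
        normals.push_back(torch::zeros({2, 64, 64, 3}, torch::kFloat32));
    }
    auto out = build_hierarchy(positions, normals, /*num_level=*/3, /*resolution=*/64);
    // out[0]: {positions (N, 3), origin mask (N)}; out[1]: per-level (N_l, 9) neighbor tables;
    // out[2]: per-level downsample maps; out[3]/out[4]: even/odd corner flags.
}
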
std::vector<std::vector<torch::Tensor>> build_hierarchy_with_feat(
    std::vector<torch::Tensor> view_layer_positions,
    std::vector<torch::Tensor> view_layer_normals,
    std::vector<torch::Tensor> view_layer_feats,
    int num_level, int resolution)
{
    if (view_layer_positions.size() != 3 || num_level < 1) {
        printf("Alert! We require 3 layers and at least 1 level! (%d %d)\n", (int)view_layer_positions.size(), num_level);
        return {{},{},{},{}};
    }

    std::vector<Grid> grids;
    grids.resize(num_level);

    std::vector<float> seq2pos;
    std::vector<float> seq2feat;
    auto& seq2grid = grids[0].seq2grid;
    auto& seq2normal = grids[0].seq2normal;
    auto& grid2seq = grids[0].grid2seq;
    grids[0].resolution = resolution;
    grids[0].stride = 1;

    auto int64_options = torch::TensorOptions().dtype(torch::kInt64).requires_grad(false);
    auto float_options = torch::TensorOptions().dtype(torch::kFloat32).requires_grad(false);

    int feat_channel = 3;
    for (int v = 0; v < 3; ++v) {
        int num_layers = view_layer_positions[v].size(0);
        int height = view_layer_positions[v].size(1);
        int width = view_layer_positions[v].size(2);
        float* data = view_layer_positions[v].data_ptr<float>();
        float* data_normal = view_layer_normals[v].data_ptr<float>();
        float* data_feat = view_layer_feats[v].data_ptr<float>();
        feat_channel = view_layer_feats[v].size(3);
        for (int l = 0; l < num_layers; ++l) {
            for (int i = 0; i < height; ++i) {
                for (int j = 0; j < width; ++j) {
                    float* p = &data[(i * width + j) * 4];
                    float* n = &data_normal[(i * width + j) * 3];
                    float* f = &data_feat[(i * width + j) * feat_channel];
                    if (p[3] == 0)
                        continue;
                    auto k = pos2key(p, resolution);
                    if (!grid2seq.count(k)) {
                        int dim = 0;
                        for (int d = 0; d < 3; ++d) {
                            if (std::abs(n[d]) > std::abs(n[dim]))
                                dim = d;
                        }
                        dim = (dim + 1) % 3;
                        grid2seq[k] = seq2grid.size();
                        seq2grid.emplace_back(k);
                        seq2pos.push_back(p[0]);
                        seq2pos.push_back(p[1]);
                        seq2pos.push_back(p[2]);
                        for (int c = 0; c < feat_channel; ++c) {
                            seq2feat.emplace_back(f[c]);
                        }
                        seq2normal.emplace_back(dim);
                    }
                }
            }
            data += (height * width * 4);
            data_normal += (height * width * 3);
            data_feat += (height * width * feat_channel);
        }
    }

    for (int i = 0; i < num_level - 1; ++i) {
        DownsampleGrid(grids[i], grids[i + 1]);
    }

    for (int l = 0; l < num_level; ++l) {
        grids[l].seq2neighbor.resize(grids[l].seq2grid.size() * 9, -1);
        grids[l].num_origin_seq = grids[l].seq2grid.size();
        for (int d = 0; d < 3; ++d) {
            NeighborGrid(grids[l], view_layer_positions, d);
        }
    }

    for (int i = num_level - 2; i >= 0; --i) {
        PadGrid(grids[i], grids[i + 1], view_layer_positions);
    }
    for (int i = grids[0].num_origin_seq; i < grids[0].seq2grid.size(); ++i) {
        int k = grids[0].seq2grid[i];
        float p[3];
        key2pos(k, grids[0].resolution, p);
        seq2pos.push_back(p[0]);
        seq2pos.push_back(p[1]);
        seq2pos.push_back(p[2]);
        for (int c = 0; c < feat_channel; ++c) {
            seq2feat.emplace_back(0.5);
        }
    }

    std::vector<torch::Tensor> texture_positions(2);
    std::vector<torch::Tensor> texture_feats(1);
    std::vector<torch::Tensor> grid_neighbors(grids.size());
    std::vector<torch::Tensor> grid_downsamples(grids.size() - 1);
    std::vector<torch::Tensor> grid_evencorners(grids.size());
    std::vector<torch::Tensor> grid_oddcorners(grids.size());

    texture_positions[0] = torch::zeros({seq2pos.size() / 3, 3}, float_options);
    texture_positions[1] = torch::zeros({seq2pos.size() / 3}, float_options);
    texture_feats[0] = torch::zeros({seq2feat.size() / feat_channel, feat_channel}, float_options);
    float* positions_out_ptr = texture_positions[0].data_ptr<float>();
    memcpy(positions_out_ptr, seq2pos.data(), sizeof(float) * seq2pos.size());
    positions_out_ptr = texture_positions[1].data_ptr<float>();
    for (int i = 0; i < grids[0].seq2grid.size(); ++i) {
        positions_out_ptr[i] = (i < grids[0].num_origin_seq);
    }
    float* feats_out_ptr = texture_feats[0].data_ptr<float>();
    memcpy(feats_out_ptr, seq2feat.data(), sizeof(float) * seq2feat.size());

    for (int i = 0; i < grids.size(); ++i) {
        grid_neighbors[i] = torch::zeros({grids[i].seq2grid.size(), 9}, int64_options);
        long* nptr = grid_neighbors[i].data_ptr<long>();
        for (int j = 0; j < grids[i].seq2neighbor.size(); ++j) {
            nptr[j] = grids[i].seq2neighbor[j];
        }
        grid_evencorners[i] = torch::zeros({grids[i].seq2evencorner.size()}, int64_options);
        grid_oddcorners[i] = torch::zeros({grids[i].seq2oddcorner.size()}, int64_options);
        long* dptr = grid_evencorners[i].data_ptr<long>();
        for (int j = 0; j < grids[i].seq2evencorner.size(); ++j) {
            dptr[j] = grids[i].seq2evencorner[j];
        }
        dptr = grid_oddcorners[i].data_ptr<long>();
        for (int j = 0; j < grids[i].seq2oddcorner.size(); ++j) {
            dptr[j] = grids[i].seq2oddcorner[j];
        }
        if (i + 1 < grids.size()) {
            grid_downsamples[i] = torch::zeros({grids[i].downsample_seq.size()}, int64_options);
            long* dptr = grid_downsamples[i].data_ptr<long>();
            for (int j = 0; j < grids[i].downsample_seq.size(); ++j) {
                dptr[j] = grids[i].downsample_seq[j];
            }
        }
    }
    return {texture_positions, texture_feats, grid_neighbors, grid_downsamples, grid_evencorners, grid_oddcorners};
}
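
build_hierarchy_with_feat mirrors build_hierarchy but also carries a per-pixel feature map (layers x H x W x C) into a per-point feature tensor; points added by PadGrid receive a constant 0.5 feature. A call sketch under the same assumptions as the build_hierarchy example above; build_hierarchy_with_feat_demo and the shapes (C = 3) are illustrative:

#include <torch/torch.h>
#include <vector>

inline void build_hierarchy_with_feat_demo() {
    std::vector<torch::Tensor> positions, normals, feats;
    for (int v = 0; v < 3; ++v) {
        positions.push_back(torch::zeros({2, 64, 64, 4}, torch::kFloat32));
        normals.push_back(torch::zeros({2, 64, 64, 3}, torch::kFloat32));
        feats.push_back(torch::zeros({2, 64, 64, 3}, torch::kFloat32));   // C = 3 feature channels
    }
    auto out = build_hierarchy_with_feat(positions, normals, feats,
                                         /*num_level=*/3, /*resolution=*/64);
    // out[0]: positions + origin mask; out[1]: per-point features (N, C);
    // out[2..5]: neighbor tables, downsample maps, even/odd corner flags per level.
}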