gvecchio committed
Commit 244c8ae
1 Parent(s): 62c7ae3

Add dataset building scripts

.gitattributes CHANGED
@@ -55,3 +55,9 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.webp filter=lfs diff=lfs merge=lfs -text
  maps/train/Wood.z01 filter=lfs diff=lfs merge=lfs -text
  maps/train/Ground.z01 filter=lfs diff=lfs merge=lfs -text
+ scripts/hdri/DayEnvironmentHDRI015_8K-HDR.exr filter=lfs diff=lfs merge=lfs -text
+ scripts/hdri/DaySkyHDRI037B_8K-HDR.exr filter=lfs diff=lfs merge=lfs -text
+ scripts/hdri/IndoorEnvironmentHDRI003_8K-HDR.exr filter=lfs diff=lfs merge=lfs -text
+ scripts/hdri/IndoorEnvironmentHDRI005_8K-HDR.exr filter=lfs diff=lfs merge=lfs -text
+ scripts/hdri/studio_small_02_4k.exr filter=lfs diff=lfs merge=lfs -text
+ scripts/render_ambient.blend filter=lfs diff=lfs merge=lfs -text
scripts/assemble_render.py ADDED
@@ -0,0 +1,37 @@
+ import argparse
+ from pathlib import Path
+
+ import torchvision.transforms.functional as TF
+ from PIL import Image
+ from tqdm import tqdm
+
+ if __name__ == "__main__":
+     # Create argument parser
+     parser = argparse.ArgumentParser(description="Assemble renders.")
+     parser.add_argument("--source_dir", required=True, help="Directory where the dataset is stored.")
+     args = parser.parse_args()
+
+     source_dir = Path(args.source_dir)
+
+     # Find all render directories
+     for render_dir in tqdm(list(source_dir.glob("**/renders/"))):
+         passes_dir = render_dir / "passes"
+         num_renders = len(list(passes_dir.glob("*diffuse.png")))
+
+         for i in range(num_renders):
+             diff_path = passes_dir / f"render_{i:02d}_diffuse.png"
+             glossy_path = passes_dir / f"render_{i:02d}_glossy.png"
+
+             full_path = render_dir / f"render_{i:02d}.png"
+
+             diffuse = TF.to_tensor(Image.open(diff_path))
+             glossy = TF.to_tensor(Image.open(glossy_path))
+
+             diffuse = TF.adjust_gamma(diffuse, 2.2)  # decode to linear light before summing
+             glossy = TF.adjust_gamma(glossy, 2.2)
+
+             render = diffuse + glossy
+
+             render = TF.adjust_gamma(render, 1 / 2.2).clamp(0.0, 1.0)  # re-encode and clamp to avoid uint8 wrap-around
+             render = TF.to_pil_image(render)
+             render.save(full_path)
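Note: the passes are combined in linear light: each 8-bit pass is gamma-decoded (gamma 2.2), the diffuse and glossy contributions are added, and the sum is re-encoded for display. A minimal NumPy-only sketch of the same compositing step, assuming a pass pair named render_00_diffuse.png / render_00_glossy.png sits in the working directory (file names follow the render script's pattern, but the paths here are illustrative):

import numpy as np
from PIL import Image

# Illustrative input files; any diffuse/glossy pair written by render_ambient.py works.
diffuse = np.asarray(Image.open("render_00_diffuse.png"), dtype=np.float32) / 255.0
glossy = np.asarray(Image.open("render_00_glossy.png"), dtype=np.float32) / 255.0

# Decode the display gamma, sum the radiance contributions, clamp, and re-encode.
linear = diffuse ** 2.2 + glossy ** 2.2
combined = np.clip(linear, 0.0, 1.0) ** (1.0 / 2.2)

Image.fromarray((combined * 255.0 + 0.5).astype(np.uint8)).save("render_00.png")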
scripts/hdri/DayEnvironmentHDRI015_8K-HDR.exr ADDED

Git LFS Details

  • SHA256: cbb14b550924402ba0132e6e225c11fc62088313e9cea399695c81a9e6f469b9
  • Pointer size: 134 Bytes
  • Size of remote file: 137 MB
scripts/hdri/DaySkyHDRI037B_8K-HDR.exr ADDED

Git LFS Details

  • SHA256: 36e45a121d7b2d0bfeed0be16e5afb4cfc936e05aeaa72f26e89b5c84ce38ca6
  • Pointer size: 133 Bytes
  • Size of remote file: 40.6 MB
scripts/hdri/IndoorEnvironmentHDRI003_8K-HDR.exr ADDED

Git LFS Details

  • SHA256: aa2935f36a1ac93ceab58d508721c83fd8f1e9e64c12fe231b47752c9448f80a
  • Pointer size: 133 Bytes
  • Size of remote file: 96 MB
scripts/hdri/IndoorEnvironmentHDRI005_8K-HDR.exr ADDED

Git LFS Details

  • SHA256: d7ee98faf82aa261ae193661e3834127276d866f6b0efddf7ff3f2f84efd3f47
  • Pointer size: 134 Bytes
  • Size of remote file: 110 MB
scripts/hdri/studio_small_02_4k.exr ADDED

Git LFS Details

  • SHA256: f1a407bd80d477c482e3a39205d0bbc53905328d77e00a2f024993e07c808215
  • Pointer size: 133 Bytes
  • Size of remote file: 21.7 MB
scripts/make_crops.py ADDED
@@ -0,0 +1,107 @@
+ import argparse
+ import math
+ import threading
+ from pathlib import Path
+
+ import numpy as np
+ import torch
+ import torchvision.transforms.functional as TF
+ from PIL import Image
+ from tqdm import tqdm
+
+
+ def rotate_normal_map(normal_map, angle_deg):
+     angle_rad = angle_deg * (torch.pi / 180.0)
+
+     normal_map = normal_map * 2.0 - 1.0  # Convert to [-1, 1]
+     normal_map = normal_map.unsqueeze(0)  # Add batch dimension
+
+     # Build the rotation matrix around the Z axis
+     rotation_matrix = torch.tensor([[math.cos(angle_rad), -math.sin(angle_rad), 0],
+                                     [math.sin(angle_rad), math.cos(angle_rad), 0],
+                                     [0, 0, 1]], device=normal_map.device)
+
+     # Reshape for batch matrix multiplication
+     reshaped_normal_map = normal_map.view(1, 3, -1)  # Reshape to [1, 3, H*W]
+     rotation_matrix = rotation_matrix.view(1, 3, 3)  # Add batch dimension
+
+     # Rotate the vectors
+     rotated_vectors = torch.bmm(rotation_matrix, reshaped_normal_map)
+     rotated_vectors = rotated_vectors.view(1, 3, normal_map.size(2), normal_map.size(3))
+
+     rotated_vectors = rotated_vectors / 2.0 + 0.5  # Convert back to [0, 1]
+
+     return rotated_vectors[0]
+
+ def process_map(map_path, mat_dest):
+     map_name = map_path.stem
+     img = Image.open(map_path)
+     img = TF.to_tensor(img).cuda()
+     img = TF.resize(img, (4096, 4096), antialias=True)
+
+     img = img.repeat(1, 3, 3)  # tile 3x3 so rotations never expose empty corners
+     img = TF.center_crop(img, (5793, 5793))  # 5793 = ceil(4096 * sqrt(2))
+
+     for rot_angle in range(0, 360, 45):
+         crop_i = 0
+
+         if "normal" in map_name:
+             rot_img = rotate_normal_map(img, angle_deg=rot_angle)  # rotate the encoded vectors together with the image
+             rot_img = TF.rotate(rot_img, rot_angle)
+         else:
+             rot_img = TF.rotate(img, rot_angle)
+
+         rot_img = TF.center_crop(rot_img, (4096, 4096))
+
+         for crop_res in [4096, 2048, 1024]:
+             # Split into non-overlapping crops
+             crops = rot_img.unfold(1, crop_res, crop_res).unfold(2, crop_res, crop_res)
+             crops = crops.permute(1, 2, 0, 3, 4)
+             crops = crops.reshape(-1, crops.size(2), crop_res, crop_res)
+
+             for crop in crops:
+                 crop_dir = mat_dest / f"rot_{rot_angle:03d}_crop_{crop_i:03d}"
+                 crop_dir.mkdir(parents=True, exist_ok=True)
+
+                 crop = TF.resize(crop, (1024, 1024), antialias=True)
+
+                 if map_name in ["height", "displacement"]:  # keep height data as single-channel 16-bit PNG
+                     crop = crop.permute(1, 2, 0).cpu().numpy()
+                     crop = crop.astype(np.uint16)
+                     crop = Image.fromarray(crop[..., 0])
+                     crop.save(crop_dir / f"{map_name}.png")
+                 else:
+                     TF.to_pil_image(crop).save(crop_dir / f"{map_name}.png")
+
+                 crop_i += 1
+
+ if __name__ == "__main__":
+     # Create argument parser
+     parser = argparse.ArgumentParser(description="Make dataset crops.")
+     parser.add_argument("--source_dir", required=True, help="Directory where the original 4K maps are stored.")
+     parser.add_argument("--dest_dir", required=True, help="Destination directory to store the 1K crops.")
+     args = parser.parse_args()
+
+     source_dir = Path(args.source_dir)
+     dest_dir = Path(args.dest_dir)
+
+     # Find all materials in the source directory
+     for file in tqdm(list(source_dir.glob("**/basecolor.png"))):
+         mat_dir = file.parent
+
+         name = mat_dir.stem
+         category = mat_dir.parent.stem
+         split = mat_dir.parent.parent.stem
+
+         mat_dest = dest_dir / split / category / name
+         mat_dest.mkdir(parents=True, exist_ok=True)
+
+         threads = []  # process every map of the material in its own thread
+         for map_path in mat_dir.glob("*.png"):
+             t = threading.Thread(target=process_map, args=(map_path, mat_dest))
+             t.start()
+
+             threads.append(t)
+
+         for t in threads:
+             t.join()
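Note: two details of the cropping are easy to miss: the 4K map is tiled 3x3 and center-cropped to 5793 px (ceil(4096 * sqrt(2))) so the central 4096x4096 window stays fully covered at any rotation angle, and unfold() then slices that window into non-overlapping tiles. A self-contained sketch of the unfold step on a small dummy tensor (sizes chosen only for illustration):

import torch

# Dummy 3-channel map standing in for a rotated 4096x4096 texture (kept small here).
img = torch.arange(3 * 8 * 8, dtype=torch.float32).reshape(3, 8, 8)
crop_res = 4

# Unfolding H then W yields a grid of non-overlapping crop_res x crop_res tiles.
crops = img.unfold(1, crop_res, crop_res).unfold(2, crop_res, crop_res)   # [3, 2, 2, 4, 4]
crops = crops.permute(1, 2, 0, 3, 4).reshape(-1, 3, crop_res, crop_res)   # [4, 3, 4, 4]

print(crops.shape)                                           # torch.Size([4, 3, 4, 4])
print(torch.equal(crops[0], img[:, :crop_res, :crop_res]))   # True: first tile is the top-left block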
scripts/render_ambient.blend ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:09fce0c9a535c86bfa812378c74f826bae8eaa1077a0629b4b878fc1eae2f6ca
+ size 1231560
scripts/render_ambient.py ADDED
@@ -0,0 +1,117 @@
+ import math
+ import random
+ import sys
+ from pathlib import Path
+
+ import bpy
+
+ argv = sys.argv
+ argv = argv[argv.index("--") + 1:]  # get all args after "--"
+ dset_dir = Path(argv[0])
+
+ def create_node_once(nodes, name, location=(0, 0)):
+     # Return the named image-texture node if it already exists, otherwise create it
+     existing = nodes.get(name)
+     if existing is not None:
+         return existing
+     new_node = nodes.new('ShaderNodeTexImage')
+     new_node.name = name
+     new_node.location = location
+     return new_node
+
+ def load_map_image(node, img_path, name, colorspace="sRGB"):
+     # Drop any previously loaded image with the same name and assign the new one
+     existing = bpy.data.images.get(name)
+     if existing is not None:
+         bpy.data.images.remove(existing)
+     node.image = bpy.data.images.load(img_path)
+     node.image.colorspace_settings.name = colorspace
+     node.image.name = name
+
+ bpy.context.scene.render.image_settings.file_format = "PNG"
+
+ material = bpy.data.materials.get("BSDFPlane")
+
+ # Enable node editing on the material
+ material.use_nodes = True
+ nodes = material.node_tree.nodes
+
+ # Compositing nodes
+ scene = bpy.context.scene
+ scene.render.use_compositing = True
+
+ # Get output node
+ p_out = nodes['Principled Output']
+
+ # Get Principled BSDF node
+ bsdf_node = nodes.get("Principled BSDF")
+
+ normal_map_node = nodes.get("Normal Map")
+ displacement_node = nodes.get("Displacement")
+ scale_node = nodes.get("Scale")
+
+ # Get background node
+ bg_node = bpy.data.worlds['World'].node_tree.nodes["Background"]
+
+ # Get camera
+ obj_camera = bpy.data.objects["Camera"]
+ # Get the camera data block
+ camera_data = obj_camera.data
+
+ for item_bc in dset_dir.glob("**/basecolor.png"):
+     item = item_bc.parent
+
+     # Skip materials that already have all 10 passes (5 HDRIs x 2 passes)
+     if (item / "renders" / "passes").exists() and len(list((item / "renders" / "passes").glob("*"))) == 10:
+         continue
+
+     # Clear orphan data
+     bpy.ops.outliner.orphans_purge()
+
+     # Set maps paths
+     basecolor_path = str(item / "basecolor.png")
+     normal_path = str(item / "normal.png")
+     roughness_path = str(item / "roughness.png")
+     metallic_path = str(item / "metallic.png")
+     height_path = str(item / "displacement.png")
+
+     # Setup nodes
+     basecolor_node = nodes.get("Base Color")
+     load_map_image(basecolor_node, basecolor_path, name="BaseColor")
+
+     normal_node = nodes.get("Normal")
+     load_map_image(normal_node, normal_path, name="Normal", colorspace="Non-Color")
+
+     roughness_node = nodes.get("Roughness")
+     load_map_image(roughness_node, roughness_path, name="Roughness", colorspace="Non-Color")
+
+     metallic_node = nodes.get("Metallic")
+     load_map_image(metallic_node, metallic_path, name="Metallic", colorspace="Non-Color")
+
+     height_node = nodes.get("Height")
+     load_map_image(height_node, height_path, name="Height", colorspace="Non-Color")
+
+     scale_node = nodes.get("Scale")
+
+     # Prepare the output folder for the render passes
+     render_folder = item / "renders" / "passes"
+     render_folder.mkdir(exist_ok=True, parents=True)
+
+     for hdri_i in range(5):
+         # Switch the world lighting to the i-th HDRI and randomize its rotation
+         env_node = bpy.data.worlds['World'].node_tree.nodes[f"Env {hdri_i}"]
+         bpy.data.worlds['World'].node_tree.links.new(env_node.outputs['Color'], bg_node.inputs['Color'])
+         bpy.data.worlds['World'].node_tree.nodes["HDRIRotation"].outputs[0].default_value = random.random() * math.pi * 2
+
+         for render_pass in ["diffuse", "glossy"]:
+             if render_pass == "diffuse":
+                 scene.node_tree.nodes.active = scene.node_tree.nodes["DiffusePass"]
+                 scale_node.outputs[0].default_value = 1.0
+                 camera_data.type = 'ORTHO'
+             elif render_pass == "glossy":
+                 scene.node_tree.nodes.active = scene.node_tree.nodes['GlossyPass']
+                 scale_node.outputs[0].default_value = 0.0
+                 camera_data.type = 'PERSP'
+
+             # Render
+             bpy.context.scene.render.filepath = str(render_folder / f"render_{hdri_i:02d}_{render_pass}.png")
+             bpy.ops.render.render(write_still=True)
+
+
+ # Execution command: blender.exe -b .\render_ambient.blend -P .\render_ambient.py -- ..\maps
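Note: the three scripts are intended to run in sequence: render_ambient.py renders the per-HDRI diffuse and glossy passes, assemble_render.py composites them into the final renders, and make_crops.py produces the rotated 1K crops. A hypothetical driver sketch of that order (the blender binary name and the ../maps and ../maps_crops layout are assumptions, adjust to your setup):

import subprocess

BLENDER = "blender"           # assumed binary name; use the full path to blender.exe on Windows
MAPS_DIR = "../maps"          # assumed directory holding the per-material texture maps
CROPS_DIR = "../maps_crops"   # assumed output directory for the 1K crops

# 1. Render the diffuse and glossy passes for every material.
subprocess.run([BLENDER, "-b", "render_ambient.blend", "-P", "render_ambient.py", "--", MAPS_DIR], check=True)

# 2. Composite the passes into the final renders.
subprocess.run(["python", "assemble_render.py", "--source_dir", MAPS_DIR], check=True)

# 3. Slice the 4K maps into rotated 1K crops.
subprocess.run(["python", "make_crops.py", "--source_dir", MAPS_DIR, "--dest_dir", CROPS_DIR], check=True)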