Fix some bugs.
Files changed:
- README.md +3 -0
- app.py +27 -13
- citydreamer/extensions/grid_encoder/__init__.py +7 -3
- citydreamer/inference.py +4 -2
- requirements.txt +2 -3
README.md
CHANGED
@@ -14,3 +14,6 @@ Official demo for **[CityDreamer: Compositional Generative Model of Unbounded 3D
 - 🔥 CityDreamer is a unbounded 3D city generator.
 - 🤗 Try CityDreamer to generate photolistic 3D cities.
 - ⚠️ Due to the limited computational resources at Hugging Face, this demo only generates **A SINGLE IMAGE** based on the New York City layout. If you wish to experience more comprehensive functionality, please visit the demo on [GitHub](https://github.com/hzxie/city-dreamer?tab=readme-ov-file#iterative-demo-%EF%B8%8F).
+
+❕IMPORTANT NOTE: We are working on migrating to ZeroGPU, so the demo might be temporarily unavailable.
+
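The new README note refers to Hugging Face's ZeroGPU runtime, where a GPU is attached to the Space only while a decorated handler is executing. A minimal sketch of that pattern, assuming the standard `spaces.GPU` decorator (this commit only shows `import spaces` in app.py, not the decorator itself; the handler below is a placeholder, not the Space's real function):

```python
import spaces
import torch

@spaces.GPU(duration=120)  # a GPU is attached only while this function runs
def generate(prompt):
    # Hypothetical handler: under ZeroGPU, CUDA becomes available here rather
    # than at import time, so models must be moved to the GPU inside the call.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    return "running on %s: %s" % (device, prompt)
```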
app.py
CHANGED
@@ -4,13 +4,14 @@
 # @Author: Haozhe Xie
 # @Date: 2024-03-02 16:30:00
 # @Last Modified by: Haozhe Xie
-# @Last Modified at: 2024-09-
+# @Last Modified at: 2024-09-22 10:31:28
 # @Email: root@haozhexie.com

 import gradio as gr
 import logging
 import numpy as np
 import os
+
 import spaces
 import ssl
 import subprocess
@@ -26,25 +27,35 @@ ssl._create_default_https_context = ssl._create_unverified_context
 sys.path.append(os.path.join(os.path.dirname(__file__), "citydreamer"))


-def
+def _get_output(cmd):
     try:
-
+        return subprocess.check_output(cmd).decode("utf-8")
     except Exception as ex:
         logging.exception(ex)

-
-
+    return None
+
+
+def setup_runtime_env():
+    logging.info("Python Version: %s" % _get_output(["python", "--version"]))
+    logging.info("CUDA Version: %s" % _get_output(["nvcc", "--version"]))
+    logging.info("GCC Version: %s" % _get_output(["gcc", "--version"]))
+
+    # Install Pre-compiled CUDA extensions
     ext_dir = os.path.join(os.path.dirname(__file__), "wheels")
     for e in os.listdir(ext_dir):
-
-
-
+        logging.info("Installing Extensions from %s" % e)
+        subprocess.call(
+            ["pip", "install", os.path.join(ext_dir, e)], stderr=subprocess.STDOUT
+        )
     # Compile CUDA extensions
     # ext_dir = os.path.join(os.path.dirname(__file__), "citydreamer", "extensions")
     # for e in os.listdir(ext_dir):
     #     if os.path.isdir(os.path.join(ext_dir, e)):
     #         subprocess.call(["pip", "install", "."], cwd=os.path.join(ext_dir, e))

+    logging.info("Installed Python Packages: %s" % _get_output(["pip", "list"]))
+

 def get_models(file_name):
     import citydreamer.model
@@ -55,7 +66,8 @@ def get_models(file_name):
         file_name,
     )

-
+    device = "cuda" if torch.cuda.is_available() else "cpu"
+    ckpt = torch.load(file_name, map_location=torch.device(device))
     model = citydreamer.model.GanCraftGenerator(ckpt["cfg"])
     if torch.cuda.is_available():
         model = torch.nn.DataParallel(model).cuda().eval()
@@ -65,8 +77,8 @@ def get_models(file_name):


 def get_city_layout():
-    hf = np.array(Image.open("assets/NYC-HghtFld.png"))
-    seg = np.array(Image.open("assets/NYC-SegMap.png").convert("P"))
+    hf = np.array(Image.open("assets/NYC-HghtFld.png")).astype(np.int32)
+    seg = np.array(Image.open("assets/NYC-SegMap.png").convert("P")).astype(np.int32)
     return hf, seg


@@ -74,12 +86,14 @@ def get_city_layout():
 def get_generated_city(
     radius, altitude, azimuth, map_center, progress=gr.Progress(track_tqdm=True)
 ):
+    logging.info("CUDA is available: %s" % torch.cuda.is_available())
+    logging.info("PyTorch is built with CUDA: %s" % torch.version.cuda)
     # The import must be done after CUDA extension compilation
     import citydreamer.inference

     return citydreamer.inference.generate_city(
-        get_generated_city.fgm,
-        get_generated_city.bgm,
+        get_generated_city.fgm.to("cuda"),
+        get_generated_city.bgm.to("cuda"),
         get_generated_city.hf.copy(),
         get_generated_city.seg.copy(),
         map_center,
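The device handling added in get_models() and get_generated_city() follows a GPU-on-demand pattern: load checkpoints onto whatever device exists at startup, and push the generators to CUDA only inside the request handler. A minimal sketch of the same idea, using placeholder names (load_checkpoint, run_on_gpu) rather than the Space's actual API:

```python
import torch

def load_checkpoint(path):
    # Deserialize onto whichever device is available so a CPU-only startup
    # does not fail on CUDA-mapped tensors.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    return torch.load(path, map_location=torch.device(device))

def run_on_gpu(fgm, bgm, render_fn, *args):
    # Move the foreground/background generators to the GPU lazily, right
    # before rendering, mirroring the get_generated_city() change.
    if torch.cuda.is_available():
        fgm, bgm = fgm.to("cuda"), bgm.to("cuda")
    return render_fn(fgm, bgm, *args)
```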
citydreamer/extensions/grid_encoder/__init__.py
CHANGED
@@ -4,7 +4,7 @@
 # @Author: Jiaxiang Tang (@ashawkey)
 # @Date: 2023-04-15 10:39:28
 # @Last Modified by: Haozhe Xie
-# @Last Modified at:
+# @Last Modified at: 2024-09-22 10:26:30
 # @Email: ashawkey1999@gmail.com
 # @Ref: https://github.com/ashawkey/torch-ngp

@@ -12,8 +12,6 @@ import math
 import numpy as np
 import torch

-import grid_encoder_ext
-

 class GridEncoderFunction(torch.autograd.Function):
     @staticmethod
@@ -55,6 +53,9 @@ class GridEncoderFunction(torch.autograd.Function):
                 1, device=inputs.device, dtype=embeddings.dtype
             ) # placeholder... TODO: a better way?

+        # Fix: libcudart.so.11.0: cannot open shared object file
+        import grid_encoder_ext
+
         grid_encoder_ext.forward(
             inputs,
             embeddings,
@@ -96,6 +97,9 @@ class GridEncoderFunction(torch.autograd.Function):
         else:
             grad_inputs = torch.zeros(1, device=inputs.device, dtype=embeddings.dtype)

+        # Fix: libcudart.so.11.0: cannot open shared object file
+        import grid_encoder_ext
+
         grid_encoder_ext.backward(
             grad,
             inputs,
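Moving `import grid_encoder_ext` from module scope into forward()/backward() lets the package be imported before the pre-built wheel (and the CUDA runtime it links against) is in place; the compiled extension is only loaded when a kernel is actually invoked. A sketch of the same lazy-import idea with a placeholder extension name (`my_cuda_ext` is not a real module):

```python
import torch

class LazyExtFunction(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x):
        # Importing the compiled extension here, not at module load, avoids
        # "libcudart.so.X: cannot open shared object file" in processes that
        # import this file before the matching wheel is installed.
        import my_cuda_ext  # hypothetical compiled CUDA extension
        return my_cuda_ext.forward(x)

    @staticmethod
    def backward(ctx, grad):
        import my_cuda_ext  # hypothetical compiled CUDA extension
        return my_cuda_ext.backward(grad)
```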
citydreamer/inference.py
CHANGED
@@ -4,7 +4,7 @@
 # @Author: Haozhe Xie
 # @Date: 2024-03-02 16:30:00
 # @Last Modified by: Haozhe Xie
-# @Last Modified at: 2024-
+# @Last Modified at: 2024-09-22 10:22:05
 # @Email: root@haozhexie.com

 import copy
@@ -13,7 +13,6 @@ import logging
 import math
 import numpy as np
 import torch
-import torchvision

 import citydreamer.extensions.extrude_tensor
 import citydreamer.extensions.voxlib
@@ -336,6 +335,9 @@ def render_bg(
     assert hf_seg.size(2) == CONSTANTS["LAYOUT_VOL_SIZE"]
     assert hf_seg.size(3) == CONSTANTS["LAYOUT_VOL_SIZE"]

+    # Fix: operator torchvision::nms does not exist
+    import torchvision
+
     blurrer = torchvision.transforms.GaussianBlur(kernel_size=3, sigma=(2, 2))
     _voxel_id = copy.deepcopy(voxel_id)
     _voxel_id[voxel_id >= CONSTANTS["BLD_INS_LABEL_MIN"]] = CLASSES["BLD_FACADE"]
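The "operator torchvision::nms does not exist" error is the usual symptom of a torch/torchvision pair that were not built for each other; deferring `import torchvision` to render time means it is only imported after the runtime has installed the matching wheels. A quick check, assuming nothing beyond the two packages themselves:

```python
import torch
import torchvision

# torchvision registers its compiled operators (torchvision::nms among them)
# against the exact torch build it was compiled for, so the two versions must
# be a matched pair (e.g. torchvision 0.17.x is built for torch 2.2.x).
print("torch      :", torch.__version__)
print("torchvision:", torchvision.__version__)
```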
requirements.txt
CHANGED
@@ -1,6 +1,5 @@
-
-
-torchvision
+torch==2.2.0
+torchvision==0.17.0

 easydict
 gradio