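# SceneDreamer interactive demo (presumably the app.py of the Gradio / Hugging
# Face Spaces demo): clones the official repo, downloads the released checkpoint
# from Google Drive, builds the generator, and launches a UI for sampling BEV
# world layouts and rendering fly-through videos.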
import os
import sys
import html
import glob
import uuid
import hashlib
import requests
from tqdm import tqdm

os.system("git clone https://github.com/FrozenBurning/SceneDreamer.git")
os.system("cp -r SceneDreamer/* ./")
os.system("bash install.sh")

# Spec for the released SceneDreamer checkpoint hosted on Google Drive;
# file_size is used to validate the download and alt_url is an (unused) fallback mirror.
pretrained_model = dict(file_url='https://drive.google.com/uc?id=1IFu1vNrgF1EaRqPizyEgN_5Vt7Fyg0Mj',
                        alt_url='', file_size=330571863,
                        file_path='./scenedreamer_released.pt')


def download_file(session, file_spec, use_alt_url=False, chunk_size=128, num_attempts=10):
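    """Download file_spec['file_url'] to file_spec['file_path'] with retries.

    The file is streamed in chunk_size-KB chunks while tracking size and MD5;
    the transfer is validated against file_spec['file_size'] / ['file_md5'] when
    present. If Google Drive answers with its small "can't virus-scan this file"
    HTML page, the confirm link is extracted and the download is retried.
    """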
    file_path = file_spec['file_path']
    if use_alt_url:
        file_url = file_spec['alt_url']
    else:
        file_url = file_spec['file_url']

    file_dir = os.path.dirname(file_path)
    tmp_path = file_path + '.tmp.' + uuid.uuid4().hex
    if file_dir:
        os.makedirs(file_dir, exist_ok=True)

    progress_bar = tqdm(total=file_spec['file_size'], unit='B', unit_scale=True)
    for attempts_left in reversed(range(num_attempts)):
        data_size = 0
        progress_bar.reset()
        try:
            # Download.
            data_md5 = hashlib.md5()
            with session.get(file_url, stream=True) as res:
                res.raise_for_status()
                with open(tmp_path, 'wb') as f:
                    for chunk in res.iter_content(chunk_size=chunk_size<<10):
                        progress_bar.update(len(chunk))
                        f.write(chunk)
                        data_size += len(chunk)
                        data_md5.update(chunk)

            # Validate.
            if 'file_size' in file_spec and data_size != file_spec['file_size']:
                raise IOError('Incorrect file size', file_path)
            if 'file_md5' in file_spec and data_md5.hexdigest() != file_spec['file_md5']:
                raise IOError('Incorrect file MD5', file_path)
            break

        except Exception as e:
            # print(e)
            # Last attempt => raise error.
            if not attempts_left:
                raise

            # Handle Google Drive virus checker nag.
            if data_size > 0 and data_size < 8192:
                with open(tmp_path, 'rb') as f:
                    data = f.read()
                links = [html.unescape(link) for link in data.decode('utf-8').split('"') if 'confirm=t' in link]
                if len(links) == 1:
                    file_url = requests.compat.urljoin(file_url, links[0])
                    continue

    progress_bar.close()

    # Rename temp file to the correct name.
    os.replace(tmp_path, file_path) # atomic

    # Attempt to clean up any leftover temps.
    for filename in glob.glob(file_path + '.tmp.*'):
        try:
            os.remove(filename)
        except OSError:
            pass

print('Downloading SceneDreamer pretrained model...')
with requests.Session() as session:
    try:
        download_file(session, pretrained_model)
    except Exception as e:
        print(f'Google Drive download failed: {e}\n')



import os
import torch
import torch.nn as nn
import importlib
import argparse
from imaginaire.config import Config
from imaginaire.utils.cudnn import init_cudnn
import gradio as gr
from PIL import Image


class WrappedModel(nn.Module):
    r"""Dummy wrapping the module.
    """

    def __init__(self, module):
        super(WrappedModel, self).__init__()
        self.module = module

    def forward(self, *args, **kwargs):
        r"""PyTorch module forward function overload."""
        return self.module(*args, **kwargs)

def parse_args():
    parser = argparse.ArgumentParser(description='SceneDreamer inference')
    parser.add_argument('--config', type=str, default='./configs/scenedreamer_inference.yaml', help='Path to the inference config file.')
    parser.add_argument('--checkpoint', default='./scenedreamer_released.pt',
                        help='Checkpoint path.')
    parser.add_argument('--output_dir', type=str, default='./test/',
                        help='Location to save the image outputs')
    parser.add_argument('--seed', type=int, default=8888,
                        help='Random seed.')
    args = parser.parse_args()
    return args


args = parse_args()
cfg = Config(args.config)

# Initialize cudnn.
init_cudnn(cfg.cudnn.deterministic, cfg.cudnn.benchmark)

# Initialize the generator model named in the config (no data loaders are needed for inference).

lib_G = importlib.import_module(cfg.gen.type)
net_G = lib_G.Generator(cfg.gen, cfg.data)
net_G = net_G.to('cuda')
net_G = WrappedModel(net_G)
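# Note: the wrapper is presumably there so the state_dict keys carry the same
# 'module.' prefix as the (distributed) training checkpoint; it is unwrapped
# again via net_G.module before inference.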

if args.checkpoint == '':
    raise NotImplementedError("No checkpoint is provided for inference!")

# Load checkpoint.
# trainer.load_checkpoint(cfg, args.checkpoint)
checkpoint = torch.load(args.checkpoint, map_location='cpu')
net_G.load_state_dict(checkpoint['net_G'])

# Prepare the generator for inference: unwrap the dummy wrapper, switch to eval mode, and freeze all parameters.
net_G = net_G.module
net_G.eval()
for name, param in net_G.named_parameters():
    param.requires_grad = False
torch.cuda.empty_cache()
world_dir = os.path.join(args.output_dir)
os.makedirs(world_dir, exist_ok=True)



def get_bev(seed):
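    """Sample a new BEV scene layout for `seed` and return (semantic, height) maps.

    terrain_generator.py is invoked as a subprocess and writes colormap.png and
    heightmap.png into world_dir; both are loaded back as PIL images for display.
    """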
    print('[PCGGenerator] Generating BEV scene representation...')
    os.system('python terrain_generator.py --size {} --seed {} --outdir {}'.format(net_G.voxel.sample_size, seed, world_dir))
    heightmap_path = os.path.join(world_dir, 'heightmap.png')
    semantic_path = os.path.join(world_dir, 'colormap.png')
    heightmap = Image.open(heightmap_path)
    semantic = Image.open(semantic_path)
    return semantic, heightmap

def get_video(seed, num_frames, reso_h, reso_w):
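    """Render a fly-through video of the current world and return the mp4 path.

    Seeds the CPU and CUDA RNGs, rebuilds the voxel world from the BEV maps in
    world_dir, samples a style code z, and renders num_frames frames at
    (reso_h, reso_w) using the camera mode from the inference config.
    """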
    device = torch.device('cuda')
    rng_cuda = torch.Generator(device=device)
    rng_cuda = rng_cuda.manual_seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    net_G.voxel.next_world(device, world_dir, checkpoint)
    cam_mode = cfg.inference_args.camera_mode
    cfg.inference_args.cam_maxstep = num_frames
    cfg.inference_args.resolution_hw = [reso_h, reso_w]
    current_outdir = os.path.join(world_dir, 'camera_{:02d}'.format(cam_mode))
    os.makedirs(current_outdir, exist_ok=True)
    z = torch.empty(1, net_G.style_dims, dtype=torch.float32, device=device)
    z.normal_(generator=rng_cuda)
    net_G.inference_givenstyle(z, current_outdir, **vars(cfg.inference_args))
    return os.path.join(current_outdir, 'rgb_render.mp4')

markdown = '''
  # SceneDreamer: Unbounded 3D Scene Generation from 2D Image Collections
  
  Authored by Zhaoxi Chen, Guangcong Wang, Ziwei Liu
  ### Useful links:
  - [Official Github Repo](https://github.com/FrozenBurning/SceneDreamer)
  - [Project Page](https://scene-dreamer.github.io/)
  - [arXiv Link](https://arxiv.org/abs/2302.01330)

  Licensed under the S-Lab License.

  We provide a pre-sampled scene whose BEV maps are shown on the right. You can also click "Generate BEV" to randomly sample a new 3D world, represented by a height map and a semantic map, but note that this takes a while.

  To render a video, click "Render" to generate a camera trajectory flying through the world. You can adjust the rendering options with the sliders below!
'''
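
# Gradio layout: markdown description on the left, BEV previews and the rendered
# video on the right, followed by sliders for frame count, seed, and resolution,
# and the two action buttons.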

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            gr.Markdown(markdown)
        with gr.Column():
            with gr.Row():
                with gr.Column():
                    semantic = gr.Image(value='./test/colormap.png', type="pil", shape=(512, 512))
                with gr.Column():
                    height = gr.Image(value='./test/heightmap.png', type="pil", shape=(512, 512))
            with gr.Row():
                # with gr.Column():
                #     image = gr.Image(type='pil', shape=(540, 960))
                with gr.Column():
                    video = gr.Video()
    with gr.Row():
        num_frames = gr.Slider(minimum=10, maximum=200, value=20, step=1, label='Number of rendered frames')
        user_seed = gr.Slider(minimum=0, maximum=999999, value=8888, step=1, label='Random seed')
        resolution_h = gr.Slider(minimum=256, maximum=2160, value=270, step=1, label='Height of rendered image')
        resolution_w = gr.Slider(minimum=256, maximum=3840, value=480, step=1, label='Width of rendered image')

    with gr.Row():
        btn = gr.Button(value="Generate BEV")
        btn_2 = gr.Button(value="Render")

    btn.click(get_bev, [user_seed], [semantic, height])
    btn_2.click(get_video, [user_seed, num_frames, resolution_h, resolution_w], [video])

demo.launch(debug=True)