SceneDiffuser committed on
Commit f785e6f
1 Parent(s): be96c2d
Files changed (3)
  1. app.py +97 -143
  2. pre-requirements.txt +3 -0
  3. requirements.txt +27 -3
app.py CHANGED
@@ -1,151 +1,105 @@
- import gradio as gr
-
- import os
- os.environ["PYOPENGL_PLATFORM"] = "osmesa" #opengl seems to only work with TPU
- print(os.environ['PYOPENGL_PLATFORM'])
- from OpenGL.osmesa import OSMesaCreateContextAttribs
-
- import numpy as np
  import os
- import trimesh
- import pyrender
- from pyrender import PerspectiveCamera,\
-                      DirectionalLight, SpotLight, PointLight,\
-                      MetallicRoughnessMaterial,\
-                      Primitive, Mesh, Node, Scene,\
-                      OffscreenRenderer
- from PIL import Image
-
- scene = Scene()
- axis = trimesh.creation.axis()
- axis = Mesh.from_trimesh(axis, smooth=False)
- scene.add(axis)
-
- camera_center = np.array([951.30, 536.77])
-
- camera = pyrender.camera.IntrinsicsCamera(
-     fx=1060.53, fy=1060.38,
-     cx=camera_center[0], cy=camera_center[1])
- light = pyrender.DirectionalLight(color=np.ones(3), intensity=2.0)
- camera_pose = np.eye(4)
- scene.add(camera, pose=camera_pose)
- scene.add(light, pose=camera_pose)
-
-
- r = OffscreenRenderer(
-     viewport_width=720,
-     viewport_height=720,
- )
- color, _ = r.render(scene)
- color = color.astype(np.float32) / 255.0
- img = Image.fromarray((color * 255).astype(np.uint8))
- r.delete()
- print(img)
-
- # import os
- # os.environ['PYOPENGL_PLATFORM'] = 'egl'
- # import sys
- # root_dir = os.path.dirname(os.path.abspath(__file__))
- # sys.path.insert(1, os.path.join(root_dir, 'scenediffuser'))
- # print(sys.path)
- # import gradio as gr
-
- # import interface as IF

- # with gr.Blocks(css='style.css') as demo:
- #     with gr.Column(elem_id="col-container"):
- #         gr.Markdown("<p align='center' style='font-size: 1.5em;'>Diffusion-based Generation, Optimization, and Planning in 3D Scenes</p>")
- #         gr.HTML(value="<img src='file/figures/teaser.png' alt='Teaser' width='710px' height='284px' style='display: block; margin: auto;'>")
- #         gr.HTML(value="<p align='center' style='font-size: 1.2em; color: #485fc7;'><a href='https://arxiv.org/abs/2301.06015' target='_blank'>arXiv</a> | <a href='https://scenediffuser.github.io/' target='_blank'>Project Page</a> | <a href='https://github.com/scenediffuser/Scene-Diffuser' target='_blank'>Code</a></p>")
- #         gr.Markdown("<p align='center'><i>\"SceneDiffuser provides a unified model for solving scene-conditioned generation, optimization, and planning.\"</i></p>")

- #         ## five task
- #         ## pose generation
- #         with gr.Tab("Pose Generation"):
- #             with gr.Row():
- #                 with gr.Column(scale=2):
- #                     selector1 = gr.Dropdown(choices=['MPH16', 'MPH1Library', 'N0SittingBooth', 'N3OpenArea'], label='Scenes', value='MPH16', interactive=True)
- #                     with gr.Row():
- #                         sample1 = gr.Slider(minimum=1, maximum=8, step=1, label='Count', interactive=True, value=1)
- #                         seed1 = gr.Slider(minimum=0, maximum=2 ** 16, step=1, label='Seed', interactive=True, value=2023)
- #                     opt1 = gr.Checkbox(label='Optimizer Guidance', interactive=True, value=True)
- #                     scale1 = gr.Slider(minimum=0.1, maximum=9.9, step=0.1, label='Scale', interactive=True, value=1.1)
- #                     button1 = gr.Button("Run")
- #                 with gr.Column(scale=3):
- #                     image1 = gr.Gallery(label="Image [Result]").style(grid=[1], height="50")
- #                     # model1 = gr.Model3D(clear_color=[255, 255, 255, 255], label="3D Model [Result]")
- #             input1 = [selector1, sample1, seed1, opt1, scale1]
- #             button1.click(IF.pose_generation, inputs=input1, outputs=[image1])

- #         ## motion generation
- #         # with gr.Tab("Motion Generation"):
- #         #     with gr.Row():
- #         #         with gr.Column(scale=2):
- #         #             selector2 = gr.Dropdown(choices=['MPH16', 'MPH1Library', 'N0SittingBooth', 'N3OpenArea'], label='Scenes', value='MPH16', interactive=True)
- #         #             with gr.Row():
- #         #                 sample2 = gr.Slider(minimum=1, maximum=8, step=1, label='Count', interactive=True, value=1)
- #         #                 seed2 = gr.Slider(minimum=0, maximum=2 ** 16, step=1, label='Seed', interactive=True, value=2023)
- #         #             with gr.Row():
- #         #                 withstart = gr.Checkbox(label='With Start', interactive=True, value=False)
- #         #                 opt2 = gr.Checkbox(label='Optimizer Guidance', interactive=True, value=True)
- #         #                 scale_opt2 = gr.Slider(minimum=0.1, maximum=9.9, step=0.1, label='Scale', interactive=True, value=1.1)
- #         #             button2 = gr.Button("Run")
- #         #         with gr.Column(scale=3):
- #         #             image2 = gr.Image(label="Result")
- #         #     input2 = [selector2, sample2, seed2, withstart, opt2, scale_opt2]
- #         #     button2.click(IF.motion_generation, inputs=input2, outputs=image2)
- #         with gr.Tab("Motion Generation"):
- #             with gr.Row():
- #                 with gr.Column(scale=2):
- #                     input2 = [
- #                         gr.Dropdown(choices=['MPH16', 'MPH1Library', 'N0SittingBooth', 'N3OpenArea'], label='Scenes')
- #                     ]
- #                     button2 = gr.Button("Generate")
- #                     gr.HTML("<p style='font-size: 0.9em; color: #555555;'>Notes: the output results are pre-sampled results. We will deploy a real-time model for this task soon.</p>")
- #                 with gr.Column(scale=3):
- #                     output2 = gr.Image(label="Result")
- #             button2.click(IF.motion_generation, inputs=input2, outputs=output2)

- #         ## grasp generation
- #         with gr.Tab("Grasp Generation"):
- #             with gr.Row():
- #                 with gr.Column(scale=2):
- #                     input3 = [
- #                         gr.Dropdown(choices=['contactdb+apple', 'contactdb+camera', 'contactdb+cylinder_medium', 'contactdb+door_knob', 'contactdb+rubber_duck', 'contactdb+water_bottle', 'ycb+baseball', 'ycb+pear', 'ycb+potted_meat_can', 'ycb+tomato_soup_can'], label='Objects')
- #                     ]
- #                     button3 = gr.Button("Run")
- #                     gr.HTML("<p style='font-size: 0.9em; color: #555555;'>Notes: the output results are pre-sampled results. We will deploy a real-time model for this task soon.</p>")
- #                 with gr.Column(scale=3):
- #                     output3 = [
- #                         gr.Model3D(clear_color=[255, 255, 255, 255], label="Result")
- #                     ]
- #             button3.click(IF.grasp_generation, inputs=input3, outputs=output3)

- #         ## path planning
- #         with gr.Tab("Path Planing"):
- #             with gr.Row():
- #                 with gr.Column(scale=2):
- #                     selector4 = gr.Dropdown(choices=['scene0603_00', 'scene0621_00', 'scene0626_00', 'scene0634_00', 'scene0637_00', 'scene0640_00', 'scene0641_00', 'scene0645_00', 'scene0653_00', 'scene0667_00', 'scene0672_00', 'scene0673_00', 'scene0678_00', 'scene0694_00', 'scene0698_00'], label='Scenes', value='scene0621_00', interactive=True)
- #                     mode4 = gr.Radio(choices=['Sampling', 'Planning'], value='Sampling', label='Mode', interactive=True)
- #                     with gr.Row():
- #                         sample4 = gr.Slider(minimum=1, maximum=8, step=1, label='Count', interactive=True, value=1)
- #                         seed4 = gr.Slider(minimum=0, maximum=2 ** 16, step=1, label='Seed', interactive=True, value=2023)
- #                     with gr.Box():
- #                         opt4 = gr.Checkbox(label='Optimizer Guidance', interactive=True, value=True)
- #                         scale_opt4 = gr.Slider(minimum=0.02, maximum=4.98, step=0.02, label='Scale', interactive=True, value=1.0)
- #                     with gr.Box():
- #                         pla4 = gr.Checkbox(label='Planner Guidance', interactive=True, value=True)
- #                         scale_pla4 = gr.Slider(minimum=0.02, maximum=0.98, step=0.02, label='Scale', interactive=True, value=0.2)
- #                     button4 = gr.Button("Run")
- #                 with gr.Column(scale=3):
- #                     image4 = gr.Gallery(label="Image [Result]").style(grid=[1], height="50")
- #                     number4 = gr.Number(label="Steps", precision=0)
- #                     gr.HTML("<p style='font-size: 0.9em; color: #555555;'>Notes: 1. It may take a long time to do planning in <b>Planning</b> mode. 2. The <span style='color: #cc0000;'>red</span> balls represent the planning result, starting with the lightest red ball and ending with the darkest red ball. The <span style='color: #00cc00;'>green</span> ball indicates the target position.</p>")
- #             input4 = [selector4, mode4, sample4, seed4, opt4, scale_opt4, pla4, scale_pla4]
- #             button4.click(IF.path_planning, inputs=input4, outputs=[image4, number4])

- #         ## arm motion planning
- #         with gr.Tab("Arm Motion Planning"):
- #             gr.Markdown('Coming soon!')

- # demo.launch()
  import os
+ os.environ['PYOPENGL_PLATFORM'] = "osmesa"
+ import sys
+ root_dir = os.path.dirname(os.path.abspath(__file__))
+ sys.path.insert(1, os.path.join(root_dir, 'scenediffuser'))
+ import gradio as gr
+ import interface as IF

+ with gr.Blocks(css='style.css') as demo:
+     with gr.Column(elem_id="col-container"):
+         gr.Markdown("<p align='center' style='font-size: 1.5em;'>Diffusion-based Generation, Optimization, and Planning in 3D Scenes</p>")
+         gr.HTML(value="<img src='file/figures/teaser.png' alt='Teaser' width='710px' height='284px' style='display: block; margin: auto;'>")
+         gr.HTML(value="<p align='center' style='font-size: 1.2em; color: #485fc7;'><a href='https://arxiv.org/abs/2301.06015' target='_blank'>arXiv</a> | <a href='https://scenediffuser.github.io/' target='_blank'>Project Page</a> | <a href='https://github.com/scenediffuser/Scene-Diffuser' target='_blank'>Code</a></p>")
+         gr.Markdown("<p align='center'><i>\"SceneDiffuser provides a unified model for solving scene-conditioned generation, optimization, and planning.\"</i></p>")

+         ## five task
+         ## pose generation
+         with gr.Tab("Pose Generation"):
+             with gr.Row():
+                 with gr.Column(scale=2):
+                     selector1 = gr.Dropdown(choices=['MPH16', 'MPH1Library', 'N0SittingBooth', 'N3OpenArea'], label='Scenes', value='MPH16', interactive=True)
+                     with gr.Row():
+                         sample1 = gr.Slider(minimum=1, maximum=8, step=1, label='Count', interactive=True, value=1)
+                         seed1 = gr.Slider(minimum=0, maximum=2 ** 16, step=1, label='Seed', interactive=True, value=2023)
+                     opt1 = gr.Checkbox(label='Optimizer Guidance', interactive=True, value=True)
+                     scale1 = gr.Slider(minimum=0.1, maximum=9.9, step=0.1, label='Scale', interactive=True, value=1.1)
+                     button1 = gr.Button("Run")
+                 with gr.Column(scale=3):
+                     image1 = gr.Gallery(label="Image [Result]").style(grid=[1], height="50")
+                     # model1 = gr.Model3D(clear_color=[255, 255, 255, 255], label="3D Model [Result]")
+             input1 = [selector1, sample1, seed1, opt1, scale1]
+             button1.click(IF.pose_generation, inputs=input1, outputs=[image1])

+         ## motion generation
+         # with gr.Tab("Motion Generation"):
+         #     with gr.Row():
+         #         with gr.Column(scale=2):
+         #             selector2 = gr.Dropdown(choices=['MPH16', 'MPH1Library', 'N0SittingBooth', 'N3OpenArea'], label='Scenes', value='MPH16', interactive=True)
+         #             with gr.Row():
+         #                 sample2 = gr.Slider(minimum=1, maximum=8, step=1, label='Count', interactive=True, value=1)
+         #                 seed2 = gr.Slider(minimum=0, maximum=2 ** 16, step=1, label='Seed', interactive=True, value=2023)
+         #             with gr.Row():
+         #                 withstart = gr.Checkbox(label='With Start', interactive=True, value=False)
+         #                 opt2 = gr.Checkbox(label='Optimizer Guidance', interactive=True, value=True)
+         #                 scale_opt2 = gr.Slider(minimum=0.1, maximum=9.9, step=0.1, label='Scale', interactive=True, value=1.1)
+         #             button2 = gr.Button("Run")
+         #         with gr.Column(scale=3):
+         #             image2 = gr.Image(label="Result")
+         #     input2 = [selector2, sample2, seed2, withstart, opt2, scale_opt2]
+         #     button2.click(IF.motion_generation, inputs=input2, outputs=image2)
+         with gr.Tab("Motion Generation"):
+             with gr.Row():
+                 with gr.Column(scale=2):
+                     input2 = [
+                         gr.Dropdown(choices=['MPH16', 'MPH1Library', 'N0SittingBooth', 'N3OpenArea'], label='Scenes')
+                     ]
+                     button2 = gr.Button("Generate")
+                     gr.HTML("<p style='font-size: 0.9em; color: #555555;'>Notes: the output results are pre-sampled results. We will deploy a real-time model for this task soon.</p>")
+                 with gr.Column(scale=3):
+                     output2 = gr.Image(label="Result")
+             button2.click(IF.motion_generation, inputs=input2, outputs=output2)

+         ## grasp generation
+         with gr.Tab("Grasp Generation"):
+             with gr.Row():
+                 with gr.Column(scale=2):
+                     input3 = [
+                         gr.Dropdown(choices=['contactdb+apple', 'contactdb+camera', 'contactdb+cylinder_medium', 'contactdb+door_knob', 'contactdb+rubber_duck', 'contactdb+water_bottle', 'ycb+baseball', 'ycb+pear', 'ycb+potted_meat_can', 'ycb+tomato_soup_can'], label='Objects')
+                     ]
+                     button3 = gr.Button("Run")
+                     gr.HTML("<p style='font-size: 0.9em; color: #555555;'>Notes: the output results are pre-sampled results. We will deploy a real-time model for this task soon.</p>")
+                 with gr.Column(scale=3):
+                     output3 = [
+                         gr.Model3D(clear_color=[255, 255, 255, 255], label="Result")
+                     ]
+             button3.click(IF.grasp_generation, inputs=input3, outputs=output3)

+         ## path planning
+         with gr.Tab("Path Planing"):
+             with gr.Row():
+                 with gr.Column(scale=2):
+                     selector4 = gr.Dropdown(choices=['scene0603_00', 'scene0621_00', 'scene0626_00', 'scene0634_00', 'scene0637_00', 'scene0640_00', 'scene0641_00', 'scene0645_00', 'scene0653_00', 'scene0667_00', 'scene0672_00', 'scene0673_00', 'scene0678_00', 'scene0694_00', 'scene0698_00'], label='Scenes', value='scene0621_00', interactive=True)
+                     mode4 = gr.Radio(choices=['Sampling', 'Planning'], value='Sampling', label='Mode', interactive=True)
+                     with gr.Row():
+                         sample4 = gr.Slider(minimum=1, maximum=8, step=1, label='Count', interactive=True, value=1)
+                         seed4 = gr.Slider(minimum=0, maximum=2 ** 16, step=1, label='Seed', interactive=True, value=2023)
+                     with gr.Box():
+                         opt4 = gr.Checkbox(label='Optimizer Guidance', interactive=True, value=True)
+                         scale_opt4 = gr.Slider(minimum=0.02, maximum=4.98, step=0.02, label='Scale', interactive=True, value=1.0)
+                     with gr.Box():
+                         pla4 = gr.Checkbox(label='Planner Guidance', interactive=True, value=True)
+                         scale_pla4 = gr.Slider(minimum=0.02, maximum=0.98, step=0.02, label='Scale', interactive=True, value=0.2)
+                     button4 = gr.Button("Run")
+                 with gr.Column(scale=3):
+                     image4 = gr.Gallery(label="Image [Result]").style(grid=[1], height="50")
+                     number4 = gr.Number(label="Steps", precision=0)
+                     gr.HTML("<p style='font-size: 0.9em; color: #555555;'>Notes: 1. It may take a long time to do planning in <b>Planning</b> mode. 2. The <span style='color: #cc0000;'>red</span> balls represent the planning result, starting with the lightest red ball and ending with the darkest red ball. The <span style='color: #00cc00;'>green</span> ball indicates the target position.</p>")
+             input4 = [selector4, mode4, sample4, seed4, opt4, scale_opt4, pla4, scale_pla4]
+             button4.click(IF.path_planning, inputs=input4, outputs=[image4, number4])

+         ## arm motion planning
+         with gr.Tab("Arm Motion Planning"):
+             gr.Markdown('Coming soon!')

+ demo.launch()
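Note: the rewritten app.py delegates all model work to the `interface` module it adds to `sys.path` from the `scenediffuser/` subdirectory; that module is not part of this diff. The sketch below is only a set of hypothetical stubs whose signatures are inferred from the `button.click(...)` wiring above (parameter names and return types are assumptions, not the actual scenediffuser/interface.py):

# Hypothetical stubs inferred from the click() wiring; the real
# implementations live in scenediffuser/interface.py, untouched by this commit.
from typing import List, Tuple
from PIL import Image

def pose_generation(scene: str, count: int, seed: int,
                    opt: bool, scale: float) -> List[Image.Image]:
    """Feeds the gr.Gallery in the Pose Generation tab."""
    raise NotImplementedError

def motion_generation(scene: str) -> Image.Image:
    """Feeds the gr.Image in the Motion Generation tab (pre-sampled results)."""
    raise NotImplementedError

def grasp_generation(obj: str) -> str:
    """Returns a mesh file path for the gr.Model3D in the Grasp Generation tab."""
    raise NotImplementedError

def path_planning(scene: str, mode: str, count: int, seed: int,
                  opt: bool, scale_opt: float,
                  pla: bool, scale_pla: float) -> Tuple[List[Image.Image], int]:
    """Feeds the gr.Gallery and the Steps gr.Number in the Path Planing tab."""
    raise NotImplementedError

The `gr.Gallery(...).style(...)` and `gr.Box()` calls assume the Gradio 3.x API (both were later removed in Gradio 4); on Spaces the Gradio version comes from the Space's README metadata rather than from the requirement files below.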
pre-requirements.txt CHANGED
@@ -1 +1,4 @@
+ --extra-index-url https://download.pytorch.org/whl/cu113
+ torch==1.11.0+cu113
+ torchvision==0.12.0+cu113
  pyrender==0.1.45
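Note: on Hugging Face Spaces, pre-requirements.txt is installed before requirements.txt. Pinning torch and torchvision here, with the extra index supplying CUDA 11.3 wheels, ensures torch is already importable when the git-based source packages in requirements.txt below (pytorch3d, pointops, chamfer_distance) are built, since their build steps need torch. A minimal post-install sanity check, assuming both files were installed as-is:

# Verify the pinned CUDA 11.3 builds actually got installed.
import torch
import torchvision

assert torch.__version__ == "1.11.0+cu113", torch.__version__
assert torchvision.__version__ == "0.12.0+cu113", torchvision.__version__
print("CUDA build:", torch.version.cuda)             # expected: 11.3
print("CUDA available:", torch.cuda.is_available())  # depends on host GPU/driver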
requirements.txt CHANGED
@@ -1,3 +1,27 @@
- trimesh
- pillow
- pyopengl==3.1.5
+ -e git+https://github.com/Silverster98/pytorch_kinematics#egg=pytorch_kinematics
+ git+https://github.com/Silverster98/pytorch3d@T4_CUDA
+ git+https://github.com/otaheri/chamfer_distance.git@d2b524309db114d0f7ce18be6c01b3802cde9791
+ git+https://github.com/Silverster98/pointops@T4
+ git+https://github.com/nghorbani/human_body_prior
+ urdf-parser-py==0.0.4
+ einops==0.4.1
+ hydra-core==1.2.0
+ loguru==0.6.0
+ matplotlib==3.5.1
+ natsort==8.2.0
+ networkx==2.8.6
+ omegaconf==2.2.2
+ opencv-python==4.6.0.66
+ Pillow==9.0.1
+ plotly==5.11.0
+ protobuf==3.19.4
+ pyopengl==3.1.5
+ pyquaternion==0.9.9
+ pyrender==0.1.45
+ smplx==0.1.28
+ tabulate==0.8.10
+ tensorboard==2.8.0
+ tqdm==4.62.3
+ transforms3d==0.4.1
+ transformations==2022.9.26
+ trimesh==3.12.7
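Note: requirements.txt keeps the pyrender==0.1.45 / pyopengl==3.1.5 pair used for offscreen rendering. The smoke test this commit deletes from the top of app.py condenses to the sketch below, and the one constraint it demonstrates carries over to the new app.py: PYOPENGL_PLATFORM must be set to "osmesa" before pyrender (and through it, PyOpenGL) is first imported, because PyOpenGL selects its platform backend at import time.

# Headless-render smoke test, condensed from the code removed from app.py above.
# The environment variable must be set *before* pyrender is imported.
import os
os.environ["PYOPENGL_PLATFORM"] = "osmesa"

import numpy as np
import trimesh
import pyrender

scene = pyrender.Scene()
scene.add(pyrender.Mesh.from_trimesh(trimesh.creation.axis(), smooth=False))
pose = np.eye(4)
scene.add(pyrender.camera.IntrinsicsCamera(fx=1060.53, fy=1060.38, cx=951.30, cy=536.77), pose=pose)
scene.add(pyrender.DirectionalLight(color=np.ones(3), intensity=2.0), pose=pose)

r = pyrender.OffscreenRenderer(viewport_width=720, viewport_height=720)
color, _ = r.render(scene)  # color: (720, 720, 3) uint8 array
r.delete()
print(color.shape)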