rolpotamias committed
Commit aea26c8
Parent: 2c6f981

Upload 124 files

This view is limited to 50 files because the commit contains too many changes; the raw diff holds the complete set of 124 uploaded files.
Files changed (50)
  1. .gitattributes +1 -0
  2. README.md +4 -4
  3. app.py +181 -0
  4. assets/test1.jpg +0 -0
  5. assets/test2.png +3 -0
  6. assets/test3.jpg +0 -0
  7. assets/test4.jpg +0 -0
  8. assets/test5.jpeg +0 -0
  9. mano_data/mano/MANO_RIGHT.pkl +3 -0
  10. mano_data/mano_mean_params.npz +3 -0
  11. packages.txt +12 -0
  12. pretrained_models/dataset_config.yaml +62 -0
  13. pretrained_models/detector.pt +3 -0
  14. pretrained_models/model_config.yaml +119 -0
  15. pretrained_models/wilor_final.ckpt +3 -0
  16. pyrender/.coveragerc +5 -0
  17. pyrender/.flake8 +8 -0
  18. pyrender/.gitignore +106 -0
  19. pyrender/.pre-commit-config.yaml +6 -0
  20. pyrender/.travis.yml +43 -0
  21. pyrender/LICENSE +21 -0
  22. pyrender/MANIFEST.in +5 -0
  23. pyrender/README.md +92 -0
  24. pyrender/docs/Makefile +23 -0
  25. pyrender/docs/make.bat +35 -0
  26. pyrender/docs/source/api/index.rst +59 -0
  27. pyrender/docs/source/conf.py +352 -0
  28. pyrender/docs/source/examples/cameras.rst +26 -0
  29. pyrender/docs/source/examples/index.rst +20 -0
  30. pyrender/docs/source/examples/lighting.rst +21 -0
  31. pyrender/docs/source/examples/models.rst +143 -0
  32. pyrender/docs/source/examples/offscreen.rst +87 -0
  33. pyrender/docs/source/examples/quickstart.rst +71 -0
  34. pyrender/docs/source/examples/scenes.rst +78 -0
  35. pyrender/docs/source/examples/viewer.rst +61 -0
  36. pyrender/docs/source/index.rst +41 -0
  37. pyrender/docs/source/install/index.rst +172 -0
  38. pyrender/examples/duck.py +13 -0
  39. pyrender/examples/example.py +157 -0
  40. pyrender/pyrender/__init__.py +24 -0
  41. pyrender/pyrender/camera.py +437 -0
  42. pyrender/pyrender/constants.py +149 -0
  43. pyrender/pyrender/font.py +272 -0
  44. pyrender/pyrender/fonts/OpenSans-Bold.ttf +0 -0
  45. pyrender/pyrender/fonts/OpenSans-BoldItalic.ttf +0 -0
  46. pyrender/pyrender/fonts/OpenSans-ExtraBold.ttf +0 -0
  47. pyrender/pyrender/fonts/OpenSans-ExtraBoldItalic.ttf +0 -0
  48. pyrender/pyrender/fonts/OpenSans-Italic.ttf +0 -0
  49. pyrender/pyrender/fonts/OpenSans-Light.ttf +0 -0
  50. pyrender/pyrender/fonts/OpenSans-LightItalic.ttf +0 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+assets/test2.png filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,13 +1,13 @@
 ---
 title: WiLoR
-emoji: 📊
-colorFrom: purple
-colorTo: indigo
+emoji: 🚀
+colorFrom: red
+colorTo: red
 sdk: gradio
 sdk_version: 4.44.0
 app_file: app.py
 pinned: false
-license: cc-by-nc-4.0
+license: cc-by-nc-2.0
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,181 @@
import os
import sys
os.environ["PYOPENGL_PLATFORM"] = "egl"
os.environ["MESA_GL_VERSION_OVERRIDE"] = "4.1"
os.system('pip install /home/user/app/pyrender')
sys.path.append('/home/user/app/pyrender')

import gradio as gr
import cv2
import numpy as np
import torch
from ultralytics import YOLO
from pathlib import Path
import argparse
import json
from typing import Dict, Optional

from wilor.models import WiLoR, load_wilor
from wilor.utils import recursive_to
from wilor.datasets.vitdet_dataset import ViTDetDataset, DEFAULT_MEAN, DEFAULT_STD
from wilor.utils.renderer import Renderer, cam_crop_to_full

device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

LIGHT_PURPLE = (0.25098039, 0.274117647, 0.65882353)

model, model_cfg = load_wilor(checkpoint_path='./pretrained_models/wilor_final.ckpt', cfg_path='./pretrained_models/model_config.yaml')
# Setup the renderer
renderer = Renderer(model_cfg, faces=model.mano.faces)
renderer_side = Renderer(model_cfg, faces=model.mano.faces)
model = model.to(device)
model.eval()

detector = YOLO('./pretrained_models/detector.pt').to(device)


def run_wilow_model(image, conf, IoU_threshold=0.5):
    img_cv2 = image[..., ::-1]
    img_vis = image.copy()

    detections = detector(img_cv2, conf=conf, verbose=False, iou=IoU_threshold)[0]

    bboxes = []
    is_right = []
    for det in detections:
        Bbox = det.boxes.data.cpu().detach().squeeze().numpy()
        Conf = det.boxes.conf.data.cpu().detach()[0].numpy().reshape(-1).astype(np.float16)
        Side = det.boxes.cls.data.cpu().detach()
        # Bbox[:2] -= np.int32(0.1 * Bbox[:2])
        # Bbox[2:] += np.int32(0.1 * Bbox[2:])
        is_right.append(det.boxes.cls.cpu().detach().squeeze().item())
        bboxes.append(Bbox[:4].tolist())

        color = (255 * 0.208, 255 * 0.647, 255 * 0.603) if Side == 0. else (255 * 1, 255 * 0.78039, 255 * 0.2353)
        label = f'L - {Conf[0]:.3f}' if Side == 0 else f'R - {Conf[0]:.3f}'

        cv2.rectangle(img_vis, (int(Bbox[0]), int(Bbox[1])), (int(Bbox[2]), int(Bbox[3])), color, 3)
        (w, h), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 1)
        cv2.rectangle(img_vis, (int(Bbox[0]), int(Bbox[1]) - 20), (int(Bbox[0]) + w, int(Bbox[1])), color, -1)
        cv2.putText(img_vis, label, (int(Bbox[0]), int(Bbox[1]) - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 0), 2)

    if len(bboxes) != 0:
        boxes = np.stack(bboxes)
        right = np.stack(is_right)
        dataset = ViTDetDataset(model_cfg, img_cv2, boxes, right, rescale_factor=2.0)
        dataloader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=False, num_workers=0)

        all_verts = []
        all_cam_t = []
        all_right = []
        all_joints = []

        for batch in dataloader:
            batch = recursive_to(batch, device)

            with torch.no_grad():
                out = model(batch)

            multiplier = (2 * batch['right'] - 1)
            pred_cam = out['pred_cam']
            pred_cam[:, 1] = multiplier * pred_cam[:, 1]
            box_center = batch["box_center"].float()
            box_size = batch["box_size"].float()
            img_size = batch["img_size"].float()
            scaled_focal_length = model_cfg.EXTRA.FOCAL_LENGTH / model_cfg.MODEL.IMAGE_SIZE * img_size.max()
            pred_cam_t_full = cam_crop_to_full(pred_cam, box_center, box_size, img_size, scaled_focal_length).detach().cpu().numpy()

            # Render the result
            all_verts = []
            all_cam_t = []
            all_right = []
            all_joints = []

            batch_size = batch['img'].shape[0]
            for n in range(batch_size):

                verts = out['pred_vertices'][n].detach().cpu().numpy()
                joints = out['pred_keypoints_3d'][n].detach().cpu().numpy()

                is_right = batch['right'][n].cpu().numpy()
                verts[:, 0] = (2 * is_right - 1) * verts[:, 0]
                joints[:, 0] = (2 * is_right - 1) * joints[:, 0]

                cam_t = pred_cam_t_full[n]

                all_verts.append(verts)
                all_cam_t.append(cam_t)
                all_right.append(is_right)
                all_joints.append(joints)
            # Render front view

            misc_args = dict(
                mesh_base_color=LIGHT_PURPLE,
                scene_bg_color=(1, 1, 1),
                focal_length=scaled_focal_length,
            )
            cam_view = renderer.render_rgba_multiple(all_verts, cam_t=all_cam_t, render_res=img_size[n], is_right=all_right, **misc_args)

            # Overlay image
            input_img = img_vis.astype(np.float32) / 255.0
            input_img = np.concatenate([input_img, np.ones_like(input_img[:, :, :1])], axis=2)  # Add alpha channel
            input_img_overlay = input_img[:, :, :3] * (1 - cam_view[:, :, 3:]) + cam_view[:, :, :3] * cam_view[:, :, 3:]

            image = input_img_overlay
    return image, f'{len(detections)} hands detected'


header = ('''
<div class="embed_hidden" style="text-align: center;">
<h1> <b>WiLoR</b>: End-to-end 3D hand localization and reconstruction in-the-wild</h1>
<h3>
<a href="https://rolpotamias.github.io" target="_blank" rel="noopener noreferrer">Rolandos Alexandros Potamias</a><sup>1</sup>,
<a href="" target="_blank" rel="noopener noreferrer">Jinglei Zhang</a><sup>2</sup>,
<br>
<a href="https://jiankangdeng.github.io/" target="_blank" rel="noopener noreferrer">Jiankang Deng</a><sup>1</sup>,
<a href="https://wp.doc.ic.ac.uk/szafeiri/" target="_blank" rel="noopener noreferrer">Stefanos Zafeiriou</a><sup>1</sup>
</h3>
<h3>
<sup>1</sup>Imperial College London;
<sup>2</sup>Shanghai Jiao Tong University
</h3>
</div>
<div style="display:flex; gap: 0.3rem; justify-content: center; align-items: center;" align="center">
<a href=''><img src='https://img.shields.io/badge/Arxiv-2405.20340-A42C25?style=flat&logo=arXiv&logoColor=A42C25'></a>
<a href=''><img src='https://img.shields.io/badge/Paper-PDF-yellow?style=flat&logo=arXiv&logoColor=yellow'></a>
<a href='https://rolpotamias.github.io/wilor/'><img src='https://img.shields.io/badge/Project-Page-%23df5b46?style=flat&logo=Google%20chrome&logoColor=%23df5b46'></a>
<a href='https://github.com/rolpotamias/WiLoR'><img src='https://img.shields.io/badge/GitHub-Code-black?style=flat&logo=github&logoColor=white'></a>
''')


with gr.Blocks(title="WiLoR: End-to-end 3D hand localization and reconstruction in-the-wild", css=".gradio-container") as demo:

    gr.Markdown(header)

    with gr.Row():
        with gr.Column():
            input_image = gr.Image(label="Input image", type="numpy")
            threshold = gr.Slider(value=0.3, minimum=0.05, maximum=0.95, step=0.05, label='Detection Confidence Threshold')
            # nms = gr.Slider(value=0.5, minimum=0.05, maximum=0.95, step=0.05, label='IoU NMS Threshold')
            submit = gr.Button("Submit", variant="primary")

        with gr.Column():
            reconstruction = gr.Image(label="Reconstructions", type="numpy")
            hands_detected = gr.Textbox(label="Hands Detected")

    submit.click(fn=run_wilow_model, inputs=[input_image, threshold], outputs=[reconstruction, hands_detected])

    with gr.Row():

        example_images = gr.Examples([
            ['/home/user/app/assets/test1.jpg'],
            ['/home/user/app/assets/test2.png'],
            ['/home/user/app/assets/test3.jpg'],
            ['/home/user/app/assets/test4.jpg'],
            ['/home/user/app/assets/test5.jpeg']
        ],
            inputs=input_image)

demo.launch()
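The demo function above takes an RGB numpy image plus a detection-confidence threshold and returns the rendered overlay together with a status string. A minimal sketch of calling it directly, outside Gradio, assuming it runs in the same module as app.py and using one of the bundled test images (the output filename is illustrative only):

# Local sanity check for run_wilow_model; assumes at least one hand is detected,
# in which case the returned overlay is a float RGB image in [0, 1].
bgr = cv2.imread('assets/test1.jpg')            # OpenCV loads BGR
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)      # the Gradio Image component passes RGB
overlay, status = run_wilow_model(rgb, conf=0.3)
print(status)                                   # e.g. "2 hands detected"
cv2.imwrite('overlay.png', cv2.cvtColor((overlay * 255).astype('uint8'), cv2.COLOR_RGB2BGR))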
assets/test1.jpg ADDED
assets/test2.png ADDED

Git LFS Details

  • SHA256: 589f5d12593acbcbcb9ec07b288b04f6d7e70542e1312ceee3ea992ba0f41ff9
  • Pointer size: 132 Bytes
  • Size of remote file: 1.01 MB
assets/test3.jpg ADDED
assets/test4.jpg ADDED
assets/test5.jpeg ADDED
mano_data/mano/MANO_RIGHT.pkl ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:45d60aa3b27ef9107a7afd4e00808f307fd91111e1cfa35afd5c4a62de264767
size 3821356
mano_data/mano_mean_params.npz ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:efc0ec58e4a5cef78f3abfb4e8f91623b8950be9eff8b8e0dbb0d036ebc63988
size 1178
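The .pkl and .npz entries above (and the .pt and .ckpt files further down) are checked in as Git LFS pointer files: three text lines giving the spec version, the sha256 oid, and the byte size, while git-lfs fetches the real binaries. A small, purely illustrative parser for that pointer format:

# Sketch: read a Git LFS pointer file into a dict with keys version, oid, size.
def read_lfs_pointer(path):
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(' ')
            fields[key] = value
    return fields

ptr = read_lfs_pointer('mano_data/mano/MANO_RIGHT.pkl')
print(ptr['oid'], int(ptr['size']))   # sha256:45d6... 3821356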
packages.txt ADDED
@@ -0,0 +1,12 @@
libglfw3-dev
libgles2-mesa-dev
libgl1
freeglut3-dev
unzip
ffmpeg
libsm6
libxext6
libgl1-mesa-dri
libegl1-mesa
libgbm1
build-essential
pretrained_models/dataset_config.yaml ADDED
@@ -0,0 +1,62 @@
ARCTIC-TRAIN:
  TYPE: ImageDataset
  URLS: hamer_training_data/dataset_tars/arctic-train/{000000..000176}.tar
  epoch_size: 177000
BEDLAM-TRAIN:
  TYPE: ImageDataset
  URLS: hamer_training_data/dataset_tars/bedlam-train/{000000..000300}.tar
  epoch_size: 301000
COCOW-TRAIN:
  TYPE: ImageDataset
  URLS: hamer_training_data/dataset_tars/cocow-train/{000000..000036}.tar
  epoch_size: 78666
DEX-TRAIN:
  TYPE: ImageDataset
  URLS: hamer_training_data/dataset_tars/dex-train/{000000..000406}.tar
  epoch_size: 406888
FREIHAND-MOCAP:
  DATASET_FILE: hamer_training_data/freihand_mocap.npz
FREIHAND-TEST:
  TYPE: ImageDataset
  URLS: hamer_training_data/dataset_tars/freihand-test/{000000..000003}.tar
  epoch_size: 3960
FREIHAND-TRAIN:
  TYPE: ImageDataset
  URLS: hamer_training_data/dataset_tars/freihand-train/{000000..000130}.tar
  epoch_size: 130240
H2O3D-TRAIN:
  TYPE: ImageDataset
  URLS: hamer_training_data/dataset_tars/h2o3d-train/{000000..000060}.tar
  epoch_size: 121996
HALPE-TRAIN:
  TYPE: ImageDataset
  URLS: hamer_training_data/dataset_tars/halpe-train/{000000..000022}.tar
  epoch_size: 34289
HO3D-TRAIN:
  TYPE: ImageDataset
  URLS: hamer_training_data/dataset_tars/ho3d-train/{000000..000083}.tar
  epoch_size: 83325
HOT3D-TRAIN:
  TYPE: ImageDataset
  URLS: hamer_training_data/dataset_tars/hot3d-train/{000000..000571}.tar
  epoch_size: 572000
INTERHAND26M-TRAIN:
  TYPE: ImageDataset
  URLS: hamer_training_data/dataset_tars/interhand26m-train/{000000..001056}.tar
  epoch_size: 1424632
MPIINZSL-TRAIN:
  TYPE: ImageDataset
  URLS: hamer_training_data/dataset_tars/mpiinzsl-train/{000000..000015}.tar
  epoch_size: 15184
MTC-TRAIN:
  TYPE: ImageDataset
  URLS: hamer_training_data/dataset_tars/mtc-train/{000000..000306}.tar
  epoch_size: 363947
REINTER-TRAIN:
  TYPE: ImageDataset
  URLS: hamer_training_data/dataset_tars/reinter-train/{000000..000418}.tar
  epoch_size: 419000
RHD-TRAIN:
  TYPE: ImageDataset
  URLS: hamer_training_data/dataset_tars/rhd-train/{000000..000041}.tar
  epoch_size: 61705
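Each training entry above names a set of WebDataset-style tar shards via a brace range plus an epoch_size. A short, purely illustrative sketch (not the project's loader, and assuming PyYAML is available) that expands the brace range and checks the shard count:

# Expand the {000000..000176}-style shard ranges in the config above.
import re
import yaml

with open('pretrained_models/dataset_config.yaml') as f:
    cfg = yaml.safe_load(f)

def expand_shards(url):
    m = re.search(r'\{(\d+)\.\.(\d+)\}', url)
    if m is None:
        return [url]
    lo, hi = m.group(1), m.group(2)
    width = len(lo)
    return [url[:m.start()] + str(i).zfill(width) + url[m.end():]
            for i in range(int(lo), int(hi) + 1)]

arctic = cfg['ARCTIC-TRAIN']
print(len(expand_shards(arctic['URLS'])), 'shards, epoch_size', arctic['epoch_size'])
# 177 shards, epoch_size 177000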
pretrained_models/detector.pt ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5ef3df44e42d2db52d4ffe91f83a22ce9925e2acc9abebf453f2c5d22e380033
size 53582271
pretrained_models/model_config.yaml ADDED
@@ -0,0 +1,119 @@
task_name: train
tags:
- dev
train: true
test: false
ckpt_path: null
seed: null
DATASETS:
  TRAIN:
    FREIHAND-TRAIN:
      WEIGHT: 0.2
    INTERHAND26M-TRAIN:
      WEIGHT: 0.1
    MTC-TRAIN:
      WEIGHT: 0.05
    RHD-TRAIN:
      WEIGHT: 0.05
    COCOW-TRAIN:
      WEIGHT: 0.05
    HALPE-TRAIN:
      WEIGHT: 0.05
    MPIINZSL-TRAIN:
      WEIGHT: 0.05
    HO3D-TRAIN:
      WEIGHT: 0.05
    H2O3D-TRAIN:
      WEIGHT: 0.05
    DEX-TRAIN:
      WEIGHT: 0.05
    BEDLAM-TRAIN:
      WEIGHT: 0.05
    REINTER-TRAIN:
      WEIGHT: 0.1
    HOT3D-TRAIN:
      WEIGHT: 0.05
    ARCTIC-TRAIN:
      WEIGHT: 0.1
  VAL:
    FREIHAND-TRAIN:
      WEIGHT: 1.0
  MOCAP: FREIHAND-MOCAP
  BETAS_REG: true
  CONFIG:
    SCALE_FACTOR: 0.3
    ROT_FACTOR: 30
    TRANS_FACTOR: 0.02
    COLOR_SCALE: 0.2
    ROT_AUG_RATE: 0.6
    TRANS_AUG_RATE: 0.5
    DO_FLIP: false
    FLIP_AUG_RATE: 0.0
    EXTREME_CROP_AUG_RATE: 0.0
    EXTREME_CROP_AUG_LEVEL: 1
extras:
  ignore_warnings: false
  enforce_tags: true
  print_config: true
exp_name: WiLoR
MANO:
  DATA_DIR: mano_data
  MODEL_PATH: ${MANO.DATA_DIR}/mano
  GENDER: neutral
  NUM_HAND_JOINTS: 15
  MEAN_PARAMS: ${MANO.DATA_DIR}/mano_mean_params.npz
  CREATE_BODY_POSE: false
EXTRA:
  FOCAL_LENGTH: 5000
  NUM_LOG_IMAGES: 4
  NUM_LOG_SAMPLES_PER_IMAGE: 8
  PELVIS_IND: 0
GENERAL:
  TOTAL_STEPS: 1000000
  LOG_STEPS: 1000
  VAL_STEPS: 1000
  CHECKPOINT_STEPS: 1000
  CHECKPOINT_SAVE_TOP_K: 1
  NUM_WORKERS: 8
  PREFETCH_FACTOR: 2
TRAIN:
  LR: 1.0e-05
  WEIGHT_DECAY: 0.0001
  BATCH_SIZE: 32
  LOSS_REDUCTION: mean
  NUM_TRAIN_SAMPLES: 2
  NUM_TEST_SAMPLES: 64
  POSE_2D_NOISE_RATIO: 0.01
  SMPL_PARAM_NOISE_RATIO: 0.005
MODEL:
  IMAGE_SIZE: 256
  IMAGE_MEAN:
  - 0.485
  - 0.456
  - 0.406
  IMAGE_STD:
  - 0.229
  - 0.224
  - 0.225
  BACKBONE:
    TYPE: vit
    PRETRAINED_WEIGHTS: hamer_training_data/vitpose_backbone.pth
  MANO_HEAD:
    TYPE: transformer_decoder
    IN_CHANNELS: 2048
    TRANSFORMER_DECODER:
      depth: 6
      heads: 8
      mlp_dim: 1024
      dim_head: 64
      dropout: 0.0
      emb_dropout: 0.0
      norm: layer
      context_dim: 1280
LOSS_WEIGHTS:
  KEYPOINTS_3D: 0.05
  KEYPOINTS_2D: 0.01
  GLOBAL_ORIENT: 0.001
  HAND_POSE: 0.001
  BETAS: 0.0005
  ADVERSARIAL: 0.0005
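Two of these fields are consumed directly by app.py: EXTRA.FOCAL_LENGTH and MODEL.IMAGE_SIZE set the crop camera's focal length, which is rescaled to the full image before cam_crop_to_full is applied. A worked example with plain numbers (the input resolution is illustrative, not part of the config):

FOCAL_LENGTH = 5000      # EXTRA.FOCAL_LENGTH
IMAGE_SIZE = 256         # MODEL.IMAGE_SIZE
img_size_max = 1280      # example max(H, W) of an input image

scaled_focal_length = FOCAL_LENGTH / IMAGE_SIZE * img_size_max
print(scaled_focal_length)   # 25000.0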
pretrained_models/wilor_final.ckpt ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3e97aafc7dd08d883a4cc5a027df61fdb6fda6136dbd1319405413862ada6bb2
size 2564989533
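This checkpoint pairs with pretrained_models/model_config.yaml; the sketch below mirrors how app.py loads the two together, with a CPU fallback when no GPU is present:

import torch
from wilor.models import load_wilor

model, model_cfg = load_wilor(checkpoint_path='./pretrained_models/wilor_final.ckpt',
                              cfg_path='./pretrained_models/model_config.yaml')
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
model = model.to(device).eval()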
pyrender/.coveragerc ADDED
@@ -0,0 +1,5 @@
[report]
exclude_lines =
    def __repr__
    def __str__
    @abc.abstractmethod
pyrender/.flake8 ADDED
@@ -0,0 +1,8 @@
[flake8]
ignore = E231,W504,F405,F403
max-line-length = 79
select = B,C,E,F,W,T4,B9
exclude =
    docs/source/conf.py,
    __pycache__,
    examples/*
pyrender/.gitignore ADDED
@@ -0,0 +1,106 @@
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ docs/**/generated/**
7
+
8
+ # C extensions
9
+ *.so
10
+
11
+ # Distribution / packaging
12
+ .Python
13
+ build/
14
+ develop-eggs/
15
+ dist/
16
+ downloads/
17
+ eggs/
18
+ .eggs/
19
+ lib/
20
+ lib64/
21
+ parts/
22
+ sdist/
23
+ var/
24
+ wheels/
25
+ *.egg-info/
26
+ .installed.cfg
27
+ *.egg
28
+ MANIFEST
29
+
30
+ # PyInstaller
31
+ # Usually these files are written by a python script from a template
32
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
33
+ *.manifest
34
+ *.spec
35
+
36
+ # Installer logs
37
+ pip-log.txt
38
+ pip-delete-this-directory.txt
39
+
40
+ # Unit test / coverage reports
41
+ htmlcov/
42
+ .tox/
43
+ .coverage
44
+ .coverage.*
45
+ .cache
46
+ nosetests.xml
47
+ coverage.xml
48
+ *.cover
49
+ .hypothesis/
50
+ .pytest_cache/
51
+
52
+ # Translations
53
+ *.mo
54
+ *.pot
55
+
56
+ # Django stuff:
57
+ *.log
58
+ local_settings.py
59
+ db.sqlite3
60
+
61
+ # Flask stuff:
62
+ instance/
63
+ .webassets-cache
64
+
65
+ # Scrapy stuff:
66
+ .scrapy
67
+
68
+ # Sphinx documentation
69
+ docs/_build/
70
+
71
+ # PyBuilder
72
+ target/
73
+
74
+ # Jupyter Notebook
75
+ .ipynb_checkpoints
76
+
77
+ # pyenv
78
+ .python-version
79
+
80
+ # celery beat schedule file
81
+ celerybeat-schedule
82
+
83
+ # SageMath parsed files
84
+ *.sage.py
85
+
86
+ # Environments
87
+ .env
88
+ .venv
89
+ env/
90
+ venv/
91
+ ENV/
92
+ env.bak/
93
+ venv.bak/
94
+
95
+ # Spyder project settings
96
+ .spyderproject
97
+ .spyproject
98
+
99
+ # Rope project settings
100
+ .ropeproject
101
+
102
+ # mkdocs documentation
103
+ /site
104
+
105
+ # mypy
106
+ .mypy_cache/
pyrender/.pre-commit-config.yaml ADDED
@@ -0,0 +1,6 @@
repos:
-   repo: https://gitlab.com/pycqa/flake8
    rev: 3.7.1
    hooks:
    -   id: flake8
        exclude: ^setup.py
pyrender/.travis.yml ADDED
@@ -0,0 +1,43 @@
1
+ language: python
2
+ sudo: required
3
+ dist: xenial
4
+
5
+ python:
6
+ - '3.6'
7
+ - '3.7'
8
+
9
+ before_install:
10
+ # Pre-install osmesa
11
+ - sudo apt update
12
+ - sudo wget https://github.com/mmatl/travis_debs/raw/master/xenial/mesa_18.3.3-0.deb
13
+ - sudo dpkg -i ./mesa_18.3.3-0.deb || true
14
+ - sudo apt install -f
15
+ - git clone https://github.com/mmatl/pyopengl.git
16
+ - cd pyopengl
17
+ - pip install .
18
+ - cd ..
19
+
20
+ install:
21
+ - pip install .
22
+ # - pip install -q pytest pytest-cov coveralls
23
+ - pip install pytest pytest-cov coveralls
24
+ - pip install ./pyopengl
25
+
26
+ script:
27
+ - PYOPENGL_PLATFORM=osmesa pytest --cov=pyrender tests
28
+
29
+ after_success:
30
+ - coveralls || true
31
+
32
+ deploy:
33
+ provider: pypi
34
+ skip_existing: true
35
+ user: mmatl
36
+ on:
37
+ tags: true
38
+ branch: master
39
+ password:
40
+ secure: O4WWMbTYb2eVYIO4mMOVa6/xyhX7mPvJpd96cxfNvJdyuqho8VapOhzqsI5kahMB1hFjWWr61yR4+Ru5hoDYf3XA6BQVk8eCY9+0H7qRfvoxex71lahKAqfHLMoE1xNdiVTgl+QN9hYjOnopLod24rx8I8eXfpHu/mfCpuTYGyLlNcDP5St3bXpXLPB5wg8Jo1YRRv6W/7fKoXyuWjewk9cJAS0KrEgnDnSkdwm6Pb+80B2tcbgdGvpGaByw5frndwKiMUMgVUownepDU5POQq2p29wwn9lCvRucULxjEgO+63jdbZRj5fNutLarFa2nISfYnrd72LOyDfbJubwAzzAIsy2JbFORyeHvCgloiuE9oE7a9oOQt/1QHBoIV0seiawMWn55Yp70wQ7HlJs4xSGJWCGa5+9883QRNsvj420atkb3cgO8P+PXwiwTi78Dq7Z/xHqccsU0b8poqBneQoA+pUGgNnF6V7Z8e9RsCcse2gAWSZWuOK3ua+9xCgH7I7MeL3afykr2aJ+yFCoYJMFrUjJeodMX2RbL0q+3FzIPZeGW3WdhTEAL9TSKRcJBSQTskaQlZx/OcpobxS7t3d2S68CCLG9uMTqOTYws55WZ1etalA75sRk9K2MR7ZGjZW3jdtvMViISc/t6Rrjea1GE8ZHGJC6/IeLIWA2c7nc=
41
+ distributions: sdist bdist_wheel
42
+ notifications:
43
+ email: false
pyrender/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2019 Matthew Matl
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
pyrender/MANIFEST.in ADDED
@@ -0,0 +1,5 @@
# Include the license
include LICENSE
include README.rst
include pyrender/fonts/*
include pyrender/shaders/*
pyrender/README.md ADDED
@@ -0,0 +1,92 @@
1
+ # Pyrender
2
+
3
+ [![Build Status](https://travis-ci.org/mmatl/pyrender.svg?branch=master)](https://travis-ci.org/mmatl/pyrender)
4
+ [![Documentation Status](https://readthedocs.org/projects/pyrender/badge/?version=latest)](https://pyrender.readthedocs.io/en/latest/?badge=latest)
5
+ [![Coverage Status](https://coveralls.io/repos/github/mmatl/pyrender/badge.svg?branch=master)](https://coveralls.io/github/mmatl/pyrender?branch=master)
6
+ [![PyPI version](https://badge.fury.io/py/pyrender.svg)](https://badge.fury.io/py/pyrender)
7
+ [![Downloads](https://pepy.tech/badge/pyrender)](https://pepy.tech/project/pyrender)
8
+
9
+ Pyrender is a pure Python (2.7, 3.4, 3.5, 3.6) library for physically-based
10
+ rendering and visualization.
11
+ It is designed to meet the [glTF 2.0 specification from Khronos](https://www.khronos.org/gltf/).
12
+
13
+ Pyrender is lightweight, easy to install, and simple to use.
14
+ It comes packaged with both an intuitive scene viewer and a headache-free
15
+ offscreen renderer with support for GPU-accelerated rendering on headless
16
+ servers, which makes it perfect for machine learning applications.
17
+
18
+ Extensive documentation, including a quickstart guide, is provided [here](https://pyrender.readthedocs.io/en/latest/).
19
+
20
+ For a minimal working example of GPU-accelerated offscreen rendering using EGL,
21
+ check out the [EGL Google CoLab Notebook](https://colab.research.google.com/drive/1pcndwqeY8vker3bLKQNJKr3B-7-SYenE?usp=sharing).
22
+
23
+
24
+ <p align="center">
25
+ <img width="48%" src="https://github.com/mmatl/pyrender/blob/master/docs/source/_static/rotation.gif?raw=true" alt="GIF of Viewer"/>
26
+ <img width="48%" src="https://github.com/mmatl/pyrender/blob/master/docs/source/_static/damaged_helmet.png?raw=true" alt="Damaged Helmet"/>
27
+ </p>
28
+
29
+ ## Installation
30
+ You can install pyrender directly from pip.
31
+
32
+ ```bash
33
+ pip install pyrender
34
+ ```
35
+
36
+ ## Features
37
+
38
+ Despite being lightweight, pyrender has lots of features, including:
39
+
40
+ * Simple interoperation with the amazing [trimesh](https://github.com/mikedh/trimesh) project,
41
+ which enables out-of-the-box support for dozens of mesh types, including OBJ,
42
+ STL, DAE, OFF, PLY, and GLB.
43
+ * An easy-to-use scene viewer with support for animation, showing face and vertex
44
+ normals, toggling lighting conditions, and saving images and GIFs.
45
+ * An offscreen rendering module that supports OSMesa and EGL backends.
46
+ * Shadow mapping for directional and spot lights.
47
+ * Metallic-roughness materials for physically-based rendering, including several
48
+ types of texture and normal mapping.
49
+ * Transparency.
50
+ * Depth and color image generation.
51
+
52
+ ## Sample Usage
53
+
54
+ For sample usage, check out the [quickstart
55
+ guide](https://pyrender.readthedocs.io/en/latest/examples/index.html) or one of
56
+ the Google CoLab Notebooks:
57
+
58
+ * [EGL Google CoLab Notebook](https://colab.research.google.com/drive/1pcndwqeY8vker3bLKQNJKr3B-7-SYenE?usp=sharing)
59
+
60
+ ## Viewer Keyboard and Mouse Controls
61
+
62
+ When using the viewer, the basic controls for moving about the scene are as follows:
63
+
64
+ * To rotate the camera about the center of the scene, hold the left mouse button and drag the cursor.
65
+ * To rotate the camera about its viewing axis, hold `CTRL` left mouse button and drag the cursor.
66
+ * To pan the camera, do one of the following:
67
+ * Hold `SHIFT`, then hold the left mouse button and drag the cursor.
68
+ * Hold the middle mouse button and drag the cursor.
69
+ * To zoom the camera in or out, do one of the following:
70
+ * Scroll the mouse wheel.
71
+ * Hold the right mouse button and drag the cursor.
72
+
73
+ The available keyboard commands are as follows:
74
+
75
+ * `a`: Toggles rotational animation mode.
76
+ * `c`: Toggles backface culling.
77
+ * `f`: Toggles fullscreen mode.
78
+ * `h`: Toggles shadow rendering.
79
+ * `i`: Toggles axis display mode (no axes, world axis, mesh axes, all axes).
80
+ * `l`: Toggles lighting mode (scene lighting, Raymond lighting, or direct lighting).
81
+ * `m`: Toggles face normal visualization.
82
+ * `n`: Toggles vertex normal visualization.
83
+ * `o`: Toggles orthographic camera mode.
84
+ * `q`: Quits the viewer.
85
+ * `r`: Starts recording a GIF, and pressing again stops recording and opens a file dialog.
86
+ * `s`: Opens a file dialog to save the current view as an image.
87
+ * `w`: Toggles wireframe mode (scene default, flip wireframes, all wireframe, or all solid).
88
+ * `z`: Resets the camera to the default view.
89
+
90
+ As a note, displaying shadows significantly slows down rendering, so if you're
91
+ experiencing low framerates, just kill shadows or reduce the number of lights in
92
+ your scene.
pyrender/docs/Makefile ADDED
@@ -0,0 +1,23 @@
1
+ # Minimal makefile for Sphinx documentation
2
+ #
3
+
4
+ # You can set these variables from the command line.
5
+ SPHINXOPTS =
6
+ SPHINXBUILD = sphinx-build
7
+ SOURCEDIR = source
8
+ BUILDDIR = build
9
+
10
+ # Put it first so that "make" without argument is like "make help".
11
+ help:
12
+ @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
13
+
14
+ .PHONY: help Makefile
15
+
16
+ clean:
17
+ @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
18
+ rm -rf ./source/generated/*
19
+
20
+ # Catch-all target: route all unknown targets to Sphinx using the new
21
+ # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
22
+ %: Makefile
23
+ @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
pyrender/docs/make.bat ADDED
@@ -0,0 +1,35 @@
1
+ @ECHO OFF
2
+
3
+ pushd %~dp0
4
+
5
+ REM Command file for Sphinx documentation
6
+
7
+ if "%SPHINXBUILD%" == "" (
8
+ set SPHINXBUILD=sphinx-build
9
+ )
10
+ set SOURCEDIR=source
11
+ set BUILDDIR=build
12
+
13
+ if "%1" == "" goto help
14
+
15
+ %SPHINXBUILD% >NUL 2>NUL
16
+ if errorlevel 9009 (
17
+ echo.
18
+ echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19
+ echo.installed, then set the SPHINXBUILD environment variable to point
20
+ echo.to the full path of the 'sphinx-build' executable. Alternatively you
21
+ echo.may add the Sphinx directory to PATH.
22
+ echo.
23
+ echo.If you don't have Sphinx installed, grab it from
24
+ echo.http://sphinx-doc.org/
25
+ exit /b 1
26
+ )
27
+
28
+ %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
29
+ goto end
30
+
31
+ :help
32
+ %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
33
+
34
+ :end
35
+ popd
pyrender/docs/source/api/index.rst ADDED
@@ -0,0 +1,59 @@
1
+ Pyrender API Documentation
2
+ ==========================
3
+
4
+ Constants
5
+ ---------
6
+ .. automodapi:: pyrender.constants
7
+ :no-inheritance-diagram:
8
+ :no-main-docstr:
9
+ :no-heading:
10
+
11
+ Cameras
12
+ -------
13
+ .. automodapi:: pyrender.camera
14
+ :no-inheritance-diagram:
15
+ :no-main-docstr:
16
+ :no-heading:
17
+
18
+ Lighting
19
+ --------
20
+ .. automodapi:: pyrender.light
21
+ :no-inheritance-diagram:
22
+ :no-main-docstr:
23
+ :no-heading:
24
+
25
+ Objects
26
+ -------
27
+ .. automodapi:: pyrender
28
+ :no-inheritance-diagram:
29
+ :no-main-docstr:
30
+ :no-heading:
31
+ :skip: Camera, DirectionalLight, Light, OffscreenRenderer, Node
32
+ :skip: OrthographicCamera, PerspectiveCamera, PointLight, RenderFlags
33
+ :skip: Renderer, Scene, SpotLight, TextAlign, Viewer, GLTF
34
+
35
+ Scenes
36
+ ------
37
+ .. automodapi:: pyrender
38
+ :no-inheritance-diagram:
39
+ :no-main-docstr:
40
+ :no-heading:
41
+ :skip: Camera, DirectionalLight, Light, OffscreenRenderer
42
+ :skip: OrthographicCamera, PerspectiveCamera, PointLight, RenderFlags
43
+ :skip: Renderer, SpotLight, TextAlign, Viewer, Sampler, Texture, Material
44
+ :skip: MetallicRoughnessMaterial, Primitive, Mesh, GLTF
45
+
46
+ On-Screen Viewer
47
+ ----------------
48
+ .. automodapi:: pyrender.viewer
49
+ :no-inheritance-diagram:
50
+ :no-inherited-members:
51
+ :no-main-docstr:
52
+ :no-heading:
53
+
54
+ Off-Screen Rendering
55
+ --------------------
56
+ .. automodapi:: pyrender.offscreen
57
+ :no-inheritance-diagram:
58
+ :no-main-docstr:
59
+ :no-heading:
pyrender/docs/source/conf.py ADDED
@@ -0,0 +1,352 @@
1
+ # -*- coding: utf-8 -*-
2
+ #
3
+ # core documentation build configuration file, created by
4
+ # sphinx-quickstart on Sun Oct 16 14:33:48 2016.
5
+ #
6
+ # This file is execfile()d with the current directory set to its
7
+ # containing dir.
8
+ #
9
+ # Note that not all possible configuration values are present in this
10
+ # autogenerated file.
11
+ #
12
+ # All configuration values have a default; values that are commented out
13
+ # serve to show the default.
14
+
15
+ import sys
16
+ import os
17
+ from pyrender import __version__
18
+ from sphinx.domains.python import PythonDomain
19
+
20
+ # If extensions (or modules to document with autodoc) are in another directory,
21
+ # add these directories to sys.path here. If the directory is relative to the
22
+ # documentation root, use os.path.abspath to make it absolute, like shown here.
23
+ sys.path.insert(0, os.path.abspath('../../'))
24
+
25
+ # -- General configuration ------------------------------------------------
26
+
27
+ # If your documentation needs a minimal Sphinx version, state it here.
28
+ #needs_sphinx = '1.0'
29
+
30
+ # Add any Sphinx extension module names here, as strings. They can be
31
+ # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
32
+ # ones.
33
+ extensions = [
34
+ 'sphinx.ext.autodoc',
35
+ 'sphinx.ext.autosummary',
36
+ 'sphinx.ext.coverage',
37
+ 'sphinx.ext.githubpages',
38
+ 'sphinx.ext.intersphinx',
39
+ 'sphinx.ext.napoleon',
40
+ 'sphinx.ext.viewcode',
41
+ 'sphinx_automodapi.automodapi',
42
+ 'sphinx_automodapi.smart_resolver'
43
+ ]
44
+ numpydoc_class_members_toctree = False
45
+ automodapi_toctreedirnm = 'generated'
46
+ automodsumm_inherited_members = True
47
+
48
+ # Add any paths that contain templates here, relative to this directory.
49
+ templates_path = ['_templates']
50
+
51
+ # The suffix(es) of source filenames.
52
+ # You can specify multiple suffix as a list of string:
53
+ # source_suffix = ['.rst', '.md']
54
+ source_suffix = '.rst'
55
+
56
+ # The encoding of source files.
57
+ #source_encoding = 'utf-8-sig'
58
+
59
+ # The master toctree document.
60
+ master_doc = 'index'
61
+
62
+ # General information about the project.
63
+ project = u'pyrender'
64
+ copyright = u'2018, Matthew Matl'
65
+ author = u'Matthew Matl'
66
+
67
+ # The version info for the project you're documenting, acts as replacement for
68
+ # |version| and |release|, also used in various other places throughout the
69
+ # built documents.
70
+ #
71
+ # The short X.Y version.
72
+ version = __version__
73
+ # The full version, including alpha/beta/rc tags.
74
+ release = __version__
75
+
76
+ # The language for content autogenerated by Sphinx. Refer to documentation
77
+ # for a list of supported languages.
78
+ #
79
+ # This is also used if you do content translation via gettext catalogs.
80
+ # Usually you set "language" from the command line for these cases.
81
+ language = None
82
+
83
+ # There are two options for replacing |today|: either, you set today to some
84
+ # non-false value, then it is used:
85
+ #today = ''
86
+ # Else, today_fmt is used as the format for a strftime call.
87
+ #today_fmt = '%B %d, %Y'
88
+
89
+ # List of patterns, relative to source directory, that match files and
90
+ # directories to ignore when looking for source files.
91
+ exclude_patterns = []
92
+
93
+ # The reST default role (used for this markup: `text`) to use for all
94
+ # documents.
95
+ #default_role = None
96
+
97
+ # If true, '()' will be appended to :func: etc. cross-reference text.
98
+ #add_function_parentheses = True
99
+
100
+ # If true, the current module name will be prepended to all description
101
+ # unit titles (such as .. function::).
102
+ #add_module_names = True
103
+
104
+ # If true, sectionauthor and moduleauthor directives will be shown in the
105
+ # output. They are ignored by default.
106
+ #show_authors = False
107
+
108
+ # The name of the Pygments (syntax highlighting) style to use.
109
+ pygments_style = 'sphinx'
110
+
111
+ # A list of ignored prefixes for module index sorting.
112
+ #modindex_common_prefix = []
113
+
114
+ # If true, keep warnings as "system message" paragraphs in the built documents.
115
+ #keep_warnings = False
116
+
117
+ # If true, `todo` and `todoList` produce output, else they produce nothing.
118
+ todo_include_todos = False
119
+
120
+
121
+ # -- Options for HTML output ----------------------------------------------
122
+
123
+ # The theme to use for HTML and HTML Help pages. See the documentation for
124
+ # a list of builtin themes.
125
+ import sphinx_rtd_theme
126
+ html_theme = 'sphinx_rtd_theme'
127
+ html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
128
+
129
+ # Theme options are theme-specific and customize the look and feel of a theme
130
+ # further. For a list of options available for each theme, see the
131
+ # documentation.
132
+ #html_theme_options = {}
133
+
134
+ # Add any paths that contain custom themes here, relative to this directory.
135
+ #html_theme_path = []
136
+
137
+ # The name for this set of Sphinx documents. If None, it defaults to
138
+ # "<project> v<release> documentation".
139
+ #html_title = None
140
+
141
+ # A shorter title for the navigation bar. Default is the same as html_title.
142
+ #html_short_title = None
143
+
144
+ # The name of an image file (relative to this directory) to place at the top
145
+ # of the sidebar.
146
+ #html_logo = None
147
+
148
+ # The name of an image file (relative to this directory) to use as a favicon of
149
+ # the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
150
+ # pixels large.
151
+ #html_favicon = None
152
+
153
+ # Add any paths that contain custom static files (such as style sheets) here,
154
+ # relative to this directory. They are copied after the builtin static files,
155
+ # so a file named "default.css" will overwrite the builtin "default.css".
156
+ html_static_path = ['_static']
157
+
158
+ # Add any extra paths that contain custom files (such as robots.txt or
159
+ # .htaccess) here, relative to this directory. These files are copied
160
+ # directly to the root of the documentation.
161
+ #html_extra_path = []
162
+
163
+ # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
164
+ # using the given strftime format.
165
+ #html_last_updated_fmt = '%b %d, %Y'
166
+
167
+ # If true, SmartyPants will be used to convert quotes and dashes to
168
+ # typographically correct entities.
169
+ #html_use_smartypants = True
170
+
171
+ # Custom sidebar templates, maps document names to template names.
172
+ #html_sidebars = {}
173
+
174
+ # Additional templates that should be rendered to pages, maps page names to
175
+ # template names.
176
+ #html_additional_pages = {}
177
+
178
+ # If false, no module index is generated.
179
+ #html_domain_indices = True
180
+
181
+ # If false, no index is generated.
182
+ #html_use_index = True
183
+
184
+ # If true, the index is split into individual pages for each letter.
185
+ #html_split_index = False
186
+
187
+ # If true, links to the reST sources are added to the pages.
188
+ #html_show_sourcelink = True
189
+
190
+ # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
191
+ #html_show_sphinx = True
192
+
193
+ # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
194
+ #html_show_copyright = True
195
+
196
+ # If true, an OpenSearch description file will be output, and all pages will
197
+ # contain a <link> tag referring to it. The value of this option must be the
198
+ # base URL from which the finished HTML is served.
199
+ #html_use_opensearch = ''
200
+
201
+ # This is the file name suffix for HTML files (e.g. ".xhtml").
202
+ #html_file_suffix = None
203
+
204
+ # Language to be used for generating the HTML full-text search index.
205
+ # Sphinx supports the following languages:
206
+ # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
207
+ # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
208
+ #html_search_language = 'en'
209
+
210
+ # A dictionary with options for the search language support, empty by default.
211
+ # Now only 'ja' uses this config value
212
+ #html_search_options = {'type': 'default'}
213
+
214
+ # The name of a javascript file (relative to the configuration directory) that
215
+ # implements a search results scorer. If empty, the default will be used.
216
+ #html_search_scorer = 'scorer.js'
217
+
218
+ # Output file base name for HTML help builder.
219
+ htmlhelp_basename = 'coredoc'
220
+
221
+ # -- Options for LaTeX output ---------------------------------------------
222
+
223
+ latex_elements = {
224
+ # The paper size ('letterpaper' or 'a4paper').
225
+ #'papersize': 'letterpaper',
226
+
227
+ # The font size ('10pt', '11pt' or '12pt').
228
+ #'pointsize': '10pt',
229
+
230
+ # Additional stuff for the LaTeX preamble.
231
+ #'preamble': '',
232
+
233
+ # Latex figure (float) alignment
234
+ #'figure_align': 'htbp',
235
+ }
236
+
237
+ # Grouping the document tree into LaTeX files. List of tuples
238
+ # (source start file, target name, title,
239
+ # author, documentclass [howto, manual, or own class]).
240
+ latex_documents = [
241
+ (master_doc, 'pyrender.tex', u'pyrender Documentation',
242
+ u'Matthew Matl', 'manual'),
243
+ ]
244
+
245
+ # The name of an image file (relative to this directory) to place at the top of
246
+ # the title page.
247
+ #latex_logo = None
248
+
249
+ # For "manual" documents, if this is true, then toplevel headings are parts,
250
+ # not chapters.
251
+ #latex_use_parts = False
252
+
253
+ # If true, show page references after internal links.
254
+ #latex_show_pagerefs = False
255
+
256
+ # If true, show URL addresses after external links.
257
+ #latex_show_urls = False
258
+
259
+ # Documents to append as an appendix to all manuals.
260
+ #latex_appendices = []
261
+
262
+ # If false, no module index is generated.
263
+ #latex_domain_indices = True
264
+
265
+
266
+ # -- Options for manual page output ---------------------------------------
267
+
268
+ # One entry per manual page. List of tuples
269
+ # (source start file, name, description, authors, manual section).
270
+ man_pages = [
271
+ (master_doc, 'pyrender', u'pyrender Documentation',
272
+ [author], 1)
273
+ ]
274
+
275
+ # If true, show URL addresses after external links.
276
+ #man_show_urls = False
277
+
278
+
279
+ # -- Options for Texinfo output -------------------------------------------
280
+
281
+ # Grouping the document tree into Texinfo files. List of tuples
282
+ # (source start file, target name, title, author,
283
+ # dir menu entry, description, category)
284
+ texinfo_documents = [
285
+ (master_doc, 'pyrender', u'pyrender Documentation',
286
+ author, 'pyrender', 'One line description of project.',
287
+ 'Miscellaneous'),
288
+ ]
289
+
290
+ # Documents to append as an appendix to all manuals.
291
+ #texinfo_appendices = []
292
+
293
+ # If false, no module index is generated.
294
+ #texinfo_domain_indices = True
295
+
296
+ # How to display URL addresses: 'footnote', 'no', or 'inline'.
297
+ #texinfo_show_urls = 'footnote'
298
+
299
+ # If true, do not generate a @detailmenu in the "Top" node's menu.
300
+ #texinfo_no_detailmenu = False
301
+
302
+ intersphinx_mapping = {
303
+ 'python' : ('https://docs.python.org/', None),
304
+ 'pyrender' : ('https://pyrender.readthedocs.io/en/latest/', None),
305
+ }
306
+
307
+ # Autosummary fix
308
+ autosummary_generate = True
309
+
310
+ # Try to suppress multiple-definition warnings by always taking the shorter
311
+ # path when two or more paths have the same base module
312
+
313
+ class MyPythonDomain(PythonDomain):
314
+
315
+ def find_obj(self, env, modname, classname, name, type, searchmode=0):
316
+ """Ensures an object always resolves to the desired module
317
+ if defined there."""
318
+ orig_matches = PythonDomain.find_obj(
319
+ self, env, modname, classname, name, type, searchmode
320
+ )
321
+
322
+ if len(orig_matches) <= 1:
323
+ return orig_matches
324
+
325
+ # If multiple matches, try to take the shortest if all the modules are
326
+ # the same
327
+ first_match_name_sp = orig_matches[0][0].split('.')
328
+ base_name = first_match_name_sp[0]
329
+ min_len = len(first_match_name_sp)
330
+ best_match = orig_matches[0]
331
+
332
+ for match in orig_matches[1:]:
333
+ match_name = match[0]
334
+ match_name_sp = match_name.split('.')
335
+ match_base = match_name_sp[0]
336
+
337
+ # If we have mismatched bases, return them all to trigger warnings
338
+ if match_base != base_name:
339
+ return orig_matches
340
+
341
+ # Otherwise, check and see if it's shorter
342
+ if len(match_name_sp) < min_len:
343
+ min_len = len(match_name_sp)
344
+ best_match = match
345
+
346
+ return (best_match,)
347
+
348
+
349
+ def setup(sphinx):
350
+ """Use MyPythonDomain in place of PythonDomain"""
351
+ sphinx.override_domain(MyPythonDomain)
352
+
pyrender/docs/source/examples/cameras.rst ADDED
@@ -0,0 +1,26 @@
1
+ .. _camera_guide:
2
+
3
+ Creating Cameras
4
+ ================
5
+
6
+ Pyrender supports three camera types -- :class:`.PerspectiveCamera` and
7
+ :class:`.IntrinsicsCamera` types,
8
+ which render scenes as a human would see them, and
9
+ :class:`.OrthographicCamera` types, which preserve distances between points.
10
+
11
+ Creating cameras is easy -- just specify their basic attributes:
12
+
13
+ >>> pc = pyrender.PerspectiveCamera(yfov=np.pi / 3.0, aspectRatio=1.414)
14
+ >>> oc = pyrender.OrthographicCamera(xmag=1.0, ymag=1.0)
15
+
16
+ For more information, see the Khronos group's documentation here_:
17
+
18
+ .. _here: https://github.com/KhronosGroup/glTF/tree/master/specification/2.0#projection-matrices
19
+
20
+ When you add cameras to the scene, make sure that you're using OpenGL camera
21
+ coordinates to specify their pose. See the illustration below for details.
22
+ Basically, the camera z-axis points away from the scene, the x-axis points
23
+ right in image space, and the y-axis points up in image space.
24
+
25
+ .. image:: /_static/camera_coords.png
26
+
pyrender/docs/source/examples/index.rst ADDED
@@ -0,0 +1,20 @@
1
+ .. _guide:
2
+
3
+ User Guide
4
+ ==========
5
+
6
+ This section contains guides on how to use Pyrender to quickly visualize
7
+ your 3D data, including a quickstart guide and more detailed descriptions
8
+ of each part of the rendering pipeline.
9
+
10
+
11
+ .. toctree::
12
+ :maxdepth: 2
13
+
14
+ quickstart.rst
15
+ models.rst
16
+ lighting.rst
17
+ cameras.rst
18
+ scenes.rst
19
+ offscreen.rst
20
+ viewer.rst
pyrender/docs/source/examples/lighting.rst ADDED
@@ -0,0 +1,21 @@
1
+ .. _lighting_guide:
2
+
3
+ Creating Lights
4
+ ===============
5
+
6
+ Pyrender supports three types of punctual light:
7
+
8
+ - :class:`.PointLight`: Point-based light sources, such as light bulbs.
9
+ - :class:`.SpotLight`: A conical light source, like a flashlight.
10
+ - :class:`.DirectionalLight`: A general light that does not attenuate with
11
+ distance.
12
+
13
+ Creating lights is easy -- just specify their basic attributes:
14
+
15
+ >>> pl = pyrender.PointLight(color=[1.0, 1.0, 1.0], intensity=2.0)
16
+ >>> sl = pyrender.SpotLight(color=[1.0, 1.0, 1.0], intensity=2.0,
17
+ ... innerConeAngle=0.05, outerConeAngle=0.5)
18
+ >>> dl = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=2.0)
19
+
20
+ For more information about how these lighting models are implemented,
21
+ see their class documentation.
pyrender/docs/source/examples/models.rst ADDED
@@ -0,0 +1,143 @@
1
+ .. _model_guide:
2
+
3
+ Loading and Configuring Models
4
+ ==============================
5
+ The first step to any rendering application is loading your models.
6
+ Pyrender implements the GLTF 2.0 specification, which means that all
7
+ models are composed of a hierarchy of objects.
8
+
9
+ At the top level, we have a :class:`.Mesh`. The :class:`.Mesh` is
10
+ basically a wrapper of any number of :class:`.Primitive` types,
11
+ which actually represent geometry that can be drawn to the screen.
12
+
13
+ Primitives are composed of a variety of parameters, including
14
+ vertex positions, vertex normals, color and texture information,
15
+ and triangle indices if smooth rendering is desired.
16
+ They can implement point clouds, triangular meshes, or lines
17
+ depending on how you configure their data and set their
18
+ :attr:`.Primitive.mode` parameter.
19
+
20
+ Although you can create primitives yourself if you want to,
21
+ it's probably easier to just use the utility functions provided
22
+ in the :class:`.Mesh` class.
23
+
24
+ Creating Triangular Meshes
25
+ --------------------------
26
+
27
+ Simple Construction
28
+ ~~~~~~~~~~~~~~~~~~~
29
+ Pyrender allows you to create a :class:`.Mesh` containing a
30
+ triangular mesh model directly from a :class:`~trimesh.base.Trimesh` object
31
+ using the :meth:`.Mesh.from_trimesh` static method.
32
+
33
+ >>> import trimesh
34
+ >>> import pyrender
35
+ >>> import numpy as np
36
+ >>> tm = trimesh.load('examples/models/fuze.obj')
37
+ >>> m = pyrender.Mesh.from_trimesh(tm)
38
+ >>> m.primitives
39
+ [<pyrender.primitive.Primitive at 0x7fbb0af60e50>]
40
+
41
+ You can also create a single :class:`.Mesh` from a list of
42
+ :class:`~trimesh.base.Trimesh` objects:
43
+
44
+ >>> tms = [trimesh.creation.icosahedron(), trimesh.creation.cylinder()]
45
+ >>> m = pyrender.Mesh.from_trimesh(tms)
46
+ [<pyrender.primitive.Primitive at 0x7fbb0c2b74d0>,
47
+ <pyrender.primitive.Primitive at 0x7fbb0c2b7550>]
48
+
49
+ Vertex Smoothing
50
+ ~~~~~~~~~~~~~~~~
51
+
52
+ The :meth:`.Mesh.from_trimesh` method has a few additional optional parameters.
53
+ If you want to render the mesh without interpolating face normals, which can
54
+ be useful for meshes that are supposed to be angular (e.g. a cube), you
55
+ can specify ``smooth=False``.
56
+
57
+ >>> m = pyrender.Mesh.from_trimesh(tm, smooth=False)
58
+
59
+ Per-Face or Per-Vertex Coloration
60
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
61
+
62
+ If you have an untextured trimesh, you can color it in with per-face or
63
+ per-vertex colors:
64
+
65
+ >>> tm.visual.vertex_colors = np.random.uniform(size=tm.vertices.shape)
66
+ >>> tm.visual.face_colors = np.random.uniform(size=tm.faces.shape)
67
+ >>> m = pyrender.Mesh.from_trimesh(tm)
68
+
69
+ Instancing
70
+ ~~~~~~~~~~
71
+
72
+ If you want to render many copies of the same mesh at different poses,
73
+ you can statically create a vast array of them in an efficient manner.
74
+ Simply specify the ``poses`` parameter to be a list of ``N`` 4x4 homogenous
75
+ transformation matrics that position the meshes relative to their common
76
+ base frame:
77
+
78
+ >>> tfs = np.tile(np.eye(4), (3,1,1))
79
+ >>> tfs[1,:3,3] = [0.1, 0.0, 0.0]
80
+ >>> tfs[2,:3,3] = [0.2, 0.0, 0.0]
81
+ >>> tfs
82
+ array([[[1. , 0. , 0. , 0. ],
83
+ [0. , 1. , 0. , 0. ],
84
+ [0. , 0. , 1. , 0. ],
85
+ [0. , 0. , 0. , 1. ]],
86
+ [[1. , 0. , 0. , 0.1],
87
+ [0. , 1. , 0. , 0. ],
88
+ [0. , 0. , 1. , 0. ],
89
+ [0. , 0. , 0. , 1. ]],
90
+ [[1. , 0. , 0. , 0.2],
91
+ [0. , 1. , 0. , 0. ],
92
+ [0. , 0. , 1. , 0. ],
93
+ [0. , 0. , 0. , 1. ]]])
94
+
95
+ >>> m = pyrender.Mesh.from_trimesh(tm, poses=tfs)
96
+
97
+ Custom Materials
98
+ ~~~~~~~~~~~~~~~~
99
+
100
+ You can also specify a custom material for any triangular mesh you create
101
+ in the ``material`` parameter of :meth:`.Mesh.from_trimesh`.
102
+ The main material supported by Pyrender is the
103
+ :class:`.MetallicRoughnessMaterial`.
104
+ The metallic-roughness model supports rendering highly-realistic objects across
105
+ a wide gamut of materials.
106
+
107
+ For more information, see the documentation of the
108
+ :class:`.MetallicRoughnessMaterial` constructor or look at the Khronos_
109
+ documentation for more information.
110
+
111
+ .. _Khronos: https://github.com/KhronosGroup/glTF/tree/master/specification/2.0#materials
112
+
113
+ Creating Point Clouds
114
+ ---------------------
115
+
116
+ Point Sprites
117
+ ~~~~~~~~~~~~~
118
+ Pyrender also allows you to create a :class:`.Mesh` containing a
119
+ point cloud directly from :class:`numpy.ndarray` instances
120
+ using the :meth:`.Mesh.from_points` static method.
121
+
122
+ Simply provide a list of points and optional per-point colors and normals.
123
+
124
+ >>> pts = tm.vertices.copy()
125
+ >>> colors = np.random.uniform(size=pts.shape)
126
+ >>> m = pyrender.Mesh.from_points(pts, colors=colors)
127
+
128
+ Point clouds created in this way will be rendered as square point sprites.
129
+
130
+ .. image:: /_static/points.png
131
+
132
+ Point Spheres
133
+ ~~~~~~~~~~~~~
134
+ If you have a monochromatic point cloud and would like to render it with
135
+ spheres, you can render it by instancing a spherical trimesh:
136
+
137
+ >>> sm = trimesh.creation.uv_sphere(radius=0.1)
138
+ >>> sm.visual.vertex_colors = [1.0, 0.0, 0.0]
139
+ >>> tfs = np.tile(np.eye(4), (len(pts), 1, 1))
140
+ >>> tfs[:,:3,3] = pts
141
+ >>> m = pyrender.Mesh.from_trimesh(sm, poses=tfs)
142
+
143
+ .. image:: /_static/points2.png
pyrender/docs/source/examples/offscreen.rst ADDED
@@ -0,0 +1,87 @@
1
+ .. _offscreen_guide:
2
+
3
+ Offscreen Rendering
4
+ ===================
5
+
6
+ .. note::
7
+ If you're using a headless server, you'll need to use either EGL (for
8
+ GPU-accelerated rendering) or OSMesa (for CPU-only software rendering).
9
+ If you're using OSMesa, be sure that you've installed it properly. See
10
+ :ref:`osmesa` for details.
11
+
12
+ Choosing a Backend
13
+ ------------------
14
+
15
+ Once you have a scene set up with its geometry, cameras, and lights,
16
+ you can render it using the :class:`.OffscreenRenderer`. Pyrender supports
17
+ three backends for offscreen rendering:
18
+
19
+ - Pyglet, the same engine that runs the viewer. This requires an active
20
+ display manager, so you can't run it on a headless server. This is the
21
+ default option.
22
+ - OSMesa, a software renderer.
23
+ - EGL, which allows for GPU-accelerated rendering without a display manager.
24
+
25
+ If you want to use OSMesa or EGL, you need to set the ``PYOPENGL_PLATFORM``
26
+ environment variable before importing pyrender or any other OpenGL library.
27
+ You can do this at the command line:
28
+
29
+ .. code-block:: bash
30
+
31
+ PYOPENGL_PLATFORM=osmesa python render.py
32
+
33
+ or at the top of your Python script:
34
+
35
+ .. code-block:: bash
36
+
37
+ # Top of main python script
38
+ import os
39
+ os.environ['PYOPENGL_PLATFORM'] = 'egl'
40
+
41
+ The handle for EGL is ``egl``, and the handle for OSMesa is ``osmesa``.
42
+
43
+ Running the Renderer
44
+ --------------------
45
+
46
+ Once you've set your environment variable appropriately, create your scene and
47
+ then configure the :class:`.OffscreenRenderer` object with a window width,
48
+ a window height, and a size for point-cloud points:
49
+
50
+ >>> r = pyrender.OffscreenRenderer(viewport_width=640,
51
+ ... viewport_height=480,
52
+ ... point_size=1.0)
53
+
54
+ Then, just call the :meth:`.OffscreenRenderer.render` function:
55
+
56
+ >>> color, depth = r.render(scene)
57
+
58
+ .. image:: /_static/scene.png
59
+
60
+ This will return a ``(w,h,3)`` channel floating-point color image and
61
+ a ``(w,h)`` floating-point depth image rendered from the scene's main camera.
62
+
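+ As a quick sanity check, you can turn the depth image into a foreground mask or
+ dump both buffers to disk. This is just a sketch; it assumes ``imageio`` is
+ installed and is not part of pyrender itself:
+
+ >>> import numpy as np
+ >>> import imageio
+ >>> mask = depth > 0  # pixels where some geometry was hit
+ >>> imageio.imwrite('color.png', np.asarray(color, dtype=np.uint8))
+ >>> np.save('depth.npy', depth)
+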
63
+ You can customize the rendering process by using flag options from
64
+ :class:`.RenderFlags` and bitwise or-ing them together. For example,
65
+ the following code renders a color image with an alpha channel
66
+ and enables shadow mapping for all directional lights:
67
+
68
+ >>> flags = RenderFlags.RGBA | RenderFlags.SHADOWS_DIRECTIONAL
69
+ >>> color, depth = r.render(scene, flags=flags)
70
+
71
+ Once you're done with the offscreen renderer, you need to close it before you
72
+ can run a different renderer or open the viewer for the same scene:
73
+
74
+ >>> r.delete()
75
+
76
+ Google CoLab Examples
77
+ ---------------------
78
+
79
+ For a minimal working example of offscreen rendering using OSMesa,
80
+ see the `OSMesa Google CoLab notebook`_.
81
+
82
+ .. _OSMesa Google CoLab notebook: https://colab.research.google.com/drive/1Z71mHIc-Sqval92nK290vAsHZRUkCjUx
83
+
84
+ For a minimal working example of offscreen rendering using EGL,
85
+ see the `EGL Google CoLab notebook`_.
86
+
87
+ .. _EGL Google CoLab notebook: https://colab.research.google.com/drive/1rTLHk0qxh4dn8KNe-mCnN8HAWdd2_BEh
pyrender/docs/source/examples/quickstart.rst ADDED
@@ -0,0 +1,71 @@
1
+ .. _quickstart_guide:
2
+
3
+ Quickstart
4
+ ==========
5
+
6
+
7
+ Minimal Example for 3D Viewer
8
+ -----------------------------
9
+ Here is a minimal example of loading and viewing a triangular mesh model
10
+ in pyrender.
11
+
12
+ >>> import trimesh
13
+ >>> import pyrender
14
+ >>> fuze_trimesh = trimesh.load('examples/models/fuze.obj')
15
+ >>> mesh = pyrender.Mesh.from_trimesh(fuze_trimesh)
16
+ >>> scene = pyrender.Scene()
17
+ >>> scene.add(mesh)
18
+ >>> pyrender.Viewer(scene, use_raymond_lighting=True)
19
+
20
+ .. image:: /_static/fuze.png
21
+
22
+
23
+ Minimal Example for Offscreen Rendering
24
+ ---------------------------------------
25
+ .. note::
26
+ If you're using a headless server, make sure that you followed the guide
27
+ for installing OSMesa. See :ref:`osmesa`.
28
+
29
+ Here is a minimal example of rendering a mesh model offscreen in pyrender.
30
+ The only additions you need to make are a light source and a camera.
31
+
32
+ >>> import numpy as np
33
+ >>> import trimesh
34
+ >>> import pyrender
35
+ >>> import matplotlib.pyplot as plt
36
+
37
+ >>> fuze_trimesh = trimesh.load('examples/models/fuze.obj')
38
+ >>> mesh = pyrender.Mesh.from_trimesh(fuze_trimesh)
39
+ >>> scene = pyrender.Scene()
40
+ >>> scene.add(mesh)
41
+ >>> camera = pyrender.PerspectiveCamera(yfov=np.pi / 3.0, aspectRatio=1.0)
42
+ >>> s = np.sqrt(2)/2
43
+ >>> camera_pose = np.array([
44
+ ... [0.0, -s, s, 0.3],
45
+ ... [1.0, 0.0, 0.0, 0.0],
46
+ ... [0.0, s, s, 0.35],
47
+ ... [0.0, 0.0, 0.0, 1.0],
48
+ ... ])
49
+ >>> scene.add(camera, pose=camera_pose)
50
+ >>> light = pyrender.SpotLight(color=np.ones(3), intensity=3.0,
51
+ ... innerConeAngle=np.pi/16.0,
52
+ ... outerConeAngle=np.pi/6.0)
53
+ >>> scene.add(light, pose=camera_pose)
54
+ >>> r = pyrender.OffscreenRenderer(400, 400)
55
+ >>> color, depth = r.render(scene)
56
+ >>> plt.figure()
57
+ >>> plt.subplot(1,2,1)
58
+ >>> plt.axis('off')
59
+ >>> plt.imshow(color)
60
+ >>> plt.subplot(1,2,2)
61
+ >>> plt.axis('off')
62
+ >>> plt.imshow(depth, cmap=plt.cm.gray_r)
63
+ >>> plt.show()
64
+
65
+ .. image:: /_static/minexcolor.png
66
+ :width: 45%
67
+ :align: left
68
+ .. image:: /_static/minexdepth.png
69
+ :width: 45%
70
+ :align: right
71
+
pyrender/docs/source/examples/scenes.rst ADDED
@@ -0,0 +1,78 @@
1
+ .. _scene_guide:
2
+
3
+ Creating Scenes
4
+ ===============
5
+
6
+ Before you render anything, you need to put all of your lights, cameras,
7
+ and meshes into a scene. The :class:`.Scene` object keeps track of the relative
8
+ poses of these primitives by inserting them into :class:`.Node` objects and
9
+ keeping them in a directed acyclic graph.
10
+
11
+ Adding Objects
12
+ --------------
13
+
14
+ To create a :class:`.Scene`, simply call the constructor. You can optionally
15
+ specify an ambient light color and a background color:
16
+
17
+ >>> scene = pyrender.Scene(ambient_light=[0.02, 0.02, 0.02],
18
+ ... bg_color=[1.0, 1.0, 1.0])
19
+
20
+ You can add objects to a scene by first creating a :class:`.Node` object
21
+ and adding the object and its pose to the :class:`.Node`. Poses are specified
22
+ as 4x4 homogeneous transformation matrices that are stored in the node's
23
+ :attr:`.Node.matrix` attribute. Note that the :class:`.Node`
24
+ constructor requires you to specify whether you're adding a mesh, light,
25
+ or camera.
26
+
27
+ >>> mesh = pyrender.Mesh.from_trimesh(tm)
28
+ >>> light = pyrender.PointLight(color=[1.0, 1.0, 1.0], intensity=2.0)
29
+ >>> cam = pyrender.PerspectiveCamera(yfov=np.pi / 3.0, aspectRatio=1.414)
30
+ >>> nm = pyrender.Node(mesh=mesh, matrix=np.eye(4))
31
+ >>> nl = pyrender.Node(light=light, matrix=np.eye(4))
32
+ >>> nc = pyrender.Node(camera=cam, matrix=np.eye(4))
33
+ >>> scene.add_node(nm)
34
+ >>> scene.add_node(nl)
35
+ >>> scene.add_node(nc)
36
+
37
+ You can also add objects directly to a scene with the :meth:`.Scene.add` function,
38
+ which takes care of creating a :class:`.Node` for you.
39
+
40
+ >>> scene.add(mesh, pose=np.eye(4))
41
+ >>> scene.add(light, pose=np.eye(4))
42
+ >>> scene.add(cam, pose=np.eye(4))
43
+
44
+ Nodes can be hierarchical, in which case the node's :attr:`.Node.matrix`
45
+ specifies that node's pose relative to its parent frame. You can add nodes to
46
+ a scene hierarchically by specifying a parent node in your calls to
47
+ :meth:`.Scene.add` or :meth:`.Scene.add_node`:
48
+
49
+ >>> scene.add_node(nl, parent_node=nc)
50
+ >>> scene.add(cam, parent_node=nm)
51
+
52
+ If you add multiple cameras to a scene, you can specify which one to render from
53
+ by setting the :attr:`.Scene.main_camera_node` attribute.
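+
+ For example, assuming the camera node ``nc`` created above is in the scene, you
+ can make it the active camera explicitly (a short sketch):
+
+ >>> scene.main_camera_node = nc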
54
+
55
+ Updating Objects
56
+ ----------------
57
+
58
+ You can update the poses of existing nodes with the :meth:`.Scene.set_pose`
59
+ function. Simply call it with a :class:`.Node` that is already in the scene
60
+ and the new pose of that node with respect to its parent as a 4x4 homogeneous
61
+ transformation matrix:
62
+
63
+ >>> scene.set_pose(nl, pose=np.eye(4))
64
+
65
+ If you want to get the local pose of a node, you can just access its
66
+ :attr:`.Node.matrix` attribute. However, if you want to get
67
+ the pose of a node *with respect to the world frame*, you can call the
68
+ :meth:`.Scene.get_pose` method.
69
+
70
+ >>> tf = scene.get_pose(nl)
71
+
72
+ Removing Objects
73
+ ----------------
74
+
75
+ Finally, you can remove a :class:`.Node` and all of its children from the
76
+ scene with the :meth:`.Scene.remove_node` function:
77
+
78
+ >>> scene.remove_node(nl)
pyrender/docs/source/examples/viewer.rst ADDED
@@ -0,0 +1,61 @@
1
+ .. _viewer_guide:
2
+
3
+ Live Scene Viewer
4
+ =================
5
+
6
+ Standard Usage
7
+ --------------
8
+ In addition to the offscreen renderer, Pyrender comes with a live scene viewer.
9
+ In its standard invocation, calling the :class:`.Viewer`'s constructor will
10
+ immediately pop a viewing window that you can navigate around in.
11
+
12
+ >>> pyrender.Viewer(scene)
13
+
14
+ By default, the viewer uses your scene's lighting. If you'd like to start with
15
+ some additional lighting that moves around with the camera, you can specify that
16
+ with:
17
+
18
+ >>> pyrender.Viewer(scene, use_raymond_lighting=True)
19
+
20
+ For a full list of the many options that the :class:`.Viewer` supports, check out its
21
+ documentation.
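+
+ As an illustration, a few of those options can be passed straight to the
+ constructor as keyword arguments (a sketch; the exact set of supported flags is
+ listed in the :class:`.Viewer` documentation):
+
+ >>> pyrender.Viewer(scene, viewport_size=(1024, 768),
+ ...                 use_raymond_lighting=True,
+ ...                 show_world_axis=True)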
22
+
23
+ .. image:: /_static/rotation.gif
24
+
25
+ Running the Viewer in a Separate Thread
26
+ ---------------------------------------
27
+ If you'd like to animate your models, you'll want to run the viewer in a
28
+ separate thread so that you can update the scene while the viewer is running.
29
+ To do this, first pop the viewer in a separate thread by calling its constructor
30
+ with the ``run_in_thread`` option set:
31
+
32
+ >>> v = pyrender.Viewer(scene, run_in_thread=True)
33
+
34
+ Then, you can manipulate the :class:`.Scene` while the viewer is running to
35
+ animate things. However, be careful to acquire the viewer's
36
+ :attr:`.Viewer.render_lock` before editing the scene to prevent data corruption:
37
+
38
+ >>> i = 0
39
+ >>> while True:
40
+ ... pose = np.eye(4)
41
+ ... pose[:3,3] = [i, 0, 0]
42
+ ... v.render_lock.acquire()
43
+ ... scene.set_pose(mesh_node, pose)
44
+ ... v.render_lock.release()
45
+ ... i += 0.01
46
+
47
+ .. image:: /_static/scissors.gif
48
+
49
+ You can wait on the viewer to be closed manually:
50
+
51
+ >>> while v.is_active:
52
+ ... pass
53
+
54
+ Or you can close it forcibly from the main thread.
55
+ Just make sure to keep looping until the viewer has actually exited before
56
+ using the scene object again.
57
+
58
+ >>> v.close_external()
59
+ >>> while v.is_active:
60
+ ... pass
61
+
pyrender/docs/source/index.rst ADDED
@@ -0,0 +1,41 @@
1
+ .. core documentation master file, created by
2
+ sphinx-quickstart on Sun Oct 16 14:33:48 2016.
3
+ You can adapt this file completely to your liking, but it should at least
4
+ contain the root `toctree` directive.
5
+
6
+ Pyrender Documentation
7
+ ========================
8
+ Pyrender is a pure Python (2.7, 3.4, 3.5, 3.6) library for physically-based
9
+ rendering and visualization.
10
+ It is designed to meet the glTF 2.0 specification_ from Khronos.
11
+
12
+ .. _specification: https://www.khronos.org/gltf/
13
+
14
+ Pyrender is lightweight, easy to install, and simple to use.
15
+ It comes packaged with both an intuitive scene viewer and a headache-free
16
+ offscreen renderer with support for GPU-accelerated rendering on headless
17
+ servers, which makes it perfect for machine learning applications.
18
+ Check out the :ref:`guide` for a full tutorial, or fork me on
19
+ Github_.
20
+
21
+ .. _Github: https://github.com/mmatl/pyrender
22
+
23
+ .. image:: _static/rotation.gif
24
+
25
+ .. image:: _static/damaged_helmet.png
26
+
27
+ .. toctree::
28
+ :maxdepth: 2
29
+
30
+ install/index.rst
31
+ examples/index.rst
32
+ api/index.rst
33
+
34
+
35
+ Indices and tables
36
+ ==================
37
+
38
+ * :ref:`genindex`
39
+ * :ref:`modindex`
40
+ * :ref:`search`
41
+
pyrender/docs/source/install/index.rst ADDED
@@ -0,0 +1,172 @@
1
+ Installation Guide
2
+ ==================
3
+
4
+ Python Installation
5
+ -------------------
6
+
7
+ This package is available via ``pip``.
8
+
9
+ .. code-block:: bash
10
+
11
+ pip install pyrender
12
+
13
+ If you're on MacOS, you'll need
14
+ to pre-install my fork of ``pyglet``, as the version on PyPI hasn't yet included
15
+ my change that enables OpenGL contexts on MacOS.
16
+
17
+ .. code-block:: bash
18
+
19
+ git clone https://github.com/mmatl/pyglet.git
20
+ cd pyglet
21
+ pip install .
22
+
23
+ .. _osmesa:
24
+
25
+ Getting Pyrender Working with OSMesa
26
+ ------------------------------------
27
+ If you want to render scenes offscreen but don't want to have to
28
+ install a display manager or deal with the pains of trying to get
29
+ OpenGL to work over SSH, you have two options.
30
+
31
+ The first (and preferred) option is using EGL, which enables you to perform
32
+ GPU-accelerated rendering on headless servers.
33
+ However, you'll need EGL 1.5 to get modern OpenGL contexts.
34
+ This comes packaged with NVIDIA's current drivers, but if you are having issues
35
+ getting EGL to work with your hardware, you can try using OSMesa,
36
+ a software-based offscreen renderer that is included with any Mesa
37
+ install.
38
+
39
+ If you want to use OSMesa with pyrender, you'll have to perform two additional
40
+ installation steps:
41
+
42
+ - :ref:`installmesa`
43
+ - :ref:`installpyopengl`
44
+
45
+ Then, read the offscreen rendering tutorial. See :ref:`offscreen_guide`.
46
+
47
+ .. _installmesa:
48
+
49
+ Installing OSMesa
50
+ *****************
51
+
52
+ As a first step, you'll need to rebuild and re-install Mesa with support
53
+ for fast offscreen rendering and OpenGL 3+ contexts.
54
+ I'd recommend installing from source, but you can also try my ``.deb``
55
+ for Ubuntu 16.04 and up.
56
+
57
+ Installing from a Debian Package
58
+ ********************************
59
+
60
+ If you're running Ubuntu 16.04 or newer, you should be able to install the
61
+ required version of Mesa from my ``.deb`` file.
62
+
63
+ .. code-block:: bash
64
+
65
+ sudo apt update
66
+ sudo wget https://github.com/mmatl/travis_debs/raw/master/xenial/mesa_18.3.3-0.deb
67
+ sudo dpkg -i ./mesa_18.3.3-0.deb || true
68
+ sudo apt install -f
69
+
70
+ If this doesn't work, try building from source.
71
+
72
+ Building From Source
73
+ ********************
74
+
75
+ First, install build dependencies via ``apt`` or your system's package manager.
76
+
77
+ .. code-block:: bash
78
+
79
+ sudo apt-get install llvm-6.0 freeglut3 freeglut3-dev
80
+
81
+ Then, download the current release of Mesa from here_.
82
+ Unpack the source and go to the source folder:
83
+
84
+ .. _here: https://archive.mesa3d.org/mesa-18.3.3.tar.gz
85
+
86
+ .. code-block:: bash
87
+
88
+ tar xfv mesa-18.3.3.tar.gz
89
+ cd mesa-18.3.3
90
+
91
+ Replace ``PREFIX`` with the path you want to install Mesa at.
92
+ If you're not worried about overwriting your default Mesa install,
93
+ a good place is at ``/usr/local``.
94
+
95
+ Now, configure the installation by running the following command:
96
+
97
+ .. code-block:: bash
98
+
99
+ ./configure --prefix=PREFIX \
100
+ --enable-opengl --disable-gles1 --disable-gles2 \
101
+ --disable-va --disable-xvmc --disable-vdpau \
102
+ --enable-shared-glapi \
103
+ --disable-texture-float \
104
+ --enable-gallium-llvm --enable-llvm-shared-libs \
105
+ --with-gallium-drivers=swrast,swr \
106
+ --disable-dri --with-dri-drivers= \
107
+ --disable-egl --with-egl-platforms= --disable-gbm \
108
+ --disable-glx \
109
+ --disable-osmesa --enable-gallium-osmesa \
110
+ ac_cv_path_LLVM_CONFIG=llvm-config-6.0
111
+
112
+ Now, build and install Mesa.
113
+
114
+ .. code-block:: bash
115
+
116
+ make -j8
117
+ make install
118
+
119
+ Finally, if you didn't install Mesa in the system path,
120
+ add the following lines to your ``~/.bashrc`` file after
121
+ changing ``MESA_HOME`` to your mesa installation path (i.e. what you used as
122
+ ``PREFIX`` during the configure command).
123
+
124
+ .. code-block:: bash
125
+
126
+ MESA_HOME=/path/to/your/mesa/installation
127
+ export LIBRARY_PATH=$LIBRARY_PATH:$MESA_HOME/lib
128
+ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$MESA_HOME/lib
129
+ export C_INCLUDE_PATH=$C_INCLUDE_PATH:$MESA_HOME/include/
130
+ export CPLUS_INCLUDE_PATH=$CPLUS_INCLUDE_PATH:$MESA_HOME/include/
131
+
132
+ .. _installpyopengl:
133
+
134
+ Installing a Compatible Fork of PyOpenGL
135
+ ****************************************
136
+
137
+ Next, install and use my fork of ``PyOpenGL``.
138
+ This fork enables getting modern OpenGL contexts with OSMesa.
139
+ My patch has been included in ``PyOpenGL``, but it has not yet been released
140
+ on PyPI.
141
+
142
+ .. code-block:: bash
143
+
144
+ git clone https://github.com/mmatl/pyopengl.git
145
+ pip install ./pyopengl
146
+
147
+
148
+ Building Documentation
149
+ ----------------------
150
+
151
+ The online documentation for ``pyrender`` is automatically built by Read The Docs.
152
+ Building ``pyrender``'s documentation locally requires a few extra dependencies --
153
+ specifically, `sphinx`_ and a few plugins.
154
+
155
+ .. _sphinx: http://www.sphinx-doc.org/en/master/
156
+
157
+ To install the required dependencies, simply change directories into the ``pyrender`` source and run
158
+
159
+ .. code-block:: bash
160
+
161
+ $ pip install .[docs]
162
+
163
+ Then, go to the ``docs`` directory and run ``make`` with the appropriate target.
164
+ For example,
165
+
166
+ .. code-block:: bash
167
+
168
+ $ cd docs/
169
+ $ make html
170
+
171
+ will generate a set of web pages. Any documentation files
172
+ generated in this manner can be found in ``docs/build``.
pyrender/examples/duck.py ADDED
@@ -0,0 +1,13 @@
1
+ from pyrender import Mesh, Scene, Viewer
2
+ from io import BytesIO
3
+ import numpy as np
4
+ import trimesh
5
+ import requests
6
+
7
+ duck_source = "https://github.com/KhronosGroup/glTF-Sample-Models/raw/master/2.0/Duck/glTF-Binary/Duck.glb"
8
+
9
+ duck = trimesh.load(BytesIO(requests.get(duck_source).content), file_type='glb')
10
+ duckmesh = Mesh.from_trimesh(list(duck.geometry.values())[0])
11
+ scene = Scene(ambient_light=np.array([1.0, 1.0, 1.0, 1.0]))
12
+ scene.add(duckmesh)
13
+ Viewer(scene)
pyrender/examples/example.py ADDED
@@ -0,0 +1,157 @@
1
+ """Examples of using pyrender for viewing and offscreen rendering.
2
+ """
3
+ import pyglet
4
+ pyglet.options['shadow_window'] = False
5
+ import os
6
+ import numpy as np
7
+ import trimesh
8
+
9
+ from pyrender import PerspectiveCamera,\
10
+ DirectionalLight, SpotLight, PointLight,\
11
+ MetallicRoughnessMaterial,\
12
+ Primitive, Mesh, Node, Scene,\
13
+ Viewer, OffscreenRenderer, RenderFlags
14
+
15
+ #==============================================================================
16
+ # Mesh creation
17
+ #==============================================================================
18
+
19
+ #------------------------------------------------------------------------------
20
+ # Creating textured meshes from trimeshes
21
+ #------------------------------------------------------------------------------
22
+
23
+ # Fuze trimesh
24
+ fuze_trimesh = trimesh.load('./models/fuze.obj')
25
+ fuze_mesh = Mesh.from_trimesh(fuze_trimesh)
26
+
27
+ # Drill trimesh
28
+ drill_trimesh = trimesh.load('./models/drill.obj')
29
+ drill_mesh = Mesh.from_trimesh(drill_trimesh)
30
+ drill_pose = np.eye(4)
31
+ drill_pose[0,3] = 0.1
32
+ drill_pose[2,3] = -np.min(drill_trimesh.vertices[:,2])
33
+
34
+ # Wood trimesh
35
+ wood_trimesh = trimesh.load('./models/wood.obj')
36
+ wood_mesh = Mesh.from_trimesh(wood_trimesh)
37
+
38
+ # Water bottle trimesh
39
+ bottle_gltf = trimesh.load('./models/WaterBottle.glb')
40
+ bottle_trimesh = bottle_gltf.geometry[list(bottle_gltf.geometry.keys())[0]]
41
+ bottle_mesh = Mesh.from_trimesh(bottle_trimesh)
42
+ bottle_pose = np.array([
43
+ [1.0, 0.0, 0.0, 0.1],
44
+ [0.0, 0.0, -1.0, -0.16],
45
+ [0.0, 1.0, 0.0, 0.13],
46
+ [0.0, 0.0, 0.0, 1.0],
47
+ ])
48
+
49
+ #------------------------------------------------------------------------------
50
+ # Creating meshes with per-vertex colors
51
+ #------------------------------------------------------------------------------
52
+ boxv_trimesh = trimesh.creation.box(extents=0.1*np.ones(3))
53
+ boxv_vertex_colors = np.random.uniform(size=(boxv_trimesh.vertices.shape))
54
+ boxv_trimesh.visual.vertex_colors = boxv_vertex_colors
55
+ boxv_mesh = Mesh.from_trimesh(boxv_trimesh, smooth=False)
56
+
57
+ #------------------------------------------------------------------------------
58
+ # Creating meshes with per-face colors
59
+ #------------------------------------------------------------------------------
60
+ boxf_trimesh = trimesh.creation.box(extents=0.1*np.ones(3))
61
+ boxf_face_colors = np.random.uniform(size=boxf_trimesh.faces.shape)
62
+ boxf_trimesh.visual.face_colors = boxf_face_colors
63
+ boxf_mesh = Mesh.from_trimesh(boxf_trimesh, smooth=False)
64
+
65
+ #------------------------------------------------------------------------------
66
+ # Creating meshes from point clouds
67
+ #------------------------------------------------------------------------------
68
+ points = trimesh.creation.icosphere(radius=0.05).vertices
69
+ point_colors = np.random.uniform(size=points.shape)
70
+ points_mesh = Mesh.from_points(points, colors=point_colors)
71
+
72
+ #==============================================================================
73
+ # Light creation
74
+ #==============================================================================
75
+
76
+ direc_l = DirectionalLight(color=np.ones(3), intensity=1.0)
77
+ spot_l = SpotLight(color=np.ones(3), intensity=10.0,
78
+ innerConeAngle=np.pi/16, outerConeAngle=np.pi/6)
79
+ point_l = PointLight(color=np.ones(3), intensity=10.0)
80
+
81
+ #==============================================================================
82
+ # Camera creation
83
+ #==============================================================================
84
+
85
+ cam = PerspectiveCamera(yfov=(np.pi / 3.0))
86
+ cam_pose = np.array([
87
+ [0.0, -np.sqrt(2)/2, np.sqrt(2)/2, 0.5],
88
+ [1.0, 0.0, 0.0, 0.0],
89
+ [0.0, np.sqrt(2)/2, np.sqrt(2)/2, 0.4],
90
+ [0.0, 0.0, 0.0, 1.0]
91
+ ])
92
+
93
+ #==============================================================================
94
+ # Scene creation
95
+ #==============================================================================
96
+
97
+ scene = Scene(ambient_light=np.array([0.02, 0.02, 0.02, 1.0]))
98
+
99
+ #==============================================================================
100
+ # Adding objects to the scene
101
+ #==============================================================================
102
+
103
+ #------------------------------------------------------------------------------
104
+ # By manually creating nodes
105
+ #------------------------------------------------------------------------------
106
+ fuze_node = Node(mesh=fuze_mesh, translation=np.array([0.1, 0.15, -np.min(fuze_trimesh.vertices[:,2])]))
107
+ scene.add_node(fuze_node)
108
+ boxv_node = Node(mesh=boxv_mesh, translation=np.array([-0.1, 0.10, 0.05]))
109
+ scene.add_node(boxv_node)
110
+ boxf_node = Node(mesh=boxf_mesh, translation=np.array([-0.1, -0.10, 0.05]))
111
+ scene.add_node(boxf_node)
112
+
113
+ #------------------------------------------------------------------------------
114
+ # By using the add() utility function
115
+ #------------------------------------------------------------------------------
116
+ drill_node = scene.add(drill_mesh, pose=drill_pose)
117
+ bottle_node = scene.add(bottle_mesh, pose=bottle_pose)
118
+ wood_node = scene.add(wood_mesh)
119
+ direc_l_node = scene.add(direc_l, pose=cam_pose)
120
+ spot_l_node = scene.add(spot_l, pose=cam_pose)
121
+
122
+ #==============================================================================
123
+ # Using the viewer with a default camera
124
+ #==============================================================================
125
+
126
+ v = Viewer(scene, shadows=True)
127
+
128
+ #==============================================================================
129
+ # Using the viewer with a pre-specified camera
130
+ #==============================================================================
131
+ cam_node = scene.add(cam, pose=cam_pose)
132
+ v = Viewer(scene, central_node=drill_node)
133
+
134
+ #==============================================================================
135
+ # Rendering offscreen from that camera
136
+ #==============================================================================
137
+
138
+ r = OffscreenRenderer(viewport_width=640*2, viewport_height=480*2)
139
+ color, depth = r.render(scene)
140
+
141
+ import matplotlib.pyplot as plt
142
+ plt.figure()
143
+ plt.imshow(color)
144
+ plt.show()
145
+
146
+ #==============================================================================
147
+ # Segmask rendering
148
+ #==============================================================================
149
+
150
+ nm = {node: 20*(i + 1) for i, node in enumerate(scene.mesh_nodes)}
151
+ seg = r.render(scene, RenderFlags.SEG, nm)[0]
152
+ plt.figure()
153
+ plt.imshow(seg)
154
+ plt.show()
155
+
156
+ r.delete()
157
+
pyrender/pyrender/__init__.py ADDED
@@ -0,0 +1,24 @@
1
+ from .camera import (Camera, PerspectiveCamera, OrthographicCamera,
2
+ IntrinsicsCamera)
3
+ from .light import Light, PointLight, DirectionalLight, SpotLight
4
+ from .sampler import Sampler
5
+ from .texture import Texture
6
+ from .material import Material, MetallicRoughnessMaterial
7
+ from .primitive import Primitive
8
+ from .mesh import Mesh
9
+ from .node import Node
10
+ from .scene import Scene
11
+ from .renderer import Renderer
12
+ from .viewer import Viewer
13
+ from .offscreen import OffscreenRenderer
14
+ from .version import __version__
15
+ from .constants import RenderFlags, TextAlign, GLTF
16
+
17
+ __all__ = [
18
+ 'Camera', 'PerspectiveCamera', 'OrthographicCamera', 'IntrinsicsCamera',
19
+ 'Light', 'PointLight', 'DirectionalLight', 'SpotLight',
20
+ 'Sampler', 'Texture', 'Material', 'MetallicRoughnessMaterial',
21
+ 'Primitive', 'Mesh', 'Node', 'Scene', 'Renderer', 'Viewer',
22
+ 'OffscreenRenderer', '__version__', 'RenderFlags', 'TextAlign',
23
+ 'GLTF'
24
+ ]
pyrender/pyrender/camera.py ADDED
@@ -0,0 +1,437 @@
1
+ """Virtual cameras compliant with the glTF 2.0 specification as described at
2
+ https://github.com/KhronosGroup/glTF/tree/master/specification/2.0#reference-camera
3
+
4
+ Author: Matthew Matl
5
+ """
6
+ import abc
7
+ import numpy as np
8
+ import six
9
+ import sys
10
+
11
+ from .constants import DEFAULT_Z_NEAR, DEFAULT_Z_FAR
12
+
13
+
14
+ @six.add_metaclass(abc.ABCMeta)
15
+ class Camera(object):
16
+ """Abstract base class for all cameras.
17
+
18
+ Note
19
+ ----
20
+ Camera poses are specified in the OpenGL format,
21
+ where the z axis points away from the view direction and the
22
+ x and y axes point to the right and up in the image plane, respectively.
23
+
24
+ Parameters
25
+ ----------
26
+ znear : float
27
+ The floating-point distance to the near clipping plane.
28
+ zfar : float
29
+ The floating-point distance to the far clipping plane.
30
+ ``zfar`` must be greater than ``znear``.
31
+ name : str, optional
32
+ The user-defined name of this object.
33
+ """
34
+
35
+ def __init__(self,
36
+ znear=DEFAULT_Z_NEAR,
37
+ zfar=DEFAULT_Z_FAR,
38
+ name=None):
39
+ self.name = name
40
+ self.znear = znear
41
+ self.zfar = zfar
42
+
43
+ @property
44
+ def name(self):
45
+ """str : The user-defined name of this object.
46
+ """
47
+ return self._name
48
+
49
+ @name.setter
50
+ def name(self, value):
51
+ if value is not None:
52
+ value = str(value)
53
+ self._name = value
54
+
55
+ @property
56
+ def znear(self):
57
+ """float : The distance to the near clipping plane.
58
+ """
59
+ return self._znear
60
+
61
+ @znear.setter
62
+ def znear(self, value):
63
+ value = float(value)
64
+ if value < 0:
65
+ raise ValueError('z-near must be >= 0.0')
66
+ self._znear = value
67
+
68
+ @property
69
+ def zfar(self):
70
+ """float : The distance to the far clipping plane.
71
+ """
72
+ return self._zfar
73
+
74
+ @zfar.setter
75
+ def zfar(self, value):
76
+ value = float(value)
77
+ if value <= 0 or value <= self.znear:
78
+ raise ValueError('zfar must be >0 and >znear')
79
+ self._zfar = value
80
+
81
+ @abc.abstractmethod
82
+ def get_projection_matrix(self, width=None, height=None):
83
+ """Return the OpenGL projection matrix for this camera.
84
+
85
+ Parameters
86
+ ----------
87
+ width : int
88
+ Width of the current viewport, in pixels.
89
+ height : int
90
+ Height of the current viewport, in pixels.
91
+ """
92
+ pass
93
+
94
+
95
+ class PerspectiveCamera(Camera):
96
+
97
+ """A perspective camera for perspective projection.
98
+
99
+ Parameters
100
+ ----------
101
+ yfov : float
102
+ The floating-point vertical field of view in radians.
103
+ znear : float
104
+ The floating-point distance to the near clipping plane.
105
+ If not specified, defaults to 0.05.
106
+ zfar : float, optional
107
+ The floating-point distance to the far clipping plane.
108
+ ``zfar`` must be greater than ``znear``.
109
+ If None, the camera uses an infinite projection matrix.
110
+ aspectRatio : float, optional
111
+ The floating-point aspect ratio of the field of view.
112
+ If not specified, the camera uses the viewport's aspect ratio.
113
+ name : str, optional
114
+ The user-defined name of this object.
115
+ """
116
+
117
+ def __init__(self,
118
+ yfov,
119
+ znear=DEFAULT_Z_NEAR,
120
+ zfar=None,
121
+ aspectRatio=None,
122
+ name=None):
123
+ super(PerspectiveCamera, self).__init__(
124
+ znear=znear,
125
+ zfar=zfar,
126
+ name=name,
127
+ )
128
+
129
+ self.yfov = yfov
130
+ self.aspectRatio = aspectRatio
131
+
132
+ @property
133
+ def yfov(self):
134
+ """float : The vertical field of view in radians.
135
+ """
136
+ return self._yfov
137
+
138
+ @yfov.setter
139
+ def yfov(self, value):
140
+ value = float(value)
141
+ if value <= 0.0:
142
+ raise ValueError('Field of view must be positive')
143
+ self._yfov = value
144
+
145
+ @property
146
+ def zfar(self):
147
+ """float : The distance to the far clipping plane.
148
+ """
149
+ return self._zfar
150
+
151
+ @zfar.setter
152
+ def zfar(self, value):
153
+ if value is not None:
154
+ value = float(value)
155
+ if value <= 0 or value <= self.znear:
156
+ raise ValueError('zfar must be >0 and >znear')
157
+ self._zfar = value
158
+
159
+ @property
160
+ def aspectRatio(self):
161
+ """float : The ratio of the width to the height of the field of view.
162
+ """
163
+ return self._aspectRatio
164
+
165
+ @aspectRatio.setter
166
+ def aspectRatio(self, value):
167
+ if value is not None:
168
+ value = float(value)
169
+ if value <= 0.0:
170
+ raise ValueError('Aspect ratio must be positive')
171
+ self._aspectRatio = value
172
+
173
+ def get_projection_matrix(self, width=None, height=None):
174
+ """Return the OpenGL projection matrix for this camera.
175
+
176
+ Parameters
177
+ ----------
178
+ width : int
179
+ Width of the current viewport, in pixels.
180
+ height : int
181
+ Height of the current viewport, in pixels.
182
+ """
183
+ aspect_ratio = self.aspectRatio
184
+ if aspect_ratio is None:
185
+ if width is None or height is None:
186
+ raise ValueError('Aspect ratio of camera must be defined')
187
+ aspect_ratio = float(width) / float(height)
188
+
189
+ a = aspect_ratio
190
+ t = np.tan(self.yfov / 2.0)
191
+ n = self.znear
192
+ f = self.zfar
193
+
194
+ P = np.zeros((4,4))
195
+ P[0][0] = 1.0 / (a * t)
196
+ P[1][1] = 1.0 / t
197
+ P[3][2] = -1.0
198
+
199
+ if f is None:
200
+ P[2][2] = -1.0
201
+ P[2][3] = -2.0 * n
202
+ else:
203
+ P[2][2] = (f + n) / (n - f)
204
+ P[2][3] = (2 * f * n) / (n - f)
205
+
206
+ return P
207
+
208
+
209
+ class OrthographicCamera(Camera):
210
+ """An orthographic camera for orthographic projection.
211
+
212
+ Parameters
213
+ ----------
214
+ xmag : float
215
+ The floating-point horizontal magnification of the view.
216
+ ymag : float
217
+ The floating-point vertical magnification of the view.
218
+ znear : float
219
+ The floating-point distance to the near clipping plane.
220
+ If not specified, defaults to 0.05.
221
+ zfar : float
222
+ The floating-point distance to the far clipping plane.
223
+ ``zfar`` must be greater than ``znear``.
224
+ If not specified, defaults to 100.0.
225
+ name : str, optional
226
+ The user-defined name of this object.
227
+ """
228
+
229
+ def __init__(self,
230
+ xmag,
231
+ ymag,
232
+ znear=DEFAULT_Z_NEAR,
233
+ zfar=DEFAULT_Z_FAR,
234
+ name=None):
235
+ super(OrthographicCamera, self).__init__(
236
+ znear=znear,
237
+ zfar=zfar,
238
+ name=name,
239
+ )
240
+
241
+ self.xmag = xmag
242
+ self.ymag = ymag
243
+
244
+ @property
245
+ def xmag(self):
246
+ """float : The horizontal magnification of the view.
247
+ """
248
+ return self._xmag
249
+
250
+ @xmag.setter
251
+ def xmag(self, value):
252
+ value = float(value)
253
+ if value <= 0.0:
254
+ raise ValueError('X magnification must be positive')
255
+ self._xmag = value
256
+
257
+ @property
258
+ def ymag(self):
259
+ """float : The vertical magnification of the view.
260
+ """
261
+ return self._ymag
262
+
263
+ @ymag.setter
264
+ def ymag(self, value):
265
+ value = float(value)
266
+ if value <= 0.0:
267
+ raise ValueError('Y magnification must be positive')
268
+ self._ymag = value
269
+
270
+ @property
271
+ def znear(self):
272
+ """float : The distance to the near clipping plane.
273
+ """
274
+ return self._znear
275
+
276
+ @znear.setter
277
+ def znear(self, value):
278
+ value = float(value)
279
+ if value <= 0:
280
+ raise ValueError('z-near must be > 0.0')
281
+ self._znear = value
282
+
283
+ def get_projection_matrix(self, width=None, height=None):
284
+ """Return the OpenGL projection matrix for this camera.
285
+
286
+ Parameters
287
+ ----------
288
+ width : int
289
+ Width of the current viewport, in pixels.
290
+ Unused in this function.
291
+ height : int
292
+ Height of the current viewport, in pixels.
293
+ Unused in this function.
294
+ """
295
+ xmag = self.xmag
296
+ ymag = self.ymag
297
+
298
+ # If screen width/height defined, rescale xmag
299
+ if width is not None and height is not None:
300
+ xmag = width / height * ymag
301
+
302
+ n = self.znear
303
+ f = self.zfar
304
+ P = np.zeros((4,4))
305
+ P[0][0] = 1.0 / xmag
306
+ P[1][1] = 1.0 / ymag
307
+ P[2][2] = 2.0 / (n - f)
308
+ P[2][3] = (f + n) / (n - f)
309
+ P[3][3] = 1.0
310
+ return P
311
+
312
+
313
+ class IntrinsicsCamera(Camera):
314
+ """A perspective camera with custom intrinsics.
315
+
316
+ Parameters
317
+ ----------
318
+ fx : float
319
+ X-axis focal length in pixels.
320
+ fy : float
321
+ Y-axis focal length in pixels.
322
+ cx : float
323
+ X-axis optical center in pixels.
324
+ cy : float
325
+ Y-axis optical center in pixels.
326
+ znear : float
327
+ The floating-point distance to the near clipping plane.
328
+ If not specified, defaults to 0.05.
329
+ zfar : float
330
+ The floating-point distance to the far clipping plane.
331
+ ``zfar`` must be greater than ``znear``.
332
+ If not specified, defaults to 100.0.
333
+ name : str, optional
334
+ The user-defined name of this object.
335
+ """
336
+
337
+ def __init__(self,
338
+ fx,
339
+ fy,
340
+ cx,
341
+ cy,
342
+ znear=DEFAULT_Z_NEAR,
343
+ zfar=DEFAULT_Z_FAR,
344
+ name=None):
345
+ super(IntrinsicsCamera, self).__init__(
346
+ znear=znear,
347
+ zfar=zfar,
348
+ name=name,
349
+ )
350
+
351
+ self.fx = fx
352
+ self.fy = fy
353
+ self.cx = cx
354
+ self.cy = cy
355
+
356
+ @property
357
+ def fx(self):
358
+ """float : X-axis focal length in meters.
359
+ """
360
+ return self._fx
361
+
362
+ @fx.setter
363
+ def fx(self, value):
364
+ self._fx = float(value)
365
+
366
+ @property
367
+ def fy(self):
368
+ """float : Y-axis focal length in meters.
369
+ """
370
+ return self._fy
371
+
372
+ @fy.setter
373
+ def fy(self, value):
374
+ self._fy = float(value)
375
+
376
+ @property
377
+ def cx(self):
378
+ """float : X-axis optical center in pixels.
379
+ """
380
+ return self._cx
381
+
382
+ @cx.setter
383
+ def cx(self, value):
384
+ self._cx = float(value)
385
+
386
+ @property
387
+ def cy(self):
388
+ """float : Y-axis optical center in pixels.
389
+ """
390
+ return self._cy
391
+
392
+ @cy.setter
393
+ def cy(self, value):
394
+ self._cy = float(value)
395
+
396
+ def get_projection_matrix(self, width, height):
397
+ """Return the OpenGL projection matrix for this camera.
398
+
399
+ Parameters
400
+ ----------
401
+ width : int
402
+ Width of the current viewport, in pixels.
403
+ height : int
404
+ Height of the current viewport, in pixels.
405
+ """
406
+ width = float(width)
407
+ height = float(height)
408
+
409
+ cx, cy = self.cx, self.cy
410
+ fx, fy = self.fx, self.fy
411
+ if sys.platform == 'darwin':
412
+ cx = self.cx * 2.0
413
+ cy = self.cy * 2.0
414
+ fx = self.fx * 2.0
415
+ fy = self.fy * 2.0
416
+
417
+ P = np.zeros((4,4))
418
+ P[0][0] = 2.0 * fx / width
419
+ P[1][1] = 2.0 * fy / height
420
+ P[0][2] = 1.0 - 2.0 * cx / width
421
+ P[1][2] = 2.0 * cy / height - 1.0
422
+ P[3][2] = -1.0
423
+
424
+ n = self.znear
425
+ f = self.zfar
426
+ if f is None:
427
+ P[2][2] = -1.0
428
+ P[2][3] = -2.0 * n
429
+ else:
430
+ P[2][2] = (f + n) / (n - f)
431
+ P[2][3] = (2 * f * n) / (n - f)
432
+
433
+ return P
434
+
435
+
436
+ __all__ = ['Camera', 'PerspectiveCamera', 'OrthographicCamera',
437
+ 'IntrinsicsCamera']
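+
+ # Usage sketch (illustration only, not part of the module): build an
+ # IntrinsicsCamera from pinhole parameters and inspect the resulting OpenGL
+ # projection matrix for a 640x480 viewport. The numbers below are made up.
+ #
+ #   cam = IntrinsicsCamera(fx=500.0, fy=500.0, cx=320.0, cy=240.0)
+ #   P = cam.get_projection_matrix(width=640, height=480)  # a (4,4) numpy array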
pyrender/pyrender/constants.py ADDED
@@ -0,0 +1,149 @@
1
+ DEFAULT_Z_NEAR = 0.05 # Near clipping plane, in meters
2
+ DEFAULT_Z_FAR = 100.0 # Far clipping plane, in meters
3
+ DEFAULT_SCENE_SCALE = 2.0 # Default scene scale
4
+ MAX_N_LIGHTS = 4 # Maximum number of lights of each type allowed
5
+ TARGET_OPEN_GL_MAJOR = 4 # Target OpenGL Major Version
6
+ TARGET_OPEN_GL_MINOR = 1 # Target OpenGL Minor Version
7
+ MIN_OPEN_GL_MAJOR = 3 # Minimum OpenGL Major Version
8
+ MIN_OPEN_GL_MINOR = 3 # Minimum OpenGL Minor Version
9
+ FLOAT_SZ = 4 # Byte size of GL float32
10
+ UINT_SZ = 4 # Byte size of GL uint32
11
+ SHADOW_TEX_SZ = 2048 # Width and Height of Shadow Textures
12
+ TEXT_PADDING = 20 # Width of padding for rendering text (px)
13
+
14
+
15
+ # Flags for render type
16
+ class RenderFlags(object):
17
+ """Flags for rendering in the scene.
18
+
19
+ Combine them with the bitwise or. For example,
20
+
21
+ >>> flags = OFFSCREEN | SHADOWS_DIRECTIONAL | VERTEX_NORMALS
22
+
23
+ would result in an offscreen render with directional shadows and
24
+ vertex normals enabled.
25
+ """
26
+ NONE = 0
27
+ """Normal PBR Render."""
28
+ DEPTH_ONLY = 1
29
+ """Only render the depth buffer."""
30
+ OFFSCREEN = 2
31
+ """Render offscreen and return the depth and (optionally) color buffers."""
32
+ FLIP_WIREFRAME = 4
33
+ """Invert the status of wireframe rendering for each mesh."""
34
+ ALL_WIREFRAME = 8
35
+ """Render all meshes as wireframes."""
36
+ ALL_SOLID = 16
37
+ """Render all meshes as solids."""
38
+ SHADOWS_DIRECTIONAL = 32
39
+ """Render shadows for directional lights."""
40
+ SHADOWS_POINT = 64
41
+ """Render shadows for point lights."""
42
+ SHADOWS_SPOT = 128
43
+ """Render shadows for spot lights."""
44
+ SHADOWS_ALL = 32 | 64 | 128
45
+ """Render shadows for all lights."""
46
+ VERTEX_NORMALS = 256
47
+ """Render vertex normals."""
48
+ FACE_NORMALS = 512
49
+ """Render face normals."""
50
+ SKIP_CULL_FACES = 1024
51
+ """Do not cull back faces."""
52
+ RGBA = 2048
53
+ """Render the color buffer with the alpha channel enabled."""
54
+ FLAT = 4096
55
+ """Render the color buffer flat, with no lighting computations."""
56
+ SEG = 8192
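+ """Render a segmentation image, coloring each mesh node with the value given in the node map passed to ``render``."""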
57
+
58
+
59
+ class TextAlign:
60
+ """Text alignment options for captions.
61
+
62
+ Only use one at a time.
63
+ """
64
+ CENTER = 0
65
+ """Center the text by width and height."""
66
+ CENTER_LEFT = 1
67
+ """Center the text by height and left-align it."""
68
+ CENTER_RIGHT = 2
69
+ """Center the text by height and right-align it."""
70
+ BOTTOM_LEFT = 3
71
+ """Put the text in the bottom-left corner."""
72
+ BOTTOM_RIGHT = 4
73
+ """Put the text in the bottom-right corner."""
74
+ BOTTOM_CENTER = 5
75
+ """Center the text by width and fix it to the bottom."""
76
+ TOP_LEFT = 6
77
+ """Put the text in the top-left corner."""
78
+ TOP_RIGHT = 7
79
+ """Put the text in the top-right corner."""
80
+ TOP_CENTER = 8
81
+ """Center the text by width and fix it to the top."""
82
+
83
+
84
+ class GLTF(object):
85
+ """Options for GL objects."""
86
+ NEAREST = 9728
87
+ """Nearest neighbor interpolation."""
88
+ LINEAR = 9729
89
+ """Linear interpolation."""
90
+ NEAREST_MIPMAP_NEAREST = 9984
91
+ """Nearest mipmapping."""
92
+ LINEAR_MIPMAP_NEAREST = 9985
93
+ """Linear mipmapping."""
94
+ NEAREST_MIPMAP_LINEAR = 9986
95
+ """Nearest mipmapping."""
96
+ LINEAR_MIPMAP_LINEAR = 9987
97
+ """Linear mipmapping."""
98
+ CLAMP_TO_EDGE = 33071
99
+ """Clamp to the edge of the texture."""
100
+ MIRRORED_REPEAT = 33648
101
+ """Mirror the texture."""
102
+ REPEAT = 10497
103
+ """Repeat the texture."""
104
+ POINTS = 0
105
+ """Render as points."""
106
+ LINES = 1
107
+ """Render as lines."""
108
+ LINE_LOOP = 2
109
+ """Render as a line loop."""
110
+ LINE_STRIP = 3
111
+ """Render as a line strip."""
112
+ TRIANGLES = 4
113
+ """Render as triangles."""
114
+ TRIANGLE_STRIP = 5
115
+ """Render as a triangle strip."""
116
+ TRIANGLE_FAN = 6
117
+ """Render as a triangle fan."""
118
+
119
+
120
+ class BufFlags(object):
121
+ POSITION = 0
122
+ NORMAL = 1
123
+ TANGENT = 2
124
+ TEXCOORD_0 = 4
125
+ TEXCOORD_1 = 8
126
+ COLOR_0 = 16
127
+ JOINTS_0 = 32
128
+ WEIGHTS_0 = 64
129
+
130
+
131
+ class TexFlags(object):
132
+ NONE = 0
133
+ NORMAL = 1
134
+ OCCLUSION = 2
135
+ EMISSIVE = 4
136
+ BASE_COLOR = 8
137
+ METALLIC_ROUGHNESS = 16
138
+ DIFFUSE = 32
139
+ SPECULAR_GLOSSINESS = 64
140
+
141
+
142
+ class ProgramFlags:
143
+ NONE = 0
144
+ USE_MATERIAL = 1
145
+ VERTEX_NORMALS = 2
146
+ FACE_NORMALS = 4
147
+
148
+
149
+ __all__ = ['RenderFlags', 'TextAlign', 'GLTF']
pyrender/pyrender/font.py ADDED
@@ -0,0 +1,272 @@
1
+ """Font texture loader and processor.
2
+
3
+ Author: Matthew Matl
4
+ """
5
+ import freetype
6
+ import numpy as np
7
+ import os
8
+
9
+ import OpenGL
10
+ from OpenGL.GL import *
11
+
12
+ from .constants import TextAlign, FLOAT_SZ
13
+ from .texture import Texture
14
+ from .sampler import Sampler
15
+
16
+
17
+ class FontCache(object):
18
+ """A cache for fonts.
19
+ """
20
+
21
+ def __init__(self, font_dir=None):
22
+ self._font_cache = {}
23
+ self.font_dir = font_dir
24
+ if self.font_dir is None:
25
+ base_dir, _ = os.path.split(os.path.realpath(__file__))
26
+ self.font_dir = os.path.join(base_dir, 'fonts')
27
+
28
+ def get_font(self, font_name, font_pt):
29
+ # If it's a file, load it directly, else, try to load from font dir.
30
+ if os.path.isfile(font_name):
31
+ font_filename = font_name
32
+ _, font_name = os.path.split(font_name)
33
+ font_name, _ = os.path.splitext(font_name)
34
+ else:
35
+ font_filename = os.path.join(self.font_dir, font_name) + '.ttf'
36
+
37
+ cid = OpenGL.contextdata.getContext()
38
+ key = (cid, font_name, int(font_pt))
39
+
40
+ if key not in self._font_cache:
41
+ self._font_cache[key] = Font(font_filename, font_pt)
42
+ return self._font_cache[key]
43
+
44
+ def clear(self):
45
+ for key in self._font_cache:
46
+ self._font_cache[key].delete()
47
+ self._font_cache = {}
48
+
49
+
50
+ class Character(object):
51
+ """A single character, with its texture and attributes.
52
+ """
53
+
54
+ def __init__(self, texture, size, bearing, advance):
55
+ self.texture = texture
56
+ self.size = size
57
+ self.bearing = bearing
58
+ self.advance = advance
59
+
60
+
61
+ class Font(object):
62
+ """A font object.
63
+
64
+ Parameters
65
+ ----------
66
+ font_file : str
67
+ The file to load the font from.
68
+ font_pt : int
69
+ The height of the font in pixels.
70
+ """
71
+
72
+ def __init__(self, font_file, font_pt=40):
73
+ self.font_file = font_file
74
+ self.font_pt = int(font_pt)
75
+ self._face = freetype.Face(font_file)
76
+ self._face.set_pixel_sizes(0, font_pt)
77
+ self._character_map = {}
78
+
79
+ for i in range(0, 128):
80
+
81
+ # Generate texture
82
+ face = self._face
83
+ face.load_char(chr(i))
84
+ buf = face.glyph.bitmap.buffer
85
+ src = (np.array(buf) / 255.0).astype(np.float32)
86
+ src = src.reshape((face.glyph.bitmap.rows,
87
+ face.glyph.bitmap.width))
88
+ tex = Texture(
89
+ sampler=Sampler(
90
+ magFilter=GL_LINEAR,
91
+ minFilter=GL_LINEAR,
92
+ wrapS=GL_CLAMP_TO_EDGE,
93
+ wrapT=GL_CLAMP_TO_EDGE
94
+ ),
95
+ source=src,
96
+ source_channels='R',
97
+ )
98
+ character = Character(
99
+ texture=tex,
100
+ size=np.array([face.glyph.bitmap.width,
101
+ face.glyph.bitmap.rows]),
102
+ bearing=np.array([face.glyph.bitmap_left,
103
+ face.glyph.bitmap_top]),
104
+ advance=face.glyph.advance.x
105
+ )
106
+ self._character_map[chr(i)] = character
107
+
108
+ self._vbo = None
109
+ self._vao = None
110
+
111
+ @property
112
+ def font_file(self):
113
+ """str : The file the font was loaded from.
114
+ """
115
+ return self._font_file
116
+
117
+ @font_file.setter
118
+ def font_file(self, value):
119
+ self._font_file = value
120
+
121
+ @property
122
+ def font_pt(self):
123
+ """int : The height of the font in pixels.
124
+ """
125
+ return self._font_pt
126
+
127
+ @font_pt.setter
128
+ def font_pt(self, value):
129
+ self._font_pt = int(value)
130
+
131
+ def _add_to_context(self):
132
+
133
+ self._vao = glGenVertexArrays(1)
134
+ glBindVertexArray(self._vao)
135
+ self._vbo = glGenBuffers(1)
136
+ glBindBuffer(GL_ARRAY_BUFFER, self._vbo)
137
+ glBufferData(GL_ARRAY_BUFFER, FLOAT_SZ * 6 * 4, None, GL_DYNAMIC_DRAW)
138
+ glEnableVertexAttribArray(0)
139
+ glVertexAttribPointer(
140
+ 0, 4, GL_FLOAT, GL_FALSE, 4 * FLOAT_SZ, ctypes.c_void_p(0)
141
+ )
142
+ glBindVertexArray(0)
143
+
144
+ glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
145
+ for c in self._character_map:
146
+ ch = self._character_map[c]
147
+ if not ch.texture._in_context():
148
+ ch.texture._add_to_context()
149
+
150
+ def _remove_from_context(self):
151
+ for c in self._character_map:
152
+ ch = self._character_map[c]
153
+ ch.texture.delete()
154
+ if self._vao is not None:
155
+ glDeleteVertexArrays(1, [self._vao])
156
+ glDeleteBuffers(1, [self._vbo])
157
+ self._vao = None
158
+ self._vbo = None
159
+
160
+ def _in_context(self):
161
+ return self._vao is not None
162
+
163
+ def _bind(self):
164
+ glBindVertexArray(self._vao)
165
+
166
+ def _unbind(self):
167
+ glBindVertexArray(0)
168
+
169
+ def delete(self):
170
+ self._unbind()
171
+ self._remove_from_context()
172
+
173
+ def render_string(self, text, x, y, scale=1.0,
174
+ align=TextAlign.BOTTOM_LEFT):
175
+ """Render a string to the current view buffer.
176
+
177
+ Note
178
+ ----
179
+ Assumes correct shader program already bound w/ uniforms set.
180
+
181
+ Parameters
182
+ ----------
183
+ text : str
184
+ The text to render.
185
+ x : int
186
+ Horizontal pixel location of text.
187
+ y : int
188
+ Vertical pixel location of text.
189
+ scale : int
190
+ Scaling factor for text.
191
+ align : int
192
+ One of the TextAlign options which specifies where the ``x``
193
+ and ``y`` parameters lie on the text. For example,
194
+ :attr:`.TextAlign.BOTTOM_LEFT` means that ``x`` and ``y`` indicate
195
+ the position of the bottom-left corner of the textbox.
196
+ """
197
+ glActiveTexture(GL_TEXTURE0)
198
+ glEnable(GL_BLEND)
199
+ glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
200
+ glDisable(GL_DEPTH_TEST)
201
+ glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
202
+ self._bind()
203
+
204
+ # Determine width and height of text relative to x, y
205
+ width = 0.0
206
+ height = 0.0
207
+ for c in text:
208
+ ch = self._character_map[c]
209
+ height = max(height, ch.bearing[1] * scale)
210
+ width += (ch.advance >> 6) * scale
211
+
212
+ # Determine offsets based on alignments
213
+ xoff = 0
214
+ yoff = 0
215
+ if align == TextAlign.BOTTOM_RIGHT:
216
+ xoff = -width
217
+ elif align == TextAlign.BOTTOM_CENTER:
218
+ xoff = -width / 2.0
219
+ elif align == TextAlign.TOP_LEFT:
220
+ yoff = -height
221
+ elif align == TextAlign.TOP_RIGHT:
222
+ yoff = -height
223
+ xoff = -width
224
+ elif align == TextAlign.TOP_CENTER:
225
+ yoff = -height
226
+ xoff = -width / 2.0
227
+ elif align == TextAlign.CENTER:
228
+ xoff = -width / 2.0
229
+ yoff = -height / 2.0
230
+ elif align == TextAlign.CENTER_LEFT:
231
+ yoff = -height / 2.0
232
+ elif align == TextAlign.CENTER_RIGHT:
233
+ xoff = -width
234
+ yoff = -height / 2.0
235
+
236
+ x += xoff
237
+ y += yoff
238
+
239
+ ch = None
240
+ for c in text:
241
+ ch = self._character_map[c]
242
+ xpos = x + ch.bearing[0] * scale
243
+ ypos = y - (ch.size[1] - ch.bearing[1]) * scale
244
+ w = ch.size[0] * scale
245
+ h = ch.size[1] * scale
246
+
247
+ vertices = np.array([
248
+ [xpos, ypos, 0.0, 0.0],
249
+ [xpos + w, ypos, 1.0, 0.0],
250
+ [xpos + w, ypos + h, 1.0, 1.0],
251
+ [xpos + w, ypos + h, 1.0, 1.0],
252
+ [xpos, ypos + h, 0.0, 1.0],
253
+ [xpos, ypos, 0.0, 0.0],
254
+ ], dtype=np.float32)
255
+
256
+ ch.texture._bind()
257
+
258
+ glBindBuffer(GL_ARRAY_BUFFER, self._vbo)
259
+ glBufferData(
260
+ GL_ARRAY_BUFFER, FLOAT_SZ * 6 * 4, vertices, GL_DYNAMIC_DRAW
261
+ )
262
+ # TODO MAKE THIS MORE EFFICIENT, lgBufferSubData is broken
263
+ # glBufferSubData(
264
+ # GL_ARRAY_BUFFER, 0, 6 * 4 * FLOAT_SZ,
265
+ # np.ascontiguousarray(vertices.flatten)
266
+ # )
267
+ glDrawArrays(GL_TRIANGLES, 0, 6)
268
+ x += (ch.advance >> 6) * scale
269
+
270
+ self._unbind()
271
+ if ch:
272
+ ch.texture._unbind()
pyrender/pyrender/fonts/OpenSans-Bold.ttf ADDED
Binary file (225 kB). View file
 
pyrender/pyrender/fonts/OpenSans-BoldItalic.ttf ADDED
Binary file (213 kB). View file
 
pyrender/pyrender/fonts/OpenSans-ExtraBold.ttf ADDED
Binary file (223 kB). View file
 
pyrender/pyrender/fonts/OpenSans-ExtraBoldItalic.ttf ADDED
Binary file (213 kB). View file
 
pyrender/pyrender/fonts/OpenSans-Italic.ttf ADDED
Binary file (213 kB). View file
 
pyrender/pyrender/fonts/OpenSans-Light.ttf ADDED
Binary file (222 kB). View file
 
pyrender/pyrender/fonts/OpenSans-LightItalic.ttf ADDED
Binary file (213 kB). View file