Nadine Rueegg committed
Commit 432392d • 1 Parent(s): d847241

use gpu as default
app.py DELETED
@@ -1,269 +0,0 @@
-
-# python gradio_demo/barc_demo_v3.py
-
-import numpy as np
-import os
-import glob
-import torch
-from torch.utils.data import DataLoader
-import torchvision
-from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
-import torchvision.transforms as T
-import cv2
-from matplotlib import pyplot as plt
-from PIL import Image
-
-import gradio as gr
-
-import sys
-sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../', 'src'))
-from stacked_hourglass.datasets.imgcropslist import ImgCrops
-from combined_model.train_main_image_to_3d_withbreedrel import do_visual_epoch
-from combined_model.model_shape_v7 import ModelImageTo3d_withshape_withproj
-
-from configs.barc_cfg_defaults import get_cfg_global_updated
-
-
-def get_prediction(model, img_path_or_img, confidence=0.5):
-    """
-    see https://haochen23.github.io/2020/04/object-detection-faster-rcnn.html#.YsMCm4TP3-g
-    get_prediction
-    parameters:
-      - img_path_or_img - path of the input image (or the image itself)
-      - confidence - threshold value for the prediction score
-    method:
-      - the image is obtained from the image path
-      - the image is converted to an image tensor using PyTorch's transforms
-      - the image is passed through the model to get the predictions
-      - classes and box coordinates are obtained, but only predictions with
-        a score > threshold are kept
-    """
-    if isinstance(img_path_or_img, str):
-        img = Image.open(img_path_or_img).convert('RGB')
-    else:
-        img = img_path_or_img
-    transform = T.Compose([T.ToTensor()])
-    img = transform(img)
-    pred = model([img])
-    # pred_class = [COCO_INSTANCE_CATEGORY_NAMES[i] for i in list(pred[0]['labels'].numpy())]
-    pred_class = list(pred[0]['labels'].numpy())
-    pred_boxes = [[(int(i[0]), int(i[1])), (int(i[2]), int(i[3]))] for i in list(pred[0]['boxes'].detach().numpy())]
-    pred_score = list(pred[0]['scores'].detach().numpy())
-    try:
-        pred_t = [pred_score.index(x) for x in pred_score if x > confidence][-1]
-        pred_boxes = pred_boxes[:pred_t+1]
-        pred_class = pred_class[:pred_t+1]
-        return pred_boxes, pred_class, pred_score
-    except:
-        print('no bounding box with a sufficiently high score found! -> work on full image')
-        return None, None, None
-
-def detect_object(model, img_path_or_img, confidence=0.5, rect_th=2, text_size=0.5, text_th=1):
-    """
-    see https://haochen23.github.io/2020/04/object-detection-faster-rcnn.html#.YsMCm4TP3-g
-    object_detection_api
-    parameters:
-      - img_path_or_img - path of the input image (or the image itself)
-      - confidence - threshold value for the prediction score
-      - rect_th - thickness of the bounding box
-      - text_size - size of the class label text
-      - text_th - thickness of the text
-    method:
-      - the prediction is obtained from the get_prediction method
-      - for each prediction, the bounding box is drawn and the text is
-        written with OpenCV
-      - the final image is displayed
-    """
-    boxes, pred_cls, pred_scores = get_prediction(model, img_path_or_img, confidence)
-    if isinstance(img_path_or_img, str):
-        img = cv2.imread(img_path_or_img)
-        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
-    else:
-        img = img_path_or_img
-    is_first = True
-    bbox = None
-    if boxes is not None:
-        for i in range(len(boxes)):
-            cls = pred_cls[i]
-            if cls == 18 and bbox is None:
-                cv2.rectangle(img, boxes[i][0], boxes[i][1], color=(0, 255, 0), thickness=rect_th)
-                # cv2.putText(img, pred_cls[i], boxes[i][0], cv2.FONT_HERSHEY_SIMPLEX, text_size, (0,255,0), thickness=text_th)
-                cv2.putText(img, str(pred_scores[i]), boxes[i][0], cv2.FONT_HERSHEY_SIMPLEX, text_size, (0,255,0), thickness=text_th)
-                bbox = boxes[i]
-    return img, bbox
-
-
-def run_bbox_inference(input_image):
-    model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
-    model.eval()
-    out_path = os.path.join(cfg.paths.ROOT_OUT_PATH, 'gradio_examples', 'test2.png')
-    img, bbox = detect_object(model=model, img_path_or_img=input_image, confidence=0.5)
-    fig = plt.figure()  # plt.figure(figsize=(20,30))
-    plt.imsave(out_path, img)
-    return img, bbox
-
-
-def run_barc_inference(input_image, bbox=None):
-
-    # load configs
-    cfg = get_cfg_global_updated()
-
-    model_file_complete = os.path.join(cfg.paths.ROOT_CHECKPOINT_PATH, 'barc_complete', 'model_best.pth.tar')
-
-    # Select the hardware device to use for inference.
-    if torch.cuda.is_available() and cfg.device == 'cuda':
-        device = torch.device('cuda', torch.cuda.current_device())
-        # torch.backends.cudnn.benchmark = True
-    else:
-        device = torch.device('cpu')
-
-    path_model_file_complete = os.path.join(cfg.paths.ROOT_CHECKPOINT_PATH, model_file_complete)
-
-    # Disable gradient calculations.
-    torch.set_grad_enabled(False)
-
-    # prepare complete model
-    complete_model = ModelImageTo3d_withshape_withproj(
-        num_stage_comb=cfg.params.NUM_STAGE_COMB, num_stage_heads=cfg.params.NUM_STAGE_HEADS,
-        num_stage_heads_pose=cfg.params.NUM_STAGE_HEADS_POSE, trans_sep=cfg.params.TRANS_SEP,
-        arch=cfg.params.ARCH, n_joints=cfg.params.N_JOINTS, n_classes=cfg.params.N_CLASSES,
-        n_keyp=cfg.params.N_KEYP, n_bones=cfg.params.N_BONES, n_betas=cfg.params.N_BETAS, n_betas_limbs=cfg.params.N_BETAS_LIMBS,
-        n_breeds=cfg.params.N_BREEDS, n_z=cfg.params.N_Z, image_size=cfg.params.IMG_SIZE,
-        silh_no_tail=cfg.params.SILH_NO_TAIL, thr_keyp_sc=cfg.params.KP_THRESHOLD, add_z_to_3d_input=cfg.params.ADD_Z_TO_3D_INPUT,
-        n_segbps=cfg.params.N_SEGBPS, add_segbps_to_3d_input=cfg.params.ADD_SEGBPS_TO_3D_INPUT, add_partseg=cfg.params.ADD_PARTSEG, n_partseg=cfg.params.N_PARTSEG,
-        fix_flength=cfg.params.FIX_FLENGTH, structure_z_to_betas=cfg.params.STRUCTURE_Z_TO_B, structure_pose_net=cfg.params.STRUCTURE_POSE_NET,
-        nf_version=cfg.params.NF_VERSION)
-
-    # load trained model
-    print(path_model_file_complete)
-    assert os.path.isfile(path_model_file_complete)
-    print('Loading model weights from file: {}'.format(path_model_file_complete))
-    checkpoint_complete = torch.load(path_model_file_complete)
-    state_dict_complete = checkpoint_complete['state_dict']
-    complete_model.load_state_dict(state_dict_complete, strict=False)
-    complete_model = complete_model.to(device)
-
-    save_imgs_path = os.path.join(cfg.paths.ROOT_OUT_PATH, 'gradio_examples')
-    if not os.path.exists(save_imgs_path):
-        os.makedirs(save_imgs_path)
-
-    input_image_list = [input_image]
-    if bbox is not None:
-        input_bbox_list = [bbox]
-    else:
-        input_bbox_list = None
-    val_dataset = ImgCrops(image_list=input_image_list, bbox_list=input_bbox_list, dataset_mode='complete')
-    test_name_list = val_dataset.test_name_list
-    val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False,
-                            num_workers=0, pin_memory=True, drop_last=False)
-
-    # run visual evaluation
-    # remark: take ACC_JOINTS and DATA_INFO from StanExt as this is the training dataset
-    all_results = do_visual_epoch(val_loader, complete_model, device,
-                                  ImgCrops.DATA_INFO,
-                                  weight_dict=None,
-                                  acc_joints=ImgCrops.ACC_JOINTS,
-                                  save_imgs_path=None,  # save_imgs_path,
-                                  metrics='all',
-                                  test_name_list=test_name_list,
-                                  render_all=cfg.params.RENDER_ALL,
-                                  pck_thresh=cfg.params.PCK_THRESH,
-                                  return_results=True)
-
-    mesh = all_results[0]['mesh_posed']
-    result_path = os.path.join(save_imgs_path, test_name_list[0] + '_z')
-
-    mesh.apply_transform([[-1, 0, 0, 0],
-                          [0, -1, 0, 0],
-                          [0, 0, 1, 1],
-                          [0, 0, 0, 1]])
-    mesh.export(file_obj=result_path + '.glb')
-    result_gltf = result_path + '.glb'
-    return [result_gltf, result_gltf]
-
-
-def run_complete_inference(input_image):
-
-    output_interm_image, output_interm_bbox = run_bbox_inference(input_image.copy())
-
-    print(output_interm_bbox)
-
-    # output_image = run_barc_inference(input_image)
-    output_image = run_barc_inference(input_image, output_interm_bbox)
-
-    return output_image
-
-
-# demo = gr.Interface(run_barc_inference, gr.Image(), "image")
-# demo = gr.Interface(run_complete_inference, gr.Image(), "image")
-
-# see: https://huggingface.co/spaces/radames/PIFu-Clothed-Human-Digitization/blob/main/PIFu/spaces.py
-
-description = '''
-# BARC (old)
-
-#### Project Page
-* https://barc.is.tue.mpg.de/
-
-#### Description
-This is a demo for BARC. While BARC is trained on image crops, this demo uses a pretrained Faster R-CNN to obtain bounding boxes for the dogs.
-To see your result you may have to wait a minute or two; please be patient.
-
-<details>
-
-<summary>More</summary>
-
-#### Citation
-
-```
-@inproceedings{BARC:2022,
-  title = {{BARC}: Learning to Regress {3D} Dog Shape from Images by Exploiting Breed Information},
-  author = {Rueegg, Nadine and Zuffi, Silvia and Schindler, Konrad and Black, Michael J.},
-  booktitle = {Proceedings IEEE Conf. on Computer Vision and Pattern Recognition (CVPR)},
-  year = {2022}
-}
-```
-
-</details>
-'''
-
-examples = sorted(glob.glob(os.path.join(os.path.dirname(__file__), '../', 'datasets', 'test_image_crops', '*.jpg')) + glob.glob(os.path.join(os.path.dirname(__file__), '../', 'datasets', 'test_image_crops', '*.png')))
-
-demo = gr.Interface(
-    fn=run_complete_inference,
-    description=description,
-    # inputs=gr.Image(type="filepath", label="Input Image"),
-    inputs=gr.Image(label="Input Image"),
-    outputs=[
-        gr.Model3D(
-            clear_color=[0.0, 0.0, 0.0, 0.0], label="3D Model"),
-        gr.File(label="Download 3D Model")
-    ],
-    examples=examples,
-    thumbnail="barc_thumbnail.png",
-    allow_flagging="never",
-    cache_examples=True
-)
-
-demo.launch(share=True)
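Both deleted demos export the posed dog mesh through `apply_transform` followed by `export(file_obj=...)`. Below is a minimal, self-contained sketch of that export step, assuming the mesh is a `trimesh.Trimesh` (those method names match trimesh's API; the box mesh and output path are placeholders, not part of the commit). Negating x and y is a 180° rotation about z, and the last column of the matrix shifts the mesh one unit along z.

```python
# Hedged sketch of the mesh-export step from the deleted demos (trimesh assumed).
import numpy as np
import trimesh

flip_xy_shift_z = np.array([[-1,  0, 0, 0],
                            [ 0, -1, 0, 0],
                            [ 0,  0, 1, 1],
                            [ 0,  0, 0, 1]], dtype=float)

mesh = trimesh.creation.box()          # placeholder for all_results[0]['mesh_posed']
mesh.apply_transform(flip_xy_shift_z)  # rotate 180 deg about z, then shift +1 in z
mesh.export(file_obj='result.glb')     # binary glTF, the format gr.Model3D displays
```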
gradio_demo/barc_demo_v3.py DELETED
@@ -1,289 +0,0 @@
-# python gradio_demo/barc_demo_v3.py
-
-import os
-os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
-os.environ["CUDA_VISIBLE_DEVICES"]="0"
-try:
-    # os.system("pip install --upgrade torch==1.11.0+cu113 torchvision==0.12.0+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html")
-    os.system("pip install --upgrade torch==1.6.0+cu101 torchvision==0.7.0+cu101 -f https://download.pytorch.org/whl/cu101/torch_stable.html")
-except Exception as e:
-    print(e)
-
-import numpy as np
-import os
-import glob
-import torch
-from torch.utils.data import DataLoader
-import torchvision
-from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
-import torchvision.transforms as T
-import cv2
-from matplotlib import pyplot as plt
-from PIL import Image
-
-import gradio as gr
-
-import sys
-sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../', 'src'))
-from stacked_hourglass.datasets.imgcropslist import ImgCrops
-from combined_model.train_main_image_to_3d_withbreedrel import do_visual_epoch
-from combined_model.model_shape_v7 import ModelImageTo3d_withshape_withproj
-
-from configs.barc_cfg_defaults import get_cfg_global_updated
-
-print(
-    "torch: ", torch.__version__,
-    "\ntorchvision: ", torchvision.__version__,
-)
-# print("EnV", os.environ)
-
-
-def get_prediction(model, img_path_or_img, confidence=0.5):
-    """
-    see https://haochen23.github.io/2020/04/object-detection-faster-rcnn.html#.YsMCm4TP3-g
-    get_prediction
-    parameters:
-      - img_path_or_img - path of the input image (or the image itself)
-      - confidence - threshold value for the prediction score
-    method:
-      - the image is obtained from the image path
-      - the image is converted to an image tensor using PyTorch's transforms
-      - the image is passed through the model to get the predictions
-      - classes and box coordinates are obtained, but only predictions with
-        a score > threshold are kept
-    """
-    if isinstance(img_path_or_img, str):
-        img = Image.open(img_path_or_img).convert('RGB')
-    else:
-        img = img_path_or_img
-    transform = T.Compose([T.ToTensor()])
-    img = transform(img)
-    pred = model([img])
-    # pred_class = [COCO_INSTANCE_CATEGORY_NAMES[i] for i in list(pred[0]['labels'].numpy())]
-    pred_class = list(pred[0]['labels'].numpy())
-    pred_boxes = [[(int(i[0]), int(i[1])), (int(i[2]), int(i[3]))] for i in list(pred[0]['boxes'].detach().numpy())]
-    pred_score = list(pred[0]['scores'].detach().numpy())
-    try:
-        pred_t = [pred_score.index(x) for x in pred_score if x > confidence][-1]
-        pred_boxes = pred_boxes[:pred_t+1]
-        pred_class = pred_class[:pred_t+1]
-        return pred_boxes, pred_class, pred_score
-    except:
-        print('no bounding box with a sufficiently high score found! -> work on full image')
-        return None, None, None
-
-def detect_object(model, img_path_or_img, confidence=0.5, rect_th=2, text_size=0.5, text_th=1):
-    """
-    see https://haochen23.github.io/2020/04/object-detection-faster-rcnn.html#.YsMCm4TP3-g
-    object_detection_api
-    parameters:
-      - img_path_or_img - path of the input image (or the image itself)
-      - confidence - threshold value for the prediction score
-      - rect_th - thickness of the bounding box
-      - text_size - size of the class label text
-      - text_th - thickness of the text
-    method:
-      - the prediction is obtained from the get_prediction method
-      - for each prediction, the bounding box is drawn and the text is
-        written with OpenCV
-      - the final image is displayed
-    """
-    boxes, pred_cls, pred_scores = get_prediction(model, img_path_or_img, confidence)
-    if isinstance(img_path_or_img, str):
-        img = cv2.imread(img_path_or_img)
-        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
-    else:
-        img = img_path_or_img
-    is_first = True
-    bbox = None
-    if boxes is not None:
-        for i in range(len(boxes)):
-            cls = pred_cls[i]
-            if cls == 18 and bbox is None:
-                cv2.rectangle(img, boxes[i][0], boxes[i][1], color=(0, 255, 0), thickness=rect_th)
-                # cv2.putText(img, pred_cls[i], boxes[i][0], cv2.FONT_HERSHEY_SIMPLEX, text_size, (0,255,0), thickness=text_th)
-                cv2.putText(img, str(pred_scores[i]), boxes[i][0], cv2.FONT_HERSHEY_SIMPLEX, text_size, (0,255,0), thickness=text_th)
-                bbox = boxes[i]
-    return img, bbox
-
-
-def run_bbox_inference(input_image):
-    # load configs
-    cfg = get_cfg_global_updated()
-
-    model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
-    model.eval()
-    out_path = os.path.join(cfg.paths.ROOT_OUT_PATH, 'gradio_examples', 'test2.png')
-    img, bbox = detect_object(model=model, img_path_or_img=input_image, confidence=0.5)
-    fig = plt.figure()  # plt.figure(figsize=(20,30))
-    plt.imsave(out_path, img)
-    return img, bbox
-
-
-def run_barc_inference(input_image, bbox=None):
-
-    # load configs
-    cfg = get_cfg_global_updated()
-
-    model_file_complete = os.path.join(cfg.paths.ROOT_CHECKPOINT_PATH, 'barc_complete', 'model_best.pth.tar')
-
-    # Select the hardware device to use for inference.
-    '''if torch.cuda.is_available() and cfg.device=='cuda':
-        device = torch.device('cuda', torch.cuda.current_device())
-        # torch.backends.cudnn.benchmark = True
-    else:
-        device = torch.device('cpu')'''
-    device = 'cuda' if torch.cuda.is_available() else 'cpu'
-    print('----------------------> device: ')
-    print(device)
-
-    path_model_file_complete = os.path.join(cfg.paths.ROOT_CHECKPOINT_PATH, model_file_complete)
-
-    # Disable gradient calculations.
-    torch.set_grad_enabled(False)
-
-    # prepare complete model
-    complete_model = ModelImageTo3d_withshape_withproj(
-        num_stage_comb=cfg.params.NUM_STAGE_COMB, num_stage_heads=cfg.params.NUM_STAGE_HEADS,
-        num_stage_heads_pose=cfg.params.NUM_STAGE_HEADS_POSE, trans_sep=cfg.params.TRANS_SEP,
-        arch=cfg.params.ARCH, n_joints=cfg.params.N_JOINTS, n_classes=cfg.params.N_CLASSES,
-        n_keyp=cfg.params.N_KEYP, n_bones=cfg.params.N_BONES, n_betas=cfg.params.N_BETAS, n_betas_limbs=cfg.params.N_BETAS_LIMBS,
-        n_breeds=cfg.params.N_BREEDS, n_z=cfg.params.N_Z, image_size=cfg.params.IMG_SIZE,
-        silh_no_tail=cfg.params.SILH_NO_TAIL, thr_keyp_sc=cfg.params.KP_THRESHOLD, add_z_to_3d_input=cfg.params.ADD_Z_TO_3D_INPUT,
-        n_segbps=cfg.params.N_SEGBPS, add_segbps_to_3d_input=cfg.params.ADD_SEGBPS_TO_3D_INPUT, add_partseg=cfg.params.ADD_PARTSEG, n_partseg=cfg.params.N_PARTSEG,
-        fix_flength=cfg.params.FIX_FLENGTH, structure_z_to_betas=cfg.params.STRUCTURE_Z_TO_B, structure_pose_net=cfg.params.STRUCTURE_POSE_NET,
-        nf_version=cfg.params.NF_VERSION)
-
-    # load trained model
-    print(path_model_file_complete)
-    assert os.path.isfile(path_model_file_complete)
-    print('Loading model weights from file: {}'.format(path_model_file_complete))
-    checkpoint_complete = torch.load(path_model_file_complete, map_location=device)
-    state_dict_complete = checkpoint_complete['state_dict']
-    complete_model.load_state_dict(state_dict_complete, strict=False)
-    complete_model = complete_model.to(device)
-
-    save_imgs_path = os.path.join(cfg.paths.ROOT_OUT_PATH, 'gradio_examples')
-    if not os.path.exists(save_imgs_path):
-        os.makedirs(save_imgs_path)
-
-    input_image_list = [input_image]
-    if bbox is not None:
-        input_bbox_list = [bbox]
-    else:
-        input_bbox_list = None
-    val_dataset = ImgCrops(image_list=input_image_list, bbox_list=input_bbox_list, dataset_mode='complete')
-    test_name_list = val_dataset.test_name_list
-    val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False,
-                            num_workers=0, pin_memory=True, drop_last=False)
-
-    # run visual evaluation
-    # remark: take ACC_JOINTS and DATA_INFO from StanExt as this is the training dataset
-    all_results = do_visual_epoch(val_loader, complete_model, device,
-                                  ImgCrops.DATA_INFO,
-                                  weight_dict=None,
-                                  acc_joints=ImgCrops.ACC_JOINTS,
-                                  save_imgs_path=None,  # save_imgs_path,
-                                  metrics='all',
-                                  test_name_list=test_name_list,
-                                  render_all=cfg.params.RENDER_ALL,
-                                  pck_thresh=cfg.params.PCK_THRESH,
-                                  return_results=True)
-
-    mesh = all_results[0]['mesh_posed']
-    result_path = os.path.join(save_imgs_path, test_name_list[0] + '_z')
-
-    mesh.apply_transform([[-1, 0, 0, 0],
-                          [0, -1, 0, 0],
-                          [0, 0, 1, 1],
-                          [0, 0, 0, 1]])
-    mesh.export(file_obj=result_path + '.glb')
-    result_gltf = result_path + '.glb'
-    return [result_gltf, result_gltf]
-
-
-def run_complete_inference(input_image):
-
-    output_interm_image, output_interm_bbox = run_bbox_inference(input_image.copy())
-
-    print(output_interm_bbox)
-
-    # output_image = run_barc_inference(input_image)
-    output_image = run_barc_inference(input_image, output_interm_bbox)
-
-    return output_image
-
-
-# demo = gr.Interface(run_barc_inference, gr.Image(), "image")
-# demo = gr.Interface(run_complete_inference, gr.Image(), "image")
-
-# see: https://huggingface.co/spaces/radames/PIFu-Clothed-Human-Digitization/blob/main/PIFu/spaces.py
-
-description = '''
-# BARC
-
-#### Project Page
-* https://barc.is.tue.mpg.de/
-
-#### Description
-This is a demo for BARC. While BARC is trained on image crops, this demo uses a pretrained Faster R-CNN to obtain bounding boxes for the dogs.
-To see your result you may have to wait a minute or two; please be patient.
-
-<details>
-
-<summary>More</summary>
-
-#### Citation
-
-```
-@inproceedings{BARC:2022,
-  title = {{BARC}: Learning to Regress {3D} Dog Shape from Images by Exploiting Breed Information},
-  author = {Rueegg, Nadine and Zuffi, Silvia and Schindler, Konrad and Black, Michael J.},
-  booktitle = {Proceedings IEEE Conf. on Computer Vision and Pattern Recognition (CVPR)},
-  year = {2022}
-}
-```
-
-</details>
-'''
-
-examples = sorted(glob.glob(os.path.join(os.path.dirname(__file__), '../', 'datasets', 'test_image_crops', '*.jpg')) + glob.glob(os.path.join(os.path.dirname(__file__), '../', 'datasets', 'test_image_crops', '*.png')))
-
-demo = gr.Interface(
-    fn=run_complete_inference,
-    description=description,
-    # inputs=gr.Image(type="filepath", label="Input Image"),
-    inputs=gr.Image(label="Input Image"),
-    outputs=[
-        gr.Model3D(
-            clear_color=[0.0, 0.0, 0.0, 0.0], label="3D Model"),
-        gr.File(label="Download 3D Model")
-    ],
-    examples=examples,
-    thumbnail="barc_thumbnail.png",
-    allow_flagging="never",
-    cache_examples=False  # True
-)
-
-demo.launch()  # (share=True)
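Relative to app.py above, the functional differences in this second deleted demo are the unconditional `torch.cuda.is_available()` check and the `map_location=device` argument to `torch.load`. A minimal sketch of that loading pattern follows; the checkpoint filename is the one from the demo and serves only as an illustration here.

```python
# Hedged sketch: map_location remaps tensors saved on a CUDA device onto
# whatever device is actually available, so a CUDA-trained checkpoint also
# loads on CPU-only hosts.
import torch

device = 'cuda' if torch.cuda.is_available() else 'cpu'
checkpoint = torch.load('model_best.pth.tar', map_location=device)
state_dict = checkpoint['state_dict']
# strict=False tolerates keys present only in the checkpoint or only in the model:
# complete_model.load_state_dict(state_dict, strict=False)
```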
src/configs/barc_cfg_defaults.py CHANGED
@@ -8,7 +8,7 @@ abs_barc_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..
 
 _C = CN()
 _C.barc_dir = abs_barc_dir
-_C.device = 'cpu' # 'cuda'
+_C.device = 'cuda' # 'cpu' # 'cuda'
 
 ## path settings
 _C.paths = CN()
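This one-line config change is the substance of the commit: the YACS default for `_C.device` flips from `'cpu'` to `'cuda'`. A minimal sketch of how the new default interacts with the device guard in the deleted app.py above (illustrative only, not part of the commit):

```python
# With cfg.device == 'cuda' by default, the guard picks the GPU whenever
# one is visible and falls back to the CPU otherwise.
import torch

cfg_device = 'cuda'  # new default from barc_cfg_defaults.py
if torch.cuda.is_available() and cfg_device == 'cuda':
    device = torch.device('cuda', torch.cuda.current_device())
else:
    device = torch.device('cpu')
print(device)
```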