Huang committed
Commit 6a80886
1 Parent(s): 2eb5b9e
annotator/__init__.py CHANGED
@@ -16,6 +16,7 @@ from .pidinet import PidInet
 from .shuffle import Image2MaskShuffleDetector
 from .zoe import ZoeDetector
 from .oneformer import OneformerDetector
+from .denthPose import DenthPoseProcessor
 
 __all__ = [
     UniformerDetector,
@@ -34,7 +35,8 @@ __all__ = [
     PidInet,
     Image2MaskShuffleDetector,
     ZoeDetector,
-    OneformerDetector
+    OneformerDetector,
+    DenthPoseProcessor
 ]
 #
 #
annotator/denthPose/Base-DensePose-RCNN-FPN.yaml ADDED
@@ -0,0 +1,48 @@
+VERSION: 2
+MODEL:
+  META_ARCHITECTURE: "GeneralizedRCNN"
+  BACKBONE:
+    NAME: "build_resnet_fpn_backbone"
+  RESNETS:
+    OUT_FEATURES: ["res2", "res3", "res4", "res5"]
+  FPN:
+    IN_FEATURES: ["res2", "res3", "res4", "res5"]
+  ANCHOR_GENERATOR:
+    SIZES: [[32], [64], [128], [256], [512]]  # One size for each in feature map
+    ASPECT_RATIOS: [[0.5, 1.0, 2.0]]  # Three aspect ratios (same for all in feature maps)
+  RPN:
+    IN_FEATURES: ["p2", "p3", "p4", "p5", "p6"]
+    PRE_NMS_TOPK_TRAIN: 2000  # Per FPN level
+    PRE_NMS_TOPK_TEST: 1000  # Per FPN level
+    # Detectron1 uses 2000 proposals per-batch,
+    # (See "modeling/rpn/rpn_outputs.py" for details of this legacy issue)
+    # which is approximately 1000 proposals per-image since the default batch size for FPN is 2.
+    POST_NMS_TOPK_TRAIN: 1000
+    POST_NMS_TOPK_TEST: 1000
+
+  DENSEPOSE_ON: True
+  ROI_HEADS:
+    NAME: "DensePoseROIHeads"
+    IN_FEATURES: ["p2", "p3", "p4", "p5"]
+    NUM_CLASSES: 1
+  ROI_BOX_HEAD:
+    NAME: "FastRCNNConvFCHead"
+    NUM_FC: 2
+    POOLER_RESOLUTION: 7
+    POOLER_SAMPLING_RATIO: 2
+    POOLER_TYPE: "ROIAlign"
+  ROI_DENSEPOSE_HEAD:
+    NAME: "DensePoseV1ConvXHead"
+    POOLER_TYPE: "ROIAlign"
+    NUM_COARSE_SEGM_CHANNELS: 2
+DATASETS:
+  TRAIN: ("densepose_coco_2014_train", "densepose_coco_2014_valminusminival")
+  TEST: ("densepose_coco_2014_minival",)
+SOLVER:
+  IMS_PER_BATCH: 16
+  BASE_LR: 0.01
+  STEPS: (60000, 80000)
+  MAX_ITER: 90000
+  WARMUP_FACTOR: 0.1
+INPUT:
+  MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800)
annotator/denthPose/__init__.py ADDED
@@ -0,0 +1,20 @@
+import os
+
+
+def install_package():
+    print(os.system("pip install git+https://github.com/facebookresearch/detectron2.git"))
+    print(os.system("pip install git+https://github.com/facebookresearch/detectron2@main#subdirectory=projects/DensePose"))
+
+
+install_package()
+
+print("finished")
+
+
+class DenthPoseProcessor:
+    def __init__(self):
+        from .depth_pose import DenthPoseProcess
+        self.processor = DenthPoseProcess()
+
+    def __call__(self, img):
+        return self.processor(img)
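
Note that install_package() runs at module import time, so pip is shelled out on every import of annotator.denthPose, even when detectron2 is already installed. A minimal alternative sketch, not part of this commit and with an illustrative helper name, that only installs when the modules are actually missing:

import importlib.util
import subprocess
import sys

def ensure_densepose_installed():
    # Shell out to pip only when a module is missing, instead of
    # reinstalling on every import as the os.system() calls above do.
    deps = [
        ("detectron2", "git+https://github.com/facebookresearch/detectron2.git"),
        ("densepose", "git+https://github.com/facebookresearch/detectron2@main#subdirectory=projects/DensePose"),
    ]
    for module_name, pip_spec in deps:
        if importlib.util.find_spec(module_name) is None:
            subprocess.check_call([sys.executable, "-m", "pip", "install", pip_spec])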
annotator/denthPose/densepose_rcnn_R_50_FPN_s1x.pkl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b8a7382001b16e453bad95ca9dbc68ae8f2b839b304cf90eaf5c27fbdb4dae91
+size 255757821
annotator/denthPose/densepose_rcnn_R_50_FPN_s1x.yaml ADDED
@@ -0,0 +1,9 @@
+_BASE_: "Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+  WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+  RESNETS:
+    DEPTH: 50
+  DEVICE: "cpu"
+SOLVER:
+  MAX_ITER: 130000
+  STEPS: (100000, 120000)
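
This child config pulls everything else from Base-DensePose-RCNN-FPN.yaml via `_BASE_`, which detectron2 resolves relative to the config file's own directory. A short sketch of how depth_pose.py (below) ends up with the merged config, assuming detectron2 and the DensePose project are installed:

from densepose import add_densepose_config
from detectron2.config import get_cfg

cfg = get_cfg()
add_densepose_config(cfg)  # registers the ROI_DENSEPOSE_HEAD keys the base file sets
cfg.merge_from_file("annotator/denthPose/densepose_rcnn_R_50_FPN_s1x.yaml")
print(cfg.MODEL.RESNETS.DEPTH)   # 50, set in this file
print(cfg.MODEL.ROI_HEADS.NAME)  # "DensePoseROIHeads", inherited from the base file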
annotator/denthPose/depth_pose.py ADDED
@@ -0,0 +1,114 @@
+import os.path
+from os import path
+from typing import List, Dict, Any, ClassVar
+
+import torch
+from densepose import add_densepose_config
+from densepose.vis.base import CompoundVisualizer
+from densepose.vis.bounding_box import ScoredBoundingBoxVisualizer
+from densepose.vis.densepose_outputs_vertex import get_texture_atlases, DensePoseOutputsTextureVisualizer, \
+    DensePoseOutputsVertexVisualizer
+from densepose.vis.densepose_results import DensePoseResultsContourVisualizer, \
+    DensePoseResultsFineSegmentationVisualizer, DensePoseResultsUVisualizer, DensePoseResultsVVisualizer
+from densepose.vis.densepose_results_textures import get_texture_atlas, DensePoseResultsVisualizerWithTexture
+from densepose.vis.extractor import create_extractor, CompoundExtractor
+from detectron2.config import CfgNode, get_cfg
+from detectron2.engine.defaults import DefaultPredictor
+
+from annotator.base_annotator import BaseProcessor
+
+config_model = {
+    "densepose_rcnn_R_50_FPN_s1x": {
+        "yaml": "densepose_rcnn_R_50_FPN_s1x.yaml",
+        "file": "densepose_rcnn_R_50_FPN_s1x.pkl"
+    }
+}
+
+default_conf = config_model["densepose_rcnn_R_50_FPN_s1x"]
+
+
+class DenthPoseProcess(BaseProcessor):
+    VISUALIZERS: ClassVar[Dict[str, object]] = {
+        "dp_contour": DensePoseResultsContourVisualizer,
+        "dp_segm": DensePoseResultsFineSegmentationVisualizer,
+        "dp_u": DensePoseResultsUVisualizer,
+        "dp_v": DensePoseResultsVVisualizer,
+        "dp_iuv_texture": DensePoseResultsVisualizerWithTexture,
+        "dp_cse_texture": DensePoseOutputsTextureVisualizer,
+        "dp_vertex": DensePoseOutputsVertexVisualizer,
+        "bbox": ScoredBoundingBoxVisualizer,
+    }
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        here = path.abspath(path.dirname(__file__))
+        self.cfg = os.path.join(here, default_conf["yaml"])
+        self.model_conf = os.path.join(here, default_conf["file"])
+        self.predictor = None
+
+    @classmethod
+    def setup_config(cls: type, config_fpath: str, model_fpath: str, opts: List[str]):
+        cfg = get_cfg()
+        add_densepose_config(cfg)
+        cfg.merge_from_file(config_fpath)
+        if opts:
+            cfg.merge_from_list(opts)
+        cfg.MODEL.WEIGHTS = model_fpath
+        cfg.freeze()
+        return cfg
+
+    @classmethod
+    def create_context(cls: type, vis_specs, cfg: CfgNode, arg_texture_atlas=None,
+                       arg_texture_atlases_map=None) -> Dict[str, Any]:
+        """
+        Build the compound visualizer and extractor for the requested visualizations.
+        """
+        # vis_specs = visualizations.split(",")
+        visualizers = []
+        extractors = []
+        for vis_spec in vis_specs:
+            texture_atlas = get_texture_atlas(arg_texture_atlas)
+            texture_atlases_dict = get_texture_atlases(arg_texture_atlases_map)
+            vis = cls.VISUALIZERS[vis_spec](
+                cfg=cfg,
+                texture_atlas=texture_atlas,
+                texture_atlases_dict=texture_atlases_dict,
+            )
+            visualizers.append(vis)
+            extractor = create_extractor(vis)
+            extractors.append(extractor)
+        visualizer = CompoundVisualizer(visualizers)
+        extractor = CompoundExtractor(extractors)
+        context = {
+            "extractor": extractor,
+            "visualizer": visualizer,
+            "entry_idx": 0,
+        }
+        return context
+
+    def execute_on_outputs(self, image, context: Dict[str, Any], outputs):
+        visualizer = context["visualizer"]
+        extractor = context["extractor"]
+        # image = np.tile(image[:, :, np.newaxis], [1, 1, 3])
+        data = extractor(outputs)
+        # zero_image = np.zeros(image.shape, dtype=np.uint8)
+        image_vis = visualizer.visualize(image, data)
+        return image_vis
+
+    def __call__(self, img, visualizations=["dp_u", "dp_v", "bbox"], texture_atlas=None, texture_atlases_map=None):
+        opts = []
+        cfg = self.setup_config(config_fpath=self.cfg, model_fpath=self.model_conf, opts=opts)
+        if self.predictor is None:
+            self.predictor = DefaultPredictor(cfg)
+        context = self.create_context(visualizations, cfg, texture_atlas, texture_atlases_map)
+        with torch.no_grad():
+            outputs = self.predictor(img)["instances"]
+        return self.execute_on_outputs(img, context, outputs)
+
+# if __name__ == '__main__':
+#     image_path = "demo.jpeg"
+#     img = cv2.imread(image_path)
+#     process = DenthPoseProcess()
+#     process(img, visualizations=["dp_contour", "bbox"])
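
A minimal usage sketch for the class above, assuming the checkpoint referenced by the LFS pointer has been pulled and that demo.jpeg is a stand-in filename; cv2.imread returns BGR, which matches DefaultPredictor's default INPUT.FORMAT:

import cv2
from annotator.denthPose.depth_pose import DenthPoseProcess

img = cv2.imread("demo.jpeg")  # BGR ndarray, as DefaultPredictor expects by default
process = DenthPoseProcess()
# The first call is slow: it builds the DefaultPredictor and loads the weights.
vis = process(img, visualizations=["dp_contour", "bbox"])
cv2.imwrite("image_densepose_contour.png", vis)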
annotator/denthPose/readme.md ADDED
@@ -0,0 +1,10 @@
+### Install detectron2
+pip install 'git+https://github.com/facebookresearch/detectron2.git'
+
+### Install DensePose
+pip install git+https://github.com/facebookresearch/detectron2@main#subdirectory=projects/DensePose
+
+
+### Run the demo
+python apply_net.py show densepose_rcnn_R_50_FPN_s1x.yaml model_final_162be9.pkl demo.jpg dp_contour,bbox --output image_densepose_contour.png
+
config/annotator.yaml CHANGED
@@ -123,3 +123,6 @@ zoe:
 oneformer:
   process: OneformerDetector
   input: []
+denthPose:
+  process: DenthPoseProcessor
+  input: []
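
The loader that consumes this entry is not part of the commit, but presumably it resolves `process: DenthPoseProcessor` to the class re-exported from annotator/__init__.py. A sketch under that assumption (the getattr lookup is illustrative, not the project's confirmed mechanism):

import yaml
import annotator  # importing this triggers denthPose's install-on-import side effect

with open("config/annotator.yaml") as f:
    conf = yaml.safe_load(f)

entry = conf["denthPose"]                   # {"process": "DenthPoseProcessor", "input": []}
cls = getattr(annotator, entry["process"])  # resolve the class by name
processor = cls()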