Upload 2 files
- vis_utils.py +688 -0
- visualize.py +110 -0
vis_utils.py
ADDED
@@ -0,0 +1,688 @@
import os.path as osp
import json
from typing import List, Union
import random

import yaml
from einops import rearrange, reduce
import torch
import torchvision.transforms.functional as tv_functional
import gzip
import numpy as np
import cv2
from PIL import Image
from torchvision.transforms.functional import pil_to_tensor


class Colors:
    # Ultralytics color palette https://ultralytics.com/
    def __init__(self):
        # hex = matplotlib.colors.TABLEAU_COLORS.values()
        # hexs = ('FF1010', '10FF10', 'FFF010', '100FFF', 'c0c0c0', 'FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB',
        #         '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7')
        hexs = [
            '#4363d8', '#9A6324', '#808000', '#469990', '#000075', '#e6194B', '#f58231',
            '#ffe119', '#bfef45', '#3cb44b', '#42d4f4', '#800000', '#911eb4', '#f032e6',
            '#fabed4', '#ffd8b1', '#fffac8', '#aaffc3', '#dcbeff', '#a9a9a9', '#006400',
            '#4169E1', '#8B4513', '#FA8072', '#87CEEB', '#FFD700', '#ffffff', '#000000',
        ]
        self.palette = [self.hex2rgb(f'#{c}') if not c.startswith('#') else self.hex2rgb(c) for c in hexs]
        self.n = len(self.palette)

    def __call__(self, i, bgr=False):
        c = self.palette[int(i) % self.n]
        return (c[2], c[1], c[0]) if bgr else c

    @staticmethod
    def hex2rgb(h):  # rgb order (PIL)
        return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))


DEFAULT_COLOR_PALETTE = Colors()


def get_color(idx):
    # -1 falls back to plain white; any other index cycles through the palette
    if idx == -1:
        return 255
    else:
        return DEFAULT_COLOR_PALETTE(idx)


VALID_BODY_PARTS_V2 = [
    'hair', 'headwear', 'face', 'eyes', 'eyewear', 'ears', 'earwear', 'nose', 'mouth',
    'neck', 'neckwear', 'topwear', 'handwear', 'bottomwear', 'legwear', 'footwear',
    'tail', 'wings', 'objects'
]
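
# Usage sketch (illustrative, not part of the uploaded file): each tag gets a
# stable color by indexing the palette with its position in the list above.
#
#   color = get_color(VALID_BODY_PARTS_V2.index('hair'))  # -> (r, g, b) tuple
#   white = get_color(-1)                                 # -> scalar 255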


def seed_everything(seed):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)


def load_image(imgp: str, mode="RGB", output_type='numpy'):
    """
    Load the image at `imgp`, convert it to `mode`, and return it as
    `output_type` ('numpy' for an HWC array; anything else keeps the PIL image).
    """
    img = Image.open(imgp).convert(mode)
    if output_type == 'numpy':
        img = np.array(img)
        if len(img.shape) == 2:
            img = img[..., None]
    return img


def bbox_intersection(xyxy, xyxy2):
    x1, y1, x2, y2 = xyxy2
    dx1, dy1, dx2, dy2 = xyxy
    ix1, ix2 = max(x1, dx1), min(x2, dx2)
    iy1, iy2 = max(y1, dy1), min(y2, dy2)
    if ix2 >= ix1 and iy2 >= iy1:
        return [ix1, iy1, ix2, iy2]
    return None


_IMG2TENSOR_IMGTYPE = (Image.Image, np.ndarray, str)
_IMG2TENSOR_DIMORDER = ('bchw', 'chw', 'hwc')


def img2tensor(img: Union[Image.Image, np.ndarray, str], normalize=False, mean=0., std=255.,
               dim_order: str = 'bchw', dtype=torch.float32, device: str = 'cpu', imread_mode='RGB'):

    def _check_normalize_values(values, num_channels):
        if isinstance(values, tuple):
            values = list(values)
        elif isinstance(values, (int, float, np.ScalarType)):
            values = [values] * num_channels
        else:
            assert isinstance(values, (np.ndarray, list))
            if len(values) > num_channels:
                values = values[:num_channels]
            assert len(values) == num_channels
        return values

    assert isinstance(img, _IMG2TENSOR_IMGTYPE)
    assert dim_order in _IMG2TENSOR_DIMORDER

    if isinstance(img, str):
        img = load_image(img, mode=imread_mode)

    if isinstance(img, Image.Image):
        img = pil_to_tensor(img)
        if dim_order == 'bchw':
            img = img.unsqueeze(0)
        elif dim_order == 'hwc':
            img = img.permute((1, 2, 0))
    else:
        if img.ndim == 2:
            img = img[..., None]
        else:
            assert img.ndim == 3
        if dim_order == 'bchw':
            img = rearrange(img, 'h w c -> c h w')[None, ...]
        elif dim_order == 'chw':
            img = rearrange(img, 'h w c -> c h w')
        img = torch.from_numpy(np.ascontiguousarray(img))

    img = img.to(device=device, dtype=dtype)

    if normalize:
        if dim_order == 'bchw':
            c = img.shape[1]
        elif dim_order == 'chw':
            c = img.shape[0]
        else:
            c = img.shape[2]

        if mean is not None and std is not None:
            mean = _check_normalize_values(mean, c)
            std = _check_normalize_values(std, c)
            img = tv_functional.normalize(img, mean=mean, std=std)

    return img
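
# Usage sketch (illustrative, assuming the defaults above): a uint8 HWC image
# becomes a float32 1x3xHxW tensor, scaled to [0, 1] by mean=0, std=255.
#
#   arr = np.zeros((64, 64, 3), dtype=np.uint8)
#   t = img2tensor(arr, normalize=True)       # shape (1, 3, 64, 64), float32
#   t_hwc = img2tensor(arr, dim_order='hwc')  # keeps the HWC layout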


def optim_depth(part_dict_list, fullpage):
    """
    Greedily adjust each part's depth so that the depth ordering agrees with
    the per-pixel visibility implied by SSIM against the composed full page.
    """
    window = create_window(11, 1.5, 3)
    depth_map = np.full(fullpage.shape[:2], 2, dtype=np.float32)

    ssim_map = np.full(fullpage.shape[:2], 0., dtype=np.float32)
    depth_order_map = np.full(fullpage.shape[:2], -1, dtype=np.int16)
    color_order_map = depth_order_map.copy()
    fullpage_torch = img2tensor(fullpage[..., :3])

    # pass 1: per pixel, record which part matches the page best (SSIM) and
    # which part is frontmost according to its current depth
    for ii, pd in enumerate(part_dict_list):
        x1, y1, x2, y2 = pd['xyxy']
        mask = pd['mask']
        region_torch = img2tensor(pd['img'][..., :3])
        with torch.no_grad():
            ssim_map_region = calculate_ssim_map(fullpage_torch[:, :, y1: y2, x1: x2], region_torch, window, 255, use_padding=True)
            ssim_map_region = ssim_map_region.to(dtype=torch.float32, device='cpu')[0].numpy()
        ssim_update_mask = np.bitwise_and(ssim_map_region > ssim_map[y1: y2, x1: x2], mask)

        if np.any(ssim_update_mask):
            upd_mask = ssim_update_mask.astype(np.int32)
            color_order_map[y1: y2, x1: x2] = color_order_map[y1: y2, x1: x2] * (1 - upd_mask) + upd_mask * np.full((y2 - y1, x2 - x1), ii, dtype=np.int16)
            ssim_map[y1: y2, x1: x2] = ssim_map[y1: y2, x1: x2] * (1 - upd_mask) + upd_mask * ssim_map_region

        depth_update_mask = np.bitwise_and(pd['depth'] < depth_map[y1: y2, x1: x2], mask)
        if np.any(depth_update_mask):
            depth_map[y1: y2, x1: x2] = (1 - depth_update_mask) * depth_map[y1: y2, x1: x2] + depth_update_mask * pd['depth']
            depth_order_map[y1: y2, x1: x2] = (1 - depth_update_mask) * depth_order_map[y1: y2, x1: x2] + depth_update_mask * np.full((y2 - y1, x2 - x1), ii, dtype=np.int16)

    # pass 2: shift each part forward by the amount that best reconciles its
    # depth-based visibility with its SSIM-based visibility
    for _ in range(1):
        for ii in range(len(part_dict_list)):
            pd = part_dict_list[ii]

            # if pd['tag'] in {'face', 'topwear', 'nose'}:
            #     continue

            x1, y1, x2, y2 = pd['xyxy']
            mask = pd['mask']
            color_mask = color_order_map[y1: y2, x1: x2] == ii
            if not np.any(color_mask):
                continue
            depth = pd['depth']
            depth_region = depth_map[y1: y2, x1: x2]
            max_shift = np.max((depth - depth_region) * color_mask * mask)
            if max_shift == 0:
                continue
            max_shift += 0.001
            min_shift = np.min((depth - depth_region) * mask)
            shift_list = np.linspace(0., max_shift, num=20)
            # shift_list = np.concatenate([np.linspace(0, min_shift, num=20), shift_list])

            score_map = depth[..., None] - shift_list[None, None] < depth_region[..., None]
            score_map = reduce((score_map == color_mask[..., None]).astype(np.float32) * mask[..., None], 'h w c -> c', reduction='mean')
            shift = shift_list[np.argmax(score_map)]
            if shift > 0:
                depth -= shift
                depth_update_mask = np.bitwise_and(depth < depth_region, mask)
                depth_map[y1: y2, x1: x2] = (1 - depth_update_mask) * depth_map[y1: y2, x1: x2] + depth_update_mask * depth
                pd['depth'] = depth


def load_parts(srcp, rotate=False):
    srcimg = osp.join(srcp, 'src_img.png')
    fullpage = np.array(Image.open(srcimg).convert('RGBA'))

    infop = osp.join(srcp, 'info.json')
    infos = json2dict(infop)

    part_dict_list = []
    tag2pd = {}
    part_id = 0

    min_sz = 12

    if rotate:
        fullpage = np.rot90(fullpage, 3)

    for tag, partdict in infos['parts'].items():
        img = Image.open(osp.join(srcp, tag + '.png')).convert('RGBA')
        depthp = osp.join(srcp, tag + '_depth.png')

        img = np.array(img)
        p_test = max(img.shape[:2]) // 10
        mask = img[..., -1] > 10
        if np.sum(mask[:-p_test, :-p_test]) > 4:
            if rotate:
                img = np.rot90(img, 3)
                mask = np.rot90(mask, 3)

            xyxy = cv2.boundingRect(cv2.findNonZero(mask.astype(np.uint8)))
            xyxy = np.array(xyxy)
            w, h = xyxy[2:]  # cv2.boundingRect returns (x, y, w, h)
            xyxy[2] += xyxy[0]
            xyxy[3] += xyxy[1]
            # pad boxes thinner than min_sz so downstream SSIM windows fit
            p = min_sz - w
            if p > 0:
                if xyxy[0] >= p:
                    xyxy[0] -= p
                else:
                    xyxy[2] += p
            p = min_sz - h
            if p > 0:
                if xyxy[1] >= p:
                    xyxy[1] -= p
                else:
                    xyxy[3] += p

            x1, y1, x2, y2 = xyxy
            depth = np.array(Image.open(depthp).convert('L'))
            if rotate:
                depth = np.rot90(depth, 3)
            dmin, dmax = partdict['depth_min'], partdict['depth_max']

            mask = mask[y1: y2, x1: x2].copy()
            img = img[y1: y2, x1: x2].copy()
            depth = depth[y1: y2, x1: x2].copy()

            # map the 8-bit depth image back to this part's depth range
            depth = np.array(depth, dtype=np.float32) / 255 * (dmax - dmin) + dmin
            tag2pd[tag] = {'img': img, 'depth': depth, 'part_id': part_id, 'xyxy': xyxy, 'mask': mask, 'tag': tag}
            part_dict_list.append(tag2pd[tag])
            part_id += 1

    return fullpage, infos, part_dict_list
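
# Shape reference (derived from the code above): `fullpage` is an (H, W, 4)
# RGBA array, and every entry of `part_dict_list` is a dict of the form
#   {'img': (h, w, 4) uint8, 'depth': (h, w) float32 in [depth_min, depth_max],
#    'mask': (h, w) bool, 'xyxy': np.ndarray (x1, y1, x2, y2),
#    'part_id': int, 'tag': str}
# where (h, w) is the part's bounding box, padded toward min_sz per side.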


def json2dict(json_path: str):
    # also accepts gzip-compressed json ('.gz') and '.yaml' files
    plower = json_path.lower()
    if plower.endswith('.gz'):
        with gzip.open(json_path, 'rt', encoding='utf8') as f:
            metadata = json.load(f)
        return metadata

    if plower.endswith('.yaml'):
        with open(json_path, 'r') as file:
            metadata = yaml.load(file, yaml.CSafeLoader)
        return metadata

    with open(json_path, 'r', encoding='utf8') as f:
        metadata = json.loads(f.read())
    return metadata


# Source: https://github.com/One-sixth/ms_ssim_pytorch/blob/master/ssim.py

'''
code modified from
https://github.com/VainF/pytorch-msssim/blob/master/pytorch_msssim/ssim.py
'''

import torch.jit
import torch.nn.functional as F


@torch.jit.script
def create_window(window_size: int = 11, sigma: float = 1.5, channel: int = 3):
    '''
    Create a 1-D Gaussian kernel.
    :param window_size: the size of the Gaussian kernel
    :param sigma: sigma of the normal distribution
    :param channel: input channels
    :return: 1-D kernel of shape (channel, 1, 1, window_size)
    '''
    coords = torch.arange(window_size, dtype=torch.float)
    coords -= window_size // 2

    g = torch.exp(-(coords ** 2) / (2 * sigma ** 2))
    g /= g.sum()

    g = g.reshape(1, 1, 1, -1).repeat(channel, 1, 1, 1)
    return g


@torch.jit.script
def _gaussian_filter(x, window_1d, use_padding: bool):
    '''
    Blur the input with a 1-D kernel, applied separably along both axes.
    :param x: batch of tensors to be blurred
    :param window_1d: 1-D Gaussian kernel
    :param use_padding: pad the image before conv
    :return: blurred tensors
    '''
    C = x.shape[1]
    padding = 0
    if use_padding:
        window_size = window_1d.shape[3]
        padding = window_size // 2
    out = F.conv2d(x, window_1d, stride=1, padding=(0, padding), groups=C)
    out = F.conv2d(out, window_1d.transpose(2, 3), stride=1, padding=(padding, 0), groups=C)
    return out


@torch.jit.script
def calculate_ssim_map(X, Y, window, data_range: float, use_padding: bool = True):
    '''
    Calculate the per-pixel SSIM map for X and Y.
    :param X: images
    :param Y: images
    :param window: 1-D Gaussian kernel
    :param data_range: value range of input images (usually 1.0 or 255)
    :param use_padding: pad the image before conv
    :return: channel-averaged SSIM map of shape (N, H, W)
    '''

    K1 = 0.01
    K2 = 0.03
    compensation = 1.0

    C1 = (K1 * data_range) ** 2
    C2 = (K2 * data_range) ** 2

    mu1 = _gaussian_filter(X, window, use_padding)
    mu2 = _gaussian_filter(Y, window, use_padding)
    sigma1_sq = _gaussian_filter(X * X, window, use_padding)
    sigma2_sq = _gaussian_filter(Y * Y, window, use_padding)
    sigma12 = _gaussian_filter(X * Y, window, use_padding)

    mu1_sq = mu1.pow(2)
    mu2_sq = mu2.pow(2)
    mu1_mu2 = mu1 * mu2

    sigma1_sq = compensation * (sigma1_sq - mu1_sq)
    sigma2_sq = compensation * (sigma2_sq - mu2_sq)
    sigma12 = compensation * (sigma12 - mu1_mu2)

    cs_map = (2 * sigma12 + C2) / (sigma1_sq + sigma2_sq + C2)
    # clamp negative cs_map values, which otherwise make ms_ssim output NaN
    cs_map = F.relu(cs_map)
    ssim_map = ((2 * mu1_mu2 + C1) / (mu1_sq + mu2_sq + C1)) * cs_map

    ssim_val = ssim_map.mean(dim=1)  # reduce along the channel dim only, keeping the spatial map
    return ssim_val
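
# For reference, the map computed above is the standard SSIM index
#   SSIM(x, y) = (2*mu_x*mu_y + C1) * (2*sigma_xy + C2)
#                / ((mu_x^2 + mu_y^2 + C1) * (sigma_x^2 + sigma_y^2 + C2))
# with C1 = (0.01 * data_range)^2 and C2 = (0.03 * data_range)^2, except that
# the contrast-structure factor is clamped to be non-negative first.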


@torch.jit.script
def ssim(X, Y, window, data_range: float, use_padding: bool = False):
    '''
    Calculate the SSIM index for X and Y.
    :param X: images
    :param Y: images
    :param window: 1-D Gaussian kernel
    :param data_range: value range of input images (usually 1.0 or 255)
    :param use_padding: pad the image before conv
    :return: (ssim, cs), each of shape (N,)
    '''

    K1 = 0.01
    K2 = 0.03
    compensation = 1.0

    C1 = (K1 * data_range) ** 2
    C2 = (K2 * data_range) ** 2

    mu1 = _gaussian_filter(X, window, use_padding)
    mu2 = _gaussian_filter(Y, window, use_padding)
    sigma1_sq = _gaussian_filter(X * X, window, use_padding)
    sigma2_sq = _gaussian_filter(Y * Y, window, use_padding)
    sigma12 = _gaussian_filter(X * Y, window, use_padding)

    mu1_sq = mu1.pow(2)
    mu2_sq = mu2.pow(2)
    mu1_mu2 = mu1 * mu2

    sigma1_sq = compensation * (sigma1_sq - mu1_sq)
    sigma2_sq = compensation * (sigma2_sq - mu2_sq)
    sigma12 = compensation * (sigma12 - mu1_mu2)

    cs_map = (2 * sigma12 + C2) / (sigma1_sq + sigma2_sq + C2)
    # clamp negative cs_map values, which otherwise make ms_ssim output NaN
    cs_map = F.relu(cs_map)
    ssim_map = ((2 * mu1_mu2 + C1) / (mu1_sq + mu2_sq + C1)) * cs_map

    ssim_val = ssim_map.mean(dim=(1, 2, 3))  # reduce along CHW
    cs = cs_map.mean(dim=(1, 2, 3))

    return ssim_val, cs


@torch.jit.script
def ms_ssim(X, Y, window, data_range: float, weights, use_padding: bool = False, eps: float = 1e-8):
    '''
    Interface of ms-ssim.
    :param X: a batch of images, (N,C,H,W)
    :param Y: a batch of images, (N,C,H,W)
    :param window: 1-D Gaussian kernel
    :param data_range: value range of input images (usually 1.0 or 255)
    :param weights: weights for the different levels
    :param use_padding: pad the image before conv
    :param eps: used to avoid NaN gradients
    :return: ms-ssim, shape (N,)
    '''
    weights = weights[:, None]

    levels = weights.shape[0]
    vals = []
    for i in range(levels):
        ss, cs = ssim(X, Y, window=window, data_range=data_range, use_padding=use_padding)

        if i < levels - 1:
            vals.append(cs)
            X = F.avg_pool2d(X, kernel_size=2, stride=2, ceil_mode=True)
            Y = F.avg_pool2d(Y, kernel_size=2, stride=2, ceil_mode=True)
        else:
            vals.append(ss)

    vals = torch.stack(vals, dim=0)
    # Works around an issue: when c = a ** b and a is 0, c.backward() makes a.grad inf.
    vals = vals.clamp_min(eps)
    # The original ms-ssim op.
    ms_ssim_val = torch.prod(vals[:-1] ** weights[:-1] * vals[-1:] ** weights[-1:], dim=0)
    # An alternative ms-ssim op; per the upstream author, the original one
    # looked better in their image-training demo, so it is kept:
    # ms_ssim_val = torch.prod(vals ** weights, dim=0)
    return ms_ssim_val


class SSIMCriteria(torch.jit.ScriptModule):
    __constants__ = ['data_range', 'use_padding']

    def __init__(self, window_size=11, window_sigma=1.5, data_range=255., channel=3, use_padding=False):
        '''
        :param window_size: the size of the Gaussian kernel
        :param window_sigma: sigma of the normal distribution
        :param data_range: value range of input images (usually 1.0 or 255)
        :param channel: input channels (default: 3)
        :param use_padding: pad the image before conv
        '''
        super().__init__()
        assert window_size % 2 == 1, 'Window size must be odd.'
        window = create_window(window_size, window_sigma, channel)
        self.register_buffer('window', window)
        self.data_range = data_range
        self.use_padding = use_padding

    @torch.jit.script_method
    def forward(self, X, Y):
        r = ssim(X, Y, window=self.window, data_range=self.data_range, use_padding=self.use_padding)
        return r[0]


class MS_SSIM(torch.jit.ScriptModule):
    __constants__ = ['data_range', 'use_padding', 'eps']

    def __init__(self, window_size=11, window_sigma=1.5, data_range=255., channel=3, use_padding=False, weights=None, levels=None, eps=1e-8):
        '''
        Class for ms-ssim.
        :param window_size: the size of the Gaussian kernel
        :param window_sigma: sigma of the normal distribution
        :param data_range: value range of input images (usually 1.0 or 255)
        :param channel: input channels
        :param use_padding: pad the image before conv
        :param weights: weights for the different levels (default [0.0448, 0.2856, 0.3001, 0.2363, 0.1333])
        :param levels: number of downsampling levels
        :param eps: used to avoid NaN gradients (when c = a ** b and a is 0, c.backward() makes a.grad inf)
        '''
        super().__init__()
        assert window_size % 2 == 1, 'Window size must be odd.'
        self.data_range = data_range
        self.use_padding = use_padding
        self.eps = eps

        window = create_window(window_size, window_sigma, channel)
        self.register_buffer('window', window)

        if weights is None:
            weights = [0.0448, 0.2856, 0.3001, 0.2363, 0.1333]
        weights = torch.tensor(weights, dtype=torch.float)

        if levels is not None:
            weights = weights[:levels]
            weights = weights / weights.sum()

        self.register_buffer('weights', weights)

    @torch.jit.script_method
    def forward(self, X, Y):
        return ms_ssim(X, Y, window=self.window, data_range=self.data_range, weights=self.weights,
                       use_padding=self.use_padding, eps=self.eps)
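
# Usage sketch (illustrative): both modules expect float NCHW batches and
# reduce to one score per image; data_range must match the input scale.
#
#   crit = SSIMCriteria(data_range=1., channel=3)
#   x = torch.rand(1, 3, 64, 64)
#   s = crit(x, x.clone())     # -> tensor([1.]) up to float error
#
#   msssim = MS_SSIM(data_range=1., channel=3, levels=3)
#   m = msssim(x, x.clone())   # -> tensor of shape (1,)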


def img_alpha_blending(
    drawables: List[Union[np.ndarray, dict]],
    xyxy=None,
    output_type='numpy',
    final_size=None,
    max_depth_val=255,
    premultiplied=True,
):
    '''
    Alpha-composite `drawables` onto one canvas. Each drawable is an RGB/RGBA
    array, or a dict with 'img' and optionally 'xyxy' (placement box) and
    'depth' (per-pixel depth; smaller values win where alphas overlap).
    final_size: (h, w)
    '''

    if isinstance(drawables, (np.ndarray, dict)):
        drawables = [drawables]

    # infer final scene size
    if xyxy is not None:
        final_size = [xyxy[3] - xyxy[1], xyxy[2] - xyxy[0]]
        x1, y1, x2, y2 = xyxy
    elif final_size is None:
        d = drawables[0]
        if isinstance(d, dict):
            d = d['img']
        final_size = d.shape[:2]

    final_rgb = np.zeros((final_size[0], final_size[1], 3), dtype=np.float32)
    final_alpha = np.zeros_like(final_rgb[..., [0]])
    final_depth = None

    for drawable_img in drawables:
        dxyxy = None
        depth = None
        if isinstance(drawable_img, dict):
            depth = drawable_img.get('depth', None)
            if depth is not None:
                if depth.ndim == 2:
                    depth = depth[..., None]
                if final_depth is None:
                    final_depth = np.full_like(final_alpha, fill_value=max_depth_val)
            if 'xyxy' in drawable_img:
                dxyxy = drawable_img['xyxy']
                dx1, dy1, dx2, dy2 = dxyxy
            drawable_img = drawable_img['img']
        if dxyxy is not None:
            # clip drawables that stick out past the top-left canvas border
            if dx1 < 0:
                drawable_img = drawable_img[:, -dx1:]
                if depth is not None:
                    depth = depth[:, -dx1:]
                dx1 = 0
            if dy1 < 0:
                drawable_img = drawable_img[-dy1:]
                if depth is not None:
                    depth = depth[-dy1:]
                dy1 = 0

        if drawable_img.ndim == 3 and drawable_img.shape[-1] == 3:
            drawable_alpha = np.ones_like(drawable_img[..., [-1]])
        else:
            drawable_alpha = drawable_img[..., [-1]] / 255

        drawable_img = drawable_img[..., :3]

        if xyxy is not None:
            if dxyxy is None:
                drawable_img = drawable_img[y1: y2, x1: x2]
                drawable_alpha = drawable_alpha[y1: y2, x1: x2]
            else:
                intersection = bbox_intersection(xyxy, dxyxy)
                if intersection is None:
                    continue
                ix1, iy1, ix2, iy2 = intersection
                drawable_alpha = drawable_alpha[iy1 - dy1: iy2 - dy1, ix1 - dx1: ix2 - dx1]
                final_alpha[iy1 - y1: iy2 - y1, ix1 - x1: ix2 - x1] += drawable_alpha
                drawable_img = drawable_img[iy1 - dy1: iy2 - dy1, ix1 - dx1: ix2 - dx1]
                final_rgb[iy1 - y1: iy2 - y1, ix1 - x1: ix2 - x1] = final_rgb[iy1 - y1: iy2 - y1, ix1 - x1: ix2 - x1] * (1 - drawable_alpha) + drawable_img
                continue

        if dxyxy is None:
            if depth is not None:
                update_mask = (final_depth > depth).astype(np.uint8)
                final_depth = update_mask * depth + (1 - update_mask) * final_depth
                final_rgb = update_mask * (final_rgb * (1 - drawable_alpha) + drawable_img) + \
                    (1 - update_mask) * (drawable_img * (1 - final_alpha) + final_rgb)
                final_alpha = np.clip(final_alpha + drawable_alpha, 0, 1)
            else:
                final_alpha += drawable_alpha
                final_alpha = np.clip(final_alpha, 0, 1)
                if not premultiplied:
                    drawable_img = drawable_img * drawable_alpha
                final_rgb = final_rgb * (1 - drawable_alpha) + drawable_img
        else:
            if depth is not None:
                update_mask = (final_depth[dy1: dy2, dx1: dx2] > depth).astype(np.uint8)
                update_mask = update_mask * (drawable_alpha > 0.1)
                final_depth[dy1: dy2, dx1: dx2] = update_mask * depth + (1 - update_mask) * final_depth[dy1: dy2, dx1: dx2]
                final_rgb[dy1: dy2, dx1: dx2] = update_mask * (final_rgb[dy1: dy2, dx1: dx2] * (1 - drawable_alpha) + drawable_img) + \
                    (1 - update_mask) * (drawable_img * (1 - final_alpha[dy1: dy2, dx1: dx2]) + final_rgb[dy1: dy2, dx1: dx2])
                final_alpha[dy1: dy2, dx1: dx2] = np.clip(final_alpha[dy1: dy2, dx1: dx2] + drawable_alpha, 0, 1)
            else:
                final_alpha[dy1: dy2, dx1: dx2] += drawable_alpha
                final_alpha = np.clip(final_alpha, 0, 1)
                final_rgb[dy1: dy2, dx1: dx2] = final_rgb[dy1: dy2, dx1: dx2] * (1 - drawable_alpha) + drawable_img

    final_alpha = np.clip(final_alpha, 0, 1) * 255
    final = np.concatenate([final_rgb, final_alpha], axis=2)
    final = np.clip(final, 0, 255).astype(np.uint8)

    output_type = output_type.lower()
    if output_type == 'pil':
        final = Image.fromarray(final)
    elif output_type == 'dict':
        final = {
            'img': final
        }
        if final_depth is not None:
            final['depth'] = final_depth

    return final
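
# Usage sketch (illustrative): paste an opaque 32x32 red RGBA patch onto a
# white 128x128 canvas; dict entries can carry 'xyxy' (and optionally 'depth').
#
#   bg = np.full((128, 128, 3), 255, dtype=np.uint8)
#   fg = np.zeros((32, 32, 4), dtype=np.uint8)
#   fg[..., 0] = fg[..., 3] = 255
#   out = img_alpha_blending([bg, {'img': fg, 'xyxy': [16, 16, 48, 48]}])
#   # out: (128, 128, 4) uint8 with a red square at [16:48, 16:48]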


def rgba_to_rgb_fixbg(img: Union[np.ndarray, Image.Image], background_color=255):
    if isinstance(img, Image.Image):
        img = np.array(img)
    assert img.ndim == 3
    if img.shape[-1] == 3:
        return img
    if isinstance(background_color, int):
        bg = np.full_like(img[..., :3], fill_value=background_color)
    else:
        background_color = np.array(background_color)[:3].astype(np.uint8)
        bg = np.full_like(img[..., :3], fill_value=255)
        bg[..., :3] = background_color
    return img_alpha_blending([bg, img])[..., :3].copy()
visualize.py
ADDED
@@ -0,0 +1,110 @@
import os.path as osp
import os
import argparse

import numpy as np
import cv2
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from PIL import Image

from vis_utils import *


if __name__ == '__main__':

    parser = argparse.ArgumentParser()
    parser.add_argument('--saved', type=str, default=None)
    parser.add_argument('--srcp', type=str)

    args = parser.parse_args()
    srcp = args.srcp
    saved = args.saved

    if saved is None:
        saved = './'

    os.makedirs(saved, exist_ok=True)

    seed_everything(0)

    # for srcp in tqdm(load_exec_list(exec_list)):
    if osp.isfile(srcp):
        srcp = osp.dirname(srcp)
    try:
        fullpage, infos, part_dict_list = load_parts(srcp)
    except Exception as e:
        print(f'failed to load {srcp}:')
        print(e)
        raise

    # optim_before = img_alpha_blending(part_dict_list, final_size=(1024, 1024))
    optim_depth(part_dict_list, fullpage)

    n_components = len(part_dict_list)

    colors = []
    tag_list = []
    for ii in range(len(part_dict_list)):
        pd = part_dict_list[ii]
        depth = pd['depth']
        h, w = depth.shape[:2]
        pd['depth_median'] = np.median(depth[pd['mask']])
        tag_list.append(pd['tag'])
        color = get_color(VALID_BODY_PARTS_V2.index(pd['tag']))
        alpha = pd['img'][..., 3]
        colors.append(color)
        # replace the part's RGB content with its flat tag color, keep its alpha
        pd['img'] = np.full((h, w, 4), (*color, 255))
        pd['img'][..., 3] = alpha
        # pd.pop('depth')

    # composite back-to-front: parts with larger median depth are drawn first
    part_dict_list.sort(key=lambda x: x['depth_median'], reverse=True)
    color_code = img_alpha_blending(part_dict_list, final_size=(1024, 1024))

    save_dir = osp.join(saved, osp.basename(osp.dirname(srcp)))
    os.makedirs(save_dir, exist_ok=True)
    savep = osp.join(save_dir, osp.basename(srcp)) + '.png'

    # overlay the color-coded parts on the source page at 80% opacity
    alpha = (color_code[..., [3]] / 255.) * 0.8
    blended = alpha * color_code[..., :3] + (1 - alpha) * fullpage[..., :3]
    result = np.round(blended).astype(np.uint8)

    # render the tag legend with matplotlib, sized to match the image
    colors = np.array(colors)
    colors = colors.astype(np.float32) / 255.
    px = 1 / plt.rcParams['figure.dpi']  # pixel in inches
    fig = plt.figure(figsize=(result.shape[1] * px, result.shape[0] * px), facecolor=[0, 0, 0, 0])

    fnt_sz = int(5 * result.shape[0] / 256)
    plt.rcParams['legend.fontsize'] = fnt_sz
    lw = 5 * result.shape[0] / 256
    lines = [Line2D([0], [0], color=colors[i], lw=lw)
             for i in range(n_components)]
    # c_labels = [all_labels[i] for i in all_labels]
    plt.legend(lines,
               tag_list,
               mode="expand",
               fancybox=False,
               edgecolor="black",
               # frameon=False,
               shadow=False,
               framealpha=0.)

    plt.tight_layout(pad=0, w_pad=0, h_pad=0)
    plt.axis('off')
    fig.canvas.draw()
    data = np.frombuffer(fig.canvas.buffer_rgba(), dtype=np.uint8)
    plt.close(fig=fig)
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (4,))
    # crop the legend figure to its non-transparent region
    dx, dy, dw, dh = cv2.boundingRect(cv2.findNonZero(data[..., 3]))

    data = rgba_to_rgb_fixbg(data[:, dx: dx + dw])
    data = cv2.copyMakeBorder(data, 0, 0, fnt_sz, fnt_sz, borderType=cv2.BORDER_CONSTANT, value=(255, 255, 255))

    result = np.hstack((result, data))
    Image.fromarray(result).save(savep)

    print(f'result saved to {savep}')
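
A minimal invocation sketch (paths are placeholders): the script expects a part
directory containing src_img.png, info.json, and per-tag <tag>.png /
<tag>_depth.png files, as read by load_parts:

    python visualize.py --srcp /path/to/part_dir --saved ./out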