import math
import cv2
import PIL
import torch
from PIL.Image import Image
from typing import Union, Tuple, List, Optional
import numpy as np
import supervision as sv
from sklearn.decomposition import PCA
# def add_points_tag(img: Union[Image, np.ndarray],
# point_labels: Union[List[int], np.ndarray] = None,
# point_coords: Union[List[List[int]], np.ndarray] = None,
# pil: bool = False):
# if point_labels is None or point_coords is None or \
# not isinstance(point_labels, (List, np.ndarray)) or \
# not isinstance(point_coords, (List, np.ndarray)):
# return img
# if len(point_labels) != len(point_coords):
# print('length of point_label and point_coordinate must be same!')
# return img
# if isinstance(img, Image):
# img = np.uint8(img)
# start_angle = 40
# x = 8
# y = 2
# def get_point(angle, d, base):
# angle = angle / 180.0 * math.pi
# _x, _y = math.cos(angle) * d, math.sin(angle) * d
# return [base[0] + _x, base[1] - _y]
# # assert len(point_labels) == len(point_coords), ''
# for i in range(len(point_labels)):
# points = []
# for j in range(5):
# _x, _y = math.cos(start_angle), math.sin(start_angle)
# points.append(get_point(start_angle, x, point_coords[i]))
# start_angle -= 36
# points.append(get_point(start_angle, y, point_coords[i]))
# start_angle -= 36
# points = np.array([points], np.int32)
# color = (255, 0, 0) if point_labels[i] == 0 else (0, 255, 0)
# cv2.fillPoly(img, points, color, cv2.LINE_AA)
# if pil:
# img = PIL.Image.fromarray(img)
# return img
def add_points_tag(img: Union[Image, np.ndarray],
                   point_labels: Union[List[int], np.ndarray] = None,
                   point_coords: Union[List[List[int]], np.ndarray] = None,
                   pil: bool = False):
    if point_labels is None or point_coords is None or \
            not isinstance(point_labels, (list, np.ndarray)) or \
            not isinstance(point_coords, (list, np.ndarray)):
        return img
    if len(point_labels) != len(point_coords):
        print('point_labels and point_coords must have the same length!')
        return img
    if isinstance(img, Image):
        img = np.array(img)
    # accept plain Python lists as well as ndarrays
    point_coords = np.asarray(point_coords)
    h, w = img.shape[:2]
    # clamp each 8x8 marker window to the image borders
    x_start_list = np.where(point_coords[:, 0] - 4 > 0, point_coords[:, 0] - 4, 0)
    x_end_list = np.where(point_coords[:, 0] + 4 < w, point_coords[:, 0] + 4, w)
    y_start_list = np.where(point_coords[:, 1] - 4 > 0, point_coords[:, 1] - 4, 0)
    y_end_list = np.where(point_coords[:, 1] + 4 < h, point_coords[:, 1] + 4, h)
    for i in range(len(point_labels)):
        x_start, x_end = x_start_list[i], x_end_list[i]
        y_start, y_end = y_start_list[i], y_end_list[i]
        # green square for positive (label == 1) points, red square otherwise
        color = [0, 255, 0] if int(point_labels[i]) == 1 else [255, 0, 0]
        img[y_start:y_end, x_start:x_end, :] = color
    if pil:
        img = PIL.Image.fromarray(img)
    return img
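
# Example usage (sketch): mark click prompts on an image loaded with PIL.
# The file name and coordinates below are illustrative, not part of this module.
#
#   image = PIL.Image.open('example.jpg')
#   tagged = add_points_tag(image,
#                           point_labels=np.array([1, 0]),      # 1 = positive, 0 = negative
#                           point_coords=np.array([[50, 60], [120, 80]]),
#                           pil=True)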
def add_boxes_tag(img: Union[Image, np.ndarray],
                  boxes: Union[List[List[int]], np.ndarray] = None,
                  pil: bool = False):
    if boxes is None or not isinstance(boxes, (list, np.ndarray)):
        return img
    if isinstance(img, Image):
        img = np.array(img)
    thickness = 2
    for box in boxes:
        # cv2.rectangle expects plain int corner coordinates
        x1, y1, x2, y2 = (int(v) for v in box[:4])
        img = cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), thickness)
    if pil:
        img = PIL.Image.fromarray(img)
    return img
def add_prompts_tag(img: Union[Image, np.ndarray],
point_labels: Union[List[int], np.ndarray] = None,
point_coords: Union[List[List[int]], np.ndarray] = None,
boxes: Union[List[List[int]], np.ndarray] = None,
pil: bool = False):
img = add_points_tag(img, point_labels, point_coords, pil=pil)
img = add_boxes_tag(img, boxes, pil=pil)
return img
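
# Example usage (sketch): overlay both point and box prompts in one call.
# Coordinates below are illustrative only.
#
#   overlaid = add_prompts_tag(image,
#                              point_labels=[1],
#                              point_coords=np.array([[64, 64]]),
#                              boxes=np.array([[10, 10, 120, 120]]),
#                              pil=True)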
def get_empty_detections():
    # Build a placeholder supervision Detections object with a dummy box, then
    # blank out its boxes so downstream code can treat "no detections" uniformly.
    detections = sv.Detections(xyxy=np.array([0, 0, 0, 0]).reshape(1, 4))
    detections.xyxy = None
    return detections
def pca_feature(feature: torch.Tensor, dim: int = 3, return_np: bool = True):
    # Project a (H, W, C) feature map down to `dim` channels with PCA.
    pca = PCA(n_components=dim)
    H, W, C = feature.shape
    # detach + reshape so non-contiguous or grad-tracking tensors also work
    feature = feature.detach().reshape(-1, C).cpu().numpy()
    feature = pca.fit_transform(feature)
    feature = torch.tensor(feature.reshape(H, W, dim))
    if return_np:
        return feature.numpy()
    else:
        return feature
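
# Example (sketch): reduce a random 64x64x256 feature map to 3 channels.
# The shapes here are arbitrary and only illustrate the expected (H, W, C) layout.
#
#   feat = torch.randn(64, 64, 256)
#   rgb_like = pca_feature(feat, dim=3)       # -> np.ndarray of shape (64, 64, 3)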
def visual_feature_rgb(feature: torch.Tensor, pil: bool = True):
    assert feature.ndim >= 3, 'feature must have at least 3 dims!'
    if feature.ndim == 4:
        feature = feature.squeeze(0)
    if feature.shape[-1] != 3:
        feature = pca_feature(feature, 3, False)
    # min-max normalize each spatial location across its channels to [0, 1];
    # the small epsilon guards against division by zero for constant features
    max_f, _ = feature.max(-1)
    min_f, _ = feature.min(-1)
    feature = (feature - min_f[..., None]) / (max_f[..., None] - min_f[..., None] + 1e-8)
    feature = np.uint8((feature * 255).cpu().numpy())
    if pil:
        return PIL.Image.fromarray(feature)
    else:
        return feature
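
# Example (sketch): render a feature map as an RGB image for quick inspection.
# The shape is arbitrary; any channel count works because channels are reduced
# to 3 via pca_feature first. The output path is hypothetical.
#
#   feat = torch.randn(64, 64, 256)
#   visual_feature_rgb(feat).save('feature_vis.png')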
def transform_coords(src_shape, des_shape, points=None, boxes=None):
    assert points is not None or boxes is not None, 'one of points and boxes must be given!'
    # shapes are (height, width); scale factors map src coordinates to des coordinates
    scale_h = des_shape[0] / src_shape[0]
    scale_w = des_shape[1] / src_shape[1]
    if points is not None:
        new_points = np.full_like(points, 0)
        new_points[:, 0] = points[:, 0] * scale_w
        new_points[:, 1] = points[:, 1] * scale_h
        new_points = new_points.astype(np.int64)
    else:
        new_points = None
    if boxes is not None:
        new_boxes = np.full_like(boxes, 0)
        new_boxes[:, 0] = boxes[:, 0] * scale_w
        new_boxes[:, 1] = boxes[:, 1] * scale_h
        new_boxes[:, 2] = boxes[:, 2] * scale_w
        new_boxes[:, 3] = boxes[:, 3] * scale_h
        new_boxes = new_boxes.astype(np.int64)
    else:
        new_boxes = None
    return new_points, new_boxes
def mask2greyimg(mask_list, pil=True):
    # convert binary masks (values in {0, 1}) to 8-bit greyscale images
    grey_img_list = []
    for mask in mask_list:
        grey_img = np.uint8(mask * 255)
        grey_img_list.append(PIL.Image.fromarray(grey_img) if pil else grey_img)
    return grey_img_list
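
# Example (sketch): turn a list of boolean masks into greyscale previews.
# The mask shapes and values are illustrative.
#
#   masks = [np.zeros((64, 64), dtype=bool), np.ones((64, 64), dtype=bool)]
#   previews = mask2greyimg(masks)            # list of PIL 'L'-mode images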
if __name__ == '__main__':
    src_shape = (100, 100)
    des_shape = (200, 200)
    points = np.array([[20, 20], [40, 40]])
    boxes = np.array([[10, 10, 20, 20]])
    new_points, new_boxes = transform_coords(src_shape, des_shape, points, boxes)
    print(new_points, new_boxes)
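    # Extra demo (sketch): exercise the prompt-drawing and PCA helpers on
    # synthetic data; the image size, coordinates, and feature shape here are
    # arbitrary and chosen only so the demo needs no external files.
    demo_img = np.zeros((100, 100, 3), dtype=np.uint8)
    tagged = add_prompts_tag(demo_img,
                             point_labels=np.array([1, 0]),
                             point_coords=points,
                             boxes=boxes,
                             pil=True)
    print('tagged image size:', tagged.size)
    demo_feat = torch.randn(32, 32, 16)
    print('pca feature shape:', pca_feature(demo_feat).shape)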