import time
from random import *
import random
import os
import cv2
import numpy as np
import queue
from concurrent.futures import ThreadPoolExecutor

# Recognized raster-image file extensions (lowercase, with leading dot).
imgexts = ['.jpg', '.jpeg', '.png', '.bmp']


class BoundThreadPoolExecutor(ThreadPoolExecutor):
    """ThreadPoolExecutor whose work queue is bounded.

    Replacing the executor's unbounded queue makes submit() block once
    `qsize` tasks are pending, providing back-pressure on producers.

    Fix: the old default qsize=None produced queue.Queue(None), whose
    full-check (`0 < maxsize`) raises TypeError in Python 3 on put().
    None is now coerced to 0 (unbounded), the queue.Queue convention.
    """

    def __init__(self, qsize: int = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._work_queue = queue.Queue(qsize if qsize is not None else 0)


def bound(x, minx, maxx):
    """Clamp x into [minx, maxx] (minx wins if the bounds are inverted)."""
    capped = maxx if x > maxx else x
    return minx if capped < minx else capped


def mat2img(mat):
    """Linearly rescale a numeric array into uint8 [0, 255].

    Fix: a constant input (max == min) previously divided by zero;
    such input now maps to an all-zeros image.
    """
    lo, hi = mat.min(), mat.max()
    if hi == lo:
        return np.zeros_like(mat, dtype=np.uint8)
    scaled = (mat - lo) * 255 / (hi - lo)
    return scaled.astype(np.uint8)


def bgr2gray(im):
    """Convert a 3-channel BGR image to single-channel grayscale."""
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    return gray


def gray2bgr(im):
    """Expand a single-channel grayscale image to 3-channel BGR."""
    bgr = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)
    return bgr


def vcat(li):
    """Stack a sequence of images/arrays vertically (rows appended)."""
    stacked = np.vstack(li)
    return stacked


def hcat(li):
    """Stack a sequence of images/arrays horizontally (columns appended)."""
    stacked = np.hstack(li)
    return stacked


def rect_height(r):
    """Height of rect r = (x1, y1, x2, y2)."""
    _, y1, _, y2 = r[0], r[1], r[2], r[3]
    return y2 - y1


def rect_width(r):
    """Width of rect r = (x1, y1, x2, y2)."""
    x1, _, x2, _ = r[0], r[1], r[2], r[3]
    return x2 - x1


def rect_wh(r):
    """(width, height) tuple of rect r = (x1, y1, x2, y2)."""
    w = r[2] - r[0]
    h = r[3] - r[1]
    return (w, h)


def rect_size(r):
    """[width, height] list of rect r = (x1, y1, x2, y2)."""
    w = r[2] - r[0]
    h = r[3] - r[1]
    return [w, h]


def rect_center(r):
    """[cx, cy] center of rect r = (x1, y1, x2, y2) (float division)."""
    cx = (r[0] + r[2]) / 2
    cy = (r[1] + r[3]) / 2
    return [cx, cy]


def imresize(img, k, inter=cv2.INTER_LINEAR):
    """Uniformly scale an image by factor k (size truncated to int)."""
    h, w = img.shape[0], img.shape[1]
    new_size = (int(w * k), int(h * k))
    return cv2.resize(img, new_size, interpolation=inter)


def imresize2(img, kw, kh):
    """Scale an image by independent width/height factors kw, kh."""
    h, w = img.shape[0], img.shape[1]
    new_size = (int(w * kw), int(h * kh))
    return cv2.resize(img, new_size)


def intrect(r):
    """Truncate all four rect coordinates to int, returning a tuple."""
    return tuple(int(v) for v in (r[0], r[1], r[2], r[3]))


def putText(img, text, rect, fontFace, fonth, color, linew):
    """Draw (possibly multi-line) `text` fitted inside `rect` on `img`.

    The whole block is first measured at scale 1 (widest line by summed
    line heights), then a uniform fontScale is chosen so it fits the
    rect; lines are drawn top-to-bottom with a gap of one third of a
    line height.

    NOTE(review): `fonth` is accepted but never used — confirm intent.
    """
    lines = text.split('\n')
    textSize = (0, 0)
    baseline = 0

    # Measure the block at scale 1: max width across lines, heights summed.
    for line in lines:
        textSize1, baseline1 = cv2.getTextSize(
            line, fontFace, 1, linew)
        textSize = (max(textSize[0], textSize1[0]),
                    textSize[1] + textSize1[1])
        baseline = max(baseline, baseline1)

    # Inter-line gap: one third of the last measured line's height.
    linegap = textSize1[1] // 3
    textSize = (textSize[0], textSize[1] + linegap * (len(lines) - 1))
    rectw = rect[2] - rect[0]
    recth = rect[3] - rect[1]
    # Scale so the measured block fits the rect; +1 avoids division by zero.
    fontScale = min(recth / (textSize[1] + 1), rectw / (textSize[0] + 1))
    x = rect[0] + 0  # (rectw-textSize[0])
    y = rect[1] + (recth - textSize[1]) + baseline + linew

    # write the text on the image
    for line in lines:
        textSize1, baseline = cv2.getTextSize(line, fontFace, fontScale, linew)
        cv2.putText(img, line, (int(x), int(y)), fontFace, fontScale, color, int(linew))
        y += textSize1[1] + linegap


def draw_line(img, line, color=(0, 0, 255), linew=1):
    """Draw the segment (x1, y1, x2, y2) on img and return img.

    cv2 requires point coordinates as tuples of ints, not lists.
    """
    x1, y1, x2, y2 = line
    p0 = (int(x1), int(y1))
    p1 = (int(x2), int(y2))
    cv2.line(img, p0, p1, color, linew)
    return img


def drawrect(img, r, color=(0, 0, 255), linew=0):
    """Draw rect r = (x1, y1, x2, y2) on img.

    linew < 1 selects an automatic width of ~2% of the rect's short
    side (at least 1 px).
    """
    if linew < 1:
        auto = int(min(r[2] - r[0], r[3] - r[1]) * 0.02 + 0.5)
        linew = max(auto, 1)
    top_left = (int(r[0]), int(r[1]))
    bottom_right = (int(r[2]), int(r[3]))
    cv2.rectangle(img, top_left, bottom_right, color, int(linew))
    return img


def drawrects(img, rs, color=(0, 0, 255), linew=0):
    """Draw every rect in rs on img (delegates to drawrect)."""
    for rect in rs:
        drawrect(img, rect, color, linew)
    return img


def drawpolys(img, polys, color=(0, 0, 255), wline=2):
    """Draw closed polygons on img and return it."""
    cv2.polylines(img, polys, True, color, wline)
    return img


def chdir(path):
    """Change the working directory, silently ignoring OSError
    (e.g. a missing directory)."""
    try:
        os.chdir(path)
    except OSError:
        pass


def mkdir(path):
    """Create `path` (with parents), silently ignoring OSError
    (most commonly 'directory already exists')."""
    try:
        os.makedirs(path)
    except OSError:
        pass


def imshow(name, im, ms_time=-2):
    """Show `im` in a resizable window `name`.

    When ms_time > -2, waitKey(ms_time) is called and its key code
    returned; otherwise 0 is returned without waiting.
    """
    cv2.namedWindow(name, 0)
    cv2.imshow(name, im)
    key = 0
    if ms_time > -2:
        key = cv2.waitKey(ms_time)
    return key


def xywh2xyxy(r):
    """Convert (x, y, w, h) to the (x1, y1, x2, y2) corner form."""
    x, y, w, h = r[0], r[1], r[2], r[3]
    return (x, y, x + w, y + h)


def imrect(img, r):
    """Crop rect r = (x1, y1, x2, y2) from img, clipped to the image."""
    h, w = img.shape[0], img.shape[1]
    x1 = int(max(0, r[0]))
    y1 = int(max(0, r[1]))
    x2 = int(min(w, r[2]))
    y2 = int(min(h, r[3]))
    return img[y1:y2, x1:x2]


def imrect_set(img, r, src):
    """Paste `src` into img at rect r (clipped to bounds); mutates img."""
    h, w = img.shape[0], img.shape[1]
    x1, y1 = int(max(0, r[0])), int(max(0, r[1]))
    x2, y2 = int(min(w, r[2])), int(min(h, r[3]))
    img[y1:y2, x1:x2] = src
    return img


def imrect_hw(img, r):
    """Crop rect given in (x, y, w, h) form — no bounds clipping."""
    x, y, w, h = r[0], r[1], r[2], r[3]
    return img[int(y):int(y + h), int(x):int(x + w)]


def extend_rect4(img_rgb, rc, l, t, r, b):
    """Grow rect rc by per-side fractions (l, t, r, b) of its own size.

    img_rgb may be None (no clipping), an (h, w) sequence, or an image
    array; when provided, the result is clipped to the image bounds.
    When rc is None, the full image rect is used as the base.
    """
    if img_rgb is None:
        x1, y1, x2, y2 = rc
        w, h = x2 - x1, y2 - y1
        return (int(x1 - l * w), int(y1 - t * h),
                int(x2 + r * w), int(y2 + b * h))

    # Accept an (h, w) sequence or an ndarray-like with .shape.
    if len(img_rgb) <= 3 or isinstance(img_rgb, list):
        img_h, img_w = img_rgb[0], img_rgb[1]
    else:
        img_h, img_w = img_rgb.shape[0], img_rgb.shape[1]

    x1, y1, x2, y2 = (0, 0, img_w, img_h) if rc is None else rc
    w, h = x2 - x1, y2 - y1
    return (int(max(0, x1 - l * w)),
            int(max(0, y1 - t * h)),
            int(min(img_w, x2 + r * w)),
            int(min(img_h, y2 + b * h)))


def extend_rect2(img, rc, kx, ky):
    """Grow rect rc symmetrically: kx per horizontal side, ky vertical."""
    return extend_rect4(img, rc, kx, ky, kx, ky)


def extend_rect1(img, r, k):
    """Grow rect r by the same fraction k on all four sides."""
    return extend_rect4(img, r, k, k, k, k)


def rect_mul(r, ss):
    """Scale every coordinate of r by ss, truncating to int."""
    return tuple(int(v * ss) for v in r)


def rects_mul(rr, ss):
    """Scale every rect in rr by ss (delegates to rect_mul)."""
    return [rect_mul(r, ss) for r in rr]


def rect_off(r, off):
    """Offset each coordinate of r by off (cycled), truncating to int."""
    return tuple(int(v + off[i % len(off)]) for i, v in enumerate(r))


def rects_off(rr, off):
    """Offset every rect in rr by off (cycled per coordinate)."""
    shifted = []
    for r in rr:
        shifted.append(
            tuple(int(v + off[i % len(off)]) for i, v in enumerate(r)))
    return shifted


def get_time_stamp(id=0):
    """Timestamp 'YYYYMMDD_HHMMSS_mmm' for now + `id` seconds."""
    now = time.time() + id
    head = time.strftime("%Y%m%d_%H%M%S", time.localtime(now))
    millis = (now - int(now)) * 1000
    return "%s_%03d" % (head, millis)


def get_date_stamp():
    """Current local date as 'YYYYMMDD'."""
    return time.strftime("%Y%m%d", time.localtime(time.time()))


def rotate_bound1(image, r, angle):
    """Rotate `image` by `angle` degrees about the center of rect r and
    return a crop of the rect's size.

    Fix: cv2.warpAffine requires an integral output size; the original
    passed the raw rect width/height, which raises for float rects.
    """
    w = r[2] - r[0]
    h = r[3] - r[1]
    (cx, cy) = ((r[2] + r[0]) / 2, (r[3] + r[1]) / 2)

    # Rotation about the rect center; cv2 angles are CCW-positive.
    M = cv2.getRotationMatrix2D((cx, cy), -angle, 1.0)

    # Output canvas keeps the rect's (integral) size.
    nW = int(w)
    nH = int(h)

    # Shift so the rect center lands at the output center.
    M[0, 2] += (nW / 2) - cx
    M[1, 2] += (nH / 2) - cy

    return cv2.warpAffine(image, M, (nW, nH))


#
class Resize1(object):
    """Callable that resizes any image to a fixed (w, h) output size."""

    def __init__(self, output_size):
        self.output_size = output_size

    def __call__(self, X):
        return cv2.resize(X, self.output_size)


# Clipping is very handy when switching between color spaces or when
# rescaling an image into [0, 1] or [0, 255].
# By default, if only one threshold is given, the minimum threshold is 0.

class Clip(object):
    """Clamp array values into [mini, maxi] in place.

    Clip(m) is shorthand for the range [0, m].
    """

    def __init__(self, mini, maxi=None):
        self.mini = 0 if maxi is None else mini
        self.maxi = mini if maxi is None else maxi

    def __call__(self, X):
        X[X < self.mini] = self.mini
        X[X > self.maxi] = self.maxi
        return X


# Min-max normalization


class Normalize(object):
    """Min-max scale an array into [0, 1] (optionally per-axis)."""

    def __init__(self, axis=None):
        self.axis = axis

    def __call__(self, X):
        lo = np.min(X, self.axis)
        hi = np.max(X, self.axis)
        return (X - lo) / (hi - lo)


# Standardization (z-score)
class Standardize(object):
    """Z-score an array: subtract the mean, divide by the std
    (optionally per-axis)."""

    def __init__(self, axis=None):
        self.axis = axis

    def __call__(self, X):
        mu = np.mean(X, self.axis)
        sigma = np.std(X, self.axis)
        return (X - mu) / sigma


# Random horizontal and vertical flips


class Flip(object):
    """Randomly flip an image along each of the first two axes (p=0.5).

    Fix: the original also flipped an undefined target `Y`, raising
    NameError whenever a flip actually fired.
    """

    def __call__(self, X):
        for axis in (0, 1):
            if np.random.rand(1) < 0.5:
                X = np.flip(X, axis)
        return X


# Random selection of the crop size


class Crop(object):
    """Randomly crop a sub-window whose size lies between the given
    (min, max) ratios of the image size.

    Fix: the original also sliced an undefined target `Y`, raising
    NameError on every call.
    """

    def __init__(self, min_size_ratio, max_size_ratio=(1, 1)):
        self.min_size_ratio = np.array(list(min_size_ratio))
        self.max_size_ratio = np.array(list(max_size_ratio))

    def __call__(self, X):
        size = np.array(X.shape[:2])
        mini = self.min_size_ratio * size
        maxi = self.max_size_ratio * size
        # random crop size
        h = np.random.randint(mini[0], maxi[0])
        w = np.random.randint(mini[1], maxi[1])
        # random placement
        shift_h = np.random.randint(0, size[0] - h)
        shift_w = np.random.randint(0, size[1] - w)
        return X[shift_h:shift_h + h, shift_w:shift_w + w]


# Custom convolution filter
class CustomFilter(object):
    """Convolve an image with a caller-supplied kernel."""

    def __init__(self, kernel):
        self.kernel = kernel

    def __call__(self, X):
        return cv2.filter2D(X, -1, self.kernel)


# Sharpening


class Sharpen(object):
    """Sharpen an image with a random strength up to `max_center`.

    Fix: `max_center` was never stored on the instance, so __call__
    crashed with AttributeError on `self.max_center`.
    """

    def __init__(self, max_center=4):
        self.max_center = max_center
        self.identity = np.array([[0, 0, 0],
                                  [0, 1, 0],
                                  [0, 0, 0]])
        # Normalized discrete Laplacian; scaled by a random strength.
        self.sharpen = np.array([[0, -1, 0],
                                 [-1, 4, -1],
                                 [0, -1, 0]]) / 4

    def __call__(self, X):
        sharp = self.sharpen * np.random.random() * self.max_center
        kernel = self.identity + sharp
        X = cv2.filter2D(X, -1, kernel)
        return X


# Gaussian blur
class GaussianBlur(object):
    """Blur with a random odd kernel up to max_kernel."""

    def __init__(self, max_kernel=(7, 7)):
        # Store half-sizes; sampling k*2+1 keeps the kernel odd.
        self.max_kernel = ((max_kernel[0] + 1) // 2, (max_kernel[1] + 1) // 2)

    def __call__(self, X):
        kw = np.random.randint(1, self.max_kernel[0]) * 2 + 1
        kh = np.random.randint(1, self.max_kernel[1]) * 2 + 1
        return cv2.GaussianBlur(X, (kw, kh), 0)


# Perspective transform


class Perspective(object):
    """Random 3D perspective warp combining translation, rotation,
    scaling and shearing, applied via a single homography.

    Fix: the original also warped an undefined target `Y`, raising
    NameError on every call; that dead line has been removed.
    """

    def __init__(self,
                 max_ratio_translation=(0.2, 0.2, 0),
                 max_rotation=(10, 10, 360),
                 max_scale=(0.1, 0.1, 0.2),
                 max_shearing=(15, 15, 5)):
        self.max_ratio_translation = np.array(max_ratio_translation)
        self.max_rotation = np.array(max_rotation)
        self.max_scale = np.array(max_scale)
        self.max_shearing = np.array(max_shearing)

    def __call__(self, X):
        # get the height and the width of the image
        h, w = X.shape[:2]
        max_translation = self.max_ratio_translation * np.array([w, h, 1])
        # sample a random value on each axis
        t_x, t_y, t_z = np.random.uniform(-1, 1, 3) * max_translation
        r_x, r_y, r_z = np.random.uniform(-1, 1, 3) * self.max_rotation
        sc_x, sc_y, sc_z = np.random.uniform(-1, 1, 3) * self.max_scale + 1
        sh_x, sh_y, sh_z = np.random.uniform(-1, 1, 3) * self.max_shearing

        # convert degree angles to rad
        theta_rx = np.deg2rad(r_x)
        theta_ry = np.deg2rad(r_y)
        theta_rz = np.deg2rad(r_z)
        theta_shx = np.deg2rad(sh_x)
        theta_shy = np.deg2rad(sh_y)
        theta_shz = np.deg2rad(sh_z)

        # compute the image diagonal and use it as the focal length
        diag = (h ** 2 + w ** 2) ** 0.5
        f = diag
        if np.sin(theta_rz) != 0:
            f /= 2 * np.sin(theta_rz)

        # cartesian -> projective (center the image at the origin)
        H_M = np.array([[1, 0, -w / 2],
                        [0, 1, -h / 2],
                        [0, 0, 1],
                        [0, 0, 1]])
        # projective -> cartesian
        Hp_M = np.array([[f, 0, w / 2, 0],
                         [0, f, h / 2, 0],
                         [0, 0, 1, 0]])

        # adjust the translation on z
        t_z = (f - t_z) / sc_z ** 2
        # translation matrix
        T_M = np.array([[1, 0, 0, t_x],
                        [0, 1, 0, t_y],
                        [0, 0, 1, t_z],
                        [0, 0, 0, 1]])

        # rotation matrices about x, y and z
        sin_rx, cos_rx = np.sin(theta_rx), np.cos(theta_rx)
        sin_ry, cos_ry = np.sin(theta_ry), np.cos(theta_ry)
        sin_rz, cos_rz = np.sin(theta_rz), np.cos(theta_rz)
        R_Mx = np.array([[1, 0, 0, 0],
                         [0, cos_rx, -sin_rx, 0],
                         [0, sin_rx, cos_rx, 0],
                         [0, 0, 0, 1]])
        R_My = np.array([[cos_ry, 0, -sin_ry, 0],
                         [0, 1, 0, 0],
                         [sin_ry, 0, cos_ry, 0],
                         [0, 0, 0, 1]])
        R_Mz = np.array([[cos_rz, -sin_rz, 0, 0],
                         [sin_rz, cos_rz, 0, 0],
                         [0, 0, 1, 0],
                         [0, 0, 0, 1]])
        # full rotation matrix
        R_M = np.dot(np.dot(R_Mx, R_My), R_Mz)

        # scaling matrix
        Sc_M = np.array([[sc_x, 0, 0, 0],
                         [0, sc_y, 0, 0],
                         [0, 0, sc_z, 0],
                         [0, 0, 0, 1]])

        # shearing matrices about x, y and z
        tan_shx = np.tan(theta_shx)
        tan_shy = np.tan(theta_shy)
        tan_shz = np.tan(theta_shz)
        Sh_Mx = np.array([[1, 0, 0, 0],
                          [tan_shy, 1, 0, 0],
                          [tan_shz, 0, 1, 0],
                          [0, 0, 0, 1]])
        Sh_My = np.array([[1, tan_shx, 0, 0],
                          [0, 1, 0, 0],
                          [0, tan_shz, 1, 0],
                          [0, 0, 0, 1]])
        Sh_Mz = np.array([[1, 0, tan_shx, 0],
                          [0, 1, tan_shy, 0],
                          [0, 0, 1, 0],
                          [0, 0, 0, 1]])
        # full shearing matrix
        Sh_M = np.dot(np.dot(Sh_Mx, Sh_My), Sh_Mz)

        Identity = np.array([[1, 0, 0, 0],
                             [0, 1, 0, 0],
                             [0, 0, 1, 0],
                             [0, 0, 0, 1]])

        # compose: shear, rotate, scale, translate, then project
        M = Identity
        M = np.dot(Sh_M, M)
        M = np.dot(R_M, M)
        M = np.dot(Sc_M, M)
        M = np.dot(T_M, M)
        M = np.dot(Hp_M, np.dot(M, H_M))
        # apply the transformation
        X = cv2.warpPerspective(X, M, (w, h))
        return X


# ​ 抠图


class Cutout(object):
    """Erase up to `max_crop` random rectangular patches, filling them
    with `replacement` (optionally on a single random channel).

    Fix: with crop_target=True (the default) the original wrote into an
    undefined target `Y`, raising NameError on every call. The parameter
    is kept for interface compatibility but is now a documented no-op.
    """

    def __init__(self,
                 min_size_ratio,
                 max_size_ratio,
                 channel_wise=False,
                 crop_target=True,
                 max_crop=10,
                 replacement=0):
        self.min_size_ratio = np.array(list(min_size_ratio))
        self.max_size_ratio = np.array(list(max_size_ratio))
        self.channel_wise = channel_wise
        self.crop_target = crop_target  # kept for compatibility; unused
        self.max_crop = max_crop
        self.replacement = replacement

    def __call__(self, X):
        size = np.array(X.shape[:2])
        mini = self.min_size_ratio * size
        maxi = self.max_size_ratio * size
        for _ in range(self.max_crop):
            # random patch size
            h = np.random.randint(mini[0], maxi[0])
            w = np.random.randint(mini[1], maxi[1])
            # random placement
            shift_h = np.random.randint(0, size[0] - h)
            shift_w = np.random.randint(0, size[1] - w)
            if self.channel_wise:
                c = np.random.randint(0, X.shape[-1])
                X[shift_h:shift_h + h, shift_w:shift_w + w, c] = self.replacement
            else:
                X[shift_h:shift_h + h, shift_w:shift_w + w] = self.replacement
        return X


# Randomly change the color (of red/leaf regions)
class Leaf(object):
    """Randomly shift the hue of red regions in a BGR image.

    Red pixels are detected in HSV on both ends of the hue wheel
    (0-20 and 165-185), given a random hue offset, and blended back
    into the original image through the red mask.
    """

    def __init__(self):
        pass

    def __call__(self, X):
        blur = cv2.GaussianBlur(X, (7, 7), 0)
        hsv_blur = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
        # lower mask (0-10)
        lower_red = np.array([0, 130, 130])
        upper_red = np.array([20, 255, 255])
        mask_0 = cv2.inRange(hsv_blur, lower_red, upper_red)

        # upper mask (170-180)
        lower_red = np.array([165, 130, 130])
        upper_red = np.array([185, 255, 255])
        mask_1 = cv2.inRange(hsv_blur, lower_red, upper_red)
        # fold the upper-red hues down so both masks share one hue base
        hsv_blur[np.where(mask_1)] = hsv_blur[np.where(
            mask_1)] - np.array([165, 0, 0])

        mask = mask_0 + mask_1
        # change color
        turn_color = np.random.randint(0, 255)
        hsv_blur[np.where(mask)] = hsv_blur[np.where(mask)] + \
                                   np.array([turn_color, 0, 0])
        X_blur = cv2.cvtColor(hsv_blur, cv2.COLOR_HSV2BGR)
        X[np.where(mask)] = X_blur[np.where(mask)]
        return X


# Brightness
class Brightness(object):
    """Add a random integer brightness offset drawn from
    range_brightness. No clipping is performed (preserved behavior)."""

    def __init__(self, range_brightness=(-50, 50)):
        self.range_brightness = range_brightness

    def __call__(self, X):
        offset = np.random.randint(*self.range_brightness)
        return X + offset


# Contrast


class Contrast(object):
    """Apply a random contrast change: X * (c/127 + 1) - c with c drawn
    from range_contrast. No clipping is performed (preserved behavior)."""

    def __init__(self, range_contrast=(-50, 50)):
        self.range_contrast = range_contrast

    def __call__(self, X):
        c = np.random.randint(*self.range_contrast)
        return X * (c / 127 + 1) - c


# Noise


class UniformNoise(object):
    """Add element-wise uniform noise drawn from [low, high)."""

    def __init__(self, low=-50, high=50):
        self.low = low
        self.high = high

    def __call__(self, X):
        return X + np.random.uniform(self.low, self.high, X.shape)


# Gaussian noise


class GaussianNoise(object):
    """Add Gaussian noise.

    The noise is cast to uint8 before being added, so negative samples
    wrap around to large values — preserved original behavior.
    """

    def __init__(self, center=0, std=50):
        self.center = center
        self.std = std

    def __call__(self, X):
        noise = np.random.normal(
            self.center, self.std, X.shape).astype(np.uint8)
        return X + noise


def _extend_rect1(r, k):
    x1, y1, x2, y2 = r
    x, y = (x2 - x1) * k, (y2 - y1) * k
    x1 = int(x1 - x)
    y1 = int(y1 - y)
    x2 = int(x2 + x)
    y2 = int(y2 + y)
    return (x1, y1, x2, y2)


# Vignetting toward black or toward white


class Vignetting(object):
    """Apply a radial vignette, darkening toward the borders (or
    brightening, randomly, when random_sign is True).

    NOTE(review): min_dist[0] is derived from the height but applied to
    the x axis (and vice versa) — possibly an axis swap; behavior is
    preserved exactly as-is.
    """

    def __init__(self,
                 ratio_min_dist=0.2,
                 range_vignette=(0.2, 0.8),
                 random_sign=False):
        self.ratio_min_dist = ratio_min_dist
        self.range_vignette = np.array(range_vignette)
        self.random_sign = random_sign

    def __call__(self, X):
        h, w = X.shape[:2]
        # radius (per axis) inside which the vignette stays at zero
        min_dist = np.array([h, w]) / 2 * \
                   np.random.random() * self.ratio_min_dist

        # absolute distance of every pixel from the center, per axis
        x, y = np.meshgrid(np.linspace(-w / 2, w / 2, w),
                           np.linspace(-h / 2, h / 2, h))
        x, y = np.abs(x), np.abs(y)

        # ramp each axis from 0 (inside min_dist) to 1 (at the border)
        x = (x - min_dist[0]) / (np.max(x) - min_dist[0])
        x = np.clip(x, 0, 1)
        y = (y - min_dist[1]) / (np.max(y) - min_dist[1])
        y = np.clip(y, 0, 1)

        # random overall intensity, replicated across 3 channels
        vignette = (x + y) / 2 * np.random.uniform(*self.range_vignette)
        vignette = np.tile(vignette[..., None], [1, 1, 3])

        sign = 2 * (np.random.random() < 0.5) * (self.random_sign) - 1
        return X * (1 + sign * vignette)


# Lens distortion


class LensDistortion(object):
    """Apply a random lens distortion via undistort remapping."""

    def __init__(self, d_coef=(0.15, 0.15, 0.1, 0.1, 0.05)):
        self.d_coef = np.array(d_coef)

    def __call__(self, X):
        h, w = X.shape[:2]

        # use the image diagonal as the focal length
        f = (h ** 2 + w ** 2) ** 0.5

        # pinhole camera matrix centered on the image
        K = np.array([[f, 0, w / 2],
                      [0, f, h / 2],
                      [0, 0, 1]])

        # random magnitude and random sign per distortion coefficient
        d_coef = self.d_coef * np.random.random(5)
        d_coef = d_coef * (2 * (np.random.random(5) < 0.5) - 1)

        # new camera matrix for the distorted parameters
        M, _ = cv2.getOptimalNewCameraMatrix(K, d_coef, (w, h), 0)

        # look-up tables for remapping, then remap
        remap = cv2.initUndistortRectifyMap(K, d_coef, None, M, (w, h), 5)
        return cv2.remap(X, *remap, cv2.INTER_LINEAR)


def imarg(img, r, cnt, ops, extend=0, maxoff=0.1, maxangle=10):
    """Generate `cnt` augmented crops of rect `r` from `img`.

    The first crop is the (extended, clipped) rect itself; each later
    crop is randomly shifted by up to `maxoff` of the rect size,
    rotated by up to ±`maxangle` degrees, and passed through every op
    in `ops` independently with probability 0.5.

    Returns a list of image crops.
    """
    out = []
    # maximum pixel shift on each axis
    kx = int((r[2] - r[0]) * maxoff)
    ky = int((r[3] - r[1]) * maxoff)
    # rect grown by `extend` on every side, clipped to the image
    r3 = extend_rect1(img, r, extend)
    for i in range(cnt):
        r4 = r3
        if i > 0:
            # random translation of the crop window
            offx = randint(-kx, kx)
            offy = randint(-ky, ky)
            r4 = (r3[0] + offx, r3[1] + offy, r3[2] + offx, r3[3] + offy)
            angle = 2 * maxangle * random.random() - maxangle
            img_r = rotate_bound1(img, r4, angle)

            # each op fires independently with probability 0.5
            for op in ops:
                if random.random() > 0.5:
                    img_r = op(img_r)
        else:
            img_r = imrect(img, r4)
        # imshow('adsfadsf', img_r, -1)
        out.append(img_r)

    return out


def cut_rect(r, rows, cols):
    """Split rect r into a rows x cols grid of (x1, y1, x2, y2) boxes,
    using integer division so the tiles exactly cover the rect."""
    h = r[3] - r[1]
    w = r[2] - r[0]
    boxes = []
    for j in range(rows):
        y0 = r[1] + (j * h) // rows
        y1 = r[1] + ((j + 1) * h) // rows
        for i in range(cols):
            x0 = r[0] + (i * w) // cols
            x1 = r[0] + ((i + 1) * w) // cols
            boxes.append((x0, y0, x1, y1))
    return boxes


def mean(a):
    """Arithmetic mean of `a` (delegates to numpy)."""
    return np.mean(a)


def least_squares(x, y=None):
    """Least-squares line fit.

    Accepts parallel x/y sequences, or (when `y` is omitted) a single
    sequence of (x, y) pairs. Returns (k1, k2, b) describing the line
    k1*x - k2*y + b = 0, i.e. slope = k1 / k2.
    """
    if y is None:
        pts = x
        x = [p[0] for p in pts]
        y = [p[1] for p in pts]

    mx = np.mean(x)
    my = np.mean(y)
    num = 0
    den = 0
    for xi, yi in zip(x, y):
        dx = xi - mx
        num += dx * (yi - my)
        den += dx * dx
    return num, den, den * my - num * mx


import math


def get_point_line_distance2(point, line):
    """Distance from `point` to a line given as (k1, k2, b),
    i.e. the locus of k1*x - k2*y + b = 0."""
    k1, k2, b = line[0], line[1], line[2]
    numerator = math.fabs(k1 * point[0] - k2 * point[1] + b)
    return numerator / math.sqrt(k1 * k1 + k2 * k2)


def get_point_line_distance(point, linept0, linept1):
    """Distance from `point` to the infinite line through linept0 and
    linept1."""
    px, py = point[0], point[1]
    sx, sy = linept0[0], linept0[1]
    ex, ey = linept1[0], linept1[1]
    # vertical line: the distance is the horizontal offset
    if ex - sx == 0:
        return math.fabs(px - sx)
    # horizontal line: the distance is the vertical offset
    if ey - sy == 0:
        return math.fabs(py - sy)
    # express the line as k1*x - k2*y + b = 0
    k1 = ey - sy
    k2 = ex - sx
    b = k2 * sy - k1 * sx
    return math.fabs(k1 * px - k2 * py + b) / \
           math.pow(k1 * k1 + k2 * k2, 0.5)


def cv_imread(filePath, flags=cv2.IMREAD_COLOR):
    """imread replacement that tolerates non-ASCII paths by decoding
    from a byte buffer instead of opening the path directly."""
    buf = np.fromfile(filePath, dtype=np.uint8)
    return cv2.imdecode(buf, int(flags))


def cv_imwrite(fn, src, quality=70):
    """imwrite replacement that tolerates non-ASCII paths.

    Unknown extensions are encoded as JPEG while the original file
    name is kept as-is (preserved behavior). The quality parameter
    only affects JPEG output.
    """
    _, ext = os.path.splitext(fn)
    if ext.lower() not in ('.png', '.bmp', '.jpg', '.jpeg'):
        ext = '.jpg'
    encoded = cv2.imencode(ext, src, params=[cv2.IMWRITE_JPEG_QUALITY, quality])[1]
    encoded.tofile(fn)


def listdirdir(dir, full=False):
    """List immediate subdirectories of `dir` ([] when dir is missing);
    `full` prefixes the directory path."""
    if not os.path.isdir(dir):
        return []
    subs = [x for x in os.listdir(dir) if os.path.isdir(f'{dir}/{x}')]
    return [f'{dir}/{x}' for x in subs] if full else subs


def listdir(dir, filter=None, full=False):
    """List entries of `dir`, optionally filtered by extension.

    `filter` is a space-separated string or a list of extensions
    (matched case-insensitively, dot included). `full` prefixes the
    directory path. Returns [] when `dir` does not exist.
    """
    if not os.path.isdir(dir):
        return []

    entries = os.listdir(dir)
    if filter is None:
        return entries

    if isinstance(filter, str):
        filter = filter.split(' ')
    wanted = {e.lower() for e in filter}

    matched = []
    for name in entries:
        if os.path.splitext(name)[1].lower() in wanted:
            matched.append(f'{dir}/{name}' if full else name)
    return matched


def listdirsub(pa, exts=None, full_path=True):
    """Recursively list files under `pa`, optionally filtered by
    extension; any directory whose path contains '_mini' is skipped.
    With full_path=False, the `pa` prefix is stripped from results."""
    found = []
    for root, _, files in os.walk(pa):
        if '_mini' in root:
            continue
        for fn in files:
            _, ext = os.path.splitext(fn)
            if exts is None or ext.lower() in exts:
                found.append(os.path.join(root, fn))
    if not full_path:
        found = [fn.replace(pa, '') for fn in found]
    return found


def listimg(img_folder):
    """List image files (jpg/jpeg/png/bmp) directly inside a folder."""
    exts = ['.jpg', '.jpeg', '.png', '.bmp']
    return listdir(img_folder, exts)


def rect_inter(r1, r2):
    """Intersection rect and its area; ([0,0,0,0], 0) when disjoint."""
    left = max(r1[0], r2[0])
    top = max(r1[1], r2[1])
    right = min(r1[2], r2[2])
    bottom = min(r1[3], r2[3])

    area = max(right - left, 0) * max(bottom - top, 0)
    if area == 0:
        return [0, 0, 0, 0], 0
    return [left, top, right, bottom], area


def rect_iarea(r1, r2):
    """Area of the intersection of two (x1, y1, x2, y2) rects (0 if none)."""
    left = max(r1[0], r2[0])
    top = max(r1[1], r2[1])
    right = min(r1[2], r2[2])
    bottom = min(r1[3], r2[3])
    return max(right - left, 0) * max(bottom - top, 0)


def rect_iou(r1, r2):
    """Intersection-over-union of two (x1, y1, x2, y2) rects."""
    left = max(r1[0], r2[0])
    top = max(r1[1], r2[1])
    right = min(r1[2], r2[2])
    bottom = min(r1[3], r2[3])

    # individual areas
    area_a = (r1[2] - r1[0]) * (r1[3] - r1[1])
    area_b = (r2[2] - r2[0]) * (r2[3] - r2[1])

    # overlap area
    inter = max(right - left, 0) * max(bottom - top, 0)

    return inter / (area_a + area_b - inter)


def rect_iou_in(r1, r2):
    """Fraction of r2's area covered by r1 (intersection / area of r2)."""
    left = max(r1[0], r2[0])
    top = max(r1[1], r2[1])
    right = min(r1[2], r2[2])
    bottom = min(r1[3], r2[3])

    area_b = (r2[2] - r2[0]) * (r2[3] - r2[1])
    inter = max(right - left, 0) * max(bottom - top, 0)
    return inter / area_b


def rect_iominxy(r1, r2):
    """Per-axis overlap ratios relative to the smaller extent on each
    axis; denominators are floored at 1 to avoid division by zero."""
    left = max(r1[0], r2[0])
    top = max(r1[1], r2[1])
    right = min(r1[2], r2[2])
    bottom = min(r1[3], r2[3])

    min_w = min(r1[2] - r1[0], r2[2] - r2[0])
    min_h = min(r1[3] - r1[1], r2[3] - r2[1])

    ratio_x = max(right - left, 0) / max(min_w, 1)
    ratio_y = max(bottom - top, 0) / max(min_h, 1)
    return ratio_x, ratio_y


def rect_iomin(r1, r2):
    """Intersection area divided by the smaller of the two rect areas."""
    left = max(r1[0], r2[0])
    top = max(r1[1], r2[1])
    right = min(r1[2], r2[2])
    bottom = min(r1[3], r2[3])

    area_a = (r1[2] - r1[0]) * (r1[3] - r1[1])
    area_b = (r2[2] - r2[0]) * (r2[3] - r2[1])
    inter = max(right - left, 0) * max(bottom - top, 0)
    return inter / min(area_a, area_b)


def max_iou_with_rects(r, pos):
    """Largest IoU between rect r and any rect in pos (0 if pos empty)."""
    return max((rect_iou(r, other) for other in pos), default=0)


def rect2rbox(r):
    """Convert (x1, y1, x2, y2) to a rotated box [cx, cy, w, h, 0]."""
    cx = (r[0] + r[2]) / 2
    cy = (r[1] + r[3]) / 2
    return [cx, cy, r[2] - r[0], r[3] - r[1], 0]


def rect2pts(r):
    """Corner points of rect r, clockwise from the top-left."""
    x1, y1, x2, y2 = r
    return [[x1, y1], [x2, y1], [x2, y2], [x1, y2]]


def boxPoints(rbox):
    """Corner points of a rotated box (cx, cy, w, h, degree).

    Returns four [x, y] points rotated about the center, reordered so
    the corner with the smallest x + y comes first.
    """
    cx, cy, w, h, degree = rbox
    hw, hh = w / 2, h / 2
    corners = [(hw, hh), (-hw, hh), (-hw, -hh), (hw, -hh)]
    rad = math.radians(degree)
    s, c = math.sin(rad), math.cos(rad)

    pts = []
    best, best_sum = 0, 10000000
    for i, (x, y) in enumerate(corners):
        px = c * x - s * y + cx
        py = s * x + c * y + cy
        pts.append([px, py])
        if px + py < best_sum:
            best_sum = px + py
            best = i

    return pts[best:] + pts[:best]


def draw_pts(img, pts, color=(0, 0, 255), linew=0):
    """Draw a closed polygon through pts on img and return it."""
    poly = np.array(pts, np.int32).astype(int).reshape(-1, 1, 2)
    cv2.polylines(img, [poly], True, color, linew)
    return img


def draw_rbox(img, rbox, color=(0, 0, 255), linew=0):
    """Draw a rotated box on img.

    `rbox` may be (cx, cy, w, h, degree), an axis-aligned
    (x1, y1, x2, y2) rect, or a sequence of 4 corner points.
    linew < 1 selects an automatic width of ~2% of the short side.
    Returns (img, corner_points).

    Fix: in both 4-element branches `h`/`w` were never assigned, so the
    auto line-width path raised NameError; they are now derived from
    the points / unpacked from the converted rbox.
    """
    if len(rbox) == 5:
        cx, cy, w, h, degree = rbox
        pts = boxPoints(rbox)
    elif len(rbox) == 4:
        if isinstance(rbox[0], (list, tuple)) or isinstance(rbox, np.ndarray):
            pts = rbox
            xs = [p[0] for p in pts]
            ys = [p[1] for p in pts]
            w = max(xs) - min(xs)
            h = max(ys) - min(ys)
        else:
            rbox = rect2rbox(rbox)
            cx, cy, w, h, degree = rbox
            pts = boxPoints(rbox)

    if linew < 1:
        linew = int(min(h, w) * 0.02 + 0.5)
        linew = max(linew, 1)
    pts1 = np.array(pts, np.int32).astype(int).reshape(-1, 1, 2)
    cv2.polylines(img, [pts1], True, color, linew)
    return img, pts


def boundingRect(pts):
    """Axis-aligned [x1, y1, x2, y2] bounding rect of a point set."""
    if isinstance(pts, list):
        pts = np.array(pts).astype(np.float32).reshape([-1, 1, 2])
    pts = pts.astype(np.float32)
    x, y, w, h = cv2.boundingRect(pts)
    return [x, y, x + w, y + h]


def rbox2rect(rbox):
    """Axis-aligned bounding rect of a rotated box."""
    return boundingRect(boxPoints(rbox))


def rbox2mat(rbox, ext=1):
    """Affine matrix that maps a rotated box (optionally scaled by
    `ext`) onto an upright canvas of the box's size."""
    cx, cy, w, h, degree = rbox

    w *= ext
    h *= ext

    # rotation about the (integer) box center
    M = cv2.getRotationMatrix2D((int(cx), int(cy)), degree, 1.0)

    # shift so the box center lands at the canvas center
    M[0, 2] += (w / 2) - cx
    M[1, 2] += (h / 2) - cy
    return M


def imrbox(image, rbox, ext=1):
    """Crop a rotated box (optionally scaled by `ext`) as an upright
    image. Returns (cropped_image, affine_matrix)."""
    cx, cy, w, h, degree = rbox

    w *= ext
    h *= ext

    # rotation about the (integer) box center
    M = cv2.getRotationMatrix2D((int(cx), int(cy)), degree, 1.0)

    out_w, out_h = int(w), int(h)

    # shift so the box center lands at the output center
    M[0, 2] += (w / 2) - cx
    M[1, 2] += (h / 2) - cy
    return cv2.warpAffine(image, M, (out_w, out_h)), M


def rbox_off(rbox, off):
    """Translate a rotated box by (off[0], off[1])."""
    cx, cy, w, h, degree = rbox
    return [cx + off[0], cy + off[1], w, h, degree]


def rbox_norm(rbox):
    """Normalize a rotated box so w >= h, keeping degree in (-90, 90]."""
    cx, cy, w, h, degree = rbox
    if h > w:
        w, h = h, w
        degree += 90
        while degree > 90:
            degree -= 180
    return (cx, cy, w, h, degree)


def extend_rbox(rb, kw, kh):
    """Grow a rotated box's width/height by fractions kw and kh."""
    x, y, w, h, a = rb[:5]
    return [x, y, w * (1 + kw), h * (1 + kh), a]


def minAreaRect1(c):
    """cv2.minAreaRect of a contour, normalized so w >= h and the
    angle lies in (-90, 90]. Returns (cx, cy, w, h, degree)."""
    if isinstance(c, list):
        c = np.array(c).reshape([-1, 1, 2])
    if len(c.shape) == 2:
        c = c.reshape([-1, 1, 2])
    c = c.astype(np.float32)
    (cx, cy), (w, h), degree = cv2.minAreaRect(c)
    if h > w:
        w, h = h, w
        degree += 90
        while degree > 90:
            degree -= 180
    return (cx, cy, w, h, degree)


def get_angle(x1, y1, x2, y2):
    """Angle in degrees (atan2 convention) of vector (x1,y1)->(x2,y2)."""
    return math.degrees(math.atan2(y2 - y1, x2 - x1))


def minAreaRect(c):
    """Minimum-area rotated rect of a contour with the angle aligned
    (within 90 degrees) to the direction of the contour's first edge.

    Returns (cx, cy, w, h, degree) with w >= h.
    """
    if isinstance(c, list):
        c = np.array(c).reshape([-1, 1, 2])
    if len(c.shape) == 2:
        c = c.reshape([-1, 1, 2])
    c = c.astype(np.float32)
    rect = cv2.minAreaRect(c)
    cx = rect[0][0]
    cy = rect[0][1]
    w = rect[1][0]
    h = rect[1][1]
    degree = rect[2]
    # normalize so w >= h and degree is in (-90, 90]
    if h > w:
        h, w = w, h
        degree += 90
        while (degree > 90):
            degree -= 180
        pass
    # flip by 180 degrees if the angle disagrees with the first edge
    a1 = get_angle(c[0][0][0], c[0][0][1], c[1][0][0], c[1][0][1])
    t = abs(degree - a1)
    if t > abs(t - 180):
        degree += 180
    return (cx, cy, w, h, degree)


def process_bar(percent, start_str='', end_str='', total_length=0):
    """Render an in-place text progress bar to stdout (carriage-return update)."""
    filled = '▇' * int(percent * total_length)
    line = '\r' + start_str + \
           filled.ljust(total_length) + ' {:0>4.1f}%|'.format(percent * 100) + end_str
    print(line, end='', flush=True)


def get_last_modify_file(pa, filter, deffile='nofile'):
    """Return the path of the most recently modified file in `pa` passing
    `filter` (via this module's listdir helper), or `deffile` if none match."""
    names = listdir(pa, filter)
    if not names:
        return deffile
    names.sort(key=lambda fn: os.path.getmtime(os.path.join(pa, fn)))
    return os.path.join(pa, names[-1])


def adjust_pts_order(pts_2ds):
    """Sort rectangle corner points counter-clockwise around their centroid."""
    cen_x, cen_y = np.mean(pts_2ds, axis=0)
    # angle of each point around the centroid, in degrees
    angles = [np.arctan2(p[1] - cen_y, p[0] - cen_x) * 180 / np.pi
              for p in pts_2ds]
    ordered = sorted(zip(pts_2ds, angles), key=lambda pair: pair[1])
    return np.array([p for p, _ang in ordered])


def saveICDAR(txt_fn, shapes):
    """Write shapes to an ICDAR-format text file.

    Each shape is (polygon, label); every output line holds the polygon's
    integer coordinates followed by the label, comma-separated.
    """
    lines = []
    for item in shapes:
        poly, label = item[0], item[1]
        fields = [f'{int(p[0])},{int(p[1])}' for p in poly]
        fields.append(label)
        lines.append(','.join(fields) + '\n')
    # BUG FIX: the original left the file handle open; use a context manager
    with open(txt_fn, 'w', encoding='utf8') as f:
        f.writelines(lines)


def loadICDAR(txt_fn):
    """Read an ICDAR-format label file into [[poly, label], ...].

    poly is a 4x2 list of floats; label is everything after the 8th
    comma-separated field. Returns [] for a missing or unreadable file.
    """
    if not os.path.exists(txt_fn):
        return []
    try:
        # BUG FIX: the original left the file handle open
        with open(txt_fn, 'r', encoding='utf8') as f:
            lines = f.readlines()
    except Exception:
        return []

    shapes = []
    for line in lines:
        parts = line.strip().split(',')
        label = ','.join(parts[8:])
        # strip a possible UTF-8 BOM left on the first coordinate field
        parts = [p.strip('\ufeff').strip('\xef\xbb\xbf') for p in parts]
        poly = np.array(list(map(float, parts[:8])),
                        dtype=np.float32).reshape((-1, 2)).tolist()  # [4, 2]
        shapes.append([poly, label])

    return shapes


def loadICDAR_rbox(txt_fn):
    """Load an ICDAR label file and return each polygon as a rotated box
    (cx, cy, w, h, degree) via minAreaRect; labels are discarded."""
    rboxes = []
    for poly, _label in loadICDAR(txt_fn):
        pts = np.array(poly).astype("float32")
        rboxes.append(minAreaRect(pts))
    return rboxes


def is_new_then(src, out):
    """True when `src` exists and is newer than `out` (or `out` is missing)."""
    if not os.path.exists(src):
        return False
    if not os.path.exists(out):
        return True
    return os.path.getmtime(src) > os.path.getmtime(out)


def xml_dumps(bndboxs, fn, img):
    """Build a PascalVOC-style XML annotation string.

    bndboxs: [(rect, name), ...] with rect = [xmin, ymin, xmax, ymax];
    img supplies the image size (0x0 when None).
    NOTE(review): os.path.split returns (head, tail), so the directory part
    of `fn` lands in <filename> — confirm that is intended.
    """
    fn1, ext = os.path.split(fn)
    h, w = (img.shape[:2] if img is not None else (0, 0))
    parts = []
    for rect, name in bndboxs:
        xmin, ymin, xmax, ymax = rect
        parts.append(f'''
  <object>
    <type>bndbox</type>
    <name>{name}</name>
    <pose>Unspecified</pose>
    <truncated>0</truncated>
    <difficult>0</difficult>
    <bndbox>
      <xmin>{int(xmin)}</xmin>
      <ymin>{int(ymin)}</ymin>
      <xmax>{int(xmax)}</xmax>
      <ymax>{int(ymax)}</ymax>
    </bndbox>
  </object>
''')
    objs = ''.join(parts)
    return f'''
    <annotation verified="no">
  <folder>.</folder>
  <filename>{fn1}</filename>
  <path>{fn}</path>
  <source>
    <database>Unknown</database>
  </source>
  <size>
    <width>{w}</width>
    <height>{h}</height>
    <depth>1</depth>
  </size>
  <segmented>0</segmented>
  {objs}
</annotation>
'''


def read_xml_annotation(in_file, divwh=False):
    """Parse a PascalVOC XML annotation file into [[box, name], ...].

    With divwh=False each box is [xmin, ymin, xmax, ymax] in pixels; with
    divwh=True it is [x_center, y_center, width, height] normalized by the
    image size. Returns [] if the file is missing or unparsable.
    """
    import xml.etree.ElementTree as ET
    if not os.path.exists(in_file):
        return []

    try:
        # BUG FIX: the original wrapped a raw open() whose handle was never
        # closed; ET.parse accepts a filename and closes it itself
        tree = ET.parse(in_file)
    except Exception:
        return []
    root = tree.getroot()

    sz = root.find('size')
    width = int(sz.find('width').text)
    height = int(sz.find('height').text)

    bndboxlist = []
    for obj in root.findall('object'):
        bndbox = obj.find('bndbox')
        xmin = int(float(bndbox.find('xmin').text))
        xmax = int(float(bndbox.find('xmax').text))
        ymin = int(float(bndbox.find('ymin').text))
        ymax = int(float(bndbox.find('ymax').text))
        if divwh:
            # normalize to [0, 1] and convert to center/size form
            xmin /= width
            xmax /= width
            ymin /= height
            ymax /= height
            bnd = [(xmin + xmax) / 2, (ymin + ymax) / 2,
                   xmax - xmin, ymax - ymin]
        else:
            bnd = [xmin, ymin, xmax, ymax]

        bndboxlist.append([bnd, obj.findtext('name')])

    return bndboxlist


# pa = 'D:/data/220329竹筷/标注caise/mini5/MER2-630-60U3C(FCQ21050041)_2022-03-08_11_18_24_872-1672_img.xml'
# read_xml_annotation(pa, True)

def save_xml_annotation(outfn, obj, size=(1, 1)):
    """Write a PascalVOC-style XML annotation file and return the XML string.

    obj: list of [box, name, ...] with box = [xmin, ymin, xmax, ymax];
    `size` scales box coordinates ((1, 1) keeps them as-is).
    NOTE(review): <width>/<height> are hard-coded to 1924x1556 — confirm.
    """
    # default changed from a mutable [1, 1] to (1, 1); only indexed, so
    # callers are unaffected
    obj_xml = ''
    for x in obj:
        box, name = x[:2]
        xmin, ymin, xmax, ymax = box
        xmin = int(xmin * size[0])
        ymin = int(ymin * size[1])
        xmax = int(xmax * size[0])
        ymax = int(ymax * size[1])
        obj_xml += f'''
        <object>
            <name>{name}</name>
            <pose>Unspecified</pose>
            <truncated>0</truncated>
            <difficult>0</difficult>
            <bndbox>
                <xmin>{xmin}</xmin>
                <ymin>{ymin}</ymin>
                <xmax>{xmax}</xmax>
                <ymax>{ymax}</ymax>
            </bndbox>
        </object>'''

    out = f'''<annotation>
	<folder></folder>
	<filename></filename>
	<path>{''}</path>
	<source>
		<database>Unknown</database>
	</source>
	<size>
		<width>1924</width>
		<height>1556</height>
		<depth>3</depth>
	</size>
	<segmented>0</segmented>
    {obj_xml}
    </annotation>'''

    # BUG FIX: the original opened the file without a context manager;
    # ensure the handle is closed even if the write raises
    with open(outfn, 'w') as f:
        f.write(out)
    return out


# |c,  s||sx,  0||c, -s|
# |-s, c|| 0, sy||s,  c|
# | c*sx, s*sy||c, -s|
# |-s*sx, c*sy||s,  c|
# |c*c*sx+s*s*sy, -s*c*sx+c*s*cy|
# |-c*s*sx+s*c*sy, s*s*sx+c*c*sy|
def affPoints(rbox, tx=0, ty=0, sx=1, sy=1):
    """Return the 4 corner points of a rotated box after an affine transform.

    rbox is (cx, cy, w, h, degree); tx/ty are shear angles in degrees and
    sx/sy are scale factors applied to the x/y axes (and to the center).
    Corners are emitted in the fixed order TL, BL, BR, TR (pre-rotation).
    """
    # BUG FIX: 'math' is not imported at the top of this file; import locally
    import math

    cx, cy, w, h, degree = rbox
    hw, hh = w / 2, h / 2
    corners = [(-hw, -hh), (-hw, hh), (hw, hh), (hw, -hh)]

    ang = degree * math.pi / 180
    s = math.sin(ang)
    c = math.cos(ang)
    shx = math.tan(math.radians(tx))
    shy = math.tan(math.radians(ty))

    # rotation plus additive shear, then anisotropic scale on the diagonal
    # (matches the matrix product sketched in the comment block above)
    m00 = c * sx
    m01 = -s + shx
    m10 = s + shy
    m11 = c * sy

    cx *= sx
    cy *= sy

    return [[m00 * x + m01 * y + cx, m10 * x + m11 * y + cy]
            for x, y in corners]


def affPoints1(rbox, tx=0, ty=0, sx=1, sy=1):
    """Matrix-form variant of affPoints.

    NOTE(review): the shear matrix here is [[1, 1+tan(tx)], [1+tan(ty), 1]],
    which differs from affPoints' additive [tan(tx), tan(ty)] terms — the
    two functions do NOT produce the same result; confirm which is intended.
    """
    # BUG FIX: 'math' is not imported at the top of this file; import locally
    import math

    cx, cy, w, h, degree = rbox
    hw, hh = w / 2, h / 2
    corners = [(-hw, -hh), (-hw, hh), (hw, hh), (hw, -hh)]

    ang = degree * math.pi / 180
    s = math.sin(ang)
    c = math.cos(ang)
    shx = math.tan(math.radians(tx))
    shy = math.tan(math.radians(ty))

    R = np.array([[c, -s], [s, c]])
    S = np.array([[sx, 0], [0, sy]])
    T = np.array([[1, 1 + shx], [1 + shy, 1]])

    cx *= sx
    cy *= sy
    # rotation, then scale, then shear (removed the unused inverse matrix)
    M = R.dot(S).dot(T)

    m00, m01 = M[0, 0], M[0, 1]
    m10, m11 = M[1, 0], M[1, 1]
    return [[m00 * x + m01 * y + cx, m10 * x + m11 * y + cy]
            for x, y in corners]


def imaff(image, rbox, a, tx=0, ty=0, sx=1, sy=1):
    # Warp `image` so that rotated box `rbox` lands in an upright crop of
    # size (w*sx, h*sy), applying rotation `a` and shear tx/ty (degrees).
    # Returns (warped_image, 2x3 affine matrix M).
    cx, cy, w, h, degree = rbox
    degree = 0  # NOTE(review): the box's own angle is discarded — confirm intended
    nW = int(w * sx)
    nH = int(h * sy)
    # three matching source/destination corners fully define the affine map
    pts = np.array(affPoints(rbox)).astype(np.float32).reshape([-1, 1, 2])[:3]
    ptd = np.array(affPoints([w / 2, h / 2, w, h, a], tx, ty, sx, sy)).astype(np.float32).reshape([-1, 1, 2])[:3]
    M = cv2.getAffineTransform(pts, ptd)
    return cv2.warpAffine(image, M, (nW, nH)), M


def get_max_disk():
    """Return the drive letter (C-G) with the largest total capacity.

    BUG FIX: the original `break`-ed on the first unreadable drive (so
    letters after a gap were never probed) and crashed on np.argmax of an
    empty list when no drive responded. Unreadable drives are now skipped
    and '' is returned when none responds.
    """
    import shutil
    best, best_total = '', -1
    for d in 'CDEFG':
        try:
            total, used, free = shutil.disk_usage(f'{d}:')
        except Exception:
            # drive absent or unreadable — keep probing the remaining letters
            continue
        if total > best_total:
            best, best_total = d, total
    return best


def find_file(fn, basedirs=['./']):
    """Recursively search `basedirs` for a file named `fn`; return its full
    path, or '' when it is not found."""
    for base in basedirs:
        hits = (os.path.join(root, fn)
                for root, _dirs, files in os.walk(base)
                if fn in files)
        found = next(hits, None)
        if found is not None:
            return found
    return ''


# repx 横线重叠区域
# repy 纵向重叠区域
def split_rect(r, rows, cols, repx=0, repy=0):
    """Split rect r = [x1, y1, x2, y2] into rows*cols tiles.

    repx/repy are the horizontal/vertical overlap fractions between
    neighbouring tiles (0 = no overlap).
    """
    rows, cols = int(rows), int(cols)
    full_w = r[2] - r[0]
    full_h = r[3] - r[1]
    # tile size chosen so that tiles minus their overlaps exactly cover r
    w = full_w / (cols - (cols - 1) * repx)
    h = full_h / (rows - (rows - 1) * repy)
    step_x = w - repx * w
    step_y = h - repy * h
    tiles = []
    for i in range(rows):
        for j in range(cols):
            x = r[0] + j * step_x
            y = r[1] + i * step_y
            tiles.append([x, y, x + w, y + h])
    return tiles


def split_rect2(r0, k, repx=0, repy=0):
    """Split r0 into ceil(h/k) x ceil(w/k) tiles of roughly k pixels each."""
    n_rows = int((rect_height(r0) + k - 1) // k)
    n_cols = int((rect_width(r0) + k - 1) // k)
    return split_rect(r0, n_rows, n_cols, repx, repy)


def split_rect_by_size(shape, r, h, w, repy1=0, repx1=0):
    """Cover rect r with tiles of fixed size (h, w), clamped to the image.

    shape is the image (h, w) or None (then bounds derive from r itself);
    repy1/repx1 are pixel overlaps between neighbouring tiles. The tile grid
    is centered on r and every tile is clamped inside the image.
    """
    rh, rw = rect_height(r), rect_width(r)
    if shape is None:
        imh, imw = r[3] * 2, r[2] * 2
    else:
        imh, imw = shape[:2]
    # 0.4 biases the row/col count upward before int() truncation
    slack = 0.4
    rows = max(int(rh / h + slack), 1)
    cols = max(int(rw / w + slack), 1)
    offy = ((rows * h) - rh) // 2
    offx = ((cols * w) - rw) // 2
    tiles = []
    for i in range(rows):
        y = bound(r[1] + i * (h - repy1) + offy, 0, imh - h)
        for j in range(cols):
            x = bound(r[0] + j * (w - repx1) + offx, 0, imw - w)
            tiles.append([x, y, x + w, y + h])
    return tiles


# 这是从堆栈内存的第3层开始查找返回变量名称
def retrieve_name(var):
    """Best-effort reverse lookup of `var`'s variable name.

    Walks caller frames starting two levels up the stack and returns the
    first local name bound to the same object, or '' when none matches.
    """
    import inspect
    for frame_info in inspect.stack()[2:]:
        matches = [n for n, v in frame_info.frame.f_locals.items() if v is var]
        if matches:
            return matches[0]
    return ""


def imshow0(*args):
    """Show any number of images with cv2, accepting args in any order.

    ndarray args are images, str args are window names (paired with images
    by position), and an int arg sets the cv2.waitKey timeout in ms.
    Returns the waitKey result (0 when waitKey is skipped).
    """
    ms_time = -1
    names = []
    ims = []
    for x in args:
        if isinstance(x, np.ndarray):
            ims.append(x)
        elif isinstance(x, str):
            names.append(x)
        elif isinstance(x, int):
            # BUG FIX: was 'ms_tims = x' (typo), so an int arg never
            # actually changed the waitKey timeout
            ms_time = x
    defname = 'noname'
    name = defname
    for i, im in enumerate(ims):
        if i < len(names):
            defname = name = names[i]
        elif i > 0:
            name = f'{defname}{i}'
            # NOTE(review): this immediately overwrites the name above with
            # the variable name found on the call stack — confirm intended
            name = retrieve_name(im)
        cv2.namedWindow(name, 0)
        cv2.imshow(name, im)

    ret = 0
    if ms_time > -2:
        ret = cv2.waitKey(ms_time)
    return ret


def load_labelme_rects(json_path):
    """Read a labelme JSON file and return [[x1, y1, x2, y2], label] pairs.

    Only the first two points of each shape are used (rectangle corners),
    truncated to ints.
    """
    import json
    # BUG FIX: the original opened the file without closing it
    with open(json_path, 'r') as f:
        data = json.load(f)

    rects = []
    for shape in data['shapes']:
        (xa, ya), (xb, yb) = shape['points'][0], shape['points'][1]
        rects.append([[int(xa), int(ya), int(xb), int(yb)], shape['label']])
    return rects


def select_out(out1, k, labs=None, exlibs=None):
    """Filter detections by label whitelist (`labs`) / blacklist (`exlibs`),
    sort by confidence descending when present, and keep the top k.

    String filters are whitespace-split into label lists.
    """
    def _as_labels(v):
        return v.split() if isinstance(v, str) else v

    if labs is not None:
        keep = _as_labels(labs)
        out1 = [d for d in out1 if d[1] in keep]
    if exlibs is not None:
        drop = _as_labels(exlibs)
        out1 = [d for d in out1 if d[1] not in drop]
    if out1 and len(out1[0]) > 2:
        out1.sort(key=lambda d: d[2], reverse=True)
    return out1[:k]


def nms(bboxes, scores, threshold=0.5):
    """Greedy non-maximum suppression.

    bboxes: (N, 4) array of [x1, y1, x2, y2]; scores: (N,). Returns the
    indices of kept boxes, highest score first; boxes whose IoU with a kept
    box exceeds `threshold` are suppressed.
    """
    if len(bboxes) == 0:
        return []
    x1, y1, x2, y2 = (bboxes[:, k] for k in range(4))
    areas = (x2 - x1) * (y2 - y1)
    order = scores.argsort()[::-1]  # indices by descending score
    keep = []
    while order.size > 0:
        best = order[0]
        keep.append(best)
        if order.size == 1:
            break
        rest = order[1:]
        # intersection extents of the best box against the remaining boxes,
        # clamped at 0 for non-overlapping pairs
        iw = np.maximum(0.0, np.minimum(x2[best], x2[rest]) - np.maximum(x1[best], x1[rest]))
        ih = np.maximum(0.0, np.minimum(y2[best], y2[rest]) - np.maximum(y1[best], y1[rest]))
        inter = iw * ih
        iou = inter / (areas[best] + areas[rest] - inter)
        # keep only boxes at or below the IoU threshold for the next round
        order = rest[np.where(iou <= threshold)[0]]
    return keep


def bnds_nms_1class(out, threshold):
    """Run NMS over detections [rect, label, conf] of a single class."""
    boxes = np.array([det[0] for det in out])
    confs = np.array([det[2] for det in out])
    keep = nms(boxes, confs, threshold)
    return [out[i] for i in keep]


def bnds_nms(out, threshold):
    """Class-wise NMS over detections [rect, label, conf].

    BUG FIX: classes with exactly one detection used to be dropped entirely
    (the original appended results only when a class had more than one box,
    unlike do_nms1); singletons now pass through unchanged.
    """
    per_class = {}
    for r, l, conf in out:
        per_class.setdefault(l, []).append([r, l, conf])

    merged = []
    for dets in per_class.values():
        if len(dets) > 1:
            merged += bnds_nms_1class(dets, threshold)
        else:
            merged += dets
    return merged


def imrot(numpy_image, ang):
    """Rotate an image by a right angle: ang in {90, 180, -90, 270}.

    Any other angle (including 0) returns the image unchanged.
    """
    if ang == 90:
        return cv2.rotate(numpy_image, cv2.ROTATE_90_CLOCKWISE)
    if ang in (-90, 270):
        return cv2.rotate(numpy_image, cv2.ROTATE_90_COUNTERCLOCKWISE)
    if ang == 180:
        return cv2.rotate(numpy_image, cv2.ROTATE_180)
    return numpy_image


def nms(bboxes, scores, threshold=0.5):
    """Greedy non-maximum suppression over [x1, y1, x2, y2] boxes.

    NOTE(review): this is a byte-identical re-definition of the `nms`
    defined earlier in this file and shadows it at import time; one copy
    should eventually be removed.

    Returns the indices of kept boxes in descending score order.
    """
    if len(bboxes) == 0:
        return []
    x1 = bboxes[:, 0]
    y1 = bboxes[:, 1]
    x2 = bboxes[:, 2]
    y2 = bboxes[:, 3]
    areas = (x2 - x1) * (y2 - y1)
    # indices sorted by score, largest first
    order = scores.argsort()[::-1]
    # indices of the kept boxes
    keep = []
    while order.size > 0:
        i = order[0]
        # keep the highest-scoring remaining box
        keep.append(i)
        if order.size == 1:
            break
        # IoU of the kept box against every remaining box: the overlap
        # rectangle is bounded by the max of the top-left corners and the
        # min of the bottom-right corners
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        # non-overlapping boxes would give negative extents — clamp to 0
        w = np.maximum(0.0, (xx2 - xx1))
        h = np.maximum(0.0, (yy2 - yy1))
        inter = w * h
        iou = inter / (areas[i] + areas[order[1:]] - inter)
        # drop boxes whose IoU exceeds the threshold (heavy overlap),
        # keep the rest for the next round
        ids = np.where(iou <= threshold)[0]
        # ids indexes the remainder (order[1:]), so +1 maps back into order
        order = order[ids + 1]
    return keep


def do_nms1(out, threshold=0.5):
    """Per-class NMS over detections [rect, label, conf].

    Classes with a single detection pass through untouched.
    """
    grouped = {}
    for r, l, conf in out:
        grouped.setdefault(l, []).append([r, l, conf])

    result = []
    for dets in grouped.values():
        if len(dets) > 1:
            boxes = np.array([d[0] for d in dets])
            confs = np.array([d[2] for d in dets])
            keep = nms(boxes, confs, threshold=threshold)
            result += [dets[i] for i in keep]
        else:
            result += dets
    return result


def do_nms(out, classes, threshold=0.5):
    """NMS jointly over detections whose label is in `classes`; others pass
    through untouched. classes=None falls back to per-class do_nms1."""
    if classes is None:
        return do_nms1(out, threshold)

    passthrough = []
    candidates = []
    for r, l, conf in out:
        target = candidates if l in classes else passthrough
        target.append([r, l, conf])

    keep = nms(np.array([d[0] for d in candidates]),
               np.array([d[2] for d in candidates]), threshold=threshold)
    return passthrough + [candidates[i] for i in keep]


def partition1(vec, predicate):
    """Union-find partition of `vec` into equivalence classes.

    Port of OpenCV's internal partition(): `predicate(a, b)` returning truthy
    means the two elements belong to the same class. Returns
    (nclasses, labels) where labels[i] is the 1-based class id of vec[i].
    """
    N = len(vec)
    PARENT = 0
    RANK = 1

    nodes = []  # (N*2);

    # The first O(N) pass: create N single-vertex trees
    for i in range(N):
        nodes.append([0, 0])
        nodes[i][PARENT] = -1
        nodes[i][RANK] = 0

    # The main O(N^2) pass: merge connected components
    for i in range(N):
        root = i

        # find root
        while (nodes[root][PARENT] >= 0):
            root = nodes[root][PARENT]

        for j in range(N):
            if (i == j or not predicate(vec[i], vec[j])):
                continue

            root2 = j

            while (nodes[root2][PARENT] >= 0):
                root2 = nodes[root2][PARENT]

            if (root2 != root):
                # unite both trees: attach the lower-rank root under the higher
                rank = nodes[root][RANK]
                rank2 = nodes[root2][RANK]
                if (rank > rank2):
                    nodes[root2][PARENT] = root
                else:
                    nodes[root][PARENT] = root2
                    nodes[root2][RANK] += rank == rank2
                    root = root2

                assert (nodes[root][PARENT] < 0)

                k = j
                parent = 0

                # compress the path from node2 to root
                while ((nodes[k][PARENT]) >= 0):
                    parent = nodes[k][PARENT]
                    nodes[k][PARENT] = root
                    k = parent

                # compress the path from node to root
                k = i
                while ((nodes[k][PARENT]) >= 0):
                    parent = nodes[k][PARENT]
                    nodes[k][PARENT] = root
                    k = parent

    # Final O(N) pass: enumerate classes
    labels = []
    nclasses = 0

    for i in range(N):
        root = i
        while (nodes[root][PARENT] >= 0):
            root = nodes[root][PARENT]
        # re-use the rank field as the (bit-inverted) class label
        if (nodes[root][RANK] >= 0):
            nclasses += 1
            nodes[root][RANK] = ~nclasses
        labels.append(~nodes[root][RANK])

    return nclasses, labels


def partition_split(vec, nclasses, labels, mincnt=0):
    """Group `vec` items by their 1-based class label, order the groups
    largest first, and (when mincnt > 0) drop groups with <= mincnt members."""
    groups = [[] for _ in range(nclasses)]
    for item, lab in zip(vec, labels):
        groups[lab - 1].append(item)

    groups.sort(key=len, reverse=True)
    if mincnt > 0:
        return [g for g in groups if len(g) > mincnt]
    return groups


def partition2(vec, predicate, mincnt=0):
    """Partition `vec` into the equivalence classes induced by `predicate`,
    largest class first; classes with <= mincnt members are dropped."""
    n_classes, labels = partition1(vec, predicate)
    return partition_split(vec, n_classes, labels, mincnt)


def get_rect_iou(kx, ky):
    """Return a predicate(a, b) that is True when the rects a[0] and b[0],
    each extended by fractions kx/ky, overlap (i.e. their IoU > 0).

    BUG FIX: the original returned the factory itself instead of the inner
    predicate (so the "predicate" was a function object — always truthy and
    every element merged into one class), and its inner function called
    itself recursively. The extension below assumes extend_rect2-style
    symmetric growth around the center — TODO confirm against the project's
    extend_rect2 helper.
    """
    def _extend(r, kw, kh):
        # grow width/height by a fraction, keeping the center fixed
        dw = (r[2] - r[0]) * kw / 2
        dh = (r[3] - r[1]) * kh / 2
        return [r[0] - dw, r[1] - dh, r[2] + dw, r[3] + dh]

    def rect_overlap(a, b):
        ra = _extend(a[0], kx, ky)
        rb = _extend(b[0], kx, ky)
        iw = min(ra[2], rb[2]) - max(ra[0], rb[0])
        ih = min(ra[3], rb[3]) - max(ra[1], rb[1])
        # a positive intersection area is equivalent to IoU > 0
        return iw > 0 and ih > 0

    return rect_overlap


def nms_meg(wout1, names, thd=0.1, kx=0.1, ky=0.1):
    """NMS over the classes in `names`, then cluster the surviving boxes
    into groups of mutually-overlapping detections (extended by kx/ky)."""
    survivors = do_nms(wout1, names, thd)
    return partition2(survivors, get_rect_iou(kx, ky))


def draw_text_cn(img, text, left, top, textcolor=(0, 0, 255), textsize=2):
    """Draw (possibly Chinese) text on a BGR image via PIL; returns BGR.

    textcolor is BGR; requires font/simsun.ttc relative to the working dir.
    """
    from PIL import Image, ImageDraw, ImageFont
    if isinstance(img, np.ndarray):
        img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    # PIL expects RGB, so reverse the BGR color triple
    rgb_color = tuple([textcolor[2], textcolor[1], textcolor[0]])
    font = ImageFont.truetype(
        "font/simsun.ttc", textsize, encoding='utf-8')
    ImageDraw.Draw(img).text((left, top), text, rgb_color, font=font)
    return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)


def livefile(id=0):
    # Heartbeat marker: create D:\~live_<id>.txt containing "1" if missing.
    # NOTE(review): hard-coded Windows drive letter — this is a no-op/failure
    # on other platforms; '\~' is not an escape sequence, so the literal
    # path really is D:\~live_<id>.txt.
    fnfn = f'D:\~live_{str(id)}.txt'
    if not os.path.exists(fnfn):
        with open(fnfn, 'w') as f:
            f.write(str(1))

# 坐标转换
def pointPerspective(pt, cvt_mat_t):
    """Apply a 3x3 perspective (homography) matrix to one (u, v) point."""
    u, v = pt
    # homogeneous transform followed by the perspective divide
    denom = cvt_mat_t[2][0] * u + cvt_mat_t[2][1] * v + cvt_mat_t[2][2]
    num_x = cvt_mat_t[0][0] * u + cvt_mat_t[0][1] * v + cvt_mat_t[0][2]
    num_y = cvt_mat_t[1][0] * u + cvt_mat_t[1][1] * v + cvt_mat_t[1][2]
    return [num_x / denom, num_y / denom]


def pointsPerspective(pts, cvt_mat_t):
    """Apply a 3x3 perspective matrix to every point in `pts`."""
    return list(map(lambda p: pointPerspective(p, cvt_mat_t), pts))


def rectPerspective(r, cvt_mat_t):
    """Warp an axis-aligned rect through a homography and return the
    axis-aligned bounding rect of the warped corners."""
    warped = pointsPerspective(rect2pts(r), cvt_mat_t)
    return boundingRect(warped)


def randPerspective(img, r1, bnds, rate=0.2):
    """Randomly perspective-warp the crop r1 of img together with its boxes.

    r1 is [x1, y1, x2, y2]; bnds is a list of [rect, ...] annotations in
    image coordinates. Each destination corner is jittered by up to
    rate * width/height pixels. Returns (warped_image, transformed_bnds).
    """
    pts1 = rect2pts(r1)
    w, h = rect_wh(r1)
    r2 = [0, 0, w, h]
    pts2 = rect2pts(r2)
    pts22 = []
    wr = int(w * rate)
    hr = int(h * rate)
    # jitter every destination corner independently
    for i in range(len(pts2)):
        x, y = pts2[i]
        x += random.randint(-wr, wr)
        y += random.randint(-hr, hr)
        pts22.append([x, y])

    pts1 = np.float32(pts1)
    pts22 = np.float32(pts22)
    # shift the jittered corners so the output image starts at (0, 0)
    pts2 = pts22 - np.min(pts22, axis=0)
    cols, rows = np.max(pts2, axis=0).astype(np.int32).tolist()
    M = cv2.getPerspectiveTransform(pts1, pts2)
    # M1 = cv2.getPerspectiveTransform(pts2, pts1)
    # map each annotation rect through M; rect_off presumably shifts the
    # rect by r1's origin first — confirm against the rect_off helper
    bnds = [[rectPerspective(rect_off(x[0], r1[:2]), M)] + x[1:] for x in bnds]
    tt_img = cv2.warpPerspective(img, M, (cols, rows))
    return tt_img, bnds
