import time
from random import *
import random
import os
import cv2
import numpy as np
import struct


def float_to_bin(num):
    """Return the 32-bit IEEE-754 (big-endian) bit string of a float."""
    bits = struct.unpack('!I', struct.pack('!f', num))[0]
    return f'{bits:032b}'

def mat2img(mat):
    """Linearly rescale an array into 0..255 and return it as uint8.

    Fix: a constant input (min == max) previously divided by zero,
    producing NaN/inf; it now returns an all-zero image.
    """
    lo, hi = mat.min(), mat.max()
    if hi == lo:
        return np.zeros_like(mat, dtype=np.uint8)
    scaled = (mat - lo) * 255 / (hi - lo)
    return scaled.astype(np.uint8)

def bgr2gray(im):
    """Convert a BGR image to single-channel grayscale."""
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    return gray

def gray2bgr(im):
    """Convert a grayscale image to 3-channel BGR."""
    bgr = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)
    return bgr


def vcat(li):
    """Stack the given arrays/images vertically (row-wise)."""
    stacked = np.vstack(li)
    return stacked

def hcat(li):
    """Stack the given arrays/images horizontally (column-wise)."""
    stacked = np.hstack(li)
    return stacked

def rect_height(r):
    """Height of an (x1, y1, x2, y2) rectangle."""
    _, y1, _, y2 = r[:4]
    return y2 - y1

def rect_width(r):
    """Width of an (x1, y1, x2, y2) rectangle."""
    x1, _, x2, _ = r[:4]
    return x2 - x1

def imresize(img, k):
    """Scale an image by factor k on both axes."""
    h, w = img.shape[:2]
    return cv2.resize(img, (int(w * k), int(h * k)))

def intrect(r):
    """Truncate the four rectangle coordinates to ints."""
    return tuple(int(v) for v in r[:4])

def putText(img, text, rect, fontFace, fonth, color, linew):
    """Draw multi-line `text` fitted inside `rect` (x1, y1, x2, y2).

    The text block is first measured at scale 1, then a single
    fontScale is chosen so the whole block fits the rect.
    NOTE(review): `fonth` is never used, and the starting y mixes
    scale-1 metrics with the scaled drawing below — confirm layout for
    rects much smaller or larger than the raw text.
    """
    lines = text.split('\n')
    textSize = (0, 0)
    baseline = 0

    # measure every line at scale 1: track max width and total height
    for line in lines:
        textSize1, baseline1 = cv2.getTextSize(
            line, fontFace, 1, linew)
        textSize = (max(textSize[0], textSize1[0]),
                    textSize[1]+textSize1[1])
        baseline = max(baseline, baseline1)

    # inter-line gap: a third of the last measured line height
    linegap = textSize1[1]//3
    textSize = (textSize[0], textSize[1]+linegap*(len(lines)-1))
    rectw = rect[2]-rect[0]
    recth = rect[3]-rect[1]
    # choose the scale so both width and height fit (+1 avoids /0)
    fontScale = min(recth/(textSize[1]+1), rectw/(textSize[0]+1))
    x = rect[0] + 0#(rectw-textSize[0])
    y = rect[1] + (recth-textSize[1]) + baseline + linew

    # write the text on the image
    for line in lines:
        textSize1, baseline = cv2.getTextSize(line, fontFace, fontScale, linew)
        cv2.putText(img, line, (int(x), int(y)), fontFace, fontScale, color, int(linew))
        y += textSize1[1]+linegap


def draw_line(img, line, color=(0, 0, 255), linew=1):
    """Draw a segment given as (x1, y1, x2, y2) and return img."""
    x1, y1, x2, y2 = line
    p0 = (int(x1), int(y1))
    p1 = (int(x2), int(y2))  # OpenCV points must be tuples, not lists
    cv2.line(img, p0, p1, color, linew)
    return img


def drawrect(img, r, color=(0, 0, 255), linew=0):
    """Draw rectangle r; linew < 1 auto-picks ~5% of the short side."""
    if linew < 1:
        auto = int(min(r[2] - r[0], r[3] - r[1]) * 0.05 + 0.5)
        linew = max(auto, 1)
    p0, p1 = (int(r[0]), int(r[1])), (int(r[2]), int(r[3]))
    cv2.rectangle(img, p0, p1, color, int(linew))
    return img


def drawrects(img, rs, color=(0, 0, 255), linew=2):
    """Draw every rectangle in rs onto img."""
    for rect in rs:
        drawrect(img, rect, color, linew)
    return img


def drawpolys(img, polys, color=(0, 0, 255), wline=2):
    """Draw each polygon in polys as a closed polyline."""
    cv2.polylines(img, polys, True, color, wline)

def chdir(path):
    """Best-effort os.chdir: OSError (e.g. missing dir) is ignored."""
    try:
        os.chdir(path)
    except OSError:
        pass

def mkdir(path):
    """Create path (with parents); an already-existing dir is fine.

    exist_ok=True makes the common case explicit; other OS errors
    (e.g. permissions) are still swallowed to keep the original
    best-effort contract.
    """
    try:
        os.makedirs(path, exist_ok=True)
    except OSError:
        pass


def check_disk_space(disk_path, threshold_gb):
    """Return True when disk_path has at least threshold_gb GiB free.

    Fix: the original unpacked ``os.statvfs`` into three names (it
    returns a 10-field struct → ValueError) and computed
    ``free * total`` which is not a byte count; ``shutil.disk_usage``
    is portable (works on Windows too) and returns bytes directly.
    """
    import shutil
    usage = shutil.disk_usage(disk_path)
    free_gb = usage.free / (2 ** 30)
    return free_gb >= threshold_gb

import ctypes
def show_warning_dialog():
    """Show a blocking Windows warning dialog about low disk space.

    Windows-only: uses user32.MessageBoxW via ctypes; 0x30 is the
    MB_ICONWARNING flag.  The message text is user-facing Chinese
    ("D: drive is low on space, delete files to free space").
    """
    ctypes.windll.user32.MessageBoxW(0, "D盘剩余空间不足，请删除文件以释放空间！", "警告", 0x30)

def imshow(name, im, ms_time=-2):
    """Show im in a resizable window; waitKey unless ms_time == -2."""
    cv2.namedWindow(name, 0)
    cv2.imshow(name, im)
    key = 0
    if ms_time > -2:
        key = cv2.waitKey(ms_time)
    return key


def xywh2xyxy(r):
    """Convert (x, y, w, h) to (x1, y1, x2, y2)."""
    x, y, w, h = r[0], r[1], r[2], r[3]
    return (x, y, x + w, y + h)


def imrect(img, r):
    """Crop img to rectangle r, clamping r to the image bounds first."""
    h, w = img.shape[:2]
    x1 = int(max(0, r[0]))
    y1 = int(max(0, r[1]))
    x2 = int(min(w, r[2]))
    y2 = int(min(h, r[3]))
    return img[y1:y2, x1:x2]


def imrect_hw(img, r):
    """Crop img to an (x, y, w, h) rectangle (no bounds clamping)."""
    x, y, w, h = r[0], r[1], r[2], r[3]
    return img[int(y):int(y + h), int(x):int(x + w)]


def extend_rect4(img_rgb, rc, l, t, r, b):
    """Grow rectangle rc by fractions of its own size on each side.

    l/t/r/b are fractions of the width/height added on the left, top,
    right and bottom.  With an image given, the result is clamped to
    the image bounds; with img_rgb=None there is no clamping.
    rc=None means the whole image.
    """
    if img_rgb is None:
        x1, y1, x2, y2 = rc
        w, h = x2 - x1, y2 - y1
        return (int(x1 - l * w), int(y1 - t * h),
                int(x2 + r * w), int(y2 + b * h))

    img_h, img_w = img_rgb.shape[:2]
    x1, y1, x2, y2 = (0, 0, img_w, img_h) if rc is None else rc
    w, h = x2 - x1, y2 - y1
    return (int(max(0, x1 - l * w)),
            int(max(0, y1 - t * h)),
            int(min(img_w, x2 + r * w)),
            int(min(img_h, y2 + b * h)))

def extend_rect2(img, rc, kx, ky):
    """Grow rc by kx horizontally and ky vertically (both sides)."""
    return extend_rect4(img, rc, kx, ky, kx, ky)


def extend_rect1(img, r, k):
    """Grow r by fraction k of its size on every side."""
    return extend_rect4(img, r, k, k, k, k)

def rect_mul(r, ss):
    """Scale every coordinate of r by ss, truncating to int."""
    return tuple(int(v * ss) for v in r)

def rects_mul(rr, ss):
    """Scale every rectangle in rr by ss (see rect_mul)."""
    return [rect_mul(r, ss) for r in rr]


def rect_off(r, off):
    """Offset r component-wise, cycling through off if it is shorter."""
    return tuple(int(v + off[i % len(off)]) for i, v in enumerate(r))


def rects_off(rr, off):
    """Offset every rectangle in rr (see rect_off).

    Rewritten to delegate to rect_off, which implemented exactly the
    same per-rectangle logic, instead of duplicating it inline.
    """
    return [rect_off(r, off) for r in rr]


def get_time_stamp(id=0):
    """Timestamp string 'YYYYMMDD_HHMMSS_mmm' for now + id seconds."""
    now = time.time() + id
    head = time.strftime("%Y%m%d_%H%M%S", time.localtime(now))
    millis = (now - int(now)) * 1000
    return "%s_%03d" % (head, millis)

def get_date_stamp(id=0):
    """Date string 'YYYYMMDD' for now + id seconds (local time)."""
    return time.strftime("%Y%m%d", time.localtime(time.time() + id))


def rotate_bound1(image, r, angle):
    """Rotate `image` by `angle` degrees about the center of rect r and
    return the r-sized crop centered on it.

    Fix: cv2.warpAffine requires an integer output size; the original
    passed the raw (possibly float) rect width/height.
    """
    w = int(r[2] - r[0])
    h = int(r[3] - r[1])
    (cx, cy) = ((r[2] + r[0]) / 2, (r[3] + r[1]) / 2)

    # rotation about the rect center (negated: OpenCV angles are CCW)
    M = cv2.getRotationMatrix2D((cx, cy), -angle, 1.0)

    # translate so the rect center lands at the output center
    M[0, 2] += (w / 2) - cx
    M[1, 2] += (h / 2) - cy

    return cv2.warpAffine(image, M, (w, h))


#
class Resize1(object):
    """Callable that resizes an image to a fixed (w, h) output size."""

    def __init__(self, output_size):
        self.output_size = output_size

    def __call__(self, X):
        return cv2.resize(X, self.output_size)


# Clipping is very useful, especially when switching between color spaces
# or rescaling an image between 0..1 and 0..255.
# By default, when only one threshold is given, the minimum is 0.

class Clip(object):
    """Clamp array values into [mini, maxi] in place and return them.

    Clip(m) means the range [0, m]; Clip(a, b) means [a, b].
    """

    def __init__(self, mini, maxi=None):
        if maxi is None:
            mini, maxi = 0, mini
        self.mini = mini
        self.maxi = maxi

    def __call__(self, X):
        X[X < self.mini] = self.mini
        X[X > self.maxi] = self.maxi
        return X

# Min-max normalization


class Normalize(object):
    """Min-max normalize an array to [0, 1] (optionally per axis)."""

    def __init__(self, axis=None):
        self.axis = axis

    def __call__(self, X):
        lo = np.min(X, self.axis)
        hi = np.max(X, self.axis)
        return (X - lo) / (hi - lo)


# Standardization (z-score)
class Standardize(object):
    """Z-score standardize an array (optionally per axis)."""

    def __init__(self, axis=None):
        self.axis = axis

    def __call__(self, X):
        mu = np.mean(X, self.axis)
        sigma = np.std(X, self.axis)
        return (X - mu) / sigma

# Random horizontal and vertical flips


class Flip(object):
    """Randomly flip an image vertically and/or horizontally (p=0.5 each).

    Fix: the original also flipped an undefined name ``Y``, so it raised
    NameError whenever a flip was actually triggered.
    """

    def __call__(self, X):
        for axis in [0, 1]:
            if np.random.rand(1) < 0.5:
                X = np.flip(X, axis)
        return X

# Choose the size of the cropped image


class Crop(object):
    """Random crop with size drawn between min and max size ratios.

    Fixes: the original sliced an undefined name ``Y`` (NameError on
    every call) and passed float bounds to np.random.randint; bounds
    are now truncated to int explicitly.
    """

    def __init__(self, min_size_ratio, max_size_ratio=(1, 1)):
        self.min_size_ratio = np.array(list(min_size_ratio))
        self.max_size_ratio = np.array(list(max_size_ratio))

    def __call__(self, X):
        size = np.array(X.shape[:2])
        mini = (self.min_size_ratio * size).astype(int)
        maxi = (self.max_size_ratio * size).astype(int)
        # random crop size
        h = np.random.randint(mini[0], maxi[0])
        w = np.random.randint(mini[1], maxi[1])
        # random crop position
        shift_h = np.random.randint(0, size[0] - h)
        shift_w = np.random.randint(0, size[1] - w)
        return X[shift_h:shift_h + h, shift_w:shift_w + w]


# Custom filter
class CustomFilter(object):
    """Apply a fixed convolution kernel via cv2.filter2D."""

    def __init__(self, kernel):
        self.kernel = kernel

    def __call__(self, X):
        return cv2.filter2D(X, -1, self.kernel)

# Sharpening


class Sharpen(object):
    """Sharpen an image with a random strength up to max_center.

    Fix: __init__ never stored ``max_center``, so __call__ crashed with
    AttributeError on ``self.max_center``.
    """

    def __init__(self, max_center=4):
        self.max_center = max_center
        self.identity = np.array([[0, 0, 0],
                                  [0, 1, 0],
                                  [0, 0, 0]])
        self.sharpen = np.array([[0, -1,  0],
                                 [-1,  4, -1],
                                 [0, -1,  0]]) / 4

    def __call__(self, X):
        # random sharpening strength in [0, max_center)
        sharp = self.sharpen * np.random.random() * self.max_center
        kernel = self.identity + sharp
        return cv2.filter2D(X, -1, kernel)


# Gaussian blur
class GaussianBlur(object):
    """Gaussian blur with a random odd kernel size up to max_kernel."""

    def __init__(self, max_kernel=(7, 7)):
        # store half-sizes; the sampled kernel size is odd: 2*k + 1
        self.max_kernel = ((max_kernel[0] + 1) // 2, (max_kernel[1] + 1) // 2)

    def __call__(self, X):
        kx = np.random.randint(1, self.max_kernel[0]) * 2 + 1
        ky = np.random.randint(1, self.max_kernel[1]) * 2 + 1
        return cv2.GaussianBlur(X, (kx, ky), 0)

# Perspective transform


class Perspective(object):
    """Random 3D perspective warp: translation, rotation, scale, shear.

    Fix: the original also warped an undefined name ``Y`` at the end,
    raising NameError on every call.
    """

    def __init__(self,
                 max_ratio_translation=(0.2, 0.2, 0),
                 max_rotation=(10, 10, 360),
                 max_scale=(0.1, 0.1, 0.2),
                 max_shearing=(15, 15, 5)):
        self.max_ratio_translation = np.array(max_ratio_translation)
        self.max_rotation = np.array(max_rotation)
        self.max_scale = np.array(max_scale)
        self.max_shearing = np.array(max_shearing)

    def __call__(self, X):
        # get the height and the width of the image
        h, w = X.shape[:2]
        max_translation = self.max_ratio_translation * np.array([w, h, 1])
        # draw random amounts on each axis
        t_x, t_y, t_z = np.random.uniform(-1, 1, 3) * max_translation
        r_x, r_y, r_z = np.random.uniform(-1, 1, 3) * self.max_rotation
        sc_x, sc_y, sc_z = np.random.uniform(-1, 1, 3) * self.max_scale + 1
        sh_x, sh_y, sh_z = np.random.uniform(-1, 1, 3) * self.max_shearing

        # convert degree angles to rad
        theta_rx = np.deg2rad(r_x)
        theta_ry = np.deg2rad(r_y)
        theta_rz = np.deg2rad(r_z)
        theta_shx = np.deg2rad(sh_x)
        theta_shy = np.deg2rad(sh_y)
        theta_shz = np.deg2rad(sh_z)

        # focal length derived from the image diagonal
        diag = (h ** 2 + w ** 2) ** 0.5
        f = diag
        if np.sin(theta_rz) != 0:
            f /= 2 * np.sin(theta_rz)

        # cartesian -> projective (centered) coordinates
        H_M = np.array([[1, 0, -w / 2],
                        [0, 1, -h / 2],
                        [0, 0,      1],
                        [0, 0,      1]])
        # projective -> cartesian coordinates
        Hp_M = np.array([[f, 0, w / 2, 0],
                         [0, f, h / 2, 0],
                         [0, 0,     1, 0]])

        # adjust the translation on z
        t_z = (f - t_z) / sc_z ** 2
        # translation matrix
        T_M = np.array([[1, 0, 0, t_x],
                        [0, 1, 0, t_y],
                        [0, 0, 1, t_z],
                        [0, 0, 0,  1]])

        # calculate cos and sin of angles
        sin_rx, cos_rx = np.sin(theta_rx), np.cos(theta_rx)
        sin_ry, cos_ry = np.sin(theta_ry), np.cos(theta_ry)
        sin_rz, cos_rz = np.sin(theta_rz), np.cos(theta_rz)
        # rotation matrices about x, y and z
        R_Mx = np.array([[1,      0,       0, 0],
                         [0, cos_rx, -sin_rx, 0],
                         [0, sin_rx,  cos_rx, 0],
                         [0,      0,       0, 1]])
        R_My = np.array([[cos_ry, 0, -sin_ry, 0],
                         [0,      1,       0, 0],
                         [sin_ry, 0,  cos_ry, 0],
                         [0,      0,       0, 1]])
        R_Mz = np.array([[cos_rz, -sin_rz, 0, 0],
                         [sin_rz,  cos_rz, 0, 0],
                         [0,            0, 1, 0],
                         [0,            0, 0, 1]])
        # compose the full rotation matrix
        R_M = np.dot(np.dot(R_Mx, R_My), R_Mz)

        # scaling matrix
        Sc_M = np.array([[sc_x,    0,    0, 0],
                         [0,    sc_y,    0, 0],
                         [0,       0, sc_z, 0],
                         [0,       0,    0, 1]])

        # shear factors
        tan_shx = np.tan(theta_shx)
        tan_shy = np.tan(theta_shy)
        tan_shz = np.tan(theta_shz)
        # shearing matrices about x, y and z
        Sh_Mx = np.array([[1, 0, 0, 0],
                          [tan_shy, 1, 0, 0],
                          [tan_shz, 0, 1, 0],
                          [0, 0, 0, 1]])
        Sh_My = np.array([[1, tan_shx, 0, 0],
                          [0,       1, 0, 0],
                          [0, tan_shz, 1, 0],
                          [0,       0, 0, 1]])
        Sh_Mz = np.array([[1, 0, tan_shx, 0],
                          [0, 1, tan_shy, 0],
                          [0, 0,       1, 0],
                          [0, 0,       0, 1]])
        # compose the full shearing matrix
        Sh_M = np.dot(np.dot(Sh_Mx, Sh_My), Sh_Mz)

        # full transform: shear, rotate, scale, translate, then project
        M = np.dot(Sh_M, np.identity(4))
        M = np.dot(R_M,  M)
        M = np.dot(Sc_M, M)
        M = np.dot(T_M,  M)
        M = np.dot(Hp_M, np.dot(M, H_M))
        # apply the transformation
        return cv2.warpPerspective(X, M, (w, h))
# Cutout (random patch erasing)


class Cutout(object):
    """Blank out up to max_crop random rectangles of X (cutout augmentation).

    min_size_ratio / max_size_ratio: per-axis patch size as a fraction
    of the image size.
    channel_wise: blank only one random channel per patch.
    crop_target: also blank the same region of the label image Y.
    replacement: the value written into the blanked patches.
    NOTE(review): X and Y are modified in place; only X is returned.
    """
    def __init__(self,
                 min_size_ratio,
                 max_size_ratio,
                 channel_wise=False,
                 crop_target=True,
                 max_crop=10,
                 replacement=0):
        self.min_size_ratio = np.array(list(min_size_ratio))
        self.max_size_ratio = np.array(list(max_size_ratio))
        self.channel_wise = channel_wise
        self.crop_target = crop_target
        self.max_crop = max_crop
        self.replacement = replacement

    def __call__(self, X,Y):
        size = np.array(X.shape[:2])
        mini = self.min_size_ratio * size
        maxi = self.max_size_ratio * size
        for _ in range(self.max_crop):
            # random patch size
            h = np.random.randint(mini[0], maxi[0])
            w = np.random.randint(mini[1], maxi[1])
            # random patch position
            shift_h = np.random.randint(0, size[0] - h)
            shift_w = np.random.randint(0, size[1] - w)
            if self.channel_wise:
                # blank one random channel of X only
                c = np.random.randint(0, X.shape[-1])
                X[shift_h:shift_h+h, shift_w:shift_w+w, c] = self.replacement
                if self.crop_target:
                    Y[shift_h:shift_h+h, shift_w:shift_w+w] = self.replacement
            else:
                X[shift_h:shift_h+h, shift_w:shift_w+w] = self.replacement
                if self.crop_target:
                    Y[shift_h:shift_h+h, shift_w:shift_w+w] = self.replacement
        return X


# Randomly change its color
class Leaf(object):
    """Randomly shift the hue of red regions (e.g. recolor leaves/fruit).

    Red pixels are selected in HSV on a blurred copy using the two red
    hue bands; selected pixels get a random hue offset and are written
    back into the original image.
    """

    def __init__(self):
        pass

    def __call__(self, X):
        blur = cv2.GaussianBlur(X, (7, 7), 0)
        hsv_blur = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
        # lower red band (hue 0-20)
        lower_red = np.array([0, 130, 130])
        upper_red = np.array([20, 255, 255])
        mask_0 = cv2.inRange(hsv_blur, lower_red, upper_red)

        # upper red band (hue 165-185); shift those hues down by 165
        lower_red = np.array([165, 130, 130])
        upper_red = np.array([185, 255, 255])
        mask_1 = cv2.inRange(hsv_blur, lower_red, upper_red)
        hsv_blur[np.where(mask_1)] = hsv_blur[np.where(
            mask_1)] - np.array([165, 0, 0])

        # NOTE(review): uint8 addition of the two 0/255 masks can wrap
        # where they overlap — confirm cv2.bitwise_or was not intended.
        mask = mask_0 + mask_1
        # random hue offset for the selected pixels
        turn_color = np.random.randint(0, 255)
        hsv_blur[np.where(mask)] = hsv_blur[np.where(mask)] + \
            np.array([turn_color, 0, 0])
        X_blur = cv2.cvtColor(hsv_blur, cv2.COLOR_HSV2BGR)
        X[np.where(mask)] = X_blur[np.where(mask)]
        return X


# Brightness
class Brightness(object):
    """Add a random brightness offset drawn from range_brightness."""

    def __init__(self, range_brightness=(-50, 50)):
        self.range_brightness = range_brightness

    def __call__(self, X):
        offset = np.random.randint(*self.range_brightness)
        return X + offset
# Contrast


class Contrast(object):
    """Apply a random contrast adjustment drawn from range_contrast."""

    def __init__(self, range_contrast=(-50, 50)):
        self.range_contrast = range_contrast

    def __call__(self, X):
        c = np.random.randint(*self.range_contrast)
        return X * (c / 127 + 1) - c

# Noise


class UniformNoise(object):
    """Add i.i.d. uniform noise in [low, high) to the input."""

    def __init__(self, low=-50, high=50):
        self.low = low
        self.high = high

    def __call__(self, X):
        return X + np.random.uniform(self.low, self.high, X.shape)

# Gaussian noise


class GaussianNoise(object):
    """Add Gaussian noise (cast to uint8, preserving original behavior)."""

    def __init__(self, center=0, std=50):
        self.center = center
        self.std = std

    def __call__(self, X):
        noise = np.random.normal(self.center, self.std, X.shape).astype(np.uint8)
        return X + noise


def _extend_rect1(r, k):
    x1, y1, x2, y2 = r
    x, y = (x2-x1)*k, (y2-y1)*k
    x1 = int(x1-x)
    y1 = int(y1-y)
    x2 = int(x2+x)
    y2 = int(y2+y)
    return (x1, y1, x2, y2)

# Vignetting toward black or toward white


class Vignetting(object):
    """Apply a random vignette (darken or brighten toward the borders).

    ratio_min_dist: fraction of the half-extent left vignette-free.
    range_vignette: range the random vignette strength is drawn from.
    random_sign: when True, brighten instead of darken half the time.
    """

    def __init__(self,
                 ratio_min_dist=0.2,
                 range_vignette=(0.2, 0.8),
                 random_sign=False):
        self.ratio_min_dist = ratio_min_dist
        self.range_vignette = np.array(range_vignette)
        self.random_sign = random_sign

    def __call__(self, X):
        h, w = X.shape[:2]
        # NOTE(review): min_dist[0] is derived from h but applied to the
        # x/width axis below (and [1] to y) — confirm the intended pairing.
        min_dist = np.array([h, w]) / 2 * \
            np.random.random() * self.ratio_min_dist

        # create matrix of distance from the center on the two axis
        x, y = np.meshgrid(np.linspace(-w/2, w/2, w),
                           np.linspace(-h/2, h/2, h))
        x, y = np.abs(x), np.abs(y)

        # create the vignette mask on the two axis
        x = (x - min_dist[0]) / (np.max(x) - min_dist[0])
        x = np.clip(x, 0, 1)
        y = (y - min_dist[1]) / (np.max(y) - min_dist[1])
        y = np.clip(y, 0, 1)

        # then get a random intensity of the vignette
        vignette = (x + y) / 2 * np.random.uniform(*self.range_vignette)
        vignette = np.tile(vignette[..., None], [1, 1, 3])

        # random_sign=False always darkens (sign is -1)
        sign = 2 * (np.random.random() < 0.5) * (self.random_sign) - 1
        X = X * (1 + sign * vignette)

        return X

# Lens distortion


class LensDistortion(object):
    """Apply a random lens (radial/tangential) distortion by remapping.

    d_coef: maximum magnitudes of the five distortion coefficients
    (k1, k2, p1, p2, k3); each is drawn with random magnitude and sign.
    """

    def __init__(self, d_coef=(0.15, 0.15, 0.1, 0.1, 0.05)):
        self.d_coef = np.array(d_coef)

    def __call__(self, X):
        # get the height and the width of the image
        h, w = X.shape[:2]

        # focal length from the image diagonal
        f = (h ** 2 + w ** 2) ** 0.5

        # simple pinhole camera matrix centered on the image
        K = np.array([[f, 0, w / 2],
                      [0, f, h / 2],
                      [0, 0,     1]])

        d_coef = self.d_coef * np.random.random(5)  # value
        d_coef = d_coef * (2 * (np.random.random(5) < 0.5) - 1)  # sign
        # Generate new camera matrix from parameters
        M, _ = cv2.getOptimalNewCameraMatrix(K, d_coef, (w, h), 0)

        # Generate look-up tables for remapping the camera image
        remap = cv2.initUndistortRectifyMap(K, d_coef, None, M, (w, h), 5)

        # Remap the original image to a new image
        X = cv2.remap(X, *remap, cv2.INTER_LINEAR)

        return X

def imarg(img, r, cnt, ops, extend=0, maxoff=0.1, maxangle=10):
    """Produce cnt augmented crops of rectangle r from img.

    The first crop is the plain rect (grown by `extend`); each later
    one gets a random shift of up to `maxoff` of the rect size, a
    random rotation within ±maxangle degrees, and every op in `ops`
    applied with probability 0.5.
    """
    out = []
    kx = int((r[2]-r[0])*maxoff)
    ky = int((r[3]-r[1])*maxoff)
    r3 = extend_rect1(img, r, extend)
    for i in range(cnt):
        r4 = r3
        if i > 0:
            # random shift of the crop window
            offx = randint(-kx, kx)
            offy = randint(-ky, ky)
            r4 = (r3[0]+offx, r3[1]+offy, r3[2]+offx, r3[3]+offy)
            # random rotation within ±maxangle degrees
            angle = 2*maxangle*random.random() - maxangle
            img_r = rotate_bound1(img, r4, angle)

            for op in ops:
                if random.random() > 0.5:
                    img_r = op(img_r)
        else:
            img_r = imrect(img, r4)
        #imshow('adsfadsf', img_r, -1)
        out.append(img_r)

    return out


def cut_rect(r, rows, cols):
    """Split rectangle r into a rows x cols grid of sub-rectangles.

    Returns (x1, y1, x2, y2) tuples in row-major order; integer edges
    are computed so the pieces exactly tile r.
    """
    x0, y0 = r[0], r[1]
    width = r[2] - r[0]
    height = r[3] - r[1]
    boxes = []
    for j in range(rows):
        top = y0 + (j * height) // rows
        bottom = y0 + ((j + 1) * height) // rows
        for i in range(cols):
            left = x0 + (i * width) // cols
            right = x0 + ((i + 1) * width) // cols
            boxes.append((left, top, right, bottom))
    return boxes


def mean(a):
    """Arithmetic mean of a (thin wrapper over np.mean)."""
    return np.mean(a)


def least_squares(x, y=None):
    """Accumulate least-squares line terms for points (x[i], y[i]).

    Accepts separate x/y sequences or a single sequence of (x, y)
    pairs.  Returns (k1, k2, b) where k1 = sum((x-x̄)(y-ȳ)),
    k2 = sum((x-x̄)²) and b = k2*ȳ - k1*x̄, i.e. the line in the
    k1*x - k2*y + b = 0 form consumed by get_point_line_distance2.
    """
    if y is None:
        pairs = x
        x = [p[0] for p in pairs]
        y = [p[1] for p in pairs]

    x_bar = np.mean(x)
    y_bar = np.mean(y)
    sxy = 0
    sxx = 0
    for xi, yi in zip(x, y):
        dx = xi - x_bar
        sxy += dx * (yi - y_bar)
        sxx += dx * dx
    b = sxx * y_bar - sxy * x_bar
    return sxy, sxx, b

import math

def get_point_line_distance2(point, line):
    """Distance from point to the line k1*x - k2*y + b = 0, line=(k1, k2, b)."""
    px, py = point[0], point[1]
    k1, k2, b = line[0], line[1], line[2]
    return math.fabs(k1 * px - k2 * py + b) / math.sqrt(k1 * k1 + k2 * k2)


def get_point_line_distance(point, linept0, linept1):
    """Distance from a point to the infinite line through two points."""
    px, py = point[0], point[1]
    x0, y0 = linept0[0], linept0[1]
    x1, y1 = linept1[0], linept1[1]
    # vertical line: distance is the horizontal offset
    if x1 - x0 == 0:
        return math.fabs(px - x0)
    # horizontal line: distance is the vertical offset
    if y1 - y0 == 0:
        return math.fabs(py - y0)
    # general case: write the line as k1*x - k2*y + b = 0
    k1 = y1 - y0
    k2 = x1 - x0
    b = k2 * y0 - k1 * x0
    return math.fabs(k1 * px - k2 * py + b) / math.pow(k1 * k1 + k2 * k2, 0.5)



# Initial seed selection
def originalSeed(gray, th):
    """Pick one seed pixel per connected bright region of `gray`.

    The image is adaptively thresholded; each connected component is
    isolated by repeated dilate-and-mask, then eroded down to a single
    pixel whose coordinates are collected.
    NOTE(review): parameter `th` is unused — the fixed-threshold
    variant is commented out below.
    """
    #ret, thresh = cv2.threshold(gray, th, 255, cv2.THRESH_BINARY)#binary seed regions (different splits give different seeds)

    thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, blockSize=31,
                               C=2)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3))#3x3 structuring element
    thresh_copy = thresh.copy() #working copy of thresh
    thresh_B = np.zeros(gray.shape, np.uint8) #same size as thresh, all zeros
    seeds = [ ] #collected seed coordinates

    #loop until every pixel in thresh_copy is zero
    while thresh_copy.any():
        Xa_copy, Ya_copy = np.where(thresh_copy > 0) #coordinates of remaining 255 pixels
        thresh_B[Xa_copy[0], Ya_copy[0]] = 255 #pick the first one as a region marker
        #connected component: repeatedly dilate thresh_B, then AND with thresh
        for i in range(200):
            dilation_B = cv2.dilate(thresh_B, kernel, iterations=1)
            thresh_B = cv2.bitwise_and(thresh, dilation_B)
        #clear this component from the working copy
        Xb, Yb = np.where(thresh_B > 0)
        thresh_copy[Xb, Yb] = 0
        #erode until only a single pixel of the component remains
        while str(thresh_B.tolist()).count("255") > 1:
            thresh_B = cv2.erode(thresh_B,  kernel, iterations=1) #erosion
        X_seed, Y_seed = np.where(thresh_B > 0) #seed coordinates
        if X_seed.size > 0 and Y_seed.size > 0:
            seeds.append((X_seed[0], Y_seed[0]))#record the seed
        thresh_B[Xb, Yb] = 0 #reset thresh_B
    return seeds

# Region growing
def regionGrow2(gray, seeds, thresh, p):
    """Region growing from the image center (BFS over 4/8-neighbors).

    p: connectivity, 8 or 4.
    NOTE(review): `seeds` is only used for the printout — growth always
    starts from the image center, and that center is pushed as FLOAT
    coordinates (w/2, h/2), so `gray[pt]` / the index arithmetic below
    raise on modern NumPy.  regionGrow() looks like the working
    variant; confirm whether this function is dead code.
    """
    seedMark = np.zeros(gray.shape)
    #8-neighborhood offsets
    if p == 8:
        connection = [(-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1)]
    elif p == 4:
        connection = [(-1, 0), (0, 1), (1, 0), (0, -1)]

    print("len seeds:",len(seeds))
    myseeds = []
    h,w = gray.shape[:2]
    myseeds.append((w/2,h/2))
    #stop when no seeds remain
    while len(myseeds) != 0:
        #pop the front of the queue
        pt = myseeds.pop(0)
        for i in range(p):
            tmpX = pt[0] + connection[i][0]
            tmpY = pt[1] + connection[i][1]

            #skip out-of-bounds neighbors
            if tmpX < 0 or tmpY < 0 or tmpX >= gray.shape[0] or tmpY >= gray.shape[1]:
                continue

            if abs(int(gray[tmpX, tmpY]) - int(gray[pt])) < thresh and seedMark[tmpX, tmpY] == 0:
                seedMark[tmpX, tmpY] = 255
                myseeds.append((tmpX, tmpY))
    return seedMark

# Region growing
def regionGrow(gray, thresh, p):
    """Region growing from the image center pixel.

    Marks (255) every pixel reachable from the center whose gray value
    differs from the center pixel's by less than `thresh`.

    p: connectivity, 8 or 4 (other values now raise ValueError; the
    original crashed with UnboundLocalError).

    Improvements: BFS queue is a deque (list.pop(0) is O(n) per pop)
    and the constant center value is read once instead of per pixel.
    """
    from collections import deque

    if p == 8:
        connection = [(-1, -1), (-1, 0), (-1, 1), (0, 1),
                      (1, 1), (1, 0), (1, -1), (0, -1)]
    elif p == 4:
        connection = [(-1, 0), (0, 1), (1, 0), (0, -1)]
    else:
        raise ValueError("p must be 4 or 8")

    seedMark = np.zeros(gray.shape)
    h, w = gray.shape[:2]
    cx, cy = int(w / 2), int(h / 2)
    center_val = int(gray[cy, cx])

    seeds = deque([(cx, cy)])
    while seeds:
        x, y = seeds.popleft()
        for dx, dy in connection:
            nx, ny = x + dx, y + dy
            # skip out-of-bounds neighbors
            if nx < 0 or ny < 0 or nx >= w or ny >= h:
                continue
            if abs(center_val - int(gray[ny, nx])) < thresh and seedMark[ny, nx] == 0:
                seedMark[ny, nx] = 255
                seeds.append((nx, ny))
    return seedMark

def cv_imread(filePath, flags=cv2.IMREAD_COLOR):
    """Read an image from a path that may contain non-ASCII characters.

    np.fromfile + cv2.imdecode avoids cv2.imread's failure on
    non-ASCII (e.g. Chinese) paths on Windows.
    """
    cv_img = cv2.imdecode(np.fromfile(filePath, dtype=np.uint8), flags)
    # Enable the conversion below only if RGB output is required for
    # downstream code; OpenCV itself works in BGR order.
    # cv_img=cv2.cvtColor(cv_img,cv2.COLOR_RGB2BGR)
    return cv_img


def cv_imwrite(filename, src, q=90):
    """Write an image to a path that may contain non-ASCII characters.

    Fix: the original always encoded as JPEG regardless of the target
    extension, so e.g. a '.png' filename received JPEG bytes.  The
    encoder now follows the filename's extension ('.jpg' fallback);
    the quality parameter q applies to JPEG output only.
    """
    ext = os.path.splitext(filename)[1] or '.jpg'
    params = [cv2.IMWRITE_JPEG_QUALITY, q] if ext.lower() in ('.jpg', '.jpeg') else []
    cv2.imencode(ext, src, params=params)[1].tofile(filename)


def listdir(dir, filter=None):
    """List entries of dir, optionally keeping only given extensions.

    filter may be a space-separated string ('.jpg .png') or a list of
    extensions.  Returns [] when dir does not exist.
    """
    if not os.path.isdir(dir):
        return []

    names = os.listdir(dir)
    if filter is None:
        return names

    if isinstance(filter, str):
        filter = filter.split(' ')

    return [name for name in names
            if os.path.splitext(name)[1] in filter]

def listimg(img_folder):
    """List image files (.jpg/.jpeg/.png/.bmp) in img_folder."""
    return listdir(img_folder, ['.jpg', '.jpeg', '.png', '.bmp'])


def rect_inter(r1, r2):
    """Intersection of two xyxy rects: ([x1, y1, x2, y2], area).

    Returns ([0, 0, 0, 0], 0) when the rectangles do not overlap.
    """
    xA = max(r1[0], r2[0])
    yA = max(r1[1], r2[1])
    xB = min(r1[2], r2[2])
    yB = min(r1[3], r2[3])

    area = max(xB - xA, 0) * max(yB - yA, 0)
    if area == 0:
        return [0, 0, 0, 0], 0
    return [xA, yA, xB, yB], area

def rect_iarea(r1, r2):
    """Overlap area of two xyxy rectangles (0 when disjoint)."""
    ow = min(r1[2], r2[2]) - max(r1[0], r2[0])
    oh = min(r1[3], r2[3]) - max(r1[1], r2[1])
    return max(ow, 0) * max(oh, 0)

def rect_iou(r1, r2):
    """Intersection-over-union of two xyxy rectangles.

    Fix: degenerate input (zero-area boxes with zero intersection)
    previously divided by zero; it now returns 0.0.
    """
    x11, y11, x12, y12 = r1
    x21, y21, x22, y22 = r2
    xA = max(x11, x21)
    yA = max(y11, y21)
    xB = min(x12, x22)
    yB = min(y12, y22)

    # individual box areas
    boxAArea = (x12 - x11) * (y12 - y11)
    boxBArea = (x22 - x21) * (y22 - y21)

    # overlap area
    interArea = max(xB - xA, 0) * max(yB - yA, 0)

    union = boxAArea + boxBArea - interArea
    if union <= 0:
        return 0.0
    return interArea / union


def max_iou_with_rects(r, pos):
    """Largest IoU between r and any rectangle in pos (0 if pos empty)."""
    return max((rect_iou(r, x) for x in pos), default=0)



def boxPoints(rbox):
    """Corner points of a rotated box (cx, cy, w, h, degree).

    Returns four [x, y] corners rotated about the center, reordered so
    the corner with the smallest x+y (roughly top-left) comes first.
    """
    cx, cy, w, h, degree = rbox
    hw, hh = w / 2, h / 2
    a = degree * math.pi / 180
    s, c = math.sin(a), math.cos(a)

    corners = []
    best = 0
    best_xy = 10000000
    for x, y in [(hw, hh), (-hw, hh), (-hw, -hh), (hw, -hh)]:
        px = c * x - s * y + cx
        py = s * x + c * y + cy
        if px + py < best_xy:
            best_xy = px + py
            best = len(corners)
        corners.append([px, py])

    return corners[best:] + corners[:best]


def draw_rbox(img, rbox, color=(0, 0, 255), linew=2):
    """Draw a rotated box outline; returns (img, corner points)."""
    corners = boxPoints(rbox)
    poly = np.array(corners, np.int32).astype(int).reshape(-1, 1, 2)
    cv2.polylines(img, [poly], True, color, linew)
    return img, corners

def boundingRect(pts):
    """Axis-aligned [x1, y1, x2, y2] bounding box of a point list/array.

    Fix: ``np.int`` was removed in NumPy 1.24 (AttributeError); plain
    ``int`` is exactly what it aliased.
    """
    if isinstance(pts, list):
        pts = np.array(pts).astype(int).reshape([-1, 1, 2])

    x, y, w, h = cv2.boundingRect(pts)
    return [x, y, x + w, y + h]

def rbox2rect(rbox):
    """Axis-aligned bounding rect of a rotated box."""
    return boundingRect(boxPoints(rbox))

def imrbox(image, rbox, ext = 1):
    """Crop the (w*ext) x (h*ext) patch around a rotated box by rotating
    the image so the box becomes axis-aligned.

    Returns (patch, M) where M is the affine matrix that was applied.
    """
    cx, cy, w, h, degree = rbox
    
    # optionally enlarge the crop window
    h*=ext
    w*=ext

    # rotation about the (integer) box center
    M = cv2.getRotationMatrix2D((int(cx), int(cy)), degree, 1.0)

    # output patch size
    nW = int(w)
    nH = int(h)

    # shift so the box center maps to the patch center
    M[0, 2] += (w/2) - cx
    M[1, 2] += (h/2) - cy
    return cv2.warpAffine(image, M, (nW, nH)),M

def rbox_norm(rbox):
    """Normalize a rotated box so that w >= h and degree is in (-90, 90]."""
    cx, cy, w, h, degree = rbox
    if h > w:
        w, h = h, w
        degree += 90
        while degree > 90:
            degree -= 180
    return (cx, cy, w, h, degree)

def minAreaRect(c):
    """cv2.minAreaRect wrapper returning (cx, cy, w, h, degree),
    normalized so that w >= h and degree is in (-90, 90]."""
    if isinstance(c, list):
        c = np.array(c).reshape([-1, 1, 2])
    if len(c.shape) == 2:
        c = c.reshape([-1, 1, 2])

    (cx, cy), (w, h), degree = cv2.minAreaRect(c)
    if h > w:
        w, h = h, w
        degree += 90
        while degree > 90:
            degree -= 180
    return (cx, cy, w, h, degree)


def process_bar(percent, start_str='', end_str='', total_length=0):
    """Print an in-place progress bar like: ▇▇▇   12.5%|"""
    filled = '▇' * int(percent * total_length)
    line = '\r' + start_str + filled.ljust(total_length) \
        + ' {:0>4.1f}%|'.format(percent * 100) + end_str
    print(line, end='', flush=True)


def get_last_modify_file(pa, filter, deffile='nofile'):
    """Path of the most recently modified file in pa matching filter,
    or deffile when nothing matches."""
    names = listdir(pa, filter)
    if not names:
        return deffile
    names.sort(key=lambda fn: os.path.getmtime(os.path.join(pa, fn)))
    return os.path.join(pa, names[-1])

def loadICDAR(txt_fn):
    """Parse an ICDAR-style label file.

    Each line holds 8 comma- or space-separated polygon coordinates
    followed by a label.  Returns a list of (poly, label) tuples where
    poly is [[x, y] * 4].

    Fix: the file handle was opened without ever being closed; it is
    now managed with a context manager.
    """
    shapes = []
    with open(txt_fn, 'r', encoding='utf8') as f:
        lines = f.readlines()
    for line in lines:
        line = line.replace(' ', ',')
        parts = line.strip().split(',')
        label = parts[8]
        # strip BOM remnants that sometimes prefix the first field
        parts = [p.strip('\ufeff').strip('\xef\xbb\xbf') for p in parts]
        poly = list(map(float, parts[:8]))
        poly = np.array(poly, dtype=np.float32).reshape((-1, 2)).tolist()  # [4, 2]
        shapes.append((poly, label))

    return shapes
    
def loadICDAR_rbox(txt_fn):
    """Load ICDAR labels and convert each polygon to a rotated box."""
    boxes = []
    for poly, _label in loadICDAR(txt_fn):
        pts = np.array(poly).astype("float32")
        boxes.append(minAreaRect(pts))
    return boxes
    
def is_new_then(src, out):
    """True when src exists and out is missing or older than src."""
    if not os.path.exists(src):
        return False
    if not os.path.exists(out):
        return True
    return os.path.getmtime(src) > os.path.getmtime(out)


def save_xml_annotation(outfn, obj, h, w, size=[1, 1]):
    """Write a Pascal-VOC style XML annotation file and return its text.

    obj: iterable of ((xmin, ymin, xmax, ymax), name) pairs.
    h, w: image height/width written into <size>.
    size: per-axis scale [sx, sy] applied to the box coordinates.
    NOTE: `size` is a mutable default argument kept for interface
    compatibility; it is never mutated here.

    Fix: the output file is now closed via a context manager and
    written as UTF-8 explicitly (names may be non-ASCII).
    """
    obj_xml = ''
    for box, name in obj:
        xmin, ymin, xmax, ymax = box
        xmin = int(xmin*size[0])
        ymin = int(ymin*size[1])
        xmax = int(xmax*size[0])
        ymax = int(ymax*size[1])
        obj_xml += f'''
        <object>
            <name>{name}</name>
            <pose>Unspecified</pose>
            <truncated>0</truncated>
            <difficult>0</difficult>
            <bndbox>
                <xmin>{xmin}</xmin>
                <ymin>{ymin}</ymin>
                <xmax>{xmax}</xmax>
                <ymax>{ymax}</ymax>
            </bndbox>
        </object>'''

    out = f'''<annotation>
	<folder></folder>
	<filename></filename>
	<path>{''}</path>
	<source>
		<database>Unknown</database>
	</source>
	<size>
		<width>{w}</width>
		<height>{h}</height>
		<depth>3</depth>
	</size>
	<segmented>0</segmented>
    {obj_xml}
    </annotation>'''

    with open(outfn, 'w', encoding='utf-8') as f:
        f.write(out)
    return out


def affPoints(rbox, sx=0, sy=0):
    """Corners of a rotated box with shear (sx, sy in degrees) applied.

    Returns four [x, y] corners (TL, BL, BR, TR of the unrotated box)
    transformed by rotation + shear about the box center.
    """
    cx, cy, w, h, degree = rbox
    hw, hh = w / 2, h / 2
    a = degree * math.pi / 180
    s, c = math.sin(a), math.cos(a)
    tx = math.tan(math.radians(sx))
    ty = math.tan(math.radians(sy))
    # combined rotation + shear matrix entries
    m00, m01 = c, -s + tx
    m10, m11 = s + ty, c
    return [[m00 * x + m01 * y + cx, m10 * x + m11 * y + cy]
            for x, y in [(-hw, -hh), (-hw, hh), (hw, hh), (hw, -hh)]]

# Image shear
def imaff(image, rbox, tx = 0, ty = 0):
    """Extract the w x h patch of a rotated box, with an extra shear of
    (tx, ty) degrees, via an affine map between corner triples.

    Returns (patch, M) where M is the 2x3 affine matrix used.
    NOTE(review): `degree` is unpacked and immediately set to 0 but
    never used afterwards (dead store) — the box rotation actually
    comes from affPoints(rbox).
    """
    cx, cy, w, h, degree = rbox
    degree = 0
    nW = int(w)
    nH = int(h)
    # three source corners of the rotated box, and the matching
    # destination corners of an upright w x h box
    pts = np.array(affPoints(rbox)).astype(np.float32).reshape([-1, 1, 2])[:3]
    ptd = np.array(affPoints([w/2, h/2, w, h, 0], tx, ty)).astype(np.float32).reshape([-1, 1, 2])[:3]
    M = cv2.getAffineTransform(pts, ptd)
    return cv2.warpAffine(image, M, (nW, nH)), M


def read_xml_annotation(in_file):
    """Read Pascal-VOC XML boxes as [([xmin, ymin, xmax, ymax], name), ...].

    Returns [] when the file is missing or fails to parse.

    Fixes: the file is now opened via a context manager (the original
    leaked the handle) and the loop variable no longer shadows the
    builtin ``object``.
    """
    import xml.etree.ElementTree as ET
    if not os.path.exists(in_file):
        return []

    try:
        with open(in_file, mode='rb') as f:
            tree = ET.parse(f)
    except Exception:
        return []
    root = tree.getroot()
    bndboxlist = []

    for obj in root.findall('object'):
        bndbox = obj.find('bndbox')
        # coordinates may be written as floats; truncate to int
        xmin = int(float(bndbox.find('xmin').text))
        xmax = int(float(bndbox.find('xmax').text))
        ymin = int(float(bndbox.find('ymin').text))
        ymax = int(float(bndbox.find('ymax').text))
        name = obj.findtext('name')
        bndboxlist.append(([xmin, ymin, xmax, ymax], name))
    return bndboxlist

def convert2Pixmap(img, lableW: int, lableH: int):
    """Convert an OpenCV image to a QPixmap scaled to fit a label of
    size lableW x lableH (aspect ratio preserved)."""
    from PyQt5.QtGui import QImage, QPixmap
    h, w = img.shape[:2]
    s = min(lableW / w, lableH / h)
    img = imresize(img, s)
    h, w = img.shape[:2]
    if len(img.shape) == 3:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # OpenCV loads BGR; Qt expects RGB
    else:
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
    # bytesPerLine = w * 3 for tightly packed RGB888
    image = QImage(img.data, w, h, w * 3, QImage.Format_RGB888)
    jpg_out = QPixmap(image)  # wrap as QPixmap
    return jpg_out

