# -*- coding: UTF-8 -*-
import copy, json, logging, os, random, shutil, sys, cv2
import numpy as np
from enum import Enum
from config.config_loader import *
from concurrent.futures import ThreadPoolExecutor


def sample_triplets(file_list, num_groups, skip):
    """
    Split ``file_list`` into consecutive groups of ``skip`` items and randomly
    pick ``num_groups`` of those groups.

    Args:
        file_list (list): source file-path list; length must be a multiple of ``skip``.
        num_groups (int): number of groups to sample.
        skip (int): group size (3 for triplets).

    Returns:
        list: the sampled groups flattened back into a single flat list.

    Raises:
        ValueError: if the list length is not a multiple of ``skip`` or
            ``num_groups`` exceeds the number of available groups.
    """
    if len(file_list) % skip != 0:
        # Bug fix: the message hard-coded "3" while the code checks `skip`.
        raise ValueError(f"file_list 的长度必须是{skip}的倍数")

    # Chunk into consecutive groups of `skip` items.
    groups = [file_list[i:i + skip] for i in range(0, len(file_list), skip)]

    if num_groups > len(groups):
        # Bug fix: the original string lacked the f-prefix, so the
        # placeholders were printed literally instead of interpolated.
        raise ValueError(f"无法选取 {num_groups} 组，最多只能选取 {len(groups)} 组")

    # Randomly pick the requested number of groups.
    sampled_groups = random.sample(groups, num_groups)

    # Flatten the sampled groups back into one list.
    return [item for group in sampled_groups for item in group]


def labelme_to_mask(json_path):
    """
    Convert a labelme JSON annotation file into a label mask.

    Background pixels are 0; each distinct label is assigned an increasing id
    1..255 in order of first appearance, and its shapes are filled with that id.

    Args:
        json_path (str): path of the labelme JSON file.

    Returns:
        tuple: (mask, label_map) — mask is an (H, W) uint8 array (None when the
        file is missing or empty); label_map maps label name -> mask value.
    """
    if not os.path.exists(json_path):
        print(json_path, "does not exist.")
        return None, {}

    if os.path.getsize(json_path) == 0:
        return None, {}

    with open(json_path, 'r') as f:
        json_data = json.load(f)

    image_w, image_h = json_data['imageWidth'], json_data['imageHeight']
    mask = np.zeros(shape=(image_h, image_w), dtype=np.uint8)
    label_map = {}
    cur_label_num = 0
    for shape_data in json_data['shapes']:
        label = shape_data['label']
        if label not in label_map:
            cur_label_num += 1
            label_map[label] = cur_label_num

        points = shape_data['points']
        int_points = [[int(round(num)) for num in sublist] for sublist in points]
        if shape_data['shape_type'] == 'rectangle':
            # Bug fix: fill with the id registered for this label. The original
            # used `cur_label_num + 1`, which was off by one versus polygons and
            # wrong whenever an already-seen label reappeared after a new one.
            cv2.rectangle(mask, (int_points[0][0], int_points[0][1]), (int_points[1][0], int_points[1][1]),
                          label_map[label], cv2.FILLED)
        elif shape_data["shape_type"] == 'polygon':
            # Bug fix: same as above — use label_map[label] rather than the
            # running counter (matches the corrected json_to_mask()).
            np_pts = np.array(int_points, dtype=np.int32)
            mask = cv2.fillPoly(mask, [np_pts], [label_map[label]])
        else:
            # Other labelme shape types (circle, line, point, ...) are not supported.
            pass
    return mask, label_map


def labelme_to_bbox(json_path):
    """
    Convert a labelme JSON file into rectangle boxes plus their label names,
    which is convenient for drawing during visualisation.

    Returns:
        bbox_list: each element is [min_x, min_y, max_x, max_y] (ints).
        label_list: each element is the raw label string from the annotation.
    """
    if not os.path.exists(json_path):
        print(json_path, "does not exist.")
        return [], []

    if os.path.getsize(json_path) == 0:
        return [], []

    with open(json_path, 'r') as f:
        annotation = json.load(f)

    bbox_list, label_list = [], []
    for shape in annotation['shapes']:
        label_list.append(shape['label'])
        pts = shape['points']
        kind = shape['shape_type']
        if kind == 'rectangle':
            # labelme stores a rectangle as its two corner points.
            (x0, y0), (x1, y1) = pts[0], pts[1]
            raw_box = [x0, y0, x1, y1]
        elif kind == 'polygon':
            # Axis-aligned bounding box of all polygon vertices.
            xs = [p[0] for p in pts]
            ys = [p[1] for p in pts]
            raw_box = [min(xs), min(ys), max(xs), max(ys)]
        else:
            continue
        bbox_list.append([int(round(v)) for v in raw_box])
    return bbox_list, label_list


def labelme_to_yolo(file_path, save_path, task='od'):
    """
    Convert a labelme JSON annotation into a YOLO txt label file.

    Args:
        file_path (str): path of the labelme JSON file.
        save_path (str): directory in which the .txt file is written.
        task (str): 'od' for detection boxes, 'seg' for segmentation polygons.

    Returns:
        True on success, None when the input is missing/unusable.
    """
    if not os.path.exists(file_path):
        print(file_path, "does not exist.")
        return

    file_name = str(os.path.splitext(os.path.basename(file_path))[0])
    yolo_file_path = save_path + "/" + file_name + ".txt"
    label_txt_list = []
    if 'od' == task:
        with open(file_path, 'r') as f:
            json_data = json.load(f)
        image_w, image_h = json_data['imageWidth'], json_data['imageHeight']

        bbox_list, label_list = labelme_to_bbox(file_path)
        # Bug fix: list(set(...)) gave a hash-dependent, run-to-run unstable
        # class-id ordering; dict.fromkeys keeps first-appearance order.
        unique_label_list = list(dict.fromkeys(label_list))
        for index, box in enumerate(bbox_list):
            # Normalised (cx, cy, w, h) as required by YOLO.
            points_nor_list = [(box[0] + box[2]) / 2 / image_w, (box[1] + box[3]) / 2 / image_h,
                               (box[2] - box[0]) / image_w, (box[3] - box[1]) / image_h]
            points_nor_str = ' '.join(str(x) for x in points_nor_list)
            label_str = str(unique_label_list.index(label_list[index])) + " " + points_nor_str + '\n'
            label_txt_list.append(label_str)
    elif 'seg' == task:
        seg_mask, label_map = labelme_to_mask(file_path)
        if seg_mask is None:
            # Bug fix: a missing/empty annotation used to crash on .shape below.
            return
        image_h, image_w = seg_mask.shape[:2]
        for label in np.unique(seg_mask):
            if label <= 0:  # 0 is background
                continue

            label_mask = np.zeros_like(seg_mask)
            label_mask[seg_mask == label] = 255
            cnts, _ = cv2.findContours(label_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in cnts:
                # Use the convex hull of each blob as the polygon outline.
                hull = cv2.convexHull(cnt)
                temp_str = str(label - 1)
                for i in range(hull.shape[0]):
                    temp_str += ' ' + str(hull[i, 0, 0] / image_w) + ' ' + str(hull[i, 0, 1] / image_h)
                label_txt_list.append(temp_str + '\n')
    with open(yolo_file_path, 'w', encoding='utf-8') as f:
        # write() is the correct single-string API; the original fed one string
        # per call to writelines(), which expects an iterable of lines.
        f.write(''.join(label_txt_list))
    print(" --> The %s.txt file saved successfully" % file_name)
    return True


def generate_polygon(center, avg_radius, irregularity, spikeyness, num_vertices):
    """
    Generate a random polygon around a centre point.

    :param center: (x, y) centre of the polygon
    :param avg_radius: mean distance of the vertices from the centre
    :param irregularity: angular jitter, clipped to [0, 1]
    :param spikeyness: radial jitter, clipped to [0, 1]
    :param num_vertices: number of vertices
    :return: (num_vertices, 2) float array of vertex coordinates (truncated to int)
    """
    # Scale both jitter amounts to usable ranges.
    angle_jitter = np.clip(irregularity, 0, 1) * 2 * np.pi / num_vertices
    radial_jitter = np.clip(spikeyness, 0, 1)

    # Evenly spaced base angles, each perturbed by a uniform offset.
    base_step = 2 * np.pi / num_vertices
    angles = base_step * np.arange(num_vertices) + np.array(
        [np.random.uniform(-angle_jitter, angle_jitter) for _ in range(num_vertices)])

    # Gaussian radii clamped into [0, 2 * avg_radius].
    radii = np.clip(np.random.normal(avg_radius, avg_radius * radial_jitter, num_vertices),
                    0, 2 * avg_radius)

    vertices = np.zeros((num_vertices, 2))
    for idx, (ang, rad) in enumerate(zip(angles, radii)):
        # Truncate to whole pixels, matching the original behaviour.
        vertices[idx] = [int(center[0] + rad * np.cos(ang)),
                         int(center[1] + rad * np.sin(ang))]
    return vertices


def get_key_from_value(dictionary, target_value):
    """Return the first key in `dictionary` whose value equals `target_value`,
    or None when no value matches (reverse lookup)."""
    return next((key for key, value in dictionary.items() if value == target_value), None)


def generate_odd_random_number(low, high):
    """Return a random odd integer within [low, high] (bounds shrunk inward to odd)."""
    # Move even bounds inward to the nearest odd numbers.
    lo = low + 1 if low % 2 == 0 else low
    hi = high - 1 if high % 2 == 0 else high
    # Stepping by 2 from an odd start yields only odd candidates.
    return np.random.choice(range(lo, hi + 1, 2))


def draw_custom_line(img, pt1, pt2, color, thickness=1, pattern=(5, 5), gap=5):
    """
    Draw a dashed/patterned line from pt1 to pt2 on img (in place).

    pattern: [a]       solid line, any value works
             [a, b]    dashed: segments of a px drawn, b px skipped, repeated
             [a, b, c] a px drawn, b px skipped, c px drawn, then repeat
    gap: sampling step (pixels) along the line.

    Note: the default is now an immutable tuple instead of a mutable list
    (same values; only indexed and measured, so fully compatible).
    """
    dist = ((pt1[0] - pt2[0]) ** 2 + (pt1[1] - pt2[1]) ** 2) ** .5
    # Bug fix: a zero-length line produced an empty sample list and crashed on
    # pts[0]; there is nothing to draw in that case.
    if dist == 0:
        return

    # Sample points along the line every `gap` pixels.
    pts = []
    for i in np.arange(0, dist, gap):
        r = i / dist
        x = int((pt1[0] * (1 - r) + pt2[0] * r))
        y = int((pt1[1] * (1 - r) + pt2[1] * r))
        pts.append((x, y))

    s = pts[0]
    e = pts[0]
    pattern_index = 0
    segment_length = 0
    for p in pts:
        s = e
        e = p
        segment_length += gap
        if segment_length >= pattern[pattern_index]:
            # Even-indexed pattern entries are drawn, odd ones are gaps.
            if pattern_index % 2 == 0:
                cv2.line(img, s, e, color, thickness)
            segment_length -= pattern[pattern_index]
            pattern_index = (pattern_index + 1) % len(pattern)


def defect_detect(image_template, image_inspection, high_th=10, low_th=10):
    """
    Chen version: compare an inspection image against a template and return
    (bright-defect mask, dark-defect mask, merged mask), all uint8.
    """
    # Work in grayscale regardless of the input channel count.
    if image_template.ndim == 3:
        template_gray = cv2.cvtColor(image_template, cv2.COLOR_BGR2GRAY)
        inspect_gray = cv2.cvtColor(image_inspection, cv2.COLOR_BGR2GRAY)
    else:
        template_gray = image_template.copy()
        inspect_gray = image_inspection.copy()

    # Morphological tolerance band: dilation gives the upper bound, erosion the lower.
    kernel = np.ones((3, 3), np.uint8)
    upper_band = np.float32(cv2.dilate(template_gray, kernel, iterations=1))
    lower_band = np.float32(cv2.erode(template_gray, kernel, iterations=1))
    inspect_f = np.float32(inspect_gray)

    # Differences clamped to [0, 255]: brighter-than-allowed / darker-than-allowed.
    mask_high = np.clip(inspect_f - (upper_band + np.float32(high_th)), 0, 255).astype(np.uint8)
    mask_low = np.clip((lower_band - np.float32(low_th)) - inspect_f, 0, 255).astype(np.uint8)
    mask_merge = np.maximum(mask_high, mask_low)
    return mask_high, mask_low, mask_merge


def get_diff_mask(image_src, image_dst_rough, bright_th=20, dark_th=20):
    """
    JinSong version: compare image_dst_rough against image_src and return
    binary (0/255) bright-defect, dark-defect and merged masks.

    NOTE(review): an earlier revision built the bright/dark templates with
    dilate/erode, but both results were immediately overwritten with the plain
    grayscale source; the dead morphology calls have been removed, so the
    behaviour is unchanged but the wasted work is gone.
    """
    if image_src.ndim == 3:
        gray_src_image = cv2.cvtColor(image_src, cv2.COLOR_BGR2GRAY)
        gray_dst_image = cv2.cvtColor(image_dst_rough, cv2.COLOR_BGR2GRAY)
    else:
        gray_src_image = image_src.copy()
        gray_dst_image = image_dst_rough.copy()

    # Both tolerance templates are simply the grayscale source image.
    template_bright = np.float32(gray_src_image)
    template_dark = np.float32(gray_src_image)
    gray_dst_image = np.float32(gray_dst_image)

    # Positive values mean the destination is brighter/darker than tolerated.
    bright_diff_mask_float = gray_dst_image - (template_bright + np.float32(bright_th))
    dark_diff_mask_float = (template_dark - np.float32(dark_th)) - gray_dst_image

    target_image_height, target_image_width = gray_dst_image.shape
    bright_diff_mask = np.zeros(dtype=np.uint8, shape=(target_image_height, target_image_width))
    dark_diff_mask = np.zeros(dtype=np.uint8, shape=(target_image_height, target_image_width))
    bright_diff_mask[bright_diff_mask_float > 0] = 255
    dark_diff_mask[dark_diff_mask_float > 0] = 255

    # Merge bright and dark defect masks (np.bitwise_or is identical to
    # cv2.bitwise_or for same-shape uint8 arrays with no mask).
    merge_mask = np.bitwise_or(bright_diff_mask, dark_diff_mask)
    return bright_diff_mask, dark_diff_mask, merge_mask


def apply_gaussian_blur_around_anchors(image, anchor_list,
                                       radius_list, blur_kernel=(15, 15)):
    """
    Blur square neighbourhoods around the given anchor points.

    :param image: source image (BGR)
    :param anchor_list: anchor points [(x1, y1), (x2, y2), ...]
    :param radius_list: per-anchor expansion radius (same length as anchor_list)
    :param blur_kernel: box-blur kernel size, default (15, 15)
    :return: a copy of the image with the anchor neighbourhoods blurred
    """
    result_image = image.copy()

    for idx, (x, y) in enumerate(anchor_list):
        radius = radius_list[idx] + 2  # small safety margin around the nominal radius
        # Clamp the square neighbourhood to the image bounds.
        x_min = max(0, x - radius)
        x_max = min(image.shape[1], x + radius)
        y_min = max(0, y - radius)
        y_max = min(image.shape[0], y + radius)

        # Extract the local region and blur it.
        roi = image[y_min:y_max, x_min:x_max].copy()
        # Bug fix: the third positional argument of cv2.blur is `dst`; the
        # stray 0 (left over from a GaussianBlur sigma argument) was invalid.
        blurred_roi = cv2.blur(roi, blur_kernel)

        # Paste the blurred region back into the output image.
        result_image[y_min:y_max, x_min:x_max] = blurred_roi

    return result_image


def get_background_flag(input_image):
    """
    Decide whether the image is dark-text-on-white or white-text-on-black by
    comparing the median gray level on detected edges (text strokes) against
    the median gray level everywhere else (background).

    Returns:
        texture_flag: 1 for white background / black text,
                      0 for black background / white text.
    """
    if input_image.ndim == 3:
        gray = cv2.cvtColor(input_image, cv2.COLOR_BGR2GRAY)
    else:
        gray = input_image.copy()

    # Bilateral filtering denoises while keeping the strokes sharp.
    smoothed = cv2.bilateralFilter(gray, d=3, sigmaColor=25, sigmaSpace=25)

    # Canny edges, dilated slightly so they cover the stroke pixels.
    edge_mask = cv2.Canny(smoothed, threshold1=50, threshold2=50)
    edge_mask = cv2.dilate(edge_mask, np.ones((3, 3), np.uint8), iterations=1)

    # Median gray on edge pixels approximates the text tone; elsewhere, background.
    text_gray_val = np.median(smoothed[edge_mask == 255])
    back_gray_val = np.median(smoothed[edge_mask == 0])

    # Brighter background than strokes => white background with black text.
    return 1 if back_gray_val > text_gray_val else 0


def get_background_mean_val(input_image, texture=0):
    """
    Estimate the background colour of an image (processed internally as BGR).

    Args:
        input_image: single- or three-channel image.
        texture: 1 for white background / black text (e.g. printed manuals),
                 0 for black background / white text (e.g. QR codes).

    Returns:
        (rand_val_list, th_image): sampled background value — a three-element
        list for colour input, a one-element [Gray] list for grayscale input —
        plus the Otsu-binarised image.
    """
    if input_image.ndim == 2:
        src_image = cv2.cvtColor(input_image, cv2.COLOR_GRAY2BGR)
        gray = input_image.copy()
    else:
        src_image = input_image.copy()
        gray = cv2.cvtColor(src_image, cv2.COLOR_BGR2GRAY)

    # Otsu picks the threshold; the second call binarises with that value.
    th, th_mask = cv2.threshold(gray, 0, 255, type=cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    th, th_image = cv2.threshold(gray, th, 255, type=cv2.THRESH_BINARY)

    # Background pixels: the white side for white-background images, the black side otherwise.
    target_val = 255 if texture == 1 else 0
    y, x = np.where(th_image == target_val)

    # Per-channel medians over the background pixels, clamped into [0, 255].
    medians = [np.median(src_image[y, x, c]) for c in range(3)]
    lows = [int(max(0, m)) for m in medians]
    highs = [int(min(m, 255)) for m in medians]

    if input_image.ndim == 2:
        rand_val_list = [int(sum(random.randint(lo, hi) for lo, hi in zip(lows, highs)) / 3)]
    else:
        rand_val_list = [random.randint(lo, hi) for lo, hi in zip(lows, highs)]
    return rand_val_list, th_image


def get_foreground_mean_val(input_image, texture=0):
    """
    Estimate the foreground (text/texture) colour of an image.

    Args:
        input_image: single- or three-channel image; processed internally as BGR.
        texture: 1 for white background / black text, 0 for black background /
                 white text.

    Returns:
        (rand_val_list, th_image): sampled foreground value — a three-element
        list for colour input, a one-element [Gray] list for grayscale input —
        plus the Otsu-binarised image.
    """
    if input_image.ndim == 2:
        src_image = cv2.cvtColor(input_image, cv2.COLOR_GRAY2BGR)
        gray = input_image.copy()
    else:
        src_image = input_image.copy()
        gray = cv2.cvtColor(src_image, cv2.COLOR_BGR2GRAY)

    # Otsu picks the threshold; the second call binarises with that value.
    th, th_mask = cv2.threshold(gray, 0, 255, type=cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    th, th_image = cv2.threshold(gray, th, 255, type=cv2.THRESH_BINARY)

    # Foreground pixels: the dark side on white-background images, the bright
    # side on black-background images (the opposite of get_background_mean_val).
    target_val = 0 if texture == 1 else 255
    y, x = np.where(th_image == target_val)

    # Per-channel medians over the foreground pixels, clamped into [0, 255].
    medians = [np.median(src_image[y, x, c]) for c in range(3)]
    lows = [int(max(0, m)) for m in medians]
    highs = [int(min(m, 255)) for m in medians]

    if input_image.ndim == 2:
        rand_val_list = [int(sum(random.randint(lo, hi) for lo, hi in zip(lows, highs)) / 3)]
    else:
        rand_val_list = [random.randint(lo, hi) for lo, hi in zip(lows, highs)]

    return rand_val_list, th_image





def generate_random_anchor_pt(standard_image_, inset_x, inset_y, gen_defect_num):
    """Sample `gen_defect_num` random [x, y] anchors inside the image, keeping
    an inset margin of (inset_x, inset_y) away from the image borders."""
    image_h, image_w = standard_image_.shape[:2]
    x_lo, x_hi = inset_x, image_w - inset_x
    y_lo, y_hi = inset_y, image_h - inset_y
    # Draw x then y per anchor (inclusive bounds, like random.randint).
    return [[random.randint(x_lo, x_hi), random.randint(y_lo, y_hi)]
            for _ in range(gen_defect_num)]


def gen_scale_box(box, scale, max_w, max_h):
    """Scale `box` ([xmin, ymin, xmax, ymax]) about its centre by `scale`,
    clamped to the [0, max_w] x [0, max_h] area; returns rounded int coords."""
    cx = (box[0] + box[2]) / 2
    cy = (box[1] + box[3]) / 2
    half_w = scale * (box[2] - box[0]) / 2
    half_h = scale * (box[3] - box[1]) / 2
    return [max(int(round(cx - half_w)), 0),
            max(int(round(cy - half_h)), 0),
            min(int(round(cx + half_w)), max_w),
            min(int(round(cy + half_h)), max_h)]


def calculate_iou_hc(box1, box2):
    """
    Compute the IoU (Intersection over Union) of two axis-aligned boxes.
    box1 and box2: [xmin, ymin, xmax, ymax]
    """
    # Intersection rectangle corners.
    ix0 = max(box1[0], box2[0])
    iy0 = max(box1[1], box2[1])
    ix1 = min(box1[2], box2[2])
    iy1 = min(box1[3], box2[3])

    # Boxes that do not overlap have zero IoU.
    if ix1 < ix0 or iy1 < iy0:
        return 0.0

    inter = (ix1 - ix0) * (iy1 - iy0)
    area1 = (box1[2] - box1[0]) * (box1[3] - box1[1])
    area2 = (box2[2] - box2[0]) * (box2[3] - box2[1])
    # Union = both areas minus the shared part.
    return inter / (area1 + area2 - inter)


def calculate_iou(box1, box2):
    """
    Calculate the IoU (Intersection over Union) of two bounding boxes.
    box1 and box2: [xmin, ymin, xmax, ymax]

    NOTE(review): this variant expects numpy inputs; a (1, 4) row vector is
    transposed before unpacking, so each coordinate becomes a length-1 array.
    Confirm the intended input shape with callers — for plain Python lists
    use calculate_iou_hc instead.
    """
    if box1.shape[0] == 1:
        box1 = box1.T
    if box2.shape[0] == 1:
        box2 = box2.T

    x1, y1, x2, y2 = box1
    x3, y3, x4, y4 = box2

    # Intersection rectangle coordinates.
    x_left = max(x1, x3)
    y_top = max(y1, y3)
    x_right = min(x2, x4)
    y_bottom = min(y2, y4)

    if x_right < x_left or y_bottom < y_top:
        # The two boxes do not overlap at all.
        return 0.0
    intersection_area = (x_right - x_left) * (y_bottom - y_top)

    # Union area = sum of both areas minus the shared intersection.
    box1_area = (x2 - x1) * (y2 - y1)
    box2_area = (x4 - x3) * (y4 - y3)
    union_area = box1_area + box2_area - intersection_area

    iou = intersection_area / union_area
    return iou


def nms_merge_defect(boxes, confs, clses, iou_threshold, confidence_threshold):
    """
    Per-class non-maximum suppression over detection results.

    Args:
        boxes: list of [x_min, y_min, x_max, y_max]
        confs: list of confidence scores
        clses: list of class ids
        iou_threshold: boxes overlapping a kept box at/above this IoU are suppressed
        confidence_threshold: detections at or below this confidence are dropped first

    Returns:
        list of indices (into the original inputs) of the detections to keep.
    """
    # Drop low-confidence detections up front.
    filtered_indices = np.where(np.array(confs) > confidence_threshold)[0]
    boxes = [boxes[i] for i in filtered_indices]
    confs = [confs[i] for i in filtered_indices]
    clses = [clses[i] for i in filtered_indices]
    np_clses = np.array(clses)

    keep_indices = []
    for cls in np.unique(np_clses):
        # Indices of this class, ordered by descending confidence.
        cls_members = np.where(np_clses == cls)[0]
        order = np.argsort(-np.array(confs)[cls_members])
        remaining = cls_members[order]
        while remaining.size > 0:
            # Keep the highest-confidence box of this class...
            best = remaining[0]
            keep_indices.append(filtered_indices[best])
            # ...and suppress every remaining box overlapping it too much.
            rest = remaining[1:]
            ious = np.array([calculate_iou_hc(boxes[best], boxes[i]) for i in rest])
            remaining = rest[np.where(ious < iou_threshold)[0]]
    return keep_indices


def slice_to_target_size(image, target_size, overlap=0.1):
    """
    Slice image size to target_size. When the image size is smaller than the win_size, pad it.
    [Input] image: detected image [bgr]
            target_size: (width, height) of each slice window
            overlap: fraction of each window shared with its neighbour
    [Output] slice_image_list: the slice image list
             src_sub_pos_list: the part of the slice_image that belong to the original image,
                                located in the original image
    """
    image = to_bgr(image)
    h, w = image.shape[:2]
    pad_h, pad_w = 0, 0
    resized_image = copy.deepcopy(image)
    # Pad with white (255) so at least one full window fits when the image is
    # smaller than the target window in either dimension.
    if h < target_size[1] or w < target_size[0]:
        pad_h = target_size[1] - h if target_size[1] - h > 0 else 0
        pad_w = target_size[0] - w if target_size[0] - w > 0 else 0
        padding = ((0, pad_h), (0, pad_w), (0, 0))
        resized_image = np.pad(image, padding, mode='constant', constant_values=255)

    # slice image
    pos_list, slice_image_list, src_sub_pos_list = [], [], []
    if resized_image.shape[0] > target_size[1] or resized_image.shape[1] > target_size[0]:
        slice_width, slice_height = target_size[:2]
        # Step between window origins; neighbouring windows share `overlap` of
        # their extent.
        dx = int((1.0 - overlap) * slice_width)
        dy = int((1.0 - overlap) * slice_height)

        image_h, image_w = resized_image.shape[:2]
        # NOTE(review): src_box seems meant to mark where the original image
        # sits inside the padded one, but padding is added at the bottom/right
        # while src_box offsets the top-left by (pad_w, pad_h) — confirm the
        # intended semantics with callers.
        src_box = [pad_w, pad_h, image_w, image_h]
        for y0 in range(0, image_h, dy):
            for x0 in range(0, image_w, dx):
                # make sure we don't have a tiny image on the edge
                x, y = x0, y0
                if y0 + slice_height > image_h:
                    y = image_h - slice_height
                if x0 + slice_width > image_w:
                    x = image_w - slice_width
                sub_box = [x, y, x + slice_width, y + slice_height]
                # Clamped edge windows can repeat; keep each window only once.
                if sub_box not in pos_list:
                    pos_list.append(sub_box)
                    sub_image = resized_image[sub_box[1]:sub_box[3], sub_box[0]:sub_box[2], ...]
                    slice_image_list.append(sub_image)

                    # calc the intersection of src_box
                    intersect_box = [max(src_box[0], sub_box[0]), max(src_box[1], sub_box[1]),
                                     min(src_box[2], sub_box[2]), min(src_box[3], sub_box[3])]
                    # Shift the intersection by the src_box origin before storing.
                    src_sub_pos_list.append([intersect_box[0] - src_box[0], intersect_box[1] - src_box[1],
                                             intersect_box[2] - src_box[0], intersect_box[3] - src_box[1]])
    else:
        # The (possibly padded) image fits in a single window: return it as-is.
        slice_image_list.append(resized_image)
        src_sub_pos_list.append([0, 0, image.shape[1], image.shape[0]])
    return slice_image_list, src_sub_pos_list


def to_bgr(image):
    """Return a 3-channel BGR copy of `image`; non-3-channel input is treated
    as grayscale and converted."""
    if len(image.shape) == 3 and image.shape[2] == 3:
        return copy.deepcopy(image)
    return cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)


def get_batch_image_list(batch_image_root):
    """Load every image directly under `batch_image_root`.

    Returns (images, file_names) as two parallel lists."""
    batch_image_list, batch_image_name_list = [], []
    for image_path in get_file_path_list(batch_image_root, FileType.IMAGE_TYPE):
        # Read as 3-channel BGR (flag 1 == cv2.IMREAD_COLOR).
        batch_image_list.append(cv2.imread(image_path, 1))
        batch_image_name_list.append(os.path.basename(image_path))
    return batch_image_list, batch_image_name_list


class FileType(Enum):
    """File-category selector used by get_file_path_list() / get_file_path_list_sort()."""
    IMAGE_TYPE = 1  # image files (.jpg, .png, .bmp, ...)
    OTHER_TYPE = 2  # annotation/text files (.json, .txt)


def get_file_path_list(file_root, file_type: FileType):
    """
    List the files directly under `file_root` whose extension matches the
    requested FileType (non-recursive); returns absolute paths.
    """
    if not os.path.exists(file_root):
        print(file_root, "does not exist.")
        return []

    if file_type == FileType.IMAGE_TYPE:
        format_list = ['.jpg', '.jpeg', '.JPG', '.JPEG',
                       '.png', '.tiff', '.tif', '.TIF', '.bmp', '.BMP']
    elif file_type == FileType.OTHER_TYPE:
        format_list = [".json", ".txt"]
    else:
        print("Invalid file type selection....")
        return []

    path_list = []
    for entry in os.listdir(file_root):
        full_path = os.path.join(file_root, entry)
        # Keep regular files whose extension is in the allowed list.
        if os.path.isfile(full_path) and os.path.splitext(entry)[1] in format_list:
            path_list.append(os.path.abspath(full_path))
    return path_list


def get_file_path_list_sort(file_root, file_type: FileType):
    """
    List the files directly under `file_root` whose extension matches the
    requested FileType, sorted by the numeric part of the file name.

    Returns:
        list of file paths ordered by (digits in name, letters in name,
        whether the name contains "_temp" — temp files sort after their main file).
    """
    if not os.path.exists(file_root):
        print(file_root, "does not exist.")
        return []

    # Supported extensions per file type.
    if file_type == FileType.IMAGE_TYPE:
        # Bug fix: 'TIF' was missing its leading dot, so *.TIF files were
        # silently skipped (os.path.splitext keeps the dot in the extension).
        format_list = ['.jpg', '.jpeg', '.JPG', '.JPEG',
                       '.png', '.tiff', '.tif', '.TIF', '.bmp', '.BMP']
    elif file_type == FileType.OTHER_TYPE:
        format_list = [".json", ".txt"]
    else:
        print("Invalid file type selection....")
        return []

    path_list = []
    for file_and_dir in os.listdir(file_root):
        full_path = os.path.join(file_root, file_and_dir)
        if os.path.isfile(full_path):
            if os.path.splitext(file_and_dir)[1] in format_list:
                path_list.append(full_path)

    def custom_sort_key(file_path):
        """Sort key: digits in the name first, then letters, "_temp" last."""
        name = os.path.splitext(os.path.basename(file_path))[0]
        num_part = ''.join(filter(str.isdigit, name))
        num_key = int(num_part) if num_part else float('inf')  # no digits -> sort last
        alpha_key = ''.join(filter(str.isalpha, name))
        return (num_key, alpha_key, "_temp" in name)

    path_list.sort(key=custom_sort_key)
    return path_list


def json_to_mask(json_path, ignored_name_list):
    """
    Convert a labelme JSON file into a label mask (background 0, labels 1-255).

    Args:
        json_path: path of the annotation file.
        ignored_name_list: label names to skip entirely.

    Returns:
        (mask, label_dict): an (H, W) uint8 mask (None when the file is missing
        or empty) and a mapping of label name -> mask value.
    """
    if not os.path.exists(json_path):
        print(json_path, "does not exist.")
        return None, {}

    if os.path.getsize(json_path) == 0:
        return None, {}

    with open(json_path, 'r') as f:
        json_data = json.load(f)

    image_w, image_h = json_data['imageWidth'], json_data['imageHeight']
    mask = np.zeros(shape=(image_h, image_w), dtype=np.uint8)
    label_dict = {}
    cur_label_num = 0
    for shape_data in json_data['shapes']:
        label = shape_data['label']

        # Skip labels the caller asked to ignore.
        if label in ignored_name_list:
            continue

        # Assign ids 1, 2, ... in order of first appearance.
        if label not in label_dict:
            cur_label_num += 1
            label_dict[label] = cur_label_num

        points = shape_data['points']
        int_points = [[int(round(num)) for num in sublist] for sublist in points]
        if shape_data['shape_type'] == 'rectangle':
            cv2.rectangle(mask, (int_points[0][0], int_points[0][1]), (int_points[1][0], int_points[1][1]),
                          label_dict[label], cv2.FILLED)
        elif shape_data["shape_type"] == 'polygon':
            np_pts = np.array(int_points, dtype=np.int32)
            mask = cv2.fillPoly(mask, [np_pts], [label_dict[label]])
        else:
            # Other labelme shape types are not supported.
            pass
    return mask, label_dict


def update_progress_bar(iteration, total, prefix='', suffix='',
                        decimals=1, length=20,
                        fill='█', print_end="\r"):
    """
    Render/refresh a command-line progress bar.

    Args:
        iteration   - required: current iteration (Int)
        total       - required: total iterations (Int)
        prefix      - optional: prefix string (Str)
        suffix      - optional: suffix string (Str)
        decimals    - optional: number of decimals in the percentage (Int)
        length      - optional: bar length in characters (Int)
        fill        - optional: bar fill character (Str)
        print_end   - optional: end character, e.g. "\\r" to redraw in place (Str)
    """
    percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
    filled_length = int(length * iteration // total)
    bar = fill * filled_length + '-' * (length - filled_length)
    # Bug fix: the line used to end with a hard-coded '\n' (plus a stray
    # trailing comma), so `print_end` was ignored and the bar printed a new
    # line on every call instead of redrawing in place.
    sys.stdout.write('\r%s |%s| %s%% %s%s' % (prefix, bar, percent, suffix, print_end))
    sys.stdout.flush()
    if iteration == total:
        # Finish with a newline so subsequent output starts on a fresh line.
        sys.stdout.write('\n')


def adjust_bbox_to_image_boundaries(image_w, image_h, x, y, w, h):
    """
    Clamp a crop rectangle so it lies fully inside the image.

    Args:
        image_w, image_h: full image size.
        x, y: top-left corner of the requested crop.
        w: requested crop width.
        h: requested crop height.

    Returns:
        tuple: the clamped (x, y, w, h).
    """
    # Top-left corner may not go negative.
    left = max(x, 0)
    top = max(y, 0)

    # Bottom-right corner may not pass the image border.
    right = min(x + w, image_w)
    bottom = min(y + h, image_h)

    # Width/height follow from the clamped corners.
    return left, top, right - left, bottom - top


def boundary_judge(x, y, x_min_lim, y_min_lim, x_max_lim, y_max_lim, x_extend, y_extend):
    """
    Clamp an anchor (x, y) so a box extended by (x_extend, y_extend) stays
    within the limits, flagging the case where the extent cannot fit at all.

    Returns:
        (x, y, over_border): the clamped anchor plus an out-of-bounds flag.
    """
    def clamp_axis(v, lo, hi, ext):
        # Push inside the low edge first, then pull back from the high edge;
        # flag overflow when the extent cannot fit between the two limits.
        overflow = False
        if v - ext < lo:
            v = lo
            if v + ext >= hi:
                overflow = True
        if v + ext >= hi:
            v = hi - ext
            if v < lo:
                overflow = True
        return v, overflow

    y, y_over = clamp_axis(y, y_min_lim, y_max_lim, y_extend)
    x, x_over = clamp_axis(x, x_min_lim, x_max_lim, x_extend)
    return x, y, x_over or y_over


def create_directory_if_not_exists(specified_root):
    """Create `specified_root` (including parents) when it does not already exist."""
    if not os.path.exists(specified_root):
        try:
            os.makedirs(specified_root)
            # Bug fix: these messages lacked the f-prefix, so the literal
            # placeholder text was printed instead of the actual path.
            print(f"'{specified_root}' created ....")
        except OSError:
            print(f"Failed to create directory '{specified_root}'. ")
    else:
        # Directory already exists: nothing to do.
        pass


def split_dataset(src_path, train_path, test_path, val_path, ratio):
    """
    Split an image/label dataset into train/test/val directories.

    Walks *src_path* for images (.bmp/.JPG/.jpg/.jpeg), pairs each image
    with a same-named .txt label file, shuffles the pairs, and copies them
    into '<dst>/images' and '<dst>/labels' according to *ratio*.

    Args:
        src_path: root directory containing the images and .txt label files
        train_path/test_path/val_path: destination root directories
        ratio: sequence with at least two fractions, e.g. (0.8, 0.1);
               the remainder goes to the validation split

    Fixes over the previous version:
    - uses os.path.splitext instead of img.split('.')[0], which truncated
      file names containing extra dots;
    - returns early on an empty dataset (zip(*[]) used to raise);
    - removed the unused non-empty-label counter (its os.stat call crashed
      when a label file was missing);
    - os.makedirs(..., exist_ok=True) and os.path.join instead of manual
      existence checks and '/'-concatenation.
    """
    image_exts = ('.bmp', '.JPG', '.jpg', '.jpeg')
    images, labels = [], []
    for dirpath, _, filenames in os.walk(src_path):
        for img in filenames:
            if not img.endswith(image_exts):
                continue
            # splitext keeps dots inside the base name intact.
            label_name = os.path.splitext(img)[0] + ".txt"
            images.append(os.path.join(dirpath, img))
            labels.append(os.path.join(dirpath, label_name))

    total = len(images)
    if total == 0:
        return

    data = list(zip(images, labels))
    random.shuffle(data)
    images, labels = zip(*data)

    train_end = int(ratio[0] * total)
    test_end = int((ratio[0] + ratio[1]) * total)
    for i in range(total):
        if i < train_end:
            dst_path = train_path
        elif i < test_end:
            dst_path = test_path
        else:
            dst_path = val_path

        image_dir = os.path.join(dst_path, 'images')
        label_dir = os.path.join(dst_path, 'labels')
        os.makedirs(image_dir, exist_ok=True)
        os.makedirs(label_dir, exist_ok=True)
        shutil.copy(images[i], image_dir)
        shutil.copy(labels[i], label_dir)


def feather_region_with_mask(defect_image, defect_mask,
                             feather_radius=3, sigma=0):
    """
    Feather (soften) the edge of the masked region while leaving the rest
    of the image unchanged.

    Parameters:
        defect_image: simulated defect image (3 channels, BGR)
        defect_mask: mask of the defect contour, background 0, foreground 255 (single channel)
        ---> this mask works best combined with edge extraction, e.g. cv2.Canny(defect_mask*255, 100, 200)
        feather_radius: feathering strength (larger value -> wider feathered band)
        sigma: Gaussian sigma for blurring the edge band (0 lets OpenCV derive it from the kernel size)
    """

    # Make sure the mask is binary.
    _, binary_mask = cv2.threshold(defect_mask, 127, 255, cv2.THRESH_BINARY)

    # 1. Extract the edge band of the mask:
    # dilate and erode it; the difference between the two is the edge region.
    kernel = np.ones((feather_radius, feather_radius), np.uint8)
    dilated = cv2.dilate(binary_mask, kernel, iterations=1)
    eroded = cv2.erode(binary_mask, kernel, iterations=1)
    edge_region = dilated - eroded

    # 2. Build the feather weight map (non-zero only inside the edge band)
    # by Gaussian-blurring the edge region.
    blurred_edge = cv2.GaussianBlur(edge_region, (2 * feather_radius + 1, 2 * feather_radius + 1), sigma)
    # blurred_edge = cv2.blur(edge_region, (2 * feather_radius + 1, 2 * feather_radius + 1))
    # Normalize the weights to the [0, 1] range.
    feather_weights = blurred_edge.astype(np.float32) / 255.0

    # 3. Gaussian-blur the whole source image.
    blurred_img = cv2.GaussianBlur(defect_image, (2 * feather_radius + 1, 2 * feather_radius + 1), 0)

    # 4. Blend original and blurred pixels only inside the edge band;
    # replicate the weight map to 3 channels first.
    feather_weights_3ch = cv2.merge([feather_weights, feather_weights, feather_weights])

    # 5. Compose the result image.
    result = defect_image.copy()
    # Apply the blend on the edge band only (the float mix is truncated
    # back into the uint8 result array on assignment).
    result[edge_region > 0] = (
            defect_image[edge_region > 0] * (1 - feather_weights_3ch[edge_region > 0]) +
            blurred_img[edge_region > 0] * feather_weights_3ch[edge_region > 0]
    )
    return result.astype(np.uint8)


def apply_mask_to_image(image, mask, color=(0, 255, 0), alpha=0.5):
    """
    Overlay a binary mask on an image, highlighting the masked area with a
    solid color at the given opacity.

    Args:
        image: source image (numpy array), 3-channel BGR
        mask: binary mask (numpy array), single channel, values 0 or 255
        color: highlight color (B, G, R); defaults to green
        alpha: highlight opacity in [0, 1]; defaults to 0.5

    Returns:
        The image with the mask applied.
    """
    mask = mask.astype(np.uint8)

    # Solid color plate the same size as the input image.
    solid = np.zeros_like(image)
    solid[:] = color

    # Colored pixels restricted to the mask area.
    highlight = cv2.bitwise_and(solid, solid, mask=mask)

    # Original pixels everywhere the mask is NOT set.
    background = cv2.bitwise_and(image, image, mask=cv2.bitwise_not(mask))

    # Alpha-blend the highlight over the (zeroed-out) masked region.
    return cv2.addWeighted(background, 1, highlight, alpha, 0)


def norm_train_data(image_temp, image_insp):
    """Absolute template/inspection difference, contrast-boosted with CLAHE."""
    equalizer = cv2.createCLAHE(clipLimit=2, tileGridSize=(4, 4))
    return equalizer.apply(cv2.absdiff(image_temp, image_insp))


def joint_normalize_old(img1, img2):
    """
    Min-max normalize two images over their SHARED value range.

    Because both images are scaled with the joint minimum/maximum, the
    relative brightness between them is preserved.

    Returns:
        tuple of the two normalized images as uint8.
    """
    lo = min(np.min(img1), np.min(img2))
    hi = max(np.max(img1), np.max(img2))
    span = hi - lo + 1e-8  # epsilon guards against a flat joint range
    scaled1 = (img1 - lo) / span
    scaled2 = (img2 - lo) / span
    return (scaled1 * 255).astype(np.uint8), (scaled2 * 255).astype(np.uint8)


def joint_normalize(img1, img2):
    """
    Normalize two 2D images INDEPENDENTLY (per-image min/max stretch).

    Unlike joint_normalize_old, each image is stretched with its own value
    range, which increases per-image contrast.

    Returns:
        tuple of the two normalized images as uint8.
    Raises:
        ValueError: if shapes differ, the inputs are not 2D, or either is empty.
    """
    if img1.shape != img2.shape or len(img1.shape) != 2:
        raise ValueError("Inputs must have the same shape and be 2D")
    if img1.size == 0 or img2.size == 0:
        raise ValueError("Inputs cannot be empty")

    def _stretch(img):
        # Min-max stretch one image to [0, 1]; flat images map to all zeros
        # instead of dividing by (almost) zero.
        lo, hi = np.min(img), np.max(img)
        if hi == lo:
            return np.zeros_like(img, dtype=np.float32)
        return (img - lo) / (hi - lo + 1e-8)

    return np.uint8(_stretch(img1) * 255), np.uint8(_stretch(img2) * 255)


def advanced_normalization(template, defect):
    """
    Fuse the luminance of a template and a defect image into one BGR image:
    B = CLAHE-enhanced template luminance, R = CLAHE-enhanced defect
    luminance, G = 0.
    """
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    # Work on the Y (luminance) plane of each image in YCrCb space.
    luma_t = clahe.apply(cv2.cvtColor(template, cv2.COLOR_BGR2YCrCb)[:, :, 0])
    luma_d = clahe.apply(cv2.cvtColor(defect, cv2.COLOR_BGR2YCrCb)[:, :, 0])

    fused = np.zeros_like(template)
    fused[:, :, 0] = luma_t  # blue channel: template luminance
    fused[:, :, 2] = luma_d  # red channel: defect luminance
    fused[:, :, 1] = 0       # green channel kept at zero
    return fused


def need_merge(rect1, rect2, thre=20):
    """
    Decide whether two rectangles should be merged, based on their
    Manhattan gap, axis overlap, intersection, or containment.

    :param rect1: first rectangle as (x, y, w, h); (x, y) is the corner
        from which w/h extend (the original doc said top-right; the math
        treats it as the minimum corner — TODO confirm with callers)
    :param rect2: second rectangle
    :param thre: distance threshold in pixels
    :return: True when the rectangles should be merged
    """
    x1, y1, w1, h1 = rect1
    x2, y2, w2, h2 = rect2

    # Projection overlap on each axis (positive means the projections intersect).
    inter_w = (w1 + w2) - (max(x1 + w1, x2 + w2) - min(x1, x2))
    inter_h = (h1 + h2) - (max(y1 + h1, y2 + h2) - min(y1, y2))

    # Gap (or overlap magnitude) between the projections on each axis.
    v_dis = abs(min(y1 + h1, y2 + h2) - max(y1, y2))
    h_dis = abs(min(x1 + w1, x2 + w2) - max(x1, x2))

    close_enough = (
        v_dis + h_dis < thre
        or (v_dis < 0.5 * thre and inter_w > 0)
        or (h_dis < 0.5 * thre and inter_h > 0)
        or is_overlap(rect1, rect2)
    )
    if close_enough:
        return True
    # rect1 strictly inside rect2
    if (x1 > x2) and ((x1 + w1) < (x2 + w2)) and (y1 > y2) and ((y1 + h1) < (y2 + h2)):
        return True
    # rect2 strictly inside rect1
    if (x2 > x1) and ((x2 + w2) < (x1 + w1)) and (y2 > y1) and ((y2 + h2) < (y1 + h1)):
        return True
    return False


def is_overlap(rect1, rect2):
    """
    Test whether two rectangles share a non-empty intersection.

    :param rect1: first rectangle as (x, y, w, h); (x, y) is the corner
        from which w/h extend
    :param rect2: second rectangle
    :return: True when the intersection area is strictly positive
    """
    x1, y1, w1, h1 = rect1
    x2, y2, w2, h2 = rect2

    # The intersection is non-empty iff it has positive extent on both axes.
    overlap_x = min(x1 + w1, x2 + w2) - max(x1, x2)
    overlap_y = min(y1 + h1, y2 + h2) - max(y1, y2)
    return overlap_x > 0 and overlap_y > 0


def guided_filter(I, p, radius=5, eps=0.01):
    """
    Classic guided filter: edge-preserving smoothing of *p* steered by the
    guide image *I*.

    Args:
        I: guide image (single channel for gray, 3 channels for color), uint8
        p: image to be filtered (single or 3 channel), uint8
        radius: window radius; the mean filter uses a (2r+1)x(2r+1) kernel
        eps: regularization term; larger values smooth more
    Returns:
        filtered image as uint8

    Fix: the previous version computed the guide's H/W/C up front but never
    used them; that dead code has been removed.
    """
    # Work in [0, 1] float space.
    I = I.astype(np.float32) / 255.0
    p = p.astype(np.float32) / 255.0

    # Normalized box kernel used for every local mean below.
    size = 2 * radius + 1
    kernel = np.ones((size, size), np.float32) / (size * size)

    mean_I = cv2.filter2D(I, -1, kernel)
    mean_p = cv2.filter2D(p, -1, kernel)

    corr_I = cv2.filter2D(I * I, -1, kernel)
    corr_Ip = cv2.filter2D(I * p, -1, kernel)

    var_I = corr_I - mean_I * mean_I    # local variance of the guide
    cov_Ip = corr_Ip - mean_I * mean_p  # local covariance guide/input

    # Per-window linear model q = a*I + b, regularized by eps.
    a = cov_Ip / (var_I + eps)
    b = mean_p - a * mean_I

    # Average the per-window coefficients before applying them.
    mean_a = cv2.filter2D(a, -1, kernel)
    mean_b = cv2.filter2D(b, -1, kernel)

    q = mean_a * I + mean_b
    q = np.clip(q, 0, 1) * 255
    return q.astype(np.uint8)


def guided_filter_optimized(I, p, radius=5, eps=0.01):
    """
    Guided filter using cv2.boxFilter for the local means (faster than an
    explicit convolution kernel). Same linear model as guided_filter.
    """
    guide = I.astype(np.float32) / 255.0
    src = p.astype(np.float32) / 255.0
    ksize = (2 * radius + 1, 2 * radius + 1)

    def _mean(img):
        # Normalized box filter == windowed mean.
        return cv2.boxFilter(img, -1, ksize, normalize=True)

    mu_i, mu_p = _mean(guide), _mean(src)
    var_i = _mean(guide * guide) - mu_i * mu_i
    cov_ip = _mean(guide * src) - mu_i * mu_p

    # Per-window linear coefficients, regularized by eps.
    a = cov_ip / (var_i + eps)
    b = mu_p - a * mu_i

    q = _mean(a) * guide + _mean(b)
    return np.clip(q * 255, 0, 255).astype(np.uint8)


def non_max_suppression(boxes, overlap_th=0.3):
    """
    Greedy non-maximum suppression over (x, y, w, h) boxes.

    Boxes are visited from the largest bottom edge (y + h) down; any
    candidate whose overlap with the current kept box exceeds
    *overlap_th* (measured against the candidate's own area) is dropped.

    :param boxes: iterable of (x, y, w, h)
    :param overlap_th: overlap ratio threshold
    :return: kept boxes as an int ndarray, or [] for empty input
    """
    if len(boxes) == 0:
        return []

    boxes = np.array(boxes)
    x1, y1 = boxes[:, 0], boxes[:, 1]
    x2, y2 = x1 + boxes[:, 2], y1 + boxes[:, 3]
    area = boxes[:, 2] * boxes[:, 3]

    order = np.argsort(y2)
    keep = []
    while order.size > 0:
        current = order[-1]
        keep.append(current)
        rest = order[:-1]
        # Intersection (with the +1 pixel convention) of the current box
        # against all remaining candidates.
        iw = np.maximum(0, np.minimum(x2[current], x2[rest]) - np.maximum(x1[current], x1[rest]) + 1)
        ih = np.maximum(0, np.minimum(y2[current], y2[rest]) - np.maximum(y1[current], y1[rest]) + 1)
        ratio = (iw * ih) / area[rest]
        # Keep only candidates that do not overlap the current box too much.
        order = rest[ratio <= overlap_th]
    return boxes[keep].astype("int")


def _window_valid_ratio(mask_window, temp_window, is_back):
    """
    Ratio used to accept or reject one crop window.

    For background crops (is_back=True): the fraction of non-text (zero)
    mask pixels, forced to 0 when the template window is almost entirely
    white (>95% pixels with all channels > 245) so blank paper is skipped.
    For text crops: the fraction of text (255) mask pixels.
    """
    if is_back:
        ratio = np.count_nonzero(mask_window == 0) / mask_window.size
        white_pixels = np.all(temp_window > 245, axis=2)
        if np.count_nonzero(white_pixels) / mask_window.size > 0.95:
            ratio = 0
    else:
        ratio = np.count_nonzero(mask_window == 255) / mask_window.size
    return ratio


def _save_crop_group(output_root, special_name, image_name, idx,
                     sub_img_temp, sub_img_insp, sub_img_text, is_gen_online):
    """Write one temp/insp/text crop triple (the temp crop is skipped when generating online)."""
    save_temp_path = os.path.join(output_root, f"{special_name}_{image_name}_{idx}_temp.jpg")
    save_insp_path = os.path.join(output_root, f"{special_name}_{image_name}_{idx}_insp.jpg")
    save_text_path = os.path.join(output_root, f"{special_name}_{image_name}_{idx}_text.jpg")
    if is_gen_online is not True:
        cv2.imwrite(save_temp_path, sub_img_temp)
    cv2.imwrite(save_insp_path, sub_img_insp)
    cv2.imwrite(save_text_path, sub_img_text)


def crop_by_window(image_temp,
                   image_insp,
                   image_mask,
                   image_name,
                   output_root,
                   is_back=True,
                   special_name='a',
                   window_size=(640, 640), random_crop=True, crop_count=100, is_gen_online=False):
    """
    Crop a group of full-size images (template, inspection, and text mask)
    into fixed-size windows, producing raw training data for detection.

    Args:
        image_temp/image_insp/image_mask: template image, inspection image,
            and the text-region mask, all full size
        image_name: base name of the group, e.g. for xxx_temp.bmp pass xxx
        output_root: directory that receives the cropped images
        is_back: use the text mask to crop background windows (True) or
            text windows (False)
        special_name: prefix added to output file names so repeated runs do
            not overwrite earlier crops
        window_size: crop size as (height, width)
        random_crop: random window positions when True (the default);
            row/column sliding windows when False
        crop_count: number of random crops to produce (random mode only)
        is_gen_online: when True, skip writing the template crop

    Refactor: the acceptance-ratio computation and the save logic were
    duplicated verbatim between the random and sliding branches; they now
    live in _window_valid_ratio / _save_crop_group.
    """

    # Acceptance threshold: mostly-background vs. enough-text windows.
    ratio_th = 0.9 if is_back else 0.12

    height, width = image_mask.shape[:2]
    win_h, win_w = window_size

    if random_crop:
        idx = 0
        attempts = 0
        max_attempts = 200  # cap the attempts to avoid an endless loop

        while idx < crop_count and attempts < max_attempts:
            attempts += 1

            x_start = np.random.randint(0, max(width - win_w + 1, 1))
            y_start = np.random.randint(0, max(height - win_h + 1, 1))
            x_end = x_start + win_w
            y_end = y_start + win_h

            mask_window = image_mask[y_start:y_end, x_start:x_end]
            sub_img_temp = image_temp[y_start:y_end, x_start:x_end]

            if _window_valid_ratio(mask_window, sub_img_temp, is_back) > ratio_th:
                sub_img_insp = image_insp[y_start:y_end, x_start:x_end]
                _save_crop_group(output_root, special_name, image_name, idx,
                                 sub_img_temp, sub_img_insp, mask_window, is_gen_online)
                idx += 1
                attempts = 0  # reset the attempt counter after a success
    else:
        idx = 0
        for y in range(0, height, win_h):
            for x in range(0, width, win_w):
                y_start = max(y, 0)
                y_end = min(y + win_h, height)
                x_start = max(x, 0)
                x_end = min(x + win_w, width)

                # Shift the window back so every crop has the full size.
                if y_end - y_start < win_h:
                    y_start = y_end - win_h
                if x_end - x_start < win_w:
                    x_start = x_end - win_w

                mask_window = image_mask[y_start:y_end, x_start:x_end]
                sub_img_temp = image_temp[y_start:y_end, x_start:x_end]

                if _window_valid_ratio(mask_window, sub_img_temp, is_back) > ratio_th:
                    sub_img_insp = image_insp[y_start:y_end, x_start:x_end]
                    _save_crop_group(output_root, special_name, image_name, idx,
                                     sub_img_temp, sub_img_insp, mask_window, is_gen_online)
                    idx += 1


def process_single_group(args):
    """
    Thread-pool entry point: unpack one packed parameter tuple, load the
    image group, and run crop_by_window on it.
    """
    (image_temp_path, image_insp_path, image_text_path, image_name, output_root,
     is_back, special_name, window_size, random_crop, crop_count, is_gen_online) = args

    image_insp = cv2.imread(image_insp_path, 1)
    image_text = cv2.imread(image_text_path, 0)
    # Online generation has no separate template image: reuse the inspection one.
    if is_gen_online is True:
        image_temp = image_insp.copy()
    else:
        image_temp = cv2.imread(image_temp_path, 1)

    crop_by_window(image_temp, image_insp, image_text, image_name, output_root,
                   is_back=is_back,
                   special_name=special_name,
                   window_size=window_size,
                   random_crop=random_crop, crop_count=crop_count, is_gen_online=is_gen_online)
    print(image_name)


def run_with_threads(input_root,
                     output_root,
                     is_back=True,
                     special_name='back_1408',
                     window_size=(1024, 1024),
                     random_crop=True,
                     crop_count=100,
                     num_workers=16, is_gen_online=False):
    """
    Crop training data from every image group under *input_root* in
    parallel via a thread pool.

    The file list is consumed in consecutive groups of 2 (online mode:
    inspection + text mask) or 3 (offline mode: inspection + template +
    text mask). The image name is taken from the part of the inspection
    file name before the first underscore.
    """
    path_list = get_file_path_list(input_root, FileType.IMAGE_TYPE)
    group_size = 2 if is_gen_online is True else 3

    # Build one packed argument tuple per image group.
    parameter_list = []
    for start in range(0, len(path_list), group_size):
        insp_path = path_list[start]
        if is_gen_online is True:
            temp_path = path_list[start]
            text_path = path_list[start + 1]
        else:
            temp_path = path_list[start + 1]
            text_path = path_list[start + 2]

        image_name = os.path.basename(insp_path).split('_')[0]
        parameter_list.append((temp_path, insp_path, text_path, image_name, output_root,
                               is_back, special_name, window_size, random_crop,
                               crop_count, is_gen_online))

    with ThreadPoolExecutor(max_workers=num_workers) as executor:
        executor.map(process_single_group, parameter_list)


def random_excluding(center, exclude_range, min_val=0, max_val=255):
    """
    Pick a random integer in [min_val, max_val] that lies OUTSIDE the band
    [center - exclude_range, center + exclude_range].

    Raises:
        ValueError: when the exclusion band covers the whole range.
    """
    lo, hi = center - exclude_range, center + exclude_range
    candidates = [v for v in range(min_val, max_val + 1) if v < lo or v > hi]

    if not candidates:
        raise ValueError("没有可选的合法数值")

    return random.choice(candidates)


def clamp_value(center, offset):
    """
    Add *offset* to *center* and clamp the result into [0, 255].

    :param center: base value (int or float)
    :param offset: signed offset (int or float)
    :return: the clamped result as an int
    """
    shifted = center + offset
    if shifted < 0:
        shifted = 0
    elif shifted > 255:
        shifted = 255
    return int(shifted)


def gen_color_mask(rgb_image):
    """
    Build a mask highlighting saturated (pure-color) regions.

    Pixels whose maximum pairwise channel difference exceeds 35 are set to
    255; other pixels keep their raw difference value (0 for the neutral
    paper background).
    """
    # Bilateral filtering smooths noise while preserving color edges.
    smooth = cv2.bilateralFilter(rgb_image, d=9, sigmaColor=25, sigmaSpace=25)
    b, g, r = cv2.split(smooth)

    # Maximum absolute difference across the three channel pairs.
    mask = np.maximum(np.maximum(cv2.absdiff(r, g), cv2.absdiff(g, b)), cv2.absdiff(b, r))
    mask[mask > 35] = 255
    return mask



def sobel(img):
    """Gradient magnitude estimate: |dx| and |dy| Sobel responses averaged 50/50."""
    grad_x = cv2.convertScaleAbs(cv2.Sobel(img, cv2.CV_64F, 1, 0))
    grad_y = cv2.convertScaleAbs(cv2.Sobel(img, cv2.CV_64F, 0, 1))
    return cv2.addWeighted(grad_x, 0.5, grad_y, 0.5, 0)


def text_mask_extact(image_gray, mask, drak_gray_offset=20, bright_gray_offset=10,
                     drak_otsu_offset=30, bright_otsu_offset=10, gaussian_size=5,
                     dark_grad_var_thre=300, bright_grad_var_thre=100):
    """
    Extract a per-pixel text mask from a grayscale image given a coarse
    region mask.

    For every connected blob in *mask*, the surrounding one-pixel ring is
    sampled to estimate the background gray level; an Otsu threshold over
    the blob pixels then separates text from background. The polarity
    (dark text on light background or vice versa) is decided by comparing
    the ring median against the Otsu threshold, and blobs whose gradient
    variance is too low are skipped (presumably flat regions without text
    — TODO confirm the variance thresholds against real data).

    Args:
        image_gray: grayscale source image
        mask: coarse text-region mask (binarized here at 128)
        drak_gray_offset/bright_gray_offset: margins on the ring median vs
            the Otsu threshold, used to decide the text polarity
        drak_otsu_offset/bright_otsu_offset: offsets applied to the Otsu
            threshold when binarizing the blob
        gaussian_size: Gaussian kernel size used before the Sobel gradient
        dark_grad_var_thre/bright_grad_var_thre: minimum gradient variance
            inside a blob for it to be processed
    Returns:
        uint8 mask with detected text pixels set to 255
    """

    gauss_blur_image = cv2.GaussianBlur(image_gray, (gaussian_size, gaussian_size), 0)
    grad_image = sobel(gauss_blur_image)

    _, binary_img = cv2.threshold(mask, 128, 255, cv2.THRESH_BINARY)
    text_mask = np.zeros_like(image_gray)
    contours, _ = cv2.findContours(binary_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours = list(contours)
    for blob_id, blob in enumerate(contours):
        blob_mask = np.zeros_like(binary_img)
        cv2.drawContours(blob_mask, [blob], -1, 255, cv2.FILLED)
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
        dilate_mask = cv2.dilate(blob_mask, kernel)

        # Estimate the background gray level from the median of the
        # outermost one-pixel ring around the blob.
        contour_mask = cv2.bitwise_xor(dilate_mask, blob_mask)
        # cv2.imwrite("image_gray.bmp", image_gray)
        # cv2.imwrite("blob_mask.bmp", blob_mask)
        # cv2.imwrite("contour_mask.bmp", contour_mask)
        # cv2.imwrite("grad_image.bmp", grad_image)
        # Collect the gray/gradient values where the respective mask is set.
        pixels_of_contour = image_gray[contour_mask == 255]
        pixels_of_blob = image_gray[blob_mask == 255]
        pixels_of_grad = grad_image[blob_mask == 255]
        variance_value = np.var(pixels_of_grad)
        # print("variance_value: %f" % variance_value)

        if len(pixels_of_blob) > 0:
            otsu_threshold, _ = cv2.threshold(pixels_of_blob, 0, 255, cv2.THRESH_OTSU)
            median_value = np.median(pixels_of_contour)

            if median_value > otsu_threshold + drak_gray_offset and variance_value > dark_grad_var_thre:
                # Light background: keep the dark (text) pixels.
                _, temp_binary = cv2.threshold(image_gray, otsu_threshold - drak_otsu_offset, 255, cv2.THRESH_BINARY_INV)
                text_binary = cv2.bitwise_and(blob_mask, temp_binary)
                # cv2.imwrite("text_binary.bmp", text_binary)
                text_mask = cv2.bitwise_or(text_binary, text_mask)

            elif median_value < otsu_threshold - bright_gray_offset and variance_value > bright_grad_var_thre:
                # Dark background: keep the bright (text) pixels.
                _, temp_binary = cv2.threshold(image_gray, otsu_threshold + bright_otsu_offset, 255, cv2.THRESH_BINARY)
                text_binary = cv2.bitwise_and(blob_mask, temp_binary)
                # cv2.imwrite("text_binary.bmp", text_binary)
                text_mask = cv2.bitwise_or(text_binary, text_mask)
    return text_mask

