import base64

import cv2
import numpy as np
import matplotlib.pyplot as plt
import gradio as gr
import numpy as np
from enum import Enum
import gms_matcher
import math
import logging
from PIL import Image

# blacklist = []
# with open('black_list.txt', 'r') as f:
#     lines = f.readlines()
#     blacklist = [line.strip() for line in lines]
EDGE = 20
logger = logging.getLogger(__name__)


class DrawingType(Enum):
    """Rendering modes for draw_matches: plain match lines, lines plus
    endpoint circles, or endpoints color-coded by x / y / x+y position."""
    ONLY_LINES = 1
    LINES_AND_POINTS = 2
    COLOR_CODED_POINTS_X = 3
    COLOR_CODED_POINTS_Y = 4
    COLOR_CODED_POINTS_XpY = 5


def draw_matches(src1, src2, kp1, kp2, matches, drawing_type, band=0):
    """Stitch two images side by side and draw the matched feature points.

    Args:
        src1: left image (H x W x 3 uint8).
        src2: right image.
        kp1: keypoints of src1, indexed by each match's queryIdx.
        kp2: keypoints of src2, indexed by each match's trainIdx.
        matches: DMatch-like pairs to render.
        drawing_type: a DrawingType member selecting the rendering style.
        band: seam width trimmed from the inner edge of each image
            when stitching (used to hide the border added by add_circle).

    Returns:
        The stitched uint8 canvas with matches drawn on it.

    NOTE(review): the ONLY_LINES branch reads `kp.pt.x` / `kp.pt.y`
    (attribute access) while every other branch treats `kp.pt` as an
    (x, y) iterable — both cannot be right for the same keypoint type.
    The project's gms_matcher keypoints appear to expose .pt.x (see
    modify_flip_match); confirm whether the other branches are stale.
    """
    height = max(src1.shape[0], src2.shape[0])
    width = src1.shape[1] + src2.shape[1] - 2 * band
    output = np.zeros((height, width, 3), dtype=np.uint8)
    output[0:src1.shape[0], 0:src1.shape[1] - band] = src1[:, 0:src1.shape[1] - band]
    output[0:src2.shape[0], src1.shape[1] - band:] = src2[:, band:src2.shape[1]]

    if drawing_type == DrawingType.ONLY_LINES:
        for i in range(len(matches)):
            left = (kp1[matches[i].queryIdx].pt.x, kp1[matches[i].queryIdx].pt.y)
            # Shift the right-hand point by the width of the trimmed left image.
            right = tuple(sum(x) for x in zip((kp2[matches[i].trainIdx].pt.x, kp2[matches[i].trainIdx].pt.y),
                                              (src1.shape[1] - 2 * band, 0)))
            # left = tuple(sum(x) for x in zip(kp1[matches[i].queryIdx].pt, (0, -band)))
            # right = tuple(sum(x) for x in zip(kp2[matches[i].trainIdx].pt, (src1.shape[1], -band)))
            cv2.line(output, tuple(map(int, left)), tuple(map(int, right)), (0, 255, 255))
    elif drawing_type == DrawingType.LINES_AND_POINTS:
        for i in range(len(matches)):
            left = kp1[matches[i].queryIdx].pt
            right = tuple(sum(x) for x in zip(kp2[matches[i].trainIdx].pt, (src1.shape[1], 0)))
            cv2.line(output, tuple(map(int, left)), tuple(map(int, right)), (255, 0, 0))

        for i in range(len(matches)):
            left = kp1[matches[i].queryIdx].pt
            right = tuple(sum(x) for x in zip(kp2[matches[i].trainIdx].pt, (src1.shape[1], 0)))
            cv2.circle(output, tuple(map(int, left)), 1, (0, 255, 255), 2)
            cv2.circle(output, tuple(map(int, right)), 1, (0, 255, 0), 2)

    elif drawing_type == DrawingType.COLOR_CODED_POINTS_X or drawing_type == DrawingType.COLOR_CODED_POINTS_Y or drawing_type == DrawingType.COLOR_CODED_POINTS_XpY:
        # 256-entry HSV colormap; the index encodes the point's position.
        _1_255 = np.expand_dims(np.array(range(0, 256), dtype='uint8'), 1)
        _colormap = cv2.applyColorMap(_1_255, cv2.COLORMAP_HSV)

        for i in range(len(matches)):
            left = kp1[matches[i].queryIdx].pt
            right = tuple(sum(x) for x in zip(kp2[matches[i].trainIdx].pt, (src1.shape[1], 0)))

            if drawing_type == DrawingType.COLOR_CODED_POINTS_X:
                colormap_idx = int(left[0] * 256. / src1.shape[1])  # x-gradient
            if drawing_type == DrawingType.COLOR_CODED_POINTS_Y:
                colormap_idx = int(left[1] * 256. / src1.shape[0])  # y-gradient
            if drawing_type == DrawingType.COLOR_CODED_POINTS_XpY:
                colormap_idx = int((left[0] - src1.shape[1] * .5 + left[1] - src1.shape[0] * .5) * 256. / (
                        src1.shape[0] * .5 + src1.shape[1] * .5))  # manhattan gradient

            color = tuple(map(int, _colormap[colormap_idx, 0, :]))
            cv2.circle(output, tuple(map(int, left)), 1, color, 2)
            cv2.circle(output, tuple(map(int, right)), 1, color, 2)
    return output


def draw_rect(src1, src2, x1, y1, x2, y2, band):
    """Draw a red rectangle on each image and stitch the pair side by side.

    Args:
        src1: first image (BGR ndarray).
        src2: second image (BGR ndarray).
        x1: top-left corner (x, y) of the box on src1.
        y1: bottom-right corner (x, y) of the box on src1.
        x2: top-left corner (x, y) of the box on src2.
        y2: bottom-right corner (x, y) of the box on src2.
        band: seam width trimmed from the inner edge of each image.

    Returns:
        The stitched uint8 canvas with both rectangles drawn.
    """
    canvas_h = max(src1.shape[0], src2.shape[0])
    canvas_w = src1.shape[1] + src2.shape[1] - 2 * band

    src1 = cv2.rectangle(src1, (x1[0], x1[1]), (y1[0], y1[1]), (0, 0, 255), 5)
    src2 = cv2.rectangle(src2, (x2[0], x2[1]), (y2[0], y2[1]), (0, 0, 255), 5)

    canvas = np.zeros((canvas_h, canvas_w, 3), dtype=np.uint8)
    seam = src1.shape[1] - band
    canvas[:src1.shape[0], :seam] = src1[:, :seam]
    canvas[:src2.shape[0], seam:] = src2[:, band:src2.shape[1]]
    return canvas


def cut_rect(src, x1, y1):
    """Crop the rectangle with top-left corner x1=(x, y) and bottom-right
    corner y1=(x, y) out of `src` (returns a view, not a copy)."""
    left, top = x1[0], x1[1]
    right, bottom = y1[0], y1[1]
    return src[top:bottom, left:right]


def edge_or_not(image, x, y, edge):
    """Decide whether a match box hugs one of the image borders.

    Args:
        image: image array; only .shape is read.
        x: top-left corner (x, y) of the match box.
        y: bottom-right corner (x, y) of the match box.
        edge: padding width previously added above/below the image; it is
            subtracted from the vertical coordinates and extent.

    Returns:
        True when the box sits in the top/bottom/left/right border region.
    """
    inner_h = image.shape[0] - 2 * edge
    img_w = image.shape[1]
    top = x[1] - edge
    bottom = y[1] - edge
    near_top = top < inner_h / 9 and bottom < inner_h / 3
    near_bottom = top > inner_h * 2 / 3 and bottom > inner_h * 8 / 9
    near_left = x[0] < img_w / 9 and y[0] < img_w / 3
    near_right = x[0] > img_w * 2 / 3 and y[0] > img_w * 8 / 9
    return near_top or near_bottom or near_left or near_right


def remove_outliers(data, threshold):
    """Drop points whose distance to the centroid is a statistical outlier.

    Each point's Euclidean distance to the centroid of `data` is converted
    to a z-score; points with z-score >= `threshold` are discarded
    (callers use threshold 3).

    Bug fixed: when every distance was equal (e.g. a single point, or two
    symmetric points) the standard deviation was 0, the z-scores became
    0/0 = NaN, and *all* points were silently discarded.  Now nothing is
    treated as an outlier in that case.  Empty input returns [] without
    triggering NumPy warnings.

    Args:
        data: sequence of 2-D points (lists/tuples/arrays).
        threshold: z-score cutoff.

    Returns:
        list of the surviving points (the original objects).
    """
    if len(data) == 0:
        return []
    center = np.mean(data, axis=0)
    distances = np.array([np.linalg.norm(np.asarray(item) - center, ord=2) for item in data])
    std = np.std(distances)
    if std == 0:
        # All points are equally far from the centroid: no outliers.
        return list(data)
    z = np.abs(distances - np.mean(distances)) / std
    return [point for point, score in zip(data, z) if score < threshold]


def get_ranse_rect(res, kp, idx=0):
    """For stain figures: discard outlier match points, then return the
    bounding box of the remaining ones.

    Args:
        res: list of DMatch-like objects.
        kp: keypoint list that the match indices refer to.
        idx: 0 -> use the queryIdx (image 1) side, 1 -> the trainIdx side.

    Returns:
        (area + 1, top_left, bottom_right) when points remain after the
        outlier filter, otherwise (0, 0, 0).
    """
    points = []
    for match in res:
        if idx == 0:
            keypoint = kp[match.queryIdx]
        elif idx == 1:
            keypoint = kp[match.trainIdx]
        points.append([int(keypoint.pt.x), int(keypoint.pt.y)])
    # Outlier threshold of 3 standard deviations (project convention).
    points = remove_outliers(points, 3)
    if len(points) == 0:
        print("去除离散点后为空==============")
        return 0, 0, 0
    # Column-wise extremes give the top-left / bottom-right corners.
    min_idx = np.argmin(points, axis=0)
    max_idx = np.argmax(points, axis=0)
    minpoint = [points[min_idx[0]][0], points[min_idx[1]][1]]
    maxpoint = [points[max_idx[0]][0], points[max_idx[1]][1]]
    area = abs(minpoint[0] - maxpoint[0]) * abs(minpoint[1] - maxpoint[1])
    return area + 1, minpoint, maxpoint


def get_rect(res, kp, idx=0):
    """Locate the bounding box of the matched feature points and count the
    distinct points.

    Args:
        res: list of DMatch-like objects.
        kp: keypoint list that the match indices refer to.
        idx: 0 -> use the queryIdx (image 1) side, 1 -> the trainIdx side.

    Returns:
        tuple (area + 1, distinct_count + 1, minpoint, maxpoint, height,
        width).  NOTE(review): "height" is the x-extent and "width" the
        y-extent (names are swapped); callers only use the symmetric
        ratio h/w, so the historical return order is kept unchanged.

    Bug fixed: distinct points were de-duplicated via the concatenated
    string str(x) + str(y), so e.g. (1, 23) and (12, 3) both became
    "123" and collided; de-duplication now uses the (x, y) tuple.
    """
    point_img = []
    seen = {}  # insertion-ordered set of distinct (x, y) points
    for match in res:
        if idx == 0:
            center = [kp[match.queryIdx].pt.x, kp[match.queryIdx].pt.y]
        elif idx == 1:
            center = [kp[match.trainIdx].pt.x, kp[match.trainIdx].pt.y]
        center = [int(center[0]), int(center[1])]
        point_img.append(center)
        seen[(center[0], center[1])] = None
    # Column-wise extremes give the top-left / bottom-right corners.
    minres = np.argmin(point_img, axis=0)
    maxres = np.argmax(point_img, axis=0)
    minpoint = [point_img[minres[0]][0], point_img[minres[1]][1]]
    maxpoint = [point_img[maxres[0]][0], point_img[maxres[1]][1]]
    height = abs(minpoint[0] - maxpoint[0])
    width = abs(minpoint[1] - maxpoint[1])
    area = height * width
    return area + 1, len(seen) + 1, minpoint, maxpoint, height, width


def grayscale_Image(image: np.ndarray, resize_width: int, resize_heith: int):
    """Resize an image and convert it to 8-bit grayscale.

    Args:
        image (np.ndarray): decoded BGR image array (cv2 format) — the old
            `str` annotation was wrong; callers pass arrays, not paths.
        resize_width (int): target width in pixels.
        resize_heith (int): target height in pixels.

    Returns:
        PIL.Image.Image: the resized image in mode 'L' (grayscale).
    """
    # im = Image.open(image)  # legacy: open from a file path
    im = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))  # BGR -> RGB for PIL
    smaller_image = im.resize((resize_width, resize_heith))  # scale to requested size
    grayscale_image = smaller_image.convert('L')  # 8-bit grayscale
    return grayscale_image


def hash_String(image: str, resize_width=24, resize_height=8):
    """Compute a difference-hash fingerprint of an image.

    The image is shrunk to resize_width x resize_height and grayscaled;
    each pixel is compared with its left neighbour (row-initial pixels
    have no left neighbour and are skipped), yielding one bit per
    comparison.  The resulting bit string is returned as an integer.

    Args:
        image: BGR image array (forwarded to grayscale_Image; the `str`
            annotation is historical).
        resize_width (int, optional): downscaled width. Defaults to 24.
        resize_height (int, optional): downscaled height. Defaults to 8.

    Returns:
        int: the fingerprint bits interpreted as a base-2 number.
    """
    pixels = list(grayscale_Image(image, resize_width, resize_height).getdata())
    bits = []
    for pos in range(1, len(pixels)):
        if pos % resize_width == 0:
            # First pixel of a row: no same-row left neighbour to compare.
            continue
        bits.append('1' if pixels[pos - 1] > pixels[pos] else '0')
    return int(''.join(bits), 2)


def Difference(dhash1: int, dhash2: int):
    """Count the bits that differ between two fingerprint hashes.

    Args:
        dhash1 (int): first fingerprint.
        dhash2 (int): second fingerprint.

    Returns:
        int: number of differing bit positions (a small count, e.g. < 5,
        suggests the two images are the same or very similar).
    """
    xor_bits = format(dhash1 ^ dhash2, 'b')
    return xor_bits.count('1')


def add_circle(src, band):
    """Pad an image with a black border of `band` pixels on all four sides.

    Bug fixed: the original wrote `output[band:-(band), band:-(band)]`,
    which for band == 0 is an empty slice and raises on assignment; the
    positive end indices below handle band == 0 correctly.

    Args:
        src: H x W x 3 image array.
        band: border width in pixels (>= 0).

    Returns:
        np.uint8 array of shape (H + 2*band, W + 2*band, 3) with `src`
        centered and zeros (black) around it.
    """
    height, width = src.shape[0], src.shape[1]
    output = np.zeros((height + 2 * band, width + 2 * band, 3), dtype=np.uint8)
    output[band:band + height, band:band + width] = src
    return output


def remove_edge(src, band):
    """Strip a border of `band` pixels from all four sides of an image.

    Bug fixed: `src[band:-(band), ...]` produced an empty result for
    band == 0; positive end indices handle that case.  Also removed the
    dead np.zeros allocation that was immediately overwritten.

    Args:
        src: H x W x C image array.
        band: border width in pixels (>= 0).

    Returns:
        A view of `src` without the border (no copy is made).
    """
    height, width = src.shape[0], src.shape[1]
    return src[band:height - band, band:width - band]


def white_area(image):
    """Estimate how much of a stain-figure crop is near-white (text).

    Converts to grayscale, thresholds at 230, and returns the fraction
    of pixels above the threshold.

    Args:
        image: BGR image array.

    Returns:
        float: white-pixel count divided by total pixel count.
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    _, binary = cv2.threshold(gray, 230, 255, cv2.THRESH_BINARY)
    white_pixels = cv2.countNonZero(binary)
    return white_pixels / (image.shape[1] * image.shape[0])


def segmentation(filepath1, box1, filepath2, box2):
    """Crop two sub-images, run ORB + GMS matching, and return the match
    visualization as a base64-encoded JPEG string.

    Args:
        filepath1: path of the first full image on disk.
        box1: crop bounds dict with 'xmin'/'xmax'/'ymin'/'ymax'.
        filepath2: path of the second full image.
        box2: crop bounds dict for the second image.

    Returns:
        str: base64 text of the JPEG rendered by draw_matches.

    NOTE(review): `matchGMS` is not defined or imported in this module
    (only `import gms_matcher` exists) — presumably
    cv2.xfeatures2d.matchGMS or a star-import was intended; as written
    this call raises NameError.  Confirm the intended symbol.
    """
    img1 = cv2.imread(filepath1)
    img1 = img1[int(box1['ymin']):int(box1['ymax']), int(box1['xmin']):int(box1['xmax'])]
    # box2 = eval(box2)
    # print(filepath2)
    img2 = cv2.imread(filepath2)
    # print(type(img2))
    img2 = img2[int(box2['ymin']):int(box2['ymax']), int(box2['xmin']):int(box2['xmax'])]
    orb = cv2.ORB_create(10000)
    orb.setFastThreshold(0)

    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
    matches_all = matcher.match(des1, des2)

    matches_gms = matchGMS(img1.shape[:2], img2.shape[:2], kp1, kp2, matches_all, withScale=False, withRotation=True,
                           thresholdFactor=6)

    fig = draw_matches(img1, img2, kp1, kp2, matches_gms, DrawingType.ONLY_LINES)

    # Encode the visualization as JPEG, then strip the b'...' wrapper from
    # the base64 repr to get plain text.
    image = cv2.imencode('.jpg', fig)[1]
    image_code = str(base64.b64encode(image))[2:-1]

    return image_code


def point_match(filepath1, box1, filepath2, box2, clas=""):
    """Count GMS feature matches between two cropped sub-images.

    Args:
        filepath1: path of the first full image.
        box1: crop bounds dict with 'xmin'/'xmax'/'ymin'/'ymax'.
        filepath2: path of the second full image.
        box2: crop bounds dict for the second image.
        clas: figure category.  Bug fixed: the original tested
            `type == "染色图"`, comparing the *builtin* `type` to a
            string — always False, so the stain-figure upscaling branch
            was dead code.  The category is now an explicit parameter;
            the default "" preserves the historical (no-upscale)
            behavior for existing callers.

    Returns:
        int: number of GMS matches, or 0 on any failure.
    """
    img1 = cv2.imread(filepath1)
    img1 = img1[int(box1['ymin']):int(box1['ymax']), int(box1['xmin']):int(box1['xmax'])]
    img2 = cv2.imread(filepath2)
    img2 = img2[int(box2['ymin']):int(box2['ymax']), int(box2['xmin']):int(box2['xmax'])]
    if clas == "染色图":
        # Small stain figures are upscaled 3x so ORB finds enough keypoints.
        if img1.shape[1] <= 200 and img1.shape[0] <= 200 and img2.shape[1] <= 200 and img2.shape[0] <= 200:
            img1 = cv2.resize(img1, dsize=(3 * img1.shape[1], 3 * img1.shape[0]), interpolation=cv2.INTER_LINEAR)
            img2 = cv2.resize(img2, dsize=(3 * img2.shape[1], 3 * img2.shape[0]), interpolation=cv2.INTER_LINEAR)

    try:
        orb = cv2.ORB_create(10000)
        orb.setFastThreshold(0)

        kp1, des1 = orb.detectAndCompute(img1, None)
        kp2, des2 = orb.detectAndCompute(img2, None)
        matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
        matches_all = matcher.match(des1, des2)

        # NOTE(review): `matchGMS` is not defined/imported in this module;
        # presumably cv2.xfeatures2d.matchGMS was intended — confirm.
        matches_gms = matchGMS(img1.shape[:2], img2.shape[:2], kp1, kp2, matches_all, withScale=False,
                               withRotation=True, thresholdFactor=6)

        return len(matches_gms)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt still propagates.
        logger.exception("Exception occurred")
        return 0


def delete_match_ocr(kp1, kp2, matches, subimg1_text_rects, subimg2_text_rects):
    """Drop matches whose endpoints fall inside OCR-detected text boxes.

    A match survives only when neither of its two keypoints lies inside
    any text box of its respective image.

    Args:
        kp1: keypoint list of image 1 (indexed by queryIdx).
        kp2: keypoint list of image 2 (indexed by trainIdx).
        matches: candidate match pairs.
        subimg1_text_rects: text boxes detected on image 1.
        subimg2_text_rects: text boxes detected on image 2.

    Returns:
        list: the matches whose points lie outside every text box.
    """
    return [
        match
        for match in matches
        if not is_point_in_text_rects(kp1[match.queryIdx].pt, subimg1_text_rects)
        and not is_point_in_text_rects(kp2[match.trainIdx].pt, subimg2_text_rects)
    ]


def is_point_in_text_rects(point, text_rects):
    """Check whether a keypoint lies inside any text box, with a 5-pixel
    tolerance on every side.

    Args:
        point: keypoint coordinate object exposing .x and .y attributes.
        text_rects: text-box list; each entry's first element is
            (x_mid, y_mid, width, height, angle).

    Returns:
        bool: True when the point falls inside some box (the match should
        be filtered out), False when it is outside all boxes (keep it).
    """
    for rect in text_rects:
        x_mid, y_mid, box_w, box_h, _angle = rect[0]
        half_w = box_w / 2
        half_h = box_h / 2
        left, right = x_mid - half_w, x_mid + half_w
        top, bottom = y_mid - half_h, y_mid + half_h
        inside_x = left - 5 <= point.x <= right + 5
        inside_y = top - 5 <= point.y <= bottom + 5
        if inside_x and inside_y:
            return True
    return False


def edge_scaled_text_rect(rects, scale_factor, edge=None):
    """Scale OCR text boxes in place to follow an image resize, then shift
    them when a border of width `edge` was added around the image.

    Args:
        rects: text-box list; each entry's first element is the mutable
            sequence [x_mid, y_mid, width, height, angle].
        scale_factor: (width_factor, height_factor); (1, 1) = no resize.
        edge: border width added around the image, or None for no border.

    Returns:
        The same `rects` list, mutated in place.
    """
    w_scale, h_scale = scale_factor[0], scale_factor[1]
    for rect in rects:
        box = rect[0]  # [x_mid, y_mid, width, height, angle]
        box[0] *= w_scale
        box[1] *= h_scale
        box[2] *= w_scale
        box[3] *= h_scale
        if edge is not None:
            # The added border shifts the box center by `edge` on both axes.
            box[1] += edge
            box[0] += edge
    return rects


def restore_rect(rects, scale_factor, edge=None):
    """Undo the scaling / border shift applied by edge_scaled_text_rect,
    mutating the text boxes in place.

    Bug fixed: the x-center used to subtract `edge / scale_factor[1]`
    (the *height* factor) instead of `edge / scale_factor[0]`, so boxes
    did not round-trip when the width and height factors differed.

    Args:
        rects: text-box list; each entry's first element is the mutable
            sequence [x_mid, y_mid, width, height, angle].
        scale_factor: (width_factor, height_factor) that was applied.
        edge: border width that was added, or None for no border.

    Returns:
        The same `rects` list, mutated in place.
    """
    for text_rect in rects:
        box = text_rect[0]  # [x_mid, y_mid, width, height, angle]
        box[0] = box[0] / scale_factor[0]
        box[1] = box[1] / scale_factor[1]
        box[2] = box[2] / scale_factor[0]
        box[3] = box[3] / scale_factor[1]
        if edge is not None:
            # Equivalent to (coord - edge) / factor, since the division
            # above already happened.
            box[1] = box[1] - (edge / scale_factor[1])
            box[0] = box[0] - (edge / scale_factor[0])
    return rects


def flip_text_rect(rects, flip_flag, img_width, img_height):
    """Mirror text-box centers to follow a horizontal or vertical image
    flip, mutating the boxes in place.

    Args:
        rects (list): text boxes, each shaped like
            ((x_mid, y_mid, width, height, angle), text, confidence) with
            a mutable first element.
        flip_flag (int): 1 = horizontal flip, 2 = vertical flip; anything
            else leaves the boxes untouched.
        img_width (float): image width.
        img_height (float): image height.

    Returns:
        list: the same list with adjusted centers.
    """
    for rect in rects:
        box = rect[0]
        if flip_flag == 1:
            # Horizontal flip mirrors the x coordinate across the width.
            box[0] = img_width - box[0]
        elif flip_flag == 2:
            # Vertical flip mirrors the y coordinate across the height.
            box[1] = img_height - box[1]
    return rects


def modify_flip_match(kp2, matches, flip_flag, img2_w, img2_h):
    """Map the img2-side keypoint coordinates of `matches` back to the
    un-flipped image, mutating kp2 in place.

    Args:
        kp2: keypoint list of image 2 (pt.x / pt.y are writable).
        matches: match pairs whose trainIdx indexes into kp2.
        flip_flag: 1 = horizontal flip was applied, 2 = vertical flip.
        img2_w: width of image 2.
        img2_h: height of image 2.

    Returns:
        The same `matches` list (coordinates fixed up through kp2).
    """
    for match in matches:
        pt = kp2[match.trainIdx].pt
        if flip_flag == 1:
            # Horizontal flip: y unchanged, new_x = w - x.
            pt.x = img2_w - pt.x
        elif flip_flag == 2:
            # Vertical flip: x unchanged, new_y = h - y.
            pt.y = img2_h - pt.y
    return matches

def merge_filter(img1, img2):
    """Fraction of pixels where img2 is at least as bright as img1
    (channel differences summed over the three channels).

    Used to detect that one crop contains/duplicates another: a ratio
    close to 1 means img2 >= img1 almost everywhere.

    Bug fixed: the denominator was `len(img1) * len(img1)` (rows
    squared) instead of rows * columns, so non-square images produced a
    wrong — possibly > 1 — ratio.  Also vectorized with NumPy instead of
    the per-pixel Python loop.

    Args:
        img1: H x W x 3 image array.
        img2: H x W x 3 image array of the same shape.

    Returns:
        float: count(sum_channels(img2 - img1) >= 0) / (H * W).
    """
    diff = img2.astype(np.int64) - img1.astype(np.int64)
    non_darker = int(np.count_nonzero(diff.sum(axis=2) >= 0))
    rows, cols = img1.shape[0], img1.shape[1]
    return non_darker / (rows * cols)

def single_paper_check(figx: list, figy: list, Threshold: int, clas: str):
    """Extract similar feature points between two sub-figures of a paper.

    Args:
        figx (list): metadata of sub-figure 1 — index 0 = path of the full
            image, index 1 = crop bounding box, index 3 = OCR text rects.
        figy (list): same layout for sub-figure 2.
        Threshold (int): minimum match count; each figure class uses its
            own value.
        clas (str): figure class ("染色图" stain / "条带图" strip /
            "统计图" chart).  [Named `clas` to avoid shadowing the
            builtin `type`.]

    Returns:
        tuple: (match count, fig_rect image with the match boxes drawn,
        fig_line image with the match lines drawn); count is 0 and the
        images may be None when matching fails or a filter rejects the
        pair.
    """
    # if filepath1 in blacklist or filepath2 in blacklist:
    #     return 0,None
    filepath1, box1, subimg1_text_rects = figx[0], figx[1], figx[3]
    filepath2, box2, subimg2_text_rects = figy[0], figy[1], figy[3]
    scale_factor1 = (1, 1)  # (w, h) scale applied to image 1; (1, 1) = no resize
    scale_factor2 = (1, 1)  # (w, h) scale applied to image 2; (1, 1) = no resize
    filter_text_flag = 0  # set to 1 once matches were filtered by text boxes

    resize_back = False  # resize & hashcount are specific to strip figures
    hashcount = 0
    reverse = 0  # NOTE(review): never read afterwards — dead variable
    edge = 0  # border width; stays 0 except for stain figures
    # Decode via imdecode so non-ASCII file paths work on all platforms.
    with open(filepath1, "rb") as f:
        img_data_1 = f.read()
    img_array_1 = np.asarray(bytearray(img_data_1), dtype=np.uint8)
    img1 = cv2.imdecode(img_array_1, cv2.IMREAD_COLOR)
    with open(filepath2, "rb") as f:
        img_data_2 = f.read()
    img_array_2 = np.asarray(bytearray(img_data_2), dtype=np.uint8)
    img2 = cv2.imdecode(img_array_2, cv2.IMREAD_COLOR)
    img1 = img1[int(box1['ymin']):int(box1['ymax']), int(box1['xmin']):int(box1['xmax'])]
    # img1 = img1[int(box1['ymin']):int(box1['ymax']), int(box1['xmin']):int(box1['xmax'])]
    if isinstance(box2, str):
        box2 = eval(box2)  # NOTE(review): eval on stored box text — ensure the source is trusted
    img2 = img2[int(box2['ymin']):int(box2['ymax']), int(box2['xmin']):int(box2['xmax'])]

    # NOTE(review): if imdecode returns None the slicing above already
    # raised before reaching this check — consider checking earlier.
    if img1 is None or img2 is None:
        print("one of subimage is None!")
        return 0, None, None
    if clas == "染色图":
        # img2 = cv2.flip(img2, -1)
        edge = EDGE
        try:
            area_ratio = (img1.shape[1] * img1.shape[0]) / (img2.shape[1] * img2.shape[0])
            if area_ratio > 10:
                img2 = cv2.resize(img2, dsize=(3 * img2.shape[1], 3 * img2.shape[0]), interpolation=cv2.INTER_LINEAR)
                # img1's area is over 10x img2's: upscale img2 by 3.
                # img2's text boxes must follow the transform; img1's stay put.
                scale_factor2 = (3, 3)
            elif area_ratio < 0.1:
                img1 = cv2.resize(img1, dsize=(3 * img1.shape[1], 3 * img1.shape[0]), interpolation=cv2.INTER_LINEAR)
                # img2's area is over 10x img1's: upscale img1 by 3.
                # img1's text boxes must follow the transform; img2's stay put.
                scale_factor1 = (3, 3)
            if img1.shape[1] <= 600 and img1.shape[0] <= 600 and img2.shape[1] <= 600 and img2.shape[0] <= 600:
                img1 = cv2.resize(img1, dsize=(3 * img1.shape[1], 3 * img1.shape[0]), interpolation=cv2.INTER_LINEAR)
                img2 = cv2.resize(img2, dsize=(3 * img2.shape[1], 3 * img2.shape[0]), interpolation=cv2.INTER_LINEAR)
                # img2_re = cv2.resize(img2, dsize=(3 * img2_re.shape[1], 3 * img2_re.shape[0]), interpolation=cv2.INTER_LINEAR)
                edge = 3 * EDGE
                resize_back = True
                # Both small: upscale img1 and img2 by 3.
                scale_factor1 = (3, 3)  # scale factors track the image transform
                scale_factor2 = (3, 3)
            img1 = add_circle(img1, edge)
            img2 = add_circle(img2, edge)

            # --------------------------- start: text-box bookkeeping ----------------------------------
            subimg1_text_rects = edge_scaled_text_rect(subimg1_text_rects, scale_factor1, edge)
            subimg2_text_rects = edge_scaled_text_rect(subimg2_text_rects, scale_factor2, edge)
            # --------------------------- end: text-box bookkeeping ----------------------------------

        except Exception as e:
            logger.exception("Exception occurred")
            print("图像匹配输入图像格式问题，filepath1: %s, filepath2: %s" % (filepath1, filepath2), e)

    if clas == "条带图":
        hash1 = hash_String(img1)
        hash2 = hash_String(img2)
        hashcount = Difference(hash1, hash2)
        # if img1.shape[1] <= 400 and img1.shape[0] <= 80 and img2.shape[1] <= 400 and img2.shape[0] <= 80:
        try:
            # img1 = cv2.resize(img1, dsize=(1 * img1.shape[1], 2 * img1.shape[0]), interpolation=cv2.INTER_LINEAR)
            # img2 = cv2.resize(img2, dsize=(1 * img2.shape[1], 2 * img2.shape[0]), interpolation=cv2.INTER_LINEAR)
            resize_back = True
            # Normalize both strips to 300 px height, preserving aspect ratio.
            w1 = int(150 * img1.shape[1] / img1.shape[0])
            w2 = int(150 * img2.shape[1] / img2.shape[0])
            img1 = cv2.resize(img1, dsize=(w1, 300), interpolation=cv2.INTER_LINEAR)
            img2 = cv2.resize(img2, dsize=(w2, 300), interpolation=cv2.INTER_LINEAR)
            # --------------------------- start: text-box bookkeeping ----------------------------------
            # NOTE(review): computed AFTER the resize, so these are always
            # (1.0, 1.0); presumably the pre-resize shape was intended — confirm.
            scale_factor1 = (w1 / img1.shape[1], 300 / img1.shape[0])  # per-axis scale (w, h)
            scale_factor2 = (w2 / img2.shape[1], 300 / img2.shape[0])  # per-axis scale (w, h)
            edge = 0  # border width (none for strips)
            subimg1_text_rects = edge_scaled_text_rect(subimg1_text_rects, scale_factor1, edge)
            subimg2_text_rects = edge_scaled_text_rect(subimg2_text_rects, scale_factor2, edge)
        except Exception as e:
            logger.exception("Exception occurred")
            print("图像匹配输入图像格式问题，filepath1: %s, filepath2: %s" % (filepath1, filepath2), e)

    if clas == "统计图":
        hash1 = hash_String(img1)
        hash2 = hash_String(img2)
        hashcount = Difference(hash1, hash2)

    figure = None
    fig_rect = None
    fig_line = None
    matches_gms = []
    number = 0
    try:
        # img1 = np.asfortranarray(img1)
        # img2 = np.asfortranarray(img2)

        figure, matches_gms, kp1, kp2, flip_flag = gms_matcher.runImagePair(img1, img2, Threshold, edge)

        # Truthy flip_flag means the matcher flipped img2; map the matched
        # coordinates back to the un-flipped image.
        if flip_flag:
            img2_height, img2_width = img2.shape[:2]
            matches_gms = modify_flip_match(kp2, matches_gms, flip_flag, img2_width, img2_height)

        img2_flip = None
        # After a flip, img2's hashcount must be recomputed on the flipped image.
        if flip_flag:
            if clas == "统计图" or clas == "条带图":
                # hash1 exists because the matching class branch above ran.
                if flip_flag == 1:
                    # horizontal flip
                    img2_flip = cv2.flip(img2, 1)
                elif flip_flag == 2:
                    # vertical flip
                    img2_flip = cv2.flip(img2, 0)
                print("Recalculate hashcount")
                hash2_flip = hash_String(img2_flip)
                hashcount = Difference(hash1, hash2_flip)

        if len(kp1) > 0 and len(kp2) > 0:
            # The descriptor arrays are NumPy-wrapped mats; contiguity checks kept for reference.
            # des1 = des1 if des1.flags['C_CONTIGUOUS'] else np.ascontiguousarray(des1)
            # des2 = des2 if des2.flags['C_CONTIGUOUS'] else np.ascontiguousarray(des2)
            # Check whether matched points fall inside OCR text boxes.
            if len(subimg1_text_rects) or len(subimg2_text_rects):
                # # 判断img2是否经过了翻转，如果经过了翻转，那么subimg2_text_rects也要变动
                # # 如果img2 翻转了，且有文字text存在
                # if flip_flag and len(subimg2_text_rects) != 0:
                #     # 设置text有进行翻转的标志，将图像怎么翻转的送入标志位中
                #     flip_flag_text = flip_flag
                #     subimg2_text_rects = flip_text_rect(subimg2_text_rects, flip_flag_text, img2_width, img2_height)
                # Drop matches that land inside a text box.
                matches_gms = delete_match_ocr(kp1, kp2, matches_gms, subimg1_text_rects, subimg2_text_rects)
                filter_text_flag = 1
            number = len(matches_gms)

        # If a flip or text filtering happened, the visualization is stale — redraw.
        if filter_text_flag or flip_flag:
            figure = draw_matches(img1, img2, kp1, kp2, matches_gms, DrawingType.ONLY_LINES, edge)

        # Strip and stain figures scaled/edged their text boxes before matching;
        # restore the original coordinates now.
        if clas == "条带图" or clas == "染色图":
            subimg1_text_rects = restore_rect(subimg1_text_rects, scale_factor1, edge)
            subimg2_text_rects = restore_rect(subimg2_text_rects, scale_factor2, edge)
            # if flip_flag and len(subimg2_text_rects) != 0:
            #     subimg2_text_rects = flip_text_rect(subimg2_text_rects, flip_flag_text, img2_width, img2_height)

        if number > Threshold:
            area1, point_num1, x1, y1, h1, w1 = get_rect(matches_gms, kp1, 0)
            area2, point_num2, x2, y2, h2, w2 = get_rect(matches_gms, kp2, 1)

            print("非重复特征点", point_num1, point_num2)
            print("特征点分布面积", area1, area2)
            # if area1 >0 and area2 >0:
            if area1 / area2 > 10 or area2 / area1 > 10:  # reject when match-area ratio is too lopsided
                print("find a area mismatch")
                return 0, fig_rect, fig_line

            if (h1 / w1) < 0.1 or (h1 / w1) > 10 or (h2 / w2) < 0.1 or (h2 / w2) > 10:
                print("find a height & width ratio mismatch")
                return 0, fig_rect, fig_line
            # if (img1.shape[1] * img1.shape[0]) / area1 >= 25 or (img2.shape[1] * img2.shape[0]) / area2 >= 25:
            if clas == "染色图":
                rect1 = cut_rect(img1, x1, y1)
                rect2 = cut_rect(img2, x2, y2)
                # Reject when the matched region is tiny relative to the (un-edged) image.
                if ((img1.shape[1] - 2 * edge) * (img1.shape[0] - 2 * edge)) / area1 >= 30 or \
                        ((img2.shape[1] - 2 * edge) * (img2.shape[0] - 2 * edge)) / area2 >= 30:
                    print("find a ranse mini match area")
                    return 0, fig_rect, fig_line

                # # ------------ start  使用ocr检测，这里就不需要了 【先留着吧】-------------------
                # elif ((img1.shape[1]) * (img1.shape[0] - 2 * edge)) / area1 >= 10 or \
                #             ((img2.shape[1]) * (img2.shape[0] - 2 * edge)) / area2 >= 10:
                #     if edge_or_not(img1,x1,y1,edge) and white_area(rect1) >= 0.02:
                #         print("find a edge white word match area")
                #         return 0, fig_rect, fig_line
                #     if edge_or_not(img2,x2,y2,edge) and white_area(rect2) >= 0.02:
                #         print("find a edge white word match area")
                #         return 0, fig_rect, fig_line
                # else:
                #     print("find a match area, 首先要对染色图进行分析，去除离散点")
                #     sarea1, sx1, sy1 = get_ranse_rect(matches_gms, kp1, 0)
                #     # sarea1, sx1, sy1 = get_ranse_rect(matches_gms, kp1, 0, img1, id_save = "0", img2=img2)  # 离群点为0时 使用，传入看是否对
                #     sarea2, sx2, sy2 = get_ranse_rect(matches_gms, kp2, 1)
                #     # 检查特征点是否都过滤完了
                #     if sarea1 != 0 and sx1 != 0 and sy1 != 0 and sarea2 != 0 and sx2 != 0 and sy2 != 0:
                #         srect1 = cut_rect(img1, sx1, sy1)
                #         srect2 = cut_rect(img2, sx2, sy2)
                #         if ((img1.shape[1]) * (img1.shape[0] - 2 * edge)) / sarea1 >= 10 or \
                #                     ((img2.shape[1]) * (img2.shape[0] - 2 * edge)) / sarea2 >= 10:

                #             # if white_area(srect1) >= 0.02 or white_area(srect2) >= 0.02:
                #             #     print("find a white word match area")
                #             #     return 0, fig_rect, fig_line
                #             if edge_or_not(img1,sx1,sy1,edge) and white_area(srect1) >= 0.02:
                #                 print("find a edge white word match area")
                #                 return 0, fig_rect, fig_line
                #             if edge_or_not(img2,sx2,sy2,edge) and white_area(srect2) >= 0.02:
                #                 print("find a edge white word match area")
                #                 return 0, fig_rect, fig_line
                # # ------------ end  使用ocr检测，这里就不需要了 【先留着吧】-------------------
            elif (img1.shape[1] * img1.shape[0]) / area1 >= 100 or (img2.shape[1] * img2.shape[0]) / area2 >= 100:
                print("find a mini match area")
                return 0, fig_rect, fig_line

            if point_num2 / point_num1 < 0.7 or point_num1 / point_num2 < 0.7:
                print("find a individual point mismatch")
                return 0, fig_rect, fig_line

            if clas == "条带图" and hashcount > 100:
                print("条带图哈希过滤：", hashcount)
                return 0, fig_rect, fig_line
            elif clas == "条带图" and hashcount <= 100:
                # Second-stage hash check restricted to the matched boxes.
                rect1 = cut_rect(img1, x1, y1)
                if img2_flip is not None:
                    rect2 = cut_rect(img2_flip, x2, y2)
                else:
                    rect2 = cut_rect(img2, x2, y2)
                r1 = hash_String(rect1, 20, 4)
                r2 = hash_String(rect2, 20, 4)
                hashcount2 = Difference(r1, r2)
                if hashcount2 >= 15:
                    print("条带图框图内哈希过滤：", hashcount2)
                    return 0, fig_rect, fig_line

            if clas == "统计图" and hashcount > 15:
                print("统计图哈希过滤：", hashcount)
                return 0, fig_rect, fig_line

            if clas == "染色图":
                # o1 = remove_edge(img1, edge)
                # o2 = remove_edge(img2, edge)
                # fig_line = draw_matches(img1, img2, kp1, kp2, matches_gms, DrawingType.ONLY_LINES)
                fig_line = figure
                fig_line = remove_edge(fig_line, edge)
                fig_rect = draw_rect(img1, img2, x1, y1, x2, y2, edge)
                fig_rect = remove_edge(fig_rect, edge)

            else:
                fig_line = figure
                fig_rect = draw_rect(img1, img2, x1, y1, x2, y2, edge)

            ## Legitimate-reuse filter:
            # 1. Are both crops from the same source figure?
            if filepath1 == filepath2 and clas=="染色图":
                # 2. Are the two sub-figure boxes horizontally or vertically aligned?
                if abs(int(box1['ymin']) - int(box2['ymin'])) <= 10 or abs(int(box1['xmin']) - int(box2['xmin'])) <= 10:
                    if abs(img1.shape[0] - img2.shape[0]) <= 15 and abs(img1.shape[1] - img2.shape[1]) <= 15:
                        mid_height = int((img1.shape[0] + img2.shape[0]) / 2)
                        mid_width = int((img1.shape[1] + img2.shape[1]) / 2)
                        img1 = cv2.resize(img1, dsize=(mid_width, mid_height), interpolation=cv2.INTER_LINEAR)
                        img2 = cv2.resize(img2, dsize=(mid_width, mid_height), interpolation=cv2.INTER_LINEAR)
                        # 3. Pixel-wise comparison: does one image contain/dominate the other?
                        ratio1 = merge_filter(img1,img2)
                        ratio2 = merge_filter(img2,img1)
                        if ratio1 >=0.75 or ratio2 >= 0.75:
                            print("find a merge pair")
                            return 0, fig_rect, fig_line

            # Restore the original output size.
            if resize_back == True:
                if clas == "条带图":
                    dstheight = figure.shape[0] // 2  # shrink back to half height
                    dstwidth = figure.shape[1]
                    fig_line = cv2.resize(fig_line, (dstwidth, dstheight), 0, 0)
                    fig_rect = cv2.resize(fig_rect, (dstwidth, dstheight), 0, 0)
                elif clas == "染色图":
                    dstheight = fig_rect.shape[0] // 3  # undo the 3x upscale
                    dstwidth = fig_rect.shape[1] // 3
                    # fig = np.ascontiguousarray(fig)
                    fig_line = cv2.resize(fig_line, (dstwidth, dstheight), 0, 0)
                    fig_rect = cv2.resize(fig_rect, (dstwidth, dstheight), 0, 0)
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit —
        # consider narrowing to `except Exception:`.
        logger.exception("Exception occurred")
        return 0, fig_rect, fig_line

    return number, fig_rect, fig_line

