from flask_restx import Api
from gevent import pywsgi
import json
import platform
import shutil
import time
from exif import Image as ExifImage
import os
import numpy as np
import cv2
from get_video_code import convert_video_to_h264
from result_true import process_annotation_files
from flask_cors import *
from flask import Flask, request, jsonify
import os.path
import yaml
from ultralytics import YOLO
# from heavy import fake_heavy_task
from change_gpt import changes
from gevent.pool import Pool

model_link = r'F:\workstation\服务器代码\best20241230.pt'  # 通用模型
model_link1 = r'F:\workstation\服务器代码\fish20241229.pt'  # 钓鱼模型
pixel_to_cm = 4.2
sys = platform.system()


def load_yaml(file_path):
    """加载yaml文件并返回其中的 'names' 内容"""
    with open(file_path, 'r', encoding='utf-8') as f:
        result_dict = yaml.load(f.read(), Loader=yaml.FullLoader)
        return result_dict['names']


# 创建 Flask 应用
app1 = Flask(__name__)
app2 = Flask(__name__)

# 创建 API 实例
api1 = Api(app1)
api2 = Api(app2)


def load_model(model_type=0):  # 默认model_type=0
    # 根据模型类型选择对应的模型
    if model_type == 1:
        model = YOLO(model_link1)  # 加载钓鱼模型
        model_config = 'F:\\workstation\\ultralytics-main\\fish.yaml'
    else:
        model = YOLO(model_link)  # road.yaml需要改名
        model_config = 'F:\\workstation\\ultralytics-main\\road.yaml'

    # 加载配置文件
    result_dict = load_yaml(model_config)

    # 返回模型和配置
    return model, result_dict


def predict_img_new(image_path, model_type):
    # model = YOLO(model_link)

    model, result_dict = load_model(model_type)  # 正确解包 model 和 result_dict

    if sys == "Windows":
        dir_path = '\\'.join(image_path.split('\\')[:-1])
        image_name = image_path.split('\\')[-1]
    else:
        dir_path = '/'.join(image_path.split('/')[:-1])
        image_name = image_path.split('/')[-1]

    results = model.predict(image_path, device='cpu', save=True, save_txt=True, conf=0.5, iou=0.4)

    save_dir = results[0].save_dir

    image_folder = save_dir
    annotation_folder = os.path.join(save_dir, 'labels')
    process_annotation_files(image_folder, annotation_folder)
    result_txt = os.path.join(save_dir, 'labels', f"{image_name.split('.')[0]}_transformed.txt")
    data = {
        "file_path": dir_path,
        "correct_results": [
        ],
    }
    try:
        with open(result_txt) as fp:
            predict_result = fp.readlines()
        current_path = os.path.dirname(__file__)
        image_link = os.path.abspath(os.path.join(current_path, image_name))
        predict_result = [result.replace('\n', '').split(' ') for result in predict_result]
    except:
        predict_result = []
    for result in predict_result:
        bbox = {
            "[x1][y1]": [float(result[1]), float(result[2])],
            "[x2][y2]": [float(result[3]), float(result[4])],
            "[x3][y3]": [float(result[5]), float(result[6])],
            "[x4][y4]": [float(result[7]), float(result[8])],
        }
        # for i in range(8):
        #     bbox.append(float(result[i+1]))
        # area = (float(result[3]) - float(result[1])) * (float(result[6]) - float(result[4])) * 3.97 * 3.97/10000
        area = calculate_real_area(float(result[1]), float(result[2]), float(result[3]), float(result[4]),
                                   float(result[5]), float(result[6]), float(result[7]), float(result[8]), pixel_to_cm)
        data['correct_results'].append({
            "image_name": image_name,
            "datas": {
                "class_id": int(result[0]),
                "class_name": result_dict[int(result[0])],
                "bbox": bbox,
                "area": area
            }
        })
    # print(f"Results for {image_path}: {results}")
    return model, result_dict, data

def predict_img(image_path,model_type):
    #model = YOLO(model_link)


    model,result_dict= load_model(model_type)  # 调用时传入 model_type
    if sys == "Windows":
        dir_path = '\\'.join(image_path.split('\\')[:-1])
        image_name = image_path.split('\\')[-1]
    else:
        dir_path = '/'.join(image_path.split('/')[:-1])
        image_name = image_path.split('/')[-1]
    # results = model.predict(device="cpu", source=image_path, save=True,show_labels=True,show_conf=True,imgsz=640,line_width=10,save_txt=True)
    results = model.predict(image_path, device='cuda:0', save=True, save_txt=True, conf=0.3,  iou=0.2)

    save_dir = results[0].save_dir

    image_folder = save_dir
    annotation_folder = os.path.join(save_dir, 'labels')
    process_annotation_files(image_folder, annotation_folder)
    result_txt = os.path.join(save_dir, 'labels', f"{image_name.split('.')[0]}_transformed.txt")
    data = {
        "file_path": dir_path,
        "correct_results": [
        ],
    }
    try:
        with open(result_txt) as fp:
            predict_result = fp.readlines()
        current_path = os.path.dirname(__file__)
        image_link = os.path.abspath(os.path.join(current_path, image_name))
        predict_result = [result.replace('\n', '').split(' ') for result in predict_result]
    except:
        predict_result = []
    for result in predict_result:
        bbox = {
            "[x1][y1]": [float(result[1]), float(result[2])],
            "[x2][y2]": [float(result[3]), float(result[4])],
            "[x3][y3]": [float(result[5]), float(result[6])],
            "[x4][y4]": [float(result[7]), float(result[8])],
        }
        # for i in range(8):
        #     bbox.append(float(result[i+1]))
        # area = (float(result[3]) - float(result[1])) * (float(result[6]) - float(result[4])) * 3.97 * 3.97/10000
        area=calculate_real_area(float(result[1]), float(result[2]),float(result[3]), float(result[4]),float(result[5]), float(result[6]),float(result[7]), float(result[8]),pixel_to_cm)
        data['correct_results'].append({
            "image_name": image_name,
            "datas": {
                 "class_id": int(result[0]),
                 "class_name": result_dict[int(result[0])],
                 "bbox": bbox,
                 "area":area
            }
         })

    return data

@cross_origin(supports_credentials=True)
@app1.route('/predict', methods=['POST'])
def hello_world():
    try:
        # base_dir = 'datas'
        base_predicted_dir = 'runs'
        try:
            for file in os.listdir(os.path.join('runs', 'obb')):
                shutil.rmtree(os.path.join('runs', 'obb', file))  # 递归删除文件夹
        except:
            pass
        file_path = request.json.get("inputPath")
        out_path = request.json.get("outputPath")
        video_out_path = request.json.get("videoPath")
        model_type = request.json.get("model_type")

        out_path_dir_name = os.path.dirname(out_path)
        if not os.path.exists(out_path_dir_name):
            os.makedirs(out_path_dir_name)
        # files = os.listdir(os.path.join(base_dir, file_path))
        files = os.listdir(os.path.join(file_path))
        results = []
        for file in files:
            # predict_img(r'D:\Work\PythonProjects\wyr\nty\yolo\datas\bus.jpeg')
            # img_path = os.path.join(base_dir, file_path, file)
            img_path = os.path.join(file_path, file)
            result = predict_img(img_path, model_type)
            results.append(result)

        first_file = files[0]
        # first_file_path = os.path.join(base_dir, file_path, first_file)
        first_file_path = os.path.join(file_path, first_file)

        img = cv2.imread(first_file_path)
        (h, w, c) = img.shape
        fps = 1
        out_path_dir_name1 = os.path.dirname(video_out_path)
        if not os.path.exists(out_path_dir_name1):
            os.makedirs(out_path_dir_name1)
        # out = cv2.VideoWriter(video_out_path, cv2.VideoWriter_fourcc(*'XVID'), fps, (w, h), True)
        out = cv2.VideoWriter(video_out_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h), True)
        predicted_dirs = os.listdir(os.path.join(base_predicted_dir, 'obb'))
        predicted_images_path = []
        for predicted_dir in predicted_dirs:
            files = os.listdir(os.path.join(base_predicted_dir, 'obb', predicted_dir))
            for file in files:
                if not os.path.isdir(os.path.join(base_predicted_dir, 'obb', predicted_dir, file)):
                    predicted_images_path.append(os.path.join(base_predicted_dir, 'obb', predicted_dir, file))
        for file in predicted_images_path:
            img = cv2.imread(file)
            out.write(img)
        out.release()
        convert_video_to_h264(video_out_path)
        res = {
            'results': results
        }
        with open(out_path, 'w') as f:
            f.write(json.dumps(res, separators=(',', ':')))
        return jsonify({"code": 200, "message": "识别检测成功", "datas": ""})
    except Exception as e:
        msg = str(e)
        return jsonify({"code": 500, "message": msg, "datas": ""})


def calculate_real_area(x1, y1, x2, y2, x3, y3, x4, y4, pixel_to_cm_ratio):
    """
    Calculate the real area of an oriented bounding box (OBB) given by four vertices,
    where each pixel represents a certain number of centimeters.

    Args:
    coords (list of tuples): List of four tuples, each containing the x and y coordinates of a vertex in pixels.
    pixel_to_cm_ratio (float): Number of centimeters each pixel represents.

    Returns:
    float: The real area of the oriented bounding box in square meters.
    """
    # Calculate the pixel area using the shoelace formula

    pixel_area = abs(
        (x1 * y2 + x2 * y3 + x3 * y4 + x4 * y1) -
        (y1 * x2 + y2 * x3 + y3 * x4 + y4 * x1)
    ) / 2

    # Convert pixel area to square centimeters
    cm_area = pixel_area * (pixel_to_cm_ratio ** 2)

    # Convert square centimeters to square meters
    real_area = cm_area / 10000

    return real_area


# Compare部分的修改
def sift_feature_matching(img1, img2):
    sift = cv2.SIFT_create()
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)

    index_params = dict(algorithm=1, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    good_matches = [m for m, n in matches if m.distance < 0.5 * n.distance]
    return kp1, kp2, good_matches


# Compare部分的修改
# Compare部分的修改
def find_overlap_region(img1, img2, kp1, kp2, matches):
    # 如果匹配点数少于4个，则跳过此对图片
    if len(matches) < 4:
        #print("匹配点数量不足，跳过此对图片")
        return None

    # 提取匹配的点对
    src_pts = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)

    # 计算单应性矩阵
    H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

    if H is None:
        return None

    # 获取图片1的尺寸
    h1, w1 = img1.shape[:2]
    pts = np.float32([[0, 0], [0, h1 - 1], [w1 - 1, h1 - 1], [w1 - 1, 0]]).reshape(-1, 1, 2)

    # 透视变换，计算重叠区域
    dst = cv2.perspectiveTransform(pts, H)

    # 计算重叠区域的坐标
    x_min = max(0, int(np.min(dst[:, 0, 0])))
    x_max = min(img2.shape[1], int(np.max(dst[:, 0, 0])))
    y_min = max(0, int(np.min(dst[:, 0, 1])))
    y_max = min(img2.shape[0], int(np.max(dst[:, 0, 1])))

    return (x_min, y_min, x_max - x_min, y_max - y_min)



# 裁剪图像2并保存
def crop_and_save_image(img2, overlap_rect, save_path):
    """
    根据重叠区域裁剪图像2，并保存裁剪后的图像。

    :param img2: 图像2，待裁剪的图像
    :param overlap_rect: 重叠区域的矩形框 (x, y, w, h)
    :param save_path: 保存裁剪后图像的路径
    """
    if overlap_rect is None:

        return  # 如果重叠区域无效，跳过裁剪
    x, y, w, h = overlap_rect

    # print(f"裁剪前图像尺寸: {img2.shape[:2]} (height, width)")  # 打印裁剪前的图像尺寸
    # print(f"裁剪区域: (x={x}, y={y}, w={w}, h={h})")  # 打印裁剪的坐标和尺寸

    cropped_img = img2[y:y + h, x:x + w]

    # print(f"裁剪后图像尺寸: {cropped_img.shape[:2]} (height, width)")  # 打印裁剪后的图像尺寸

    cv2.imwrite(save_path, cropped_img)
    print(f"裁剪后的图像已保存到: {save_path}")


# 绘制检测框
def draw_boxes(image, boxes, color, thickness=5, font_scale=3, font_thickness=4):
    """
    在图像上绘制 OBB (Oriented Bounding Box) 目标框，并在左上角显示类别标签。

    :param image: 输入的图像
    :param boxes: 目标框的列表，每个框包含 (cx, cy, w, h, angle, class_id, class_name)
    :param color: 框的颜色
    :param thickness: 框的线条厚度
    :param font_scale: 字体大小
    :param font_thickness: 字体厚度
    """
    for box in boxes:
        cx, cy, w, h, angle = box['processed_bbox']

        # 从 box 中提取 class_id 和 class_name
        cls = box['class_id']
        class_name = box['class_name']

        # 使用旋转矩形的数据计算四个角的坐标
        rect = ((cx, cy), (w, h), angle)
        box_points = cv2.boxPoints(rect)  # 获取四个角的坐标
        box_points = np.int32(box_points)  # 转换为整数

        # 在图像上绘制旋转矩形
        cv2.polylines(image, [box_points], isClosed=True, color=color, thickness=thickness)

        # 计算左上角坐标，作为文本的位置
        x1, y1 = box_points[0]  # 左上角的坐标
        label = f"{class_name}"  # 使用类别名称作为标签

        # 设置字体、颜色等
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(image, label, (x1, y1 - 10), font, font_scale, (255, 0, 0), font_thickness, cv2.LINE_AA)  # 在左上角显示类别


# 新函数：提取框的信息
def extract_obb_boxes(results, confidence_threshold=0.5):
    """
    从检测结果中提取 OBB（Oriented Bounding Box）框，并筛选出符合置信度阈值的框。

    :param results: YOLO 模型的预测结果
    :param confidence_threshold: 置信度阈值，低于该阈值的框会被忽略
    :return: 筛选后的 OBB 检测框列表，每个框包含 (cx, cy, w, h, angle, class_id, class_name, original_bbox)
    """
    boxes = []

    # 遍历检测结果中的每一项
    for result in results['correct_results']:
        if 'datas' in result and 'bbox' in result['datas']:
            bbox = result['datas']['bbox']
            conf = result['datas'].get('confidence', 1.0)  # 获取框的置信度，默认为1.0
            if conf >= confidence_threshold:
                # 获取四个角的坐标
                x1, y1 = bbox["[x1][y1]"]
                x2, y2 = bbox["[x2][y2]"]
                x3, y3 = bbox["[x3][y3]"]
                x4, y4 = bbox["[x4][y4]"]

                # 将四个角点的坐标转换为 NumPy 数组
                points = np.array([[x1, y1], [x2, y2], [x3, y3], [x4, y4]], dtype=np.float32)

                # 使用 minAreaRect 计算旋转矩形
                rect = cv2.minAreaRect(points)  # 获取旋转矩形 (center, (width, height), angle)
                (cx, cy), (w, h), angle = rect

                # 如果角度小于 -45，就加90度，以确保旋转矩形的角度在 (-45, 45) 之间
                if angle < -45:
                    angle += 90

                # 获取 class_id 或 class_name
                class_id = result['datas'].get('class_id', -1)  # 默认使用 -1 表示未知类
                class_name = result['datas'].get('class_name', 'Unknown')  # 获取类别名称

                # 获取原始框的四个角坐标
                original_bbox = {
                    "[x1][y1]": bbox["[x1][y1]"],
                    "[x2][y2]": bbox["[x2][y2]"],
                    "[x3][y3]": bbox["[x3][y3]"],
                    "[x4][y4]": bbox["[x4][y4]"]
                }

                # 将数据保存到框列表中
                boxes.append({
                    "processed_bbox": (cx, cy, w, h, angle),  # 旋转矩形数据
                    "original_bbox": original_bbox,  # 原始框的四个角坐标
                    "class_id": class_id,
                    "class_name": class_name
                })  # 使用 class_id 和 class_name 来区分框的类别

    return boxes


def rotate_to_bbox(cx, cy, w, h, angle):
    """
    将旋转矩形转换为四个角的坐标。

    :param cx: 中心点 x
    :param cy: 中心点 y
    :param w: 宽度
    :param h: 高度
    :param angle: 旋转角度
    :return: 旋转矩形的四个角坐标
    """
    rect = ((cx, cy), (w, h), angle)
    box_points = cv2.boxPoints(rect)  # 获取四个角的坐标
    box_points = np.int32(box_points)  # 转换为整数坐标
    return box_points


# 判断是否相似函数
def is_similar_box(box1, box2, distance_threshold=250, angle_threshold=25, area_threshold=0.30):
    """
    判断两个 OBB 框是否相似。

    :param box1: 第一个 OBB 框，包含中心坐标、宽度、高度、旋转角度和类别
    :param box2: 第二个 OBB 框，包含中心坐标、宽度、高度、旋转角度和类别
    :param distance_threshold: 最大允许的距离差异
    :param angle_threshold: 最大允许的角度差异
    :param area_threshold: 最大允许的面积差异
    :return: 如果两个框相似，则返回 True，否则返回 False
    """
    # 提取 box1 和 box2 的 processed_bbox 数据和 class_id
    cx1, cy1, w1, h1, angle1 = box1['processed_bbox']  # 解包前5个值
    cx2, cy2, w2, h2, angle2 = box2['processed_bbox']  # 同上
    cls1 = box1['class_id']  # 类别 ID
    cls2 = box2['class_id']  # 类别 ID

    # 计算框1和框2的中心坐标 (cx, cy)
    distance = np.sqrt((cx1 - cx2) ** 2 + (cy1 - cy2) ** 2)
  #  print(f"计算的距离：{distance}, 距离阈值：{distance_threshold}")

    # 计算框1和框2的旋转角度差异
    angle_diff = abs(angle1 - angle2)  # 角度差异取绝对值
    if angle_diff > 45:
        angle_diff = 90 - angle_diff  # 调整角度差异在0-45度范围内

   # print(f"计算的角度差异：{angle_diff}, 角度阈值：{angle_threshold}")

    # 计算框的面积，假设框的宽度和高度代表矩形框的面积
    area1 = w1 * h1
    area2 = w2 * h2

    # 计算面积差异
    area_diff = abs(area1 - area2) / max(area1, area2)
  #  print(f"计算的面积差异：{area_diff}, 面积阈值：{area_threshold}")

    # 判断框是否相似：距离、角度和面积都在阈值范围内
    is_similar = (distance < distance_threshold and
                  angle_diff < angle_threshold and
                  area_diff < area_threshold and
                  cls1 == cls2)  # 类别必须相同

   # print(f"两个框是否相似：{is_similar}")
    return is_similar


# Compare部分的修改
# 修改 compare 路由中的相关部分：
@cross_origin(supports_credentials=True)
@app2.route('/compare', methods=['POST'])
def compare():
    try:

        try:
            for file in os.listdir(os.path.join('runs', 'obb')):
                shutil.rmtree(os.path.join('runs', 'obb', file))  # 递归删除文件夹
        except:
            pass
        # 获取传递的 JSON 文件路径
        folder1 = request.json.get('image_path1')
        folder2 = request.json.get('image_path2')
        output_path = request.json.get('output_path', "compare_results.json")  # 默认保存为 compare_results.json


        # 确保文件夹路径有效
        if not folder1 or not folder2:
            return jsonify({"code": 400, "message": "Both image folder paths are required."}), 400

        # 检查文件夹路径是否存在
        if not os.path.exists(folder1) or not os.path.exists(folder2):
            return jsonify({"code": 400, "message": "One or both image folders do not exist."}), 400

        # # 创建一个唯一的文件夹来保存本次任务的所有文件
        # task_folder = os.path.join('tasks', 'compare_' + time.strftime("%Y%m%d_%H%M%S", time.localtime()))
        # os.makedirs(task_folder, exist_ok=True)
        #
        # # 在任务文件夹下创建子文件夹：change_images 和 cropped_image_with_boxes
        # change_images_subfolder = os.path.join(task_folder, 'change_images')
        # cropped_image_with_boxes_subfolder = os.path.join(task_folder, 'cropped_image_with_boxes')
        # 处理路径，如果路径包含 'date'，则替换为 'crop_date'
        if 'data' in folder1:
            crop_image_path1 = folder1.replace('data', 'crop_data')
        else:
            # 如果路径不包含 'date'，可以选择默认路径处理
            crop_image_path1 = os.path.join(folder1, 'crop_data')  # 或者你可以直接设置为默认路径

        if 'data' in folder2:
            crop_image_path2 = folder2.replace('data', 'crop_data')
        else:
            # 同上
            crop_image_path2 = os.path.join(folder2, 'crop_data')

        os.makedirs(crop_image_path1, exist_ok=True)  # 保存基准裁剪图像
        os.makedirs(crop_image_path2, exist_ok=True) # 保存对比裁剪图像

 #       os.makedirs(cropped_image_with_boxes_subfolder, exist_ok=True)  # 保存带框图像

        # # 3. 初始化变量
        # results = []  # 用于存储每个图像的处理结果
        # change_images_dir = "change_images"  # 保存变化图片的文件夹
        # if not os.path.exists(change_images_dir):
        #     os.makedirs(change_images_dir)  # 如果文件夹不存在，则创建
        results = []  # 用于存储每个图像的处理结果
        # 4. 遍历 folder1 中的所有图片
        # 4. 遍历 folder1 中的所有图片
        # 添加一个集合来记录已经匹配的 folder2 图片
        matched_folder2_files_set = set()  # 用于记录已匹配的 folder2 中的图片

        for filename in os.listdir(folder1):
            if filename.lower().endswith(('.jpg', '.jpeg', '.png')):  # 仅处理图片文件
                file_path1 = os.path.join(folder1, filename)  # 获取图片的完整路径
                gps1 = get_gps_from_image(file_path1)  # 提取该图片的 GPS 信息

                if gps1:
                    # 5. 在 folder2 中寻找与当前图片相匹配的图片（基于 GPS）
                    matched_file = find_closest_image(gps1, folder2)
                    if matched_file:
                        filename2 = os.path.basename(matched_file)
                        # 6. 使用 SIFT 特征匹配，找到重叠区域
                        img1 = cv2.imread(file_path1)
                        img2 = cv2.imread(matched_file)

                        if img1 is None or img2 is None:
                            print("无法加载图片")
                            continue

                        # 使用 SIFT 特征匹配
                        kp1, kp2, good_matches = sift_feature_matching(img1, img2)

                        # 7. 计算重叠区域
                        overlap_rect = find_overlap_region(img1, img2, kp1, kp2, good_matches)
                        if overlap_rect is None:
                            print(f"匹配点数量不足，跳过此对图片: {filename} 和 {filename2}")
                            continue  # 跳过当前循环，直接进行下一个匹配对

                        if overlap_rect:
                            # 8. 裁剪图像的重叠区域并保存
                            save_path2 = os.path.join(crop_image_path2, f"{filename2}")
                            crop_and_save_image(img2, overlap_rect, save_path2)

                        # 6. 使用 SIFT 特征匹配，找到重叠区域
                        img2 = cv2.imread(file_path1)
                        img1 = cv2.imread(matched_file)

                        if img1 is None or img2 is None:
                            print("无法加载图片")
                            continue

                        # 使用 SIFT 特征匹配
                        kp1, kp2, good_matches = sift_feature_matching(img1, img2)

                        # 7. 计算重叠区域
                        overlap_rect = find_overlap_region(img1, img2, kp1, kp2, good_matches)
                        if overlap_rect is None:
                            print(f"匹配点数量不足，跳过此对图片: {filename} 和 {filename2}")
                            continue  # 跳过当前循环，直接进行下一个匹配对

                        if overlap_rect:
                            save_path1 = os.path.join(crop_image_path1, f"{filename}")
                            crop_and_save_image(img2, overlap_rect, save_path1)
                            # # 1. 在裁剪后的图像上绘制目标框
                            # cropped_img1 = img1[overlap_rect[1]:overlap_rect[1] + overlap_rect[3],
                            #                overlap_rect[0]:overlap_rect[0] + overlap_rect[2]]
                            # cropped_img2 = img2[overlap_rect[1]:overlap_rect[1] + overlap_rect[3],
                            #                overlap_rect[0]:overlap_rect[0] + overlap_rect[2]]

                            model, result_dict, result1 = predict_img_new(save_path1, model_type=0)  # 获取预测结果和result_dict
                            model, result_dict, result2 = predict_img_new(save_path2, model_type=0)

                            # 提取 OBB 框并比较
                            boxes1 = extract_obb_boxes(result1, confidence_threshold=0.5)
                            boxes2 = extract_obb_boxes(result2, confidence_threshold=0.5)

                            # 筛选独有的检测框
                            unique_boxes1 = [box for box in boxes1 if
                                             all(not is_similar_box(box, other_box) for other_box in boxes2)]
                            unique_boxes2 = [box for box in boxes2 if
                                             all(not is_similar_box(box, other_box) for other_box in boxes1)]

                            # 更新 result 数据
                            added_features = []
                            removed_features = []

                            # 保存新增框
                            for box in unique_boxes2:
                                x1, y1 = box['original_bbox']['[x1][y1]']
                                x2, y2 = box['original_bbox']['[x2][y2]']
                                x3, y3 = box['original_bbox']['[x3][y3]']
                                x4, y4 = box['original_bbox']['[x4][y4]']

                                added_features.append({
                                    'bbox': box['original_bbox'],
                                    'area': calculate_real_area(x1, y1, x2, y2, x3, y3, x4, y4, pixel_to_cm),
                                    'class_id': box['class_id'],
                                    'class_name': box['class_name']
                                })

                            # 保存删除框
                            for box in unique_boxes1:
                                x1, y1 = box['original_bbox']['[x1][y1]']
                                x2, y2 = box['original_bbox']['[x2][y2]']
                                x3, y3 = box['original_bbox']['[x3][y3]']
                                x4, y4 = box['original_bbox']['[x4][y4]']

                                removed_features.append({
                                    'bbox': box['original_bbox'],
                                    'area': calculate_real_area(x1, y1, x2, y2, x3, y3, x4, y4, pixel_to_cm),
                                    'class_id': box['class_id'],
                                    'class_name': box['class_name']
                                })

                            if added_features or removed_features:
                                result = {
                                    "added_features": added_features,
                                    "image_name1": filename,
                                    "image_name2": filename2,
                                    "removed_features": removed_features
                                }
                                # 将有变化的结果添加到 `results` 中
                                results.append(result)

        # 所有图片处理完成后，再返回响应
        res = {
            "code": 200,
            "data": {
                "base_task_path": crop_image_path1,
                "current_task_path": crop_image_path2,
                "results": results
            }
        }

        # 保存结果到 output_path
        with open(output_path, 'w', encoding='utf-8') as f:
            f.write(json.dumps(res, separators=(',', ':')))

        # 返回成功响应
        return jsonify(res)
    except Exception as e:
        import traceback
        traceback.print_exc()  # 打印详细的异常信息
        return jsonify({"code": 500, "message": str(e), "datas": ""})
        # 绘制独有检测框到裁剪后的图像
                            # if unique_boxes1:
                            #     print(f"绘制 {len(unique_boxes1)} 个红框")  # 打印绘制红框数量
                            #     draw_boxes(cropped_img1, unique_boxes1, (0, 0, 255), thickness=2)  # 红框为图像1独有
                            # if unique_boxes2:
                            #     print(f"绘制 {len(unique_boxes2)} 个绿框")  # 打印绘制绿框数量
                            #     draw_boxes(cropped_img2, unique_boxes2, (0, 255, 0), thickness=2)  # 绿框为图像2独有

                            # # 1. 定义一个新的文件夹用于保存带框的裁剪图像
                            # boxed_images_dir = "cropped_image_with_boxes"  # 新文件夹的名称
                            # if not os.path.exists(boxed_images_dir):
                            #     os.makedirs(boxed_images_dir)  # 如果文件夹不存在，创建新的文件夹

                            # # 2. 修改保存路径，使用新的文件夹
                            # save_path3 = os.path.join(cropped_image_with_boxes_subfolder,
                            #                           f"cropped_image1_with_boxes_{filename}")
                            # save_path4 = os.path.join(cropped_image_with_boxes_subfolder,
                            #                           f"cropped_image2_with_boxes_{filename}")

                            # # 3. 保存带框的裁剪图像
                            # cv2.imwrite(save_path3, cropped_img1)  # 保存带框的裁剪图像1
                            # cv2.imwrite(save_path4, cropped_img2)  # 保存带框的裁剪图像2


                            # # 获取文件所在文件夹的路径并转换为绝对路径
                            # save_path1_dir = os.path.abspath(os.path.dirname(save_path1))
                            # save_path2_dir = os.path.abspath(os.path.dirname(save_path2))
                            # 更新 result 数据
                                # 更新 result 数据
                            # 保存变化检测信息



def get_gps_from_image(image_path):
    """从图片中提取 GPS 信息"""
    try:
        with open(image_path, 'rb') as img_file:
            img = ExifImage(img_file)
            if img.has_exif and hasattr(img, 'gps_latitude') and hasattr(img, 'gps_longitude'):
                lat = convert_to_degrees(img.gps_latitude)
                lon = convert_to_degrees(img.gps_longitude)

                if hasattr(img, 'gps_latitude_ref') and img.gps_latitude_ref == "S":
                    lat = -lat
                if hasattr(img, 'gps_longitude_ref') and img.gps_longitude_ref == "W":
                    lon = -lon

                return lat, lon
    except Exception as e:
        print(f"Error reading GPS from {image_path}: {e}")
    return None


def get_exif_gps(file_path):
    """获取图片的经纬度信息并转换为十进制格式"""
    with open(file_path, 'rb') as image_file:
        img = ExifImage(image_file)
        if img.has_exif:
            if img.gps_latitude and img.gps_longitude:
                lat = convert_to_degrees(img.gps_latitude)
                lon = convert_to_degrees(img.gps_longitude)

                # 增加对 GPS 参考的检查，避免 RuntimeWarning
                if hasattr(img, 'gps_latitude_ref') and img.gps_latitude_ref == "S":  # 南纬
                    lat = -lat
                if hasattr(img, 'gps_longitude_ref') and img.gps_longitude_ref == "W":  # 西经
                    lon = -lon
                return lat, lon
    return None


def convert_to_degrees(value):
    """将经纬度从度、分、秒格式转换为十进制数"""
    d, m, s = value
    return d + (m / 60.0) + (s / 3600.0)


def find_closest_image(target_coords, folder_path, threshold=0.0005):
    """在文件夹中找到与目标 GPS 坐标最接近的图片"""
    closest_image = None
    min_distance = float('inf')

    for filename in os.listdir(folder_path):
        if filename.lower().endswith(('.jpg', '.jpeg', '.png')):  # 仅处理图片文件
            file_path = os.path.join(folder_path, filename)
            coords = get_gps_from_image(file_path)
            if coords:
                distance = calculate_distance(target_coords, coords)

                # 如果距离小于当前最小距离并且小于阈值，则更新最接近的图片
                if distance < min_distance and distance <= threshold:
                    min_distance = distance
                    closest_image = file_path

    # 如果最接近的图片距离仍然大于阈值，返回 None
    if closest_image is None:
        print(f"没有找到与目标 GPS 坐标匹配的图片 (阈值: {threshold})")

    return closest_image


def calculate_distance(coord1, coord2):
    """计算两个 GPS 坐标的欧几里得距离"""
    return np.sqrt((coord1[0] - coord2[0]) ** 2 + (coord1[1] - coord2[1]) ** 2)


if __name__ == '__main__':
    # 启动第一个服务器（5000端口）
    server1 = pywsgi.WSGIServer(('0.0.0.0', 5002), app1)
    print('服务器1已创建，正在监听端口 5002...')

    # 启动第二个服务器（5001端口）
    server2 = pywsgi.WSGIServer(('0.0.0.0', 5001), app2)
    print('服务器2已创建，正在监听端口 5001...')
    # 使用 gevent 的 Pool 来并行运行两个服务器

    green_pool = Pool(2)  # 创建一个绿色线程池，最多并发2个任务
    green_pool.spawn(server1.serve_forever)  # 启动第一个服务器
    green_pool.spawn(server2.serve_forever)  # 启动第二个服务器

    # 阻塞主线程，让服务器一直运行
    green_pool.join()  # 等待所有绿色线程（服务器）结束


