from flask_restx import Api
from gevent import pywsgi
import json
import platform
import shutil
from exif import Image as ExifImage
import os
import numpy as np
import cv2
from get_video_code import convert_video_to_h264
from result_true import process_annotation_files
from flask_cors import *
from flask import Flask, request, jsonify
import os.path
import yaml
from ultralytics import YOLO

from gevent.pool import Pool

model_link = r'best20250223.pt'  # generic model weights
model_link1 = r'fish20241229.pt'  # fishing-scene model weights
pixel_to_cm = 4.2  # centimeters represented by one image pixel (used for area conversion)
sys = platform.system()  # OS name, e.g. "Windows"/"Linux"; NOTE: shadows the stdlib module name `sys`


def load_yaml(file_path):
    """Load a YOLO dataset YAML config and return its 'names' mapping.

    :param file_path: path to the YAML file (e.g. 'road.yaml')
    :return: the 'names' entry, mapping class id -> class name
    :raises KeyError: if the file has no 'names' key
    """
    with open(file_path, 'r', encoding='utf-8') as f:
        # safe_load is sufficient for plain config data and, unlike
        # FullLoader, cannot instantiate arbitrary Python objects.
        return yaml.safe_load(f)['names']


# Flask application; flask-restx Api wraps it (no resources are registered
# on `api` anywhere in this file — routes are added with @app.route directly).
app = Flask(__name__)
api = Api(app)


def load_model(model_type=0):
    """Return the YOLO model and class-name mapping for the given type.

    :param model_type: 1 selects the fishing model; any other value
        (default 0) selects the generic model.
    :return: (model, names) where names maps class id -> class name
    """
    if model_type == 1:
        weights, config_file = model_link1, 'fish.yaml'   # fishing model
    else:
        weights, config_file = model_link, 'road.yaml'    # generic model

    # Load the weights and the matching class-name config together.
    return YOLO(weights), load_yaml(config_file)


def predict_img(image_path, model_type):
    """Run OBB detection on a single image and collect its boxes.

    :param image_path: path to the input image
    :param model_type: 1 selects the fishing model, anything else the generic one
    :return: dict with "file_path" (the image's directory) and
        "correct_results" — one record per detected box holding class id,
        class name, the four corner points, and the real-world area.
    """
    model, result_dict = load_model(model_type)

    # os.path handles the platform separator, replacing the manual
    # per-OS '\\' vs '/' splitting the original did.
    dir_path = os.path.dirname(image_path)
    image_name = os.path.basename(image_path)

    results = model.predict(image_path, device='cpu', save=True, save_txt=True, conf=0.3, iou=0.2)
    save_dir = results[0].save_dir

    # Post-process YOLO's label files into '*_transformed.txt' files
    # containing absolute pixel coordinates.
    annotation_folder = os.path.join(save_dir, 'labels')
    process_annotation_files(save_dir, annotation_folder)
    # NOTE(review): split('.')[0] truncates at the FIRST dot; assumes image
    # names contain a single dot — confirm against process_annotation_files.
    result_txt = os.path.join(annotation_folder, f"{image_name.split('.')[0]}_transformed.txt")

    data = {
        "file_path": dir_path,
        "correct_results": [],
    }
    try:
        # Each line: class_id followed by four (x, y) corner coordinates.
        with open(result_txt) as fp:
            predict_result = [line.replace('\n', '').split(' ') for line in fp]
    except OSError:
        # No label file means no detections for this image — not an error.
        predict_result = []

    for result in predict_result:
        bbox = {
            "[x1][y1]": [float(result[1]), float(result[2])],
            "[x2][y2]": [float(result[3]), float(result[4])],
            "[x3][y3]": [float(result[5]), float(result[6])],
            "[x4][y4]": [float(result[7]), float(result[8])],
        }
        # Real-world area (m^2) via the shoelace formula on the corners.
        area = calculate_real_area(
            float(result[1]), float(result[2]),
            float(result[3]), float(result[4]),
            float(result[5]), float(result[6]),
            float(result[7]), float(result[8]),
            pixel_to_cm,
        )
        data['correct_results'].append({
            "image_name": image_name,
            "datas": {
                "class_id": int(result[0]),
                "class_name": result_dict[int(result[0])],
                "bbox": bbox,
                "area": area,
            },
        })

    return data

@app.route('/predict', methods=['POST'])
@cross_origin(supports_credentials=True)
def hello_world():
    """POST /predict — run detection on every image in ``inputPath``.

    Request JSON: inputPath (image folder), outputPath (result JSON file),
    videoPath (output video file), model_type (1 = fishing model).
    Writes the per-image results to outputPath, assembles the annotated
    frames into an mp4 at videoPath, and returns a status JSON.

    BUG FIX: @app.route must be the OUTERMOST decorator; with
    @cross_origin listed above it (as before), Flask registered the
    un-wrapped view and the CORS decorator had no effect.
    """
    try:
        base_predicted_dir = 'runs'
        # Clear previous prediction output so stale runs are not collected.
        try:
            for file in os.listdir(os.path.join('runs', 'obb')):
                shutil.rmtree(os.path.join('runs', 'obb', file))
        except OSError:
            # 'runs/obb' may not exist yet on the first request; that is fine.
            pass

        file_path = request.json.get("inputPath")
        out_path = request.json.get("outputPath")
        video_out_path = request.json.get("videoPath")
        model_type = request.json.get("model_type")

        out_path_dir_name = os.path.dirname(out_path)
        if not os.path.exists(out_path_dir_name):
            os.makedirs(out_path_dir_name)

        files = os.listdir(file_path)
        if not files:
            # Guard: the original crashed with IndexError at files[0].
            return jsonify({"code": 500, "message": "input directory is empty", "datas": ""})

        results = [predict_img(os.path.join(file_path, file), model_type) for file in files]

        # The first image fixes the output video's frame size.
        first_file_path = os.path.join(file_path, files[0])
        img = cv2.imread(first_file_path)
        (h, w, c) = img.shape
        fps = 1

        video_dir = os.path.dirname(video_out_path)
        if not os.path.exists(video_dir):
            os.makedirs(video_dir)
        out = cv2.VideoWriter(video_out_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h), True)

        # Collect every annotated image YOLO saved under runs/obb/*/
        # (skipping the 'labels' subdirectories).
        predicted_images_path = []
        for predicted_dir in os.listdir(os.path.join(base_predicted_dir, 'obb')):
            run_dir = os.path.join(base_predicted_dir, 'obb', predicted_dir)
            for file in os.listdir(run_dir):
                candidate = os.path.join(run_dir, file)
                if not os.path.isdir(candidate):
                    predicted_images_path.append(candidate)

        for file in predicted_images_path:
            out.write(cv2.imread(file))
        out.release()
        convert_video_to_h264(video_out_path)

        res = {
            'results': results
        }
        with open(out_path, 'w') as f:
            f.write(json.dumps(res, separators=(',', ':')))
        return jsonify({"code": 200, "message": "识别检测成功", "datas": ""})
    except Exception as e:
        # Top-level boundary: report any failure as a 500-style JSON payload.
        return jsonify({"code": 500, "message": str(e), "datas": ""})


def calculate_real_area(x1, y1, x2, y2, x3, y3, x4, y4, pixel_to_cm_ratio):
    """Compute the real-world area (m^2) of a quadrilateral given in pixels.

    Applies the shoelace formula to the four corner points, scales the
    pixel area by the cm-per-pixel ratio squared, then converts cm^2 -> m^2.

    :param x1..y4: corner coordinates in pixel units, in winding order
    :param pixel_to_cm_ratio: centimeters represented by one pixel
    :return: area in square meters
    """
    # Shoelace formula: half the absolute difference of the two cross sums.
    forward = x1 * y2 + x2 * y3 + x3 * y4 + x4 * y1
    backward = y1 * x2 + y2 * x3 + y3 * x4 + y4 * x1
    pixel_area = abs(forward - backward) / 2

    # pixels^2 -> cm^2, then cm^2 -> m^2.
    cm_area = pixel_area * (pixel_to_cm_ratio ** 2)
    return cm_area / 10000


# Compare部分的修改
def sift_feature_matching(img1, img2):
    """Match SIFT keypoints between two images using FLANN + Lowe's ratio test.

    :param img1: first image (query)
    :param img2: second image (train)
    :return: (keypoints1, keypoints2, good_matches) where good_matches
        passed the 0.5 ratio test.
    """
    detector = cv2.SIFT_create()
    keypoints1, descriptors1 = detector.detectAndCompute(img1, None)
    keypoints2, descriptors2 = detector.detectAndCompute(img2, None)

    # FLANN with a KD-tree index (algorithm=1), two nearest neighbours each.
    matcher = cv2.FlannBasedMatcher(dict(algorithm=1, trees=5), dict(checks=50))
    candidate_pairs = matcher.knnMatch(descriptors1, descriptors2, k=2)

    # Lowe's ratio test: keep a match only when it clearly beats the runner-up.
    good_matches = []
    for best, runner_up in candidate_pairs:
        if best.distance < 0.5 * runner_up.distance:
            good_matches.append(best)
    return keypoints1, keypoints2, good_matches


# Compare部分的修改
# Compare部分的修改
def find_overlap_region(img1, img2, kp1, kp2, matches):
    """Project img1's borders into img2 via a homography and return the overlap.

    :param img1: first image
    :param img2: second image
    :param kp1: keypoints of img1
    :param kp2: keypoints of img2
    :param matches: good matches between kp1 and kp2
    :return: (x, y, width, height) of the overlap rectangle in img2's
        coordinates, or None when there are too few matches or no
        homography can be estimated.
    """
    # A homography needs at least four point correspondences.
    if len(matches) < 4:
        return None

    # Gather the matched coordinate pairs in the shape cv2 expects.
    src_pts = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)

    # Robust homography estimation with RANSAC (5.0 px reprojection threshold).
    H, _inlier_mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    if H is None:
        return None

    # Map the four corners of img1 into img2's coordinate frame.
    h1, w1 = img1.shape[:2]
    corners = np.float32(
        [[0, 0], [0, h1 - 1], [w1 - 1, h1 - 1], [w1 - 1, 0]]
    ).reshape(-1, 1, 2)
    projected = cv2.perspectiveTransform(corners, H)

    # Clamp the projected extent to img2's bounds.
    x_lo = max(0, int(np.min(projected[:, 0, 0])))
    x_hi = min(img2.shape[1], int(np.max(projected[:, 0, 0])))
    y_lo = max(0, int(np.min(projected[:, 0, 1])))
    y_hi = min(img2.shape[0], int(np.max(projected[:, 0, 1])))

    return (x_lo, y_lo, x_hi - x_lo, y_hi - y_lo)



def extract_obb_boxes(bboxes):
    """Convert corner-format boxes into rotated-rectangle (OBB) records.

    :param bboxes: list of dicts, each holding 'original_bbox' (the four
        corner coordinates keyed '[x1][y1]'..'[x4][y4]') plus optional
        'class_id' / 'class_name'
    :return: list of dicts with 'processed_bbox' = (cx, cy, w, h, angle),
        the untouched 'original_bbox', and the class metadata
    """
    processed = []

    for entry in bboxes:
        corners = entry["original_bbox"]

        # The four corner points as a float32 array for cv2.
        pts = np.array(
            [
                corners["[x1][y1]"],
                corners["[x2][y2]"],
                corners["[x3][y3]"],
                corners["[x4][y4]"],
            ],
            dtype=np.float32,
        )

        # Fit the minimum-area rotated rectangle around the corners.
        (cx, cy), (w, h), angle = cv2.minAreaRect(pts)

        # Normalize the angle into the (-45, 45] band.
        if angle < -45:
            angle += 90

        processed.append({
            "processed_bbox": (cx, cy, w, h, angle),  # rotated-rect parameters
            "original_bbox": corners,                 # original corner dict
            "class_id": entry.get("class_id", -1),
            "class_name": entry.get("class_name", "Unknown"),
        })

    return processed






def rotate_to_bbox(cx, cy, w, h, angle):
    """Convert a rotated rectangle to its four integer corner points.

    :param cx: center x
    :param cy: center y
    :param w: width
    :param h: height
    :param angle: rotation angle in degrees
    :return: 4x2 array of integer corner coordinates
    """
    corners = cv2.boxPoints(((cx, cy), (w, h), angle))
    return np.int32(corners)


# Similarity predicate for matched OBB pairs.
def is_similar_box(box1, box2, distance_threshold=250, angle_threshold=25, area_threshold=0.30):
    """Decide whether two OBB records describe (nearly) the same object.

    Two boxes are similar when their centers, rotation angles and areas
    differ less than the given thresholds AND they share the same class.

    :param box1: OBB record with 'processed_bbox' = (cx, cy, w, h, angle) and 'class_id'
    :param box2: OBB record in the same format
    :param distance_threshold: maximum allowed center distance (pixels)
    :param angle_threshold: maximum allowed angle difference (degrees)
    :param area_threshold: maximum allowed relative area difference
    :return: True when all criteria hold, otherwise False
    """
    cx1, cy1, w1, h1, angle1 = box1['processed_bbox']
    cx2, cy2, w2, h2, angle2 = box2['processed_bbox']

    # Euclidean distance between the two rectangle centers.
    center_gap = np.sqrt((cx1 - cx2) ** 2 + (cy1 - cy2) ** 2)

    # Angle difference, folded into the 0-45 degree range.
    rotation_gap = abs(angle1 - angle2)
    if rotation_gap > 45:
        rotation_gap = 90 - rotation_gap

    # Relative area difference (w*h approximates the rectangle area).
    size1 = w1 * h1
    size2 = w2 * h2
    size_gap = abs(size1 - size2) / max(size1, size2)

    # All geometric criteria must hold and the classes must agree.
    return (center_gap < distance_threshold and
            rotation_gap < angle_threshold and
            size_gap < area_threshold and
            box1['class_id'] == box2['class_id'])


@app.route('/compare', methods=['POST'])
@cross_origin(supports_credentials=True)
def compare():
    """POST /compare — diff two detection-result JSON files.

    Request JSON: 'json_path1', 'json_path2' (detection results produced by
    /predict) and optional 'output_path' (default "compare_results.json").
    For every ortho photo in the first result's folder, the GPS-closest
    photo in the second folder is found, the pair is aligned with SIFT +
    homography, both sets of OBB boxes are shifted into the overlap region
    and matched. Boxes only present in file 1 are reported as removed,
    boxes only in file 2 as added.

    BUG FIXES vs. the original:
    - @app.route is now the outermost decorator so @cross_origin actually
      wraps the registered view.
    - The suffix filter compared a lower-cased filename against 'V.jpg' /
      'V.png' (capital V), which could never match; all suffixes are now
      lower-case, consistent with find_closest_image.
    """
    try:
        json_file1_path = request.json.get('json_path1')
        json_file2_path = request.json.get('json_path2')
        output_path = request.json.get('output_path', "compare_results.json")

        # Both input paths are mandatory and must exist.
        if not json_file1_path or not json_file2_path:
            return jsonify({"code": 400, "message": "Both JSON file paths are required."}), 400
        if not os.path.exists(json_file1_path) or not os.path.exists(json_file2_path):
            return jsonify({"code": 400, "message": "One or both JSON files do not exist."}), 400

        with open(json_file1_path, 'r', encoding='utf-8') as f1:
            data1 = json.load(f1)
        with open(json_file2_path, 'r', encoding='utf-8') as f2:
            data2 = json.load(f2)

        # The image folders are recorded inside the result files themselves.
        folder1 = data1["results"][0]["file_path"]
        folder2 = data2["results"][0]["file_path"]
        if not os.path.exists(folder1) or not os.path.exists(folder2):
            return jsonify({"code": 400, "message": "One or both folders do not exist."}), 400

        results = []

        for filename in os.listdir(folder1):
            # Only process ortho photos (…v.jpg / …v.jpeg / …v.png).
            if not filename.lower().endswith(('v.jpg', 'v.jpeg', 'v.png')):
                continue
            file_path1 = os.path.join(folder1, filename)
            gps1 = get_gps_from_image(file_path1)
            if not gps1:
                continue

            # Find the GPS-closest counterpart in folder2.
            matched_file = find_closest_image(gps1, folder2)
            if not matched_file:
                continue
            filename2 = os.path.basename(matched_file)

            # Collect the detection records belonging to each image.
            boxes1 = []
            boxes2 = []
            for result in data1['results']:
                boxes1.extend([r for r in result['correct_results']
                               if os.path.basename(r['image_name']) == filename])
            for result in data2['results']:
                boxes2.extend([r for r in result['correct_results']
                               if os.path.basename(r['image_name']) == filename2])
            if not (boxes1 or boxes2):
                continue

            print(f"Processing boxes for (unknown) and {filename2}")
            img1 = cv2.imread(file_path1)
            img2 = cv2.imread(matched_file)
            if img1 is None or img2 is None:
                print("无法加载图片")
                continue

            # Align the two images and derive their overlap rectangle.
            kp1, kp2, good_matches = sift_feature_matching(img1, img2)
            overlap_rect = find_overlap_region(img1, img2, kp1, kp2, good_matches)
            if overlap_rect is None:
                print(f"匹配点数量不足，跳过此对图片: (unknown) 和 {filename2}")
                continue
            print(f"Overlap Rect 1: {overlap_rect}")

            added_features = []
            removed_features = []

            # Shift every box into the overlap region's coordinate frame,
            # keeping the original corners and class metadata alongside.
            transformed_boxes1 = [
                {
                    "original_bbox": box['datas']['bbox'],
                    "transformed_bbox": transform_bbox_coords(
                        box['datas']['bbox'], overlap_rect[0], overlap_rect[1]),
                    "class_id": box['datas']['class_id'],
                    "class_name": box['datas']['class_name'],
                }
                for box in boxes1
            ]
            transformed_boxes2 = [
                {
                    "original_bbox": box['datas']['bbox'],
                    "transformed_bbox": transform_bbox_coords(
                        box['datas']['bbox'], overlap_rect[0], overlap_rect[1]),
                    "class_id": box['datas']['class_id'],
                    "class_name": box['datas']['class_name'],
                }
                for box in boxes2
            ]

            # Convert to rotated-rectangle form for similarity matching.
            obb_boxes1 = extract_obb_boxes(transformed_boxes1)
            obb_boxes2 = extract_obb_boxes(transformed_boxes2)

            # Greedy one-to-one matching; any box1 without a partner is
            # "removed", any unmatched box2 is "added".
            matched_indices2 = set()
            for i, box1 in enumerate(obb_boxes1):
                matched = False
                for j, box2 in enumerate(obb_boxes2):
                    if j not in matched_indices2 and is_similar_box(box1, box2):
                        matched_indices2.add(j)
                        matched = True
                        break
                if not matched:
                    removed_features.append({
                        'area': boxes1[i]['datas']['area'],
                        'bbox': box1['original_bbox'],
                        'class_id': boxes1[i]['datas']['class_id'],
                        'class_name': boxes1[i]['datas']['class_name']
                    })
            print(f"未变化: {len(matched_indices2)}")

            for j, box2 in enumerate(obb_boxes2):
                if j not in matched_indices2:
                    added_features.append({
                        'area': boxes2[j]['datas']['area'],
                        'bbox': box2['original_bbox'],
                        'class_id': boxes2[j]['datas']['class_id'],
                        'class_name': boxes2[j]['datas']['class_name']
                    })

            # Only record image pairs that actually changed.
            if added_features or removed_features:
                results.append({
                    "image_name1": filename,
                    "image_name2": filename2,
                    "added_features": added_features,
                    "removed_features": removed_features
                })
                print(f" 新增: {len(added_features)}")
                print(f" 减少: {len(removed_features)}")

        # Persist the full comparison once, after all pairs were processed.
        with open(output_path, 'w', encoding='utf-8') as f:
            f.write(json.dumps({"code": 200, "data": {"results": results}}, separators=(',', ':')))

        return jsonify({"code": 200, "data": {"results": results}})

    except Exception as e:
        import traceback
        traceback.print_exc()  # full stack trace for server-side debugging
        return jsonify({"code": 500, "message": str(e), "datas": ""})






def transform_bbox_coords(bbox, overlap_x1, overlap_y1):
    """Shift OBB corner coordinates into the overlap region's frame.

    :param bbox: dict of corner key ('[x1][y1]' … '[x4][y4]') -> [x, y]
        in full-image coordinates
    :param overlap_x1: overlap region's top-left x coordinate
    :param overlap_y1: overlap region's top-left y coordinate
    :return: dict with the same keys, coordinates relative to the overlap origin
    """
    # Subtract the overlap origin from every corner.
    return {
        corner: [x - overlap_x1, y - overlap_y1]
        for corner, (x, y) in bbox.items()
    }

# Inverse of transform_bbox_coords: map cropped coordinates back.
def transform_to_original_coords(cropped_bbox, crop_x1, crop_y1):
    """Map cropped-region OBB corner coordinates back to the full image.

    :param cropped_bbox: dict of corner key -> [x, y] relative to the crop
    :param crop_x1: crop region's top-left x coordinate
    :param crop_y1: crop region's top-left y coordinate
    :return: dict with the same keys, coordinates in full-image space
    """
    # Add the crop origin back to every corner.
    return {
        corner: [x + crop_x1, y + crop_y1]
        for corner, (x, y) in cropped_bbox.items()
    }

def get_gps_from_image(image_path):
    """Extract (latitude, longitude) from a photo's EXIF GPS tags.

    :param image_path: path to the image file
    :return: (lat, lon) in decimal degrees with S/W negated, or None when
        the image has no GPS data or cannot be read.
    """
    try:
        with open(image_path, 'rb') as fh:
            exif_img = ExifImage(fh)
            has_gps = (exif_img.has_exif
                       and hasattr(exif_img, 'gps_latitude')
                       and hasattr(exif_img, 'gps_longitude'))
            if has_gps:
                lat = convert_to_degrees(exif_img.gps_latitude)
                lon = convert_to_degrees(exif_img.gps_longitude)

                # Southern/western hemispheres are encoded as negative values.
                if getattr(exif_img, 'gps_latitude_ref', None) == "S":
                    lat = -lat
                if getattr(exif_img, 'gps_longitude_ref', None) == "W":
                    lon = -lon

                return lat, lon
    except Exception as e:
        # Best-effort: a broken/missing file just means "no GPS".
        print(f"Error reading GPS from {image_path}: {e}")
    return None
def convert_to_degrees(value):
    """Convert a (degrees, minutes, seconds) triple to decimal degrees."""
    degrees, minutes, seconds = value
    return degrees + minutes / 60.0 + seconds / 3600.0


def find_closest_image(target_coords, folder_path, threshold=0.0002):
    """Find the image in folder_path whose EXIF GPS is closest to target_coords.

    Only files ending in v.jpg / v.jpeg / v.png (case-insensitive) are
    considered, and a candidate must be within `threshold` degrees to win.

    :param target_coords: (lat, lon) to match against
    :param folder_path: directory to scan
    :param threshold: maximum accepted coordinate distance (degrees)
    :return: full path of the best match, or None when nothing qualifies
    """
    closest_image = None
    min_distance = float('inf')

    for filename in os.listdir(folder_path):
        if not filename.lower().endswith(('v.jpg', 'v.jpeg', 'v.png')):
            continue
        file_path = os.path.join(folder_path, filename)
        coords = get_gps_from_image(file_path)
        if not coords:
            continue
        distance = calculate_distance(target_coords, coords)

        # Keep the nearest candidate that is also within the threshold.
        if distance < min_distance and distance <= threshold:
            min_distance = distance
            closest_image = file_path

    if closest_image is None:
        # BUG FIX: the original message referenced the loop variable
        # `file_path`, which is unbound (NameError) when the folder holds
        # no eligible images; report the folder being scanned instead.
        print(f"没有找到与目标 GPS 坐标匹配的图片 目录: {folder_path}(阈值: {threshold})")

    return closest_image


def calculate_distance(coord1, coord2):
    """Euclidean distance between two (lat, lon) GPS coordinate pairs."""
    d_lat = coord1[0] - coord2[0]
    d_lon = coord1[1] - coord2[1]
    return np.sqrt(d_lat ** 2 + d_lon ** 2)


if __name__ == '__main__':
    # Re-imported locally; pywsgi is also imported at the top of the file.
    from gevent import pywsgi

    # Serve the Flask app with gevent's WSGI server on all interfaces, port 5001.
    server = pywsgi.WSGIServer(('0.0.0.0', 5001), app)
    print('服务器已创建，正在监听端口 5001...', flush=True)
    server.serve_forever()


