import cv2
import numpy as np
import logging
from dataclasses import dataclass, asdict
from typing import List, Dict, Any, Optional, Union, Tuple
# 从项目根目录导入
from utils import convert_numpy_types

logger = logging.getLogger("template_match")


@dataclass
class BaseMatchResult:
    """Common result contract shared by all template-matching strategies.

    Attributes:
        success: whether a match was found.
        method: matching strategy identifier, one of
            'pixel_matching', 'feature_sift', 'feature_orb'.
        confidence: match confidence score.
        bbox_norm: YOLO-style normalized box [x_center, y_center, width, height].
    """
    success: bool
    method: str
    confidence: float
    bbox_norm: List[float]

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this result (including subclass fields) to a plain dict."""
        return asdict(self)


@dataclass
class SimpleMatchResult(BaseMatchResult):
    """Result of classic pixel-level (sliding-window) template matching.

    Adds no fields beyond BaseMatchResult; exists so callers can
    distinguish result types.
    """


@dataclass
class FeatureMatchResult(BaseMatchResult):
    """Result of feature-based (SIFT/ORB) template matching.

    Extends BaseMatchResult with the geometric transform recovered from
    the homography and the RANSAC match statistics.
    """
    # Per-axis scale of the template as found in the target image.
    scale_x: float = 1.0
    scale_y: float = 1.0
    # Rotation (degrees) recovered from the homography.
    angle: float = 0.0
    # RANSAC statistics; None when homography estimation was never reached
    # (annotations fixed: these fields default to None, so they are Optional).
    inliers: Optional[int] = None
    inlier_ratio: Optional[float] = None
    matches_count: Optional[int] = None


def feature_based_matching(img_data, template_data, min_match_count=8):
    """Feature-point template matching robust to rotation, scale and perspective.

    Args:
        img_data: encoded target image as raw bytes.
        template_data: encoded template image as raw bytes.
        min_match_count: minimum number of good matches required before a
            homography is estimated.

    Returns:
        FeatureMatchResult. On any failure, ``success`` is False and
        ``bbox_norm`` falls back to the degenerate box [0.5, 0.5, 0, 0].
    """
    # Decode both images from raw bytes.
    img = cv2.imdecode(np.frombuffer(img_data, np.uint8), cv2.IMREAD_COLOR)
    template = cv2.imdecode(np.frombuffer(
        template_data, np.uint8), cv2.IMREAD_COLOR)

    if img is None:
        logger.error("目标图像解码失败")
        return FeatureMatchResult(success=False, method='feature_based', confidence=0.0, bbox_norm=[0.5, 0.5, 0, 0])
    if template is None:
        logger.error("模板图像解码失败")
        return FeatureMatchResult(success=False, method='feature_based', confidence=0.0, bbox_norm=[0.5, 0.5, 0, 0])

    # Target image size, used later to normalize the bounding box.
    img_h, img_w = img.shape[:2]

    # Feature detection works on grayscale.
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    template_gray = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)

    # Prefer SIFT; fall back to ORB on builds without SIFT.
    # (was a bare `except:` — narrowed to the exceptions cv2 actually raises)
    try:
        detector = cv2.SIFT_create()
        feature_method = 'feature_sift'
    except (AttributeError, cv2.error):
        detector = cv2.ORB_create(nfeatures=2000)
        feature_method = 'feature_orb'

    # Detect keypoints and compute descriptors on both images.
    kp1, des1 = detector.detectAndCompute(template_gray, None)
    kp2, des2 = detector.detectAndCompute(img_gray, None)

    # detectAndCompute returns None descriptors when no features are found;
    # previously this crashed inside the matcher.
    if des1 is None or des2 is None or not kp1 or not kp2:
        logger.warning("feature detection produced no descriptors")
        return FeatureMatchResult(
            success=False,
            method=feature_method,
            confidence=0.0,
            bbox_norm=[0.5, 0.5, 0, 0],
            matches_count=0
        )

    # Pick the matcher appropriate for the descriptor type.
    if feature_method == 'feature_sift':
        # FLANN matcher for float descriptors (SIFT/SURF).
        FLANN_INDEX_KDTREE = 1
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)
        flann = cv2.FlannBasedMatcher(index_params, search_params)

        matches = flann.knnMatch(des1, des2, k=2)

        # Lowe ratio test. knnMatch can return fewer than 2 neighbors per
        # query, so guard the unpack (was `for m, n in matches`, which
        # raised ValueError on short tuples).
        good_matches = []
        for pair in matches:
            if len(pair) == 2 and pair[0].distance < 0.7 * pair[1].distance:
                good_matches.append(pair[0])
    else:
        # Brute-force Hamming matcher for binary descriptors (ORB/BRISK/AKAZE).
        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
        matches = bf.match(des1, des2)

        # Keep the 50 closest matches.
        matches = sorted(matches, key=lambda x: x.distance)
        good_matches = matches[:50]

    # Coarse match quality: good matches relative to the smaller keypoint set.
    match_confidence = len(good_matches) / max(1, min(len(kp1), len(kp2)))
    logger.info(f"找到 {len(good_matches)} 个特征匹配点, 匹配质量: {match_confidence:.4f}")

    if len(good_matches) >= min_match_count:
        # Gather matched point coordinates for homography estimation.
        src_pts = np.float32(
            [kp1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
        dst_pts = np.float32(
            [kp2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)

        H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

        # findHomography can fail and return (None, None); previously this
        # crashed on np.sum(mask) before the try block below.
        if H is not None and mask is not None:
            inliers = int(np.sum(mask))
            inlier_ratio = float(inliers / len(mask) if len(mask) > 0 else 0)
            logger.info(f"内点数量: {inliers}/{len(mask)}, 比率: {inlier_ratio:.4f}")

            try:
                # Project the template corners into the target image.
                h, w = template.shape[:2]
                pts = np.float32([[0, 0], [0, h-1], [w-1, h-1],
                                  [w-1, 0]]).reshape(-1, 1, 2)
                dst = cv2.perspectiveTransform(pts, H)

                corners_list = np.int32(dst).reshape(-1, 2).tolist()

                # Axis-aligned bounding box of the projected corners.
                x_coords = [pt[0] for pt in corners_list]
                y_coords = [pt[1] for pt in corners_list]
                x_min, y_min = min(x_coords), min(y_coords)
                x_max, y_max = max(x_coords), max(y_coords)

                center_x = (x_min + x_max) / 2
                center_y = (y_min + y_max) / 2
                width = x_max - x_min
                height = y_max - y_min

                # YOLO-style normalized box.
                bbox_norm = [
                    float(center_x / img_w),
                    float(center_y / img_h),
                    float(width / img_w),
                    float(height / img_h)
                ]

                # Scale and rotation recovered from the homography's
                # upper-left 2x2 block.
                scale_x = float(np.sqrt(H[0, 0]**2 + H[0, 1]**2))
                scale_y = float(np.sqrt(H[1, 0]**2 + H[1, 1]**2))
                angle_deg = float(np.arctan2(H[1, 0], H[0, 0]) * 180 / np.pi)

                return FeatureMatchResult(
                    success=True,
                    method=feature_method,
                    confidence=float(inlier_ratio),
                    bbox_norm=bbox_norm,
                    scale_x=scale_x,
                    scale_y=scale_y,
                    angle=angle_deg,
                    inliers=inliers,
                    inlier_ratio=inlier_ratio,
                    matches_count=len(good_matches)
                )

            except Exception as e:
                logger.error(f"变换计算失败: {str(e)}")
        else:
            logger.warning("homography estimation failed (H is None)")

    logger.warning(f"没有找到足够的匹配点 ({len(good_matches)}/{min_match_count})")

    # Feature matching failed; report the coarse confidence for diagnostics.
    return FeatureMatchResult(
        success=False,
        method=feature_method,
        confidence=float(match_confidence),
        bbox_norm=[0.5, 0.5, 0, 0],  # degenerate box on failure
        matches_count=len(good_matches)
    )


def simple_template_matching(img_data, template_data, method=cv2.TM_CCOEFF_NORMED):
    """Classic sliding-window template matching.

    Args:
        img_data: encoded target image as raw bytes.
        template_data: encoded template image as raw bytes.
        method: an OpenCV matchTemplate comparison mode.

    Returns:
        SimpleMatchResult with a YOLO-style normalized bounding box; on
        decode failure, ``success`` is False with box [0.5, 0.5, 0, 0].
    """
    # Decode both images from raw bytes.
    target = cv2.imdecode(np.frombuffer(img_data, np.uint8), cv2.IMREAD_COLOR)
    tmpl = cv2.imdecode(
        np.frombuffer(template_data, np.uint8), cv2.IMREAD_COLOR)

    if target is None:
        logger.error("目标图像解码失败")
        return SimpleMatchResult(success=False, method='pixel_matching', confidence=0.0, bbox_norm=[0.5, 0.5, 0, 0])
    if tmpl is None:
        logger.error("模板图像解码失败")
        return SimpleMatchResult(success=False, method='pixel_matching', confidence=0.0, bbox_norm=[0.5, 0.5, 0, 0])

    # Sizes for normalizing the output box.
    tgt_h, tgt_w = target.shape[:2]
    tpl_h, tpl_w = tmpl.shape[:2]

    # Slide the template over the target and locate the best score.
    score_map = cv2.matchTemplate(target, tmpl, method)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(score_map)

    if method in (cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED):
        # Squared-difference modes: smaller is better.
        (left, top), confidence = min_loc, 1 - min_val
    else:
        # All other modes: larger is better.
        (left, top), confidence = max_loc, max_val

    # YOLO-style normalized box [x_center, y_center, width, height].
    bbox_norm = [
        float((left + tpl_w / 2) / tgt_w),
        float((top + tpl_h / 2) / tgt_h),
        float(tpl_w / tgt_w),
        float(tpl_h / tgt_h)
    ]

    return SimpleMatchResult(
        success=True,
        method='pixel_matching',
        confidence=float(confidence),
        bbox_norm=bbox_norm
    )


def hybrid_matching(img_data, template_data, tm_threshold=0.8):
    """Hybrid strategy: try pixel matching first, fall back to SIFT.

    Args:
        img_data: encoded target image as raw bytes.
        template_data: encoded template image as raw bytes.
        tm_threshold: confidence above which pixel matching is accepted
            and the feature stage is skipped.

    Returns:
        A list of result dicts, one per attempted stage (pixel matching
        first, feature matching second when needed).
    """
    results = []

    # Stage 1: classic template matching.
    img = cv2.imdecode(np.frombuffer(img_data, np.uint8), cv2.IMREAD_COLOR)
    template = cv2.imdecode(np.frombuffer(
        template_data, np.uint8), cv2.IMREAD_COLOR)

    if img is None:
        logger.error("目标图像解码失败")
        results.append(SimpleMatchResult(
            success=False,
            method='pixel_matching',
            confidence=0.0,
            bbox_norm=[0.5, 0.5, 0, 0]
        ).to_dict())
        return results
    if template is None:
        logger.error("模板图像解码失败")
        results.append(SimpleMatchResult(
            success=False,
            method='pixel_matching',
            confidence=0.0,
            bbox_norm=[0.5, 0.5, 0, 0]
        ).to_dict())
        return results

    # Sizes for normalizing the pixel-matching box.
    img_h, img_w = img.shape[:2]
    template_h, template_w = template.shape[:2]

    logger.info(f"执行混合匹配，阈值: {tm_threshold}")

    # Pre-bind so the log line after the try/except cannot hit a NameError
    # when matchTemplate raises before max_val is assigned (previous bug).
    max_val = 0.0

    # Classic matching with normalized cross-correlation.
    try:
        result = cv2.matchTemplate(img, template, cv2.TM_CCOEFF_NORMED)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)

        logger.info(f"传统匹配置信度: {max_val:.4f}")

        # YOLO-style normalized box.
        x, y = max_loc
        w, h = template_w, template_h

        center_x = x + w / 2
        center_y = y + h / 2

        bbox_norm = [
            float(center_x / img_w),
            float(center_y / img_h),
            float(w / img_w),
            float(h / img_h)
        ]

        tm_result = SimpleMatchResult(
            success=max_val >= tm_threshold,
            method='pixel_matching',
            confidence=float(max_val),
            bbox_norm=bbox_norm
        )
        results.append(tm_result.to_dict())

        # Confident enough — skip the feature stage entirely.
        if max_val >= tm_threshold:
            return results

    except Exception as e:
        logger.error(f"传统匹配出错: {str(e)}")
        # Record the failed attempt with default values.
        results.append(SimpleMatchResult(
            success=False,
            method='pixel_matching',
            confidence=0.0,
            bbox_norm=[0.5, 0.5, 0, 0]
        ).to_dict())

    # Stage 2: SIFT feature matching.
    logger.info(f"传统匹配置信度不足 ({max_val:.4f} < {tm_threshold}), 启用SIFT...")
    sift_result = feature_based_matching(img_data, template_data)
    results.append(sift_result.to_dict())

    return results
