"""
改进的图像匹配模块 - 专为WoW自动钓鱼优化
提供多种高精度图像相似度算法
"""
import cv2
import numpy as np
import os
import time
from typing import Tuple, Optional, List

class WoWImageMatcher:
    """Image matcher tuned for WoW auto-fishing.

    Bundles several similarity algorithms -- multi-scale template matching,
    SIFT and ORB feature matching -- plus an adaptive strategy that falls
    back from the fastest method to the most accurate one.
    """

    def __init__(self):
        # Feature detectors are created once and reused across calls;
        # constructing them per call is expensive.
        self.sift = cv2.SIFT_create(nfeatures=500)
        self.orb = cv2.ORB_create(nfeatures=1000)

        # FLANN matcher (KD-tree index) for SIFT's float descriptors.
        FLANN_INDEX_KDTREE = 1
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)
        self.flann = cv2.FlannBasedMatcher(index_params, search_params)

        # Brute-force matcher with Hamming distance for ORB's binary
        # descriptors; crossCheck keeps only mutually-best matches.
        self.bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

    @staticmethod
    def _to_gray(img):
        """Return *img* as grayscale (3-channel BGR inputs are converted)."""
        return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) if len(img.shape) == 3 else img

    def enhanced_template_matching(self, template, image, scales=None, methods=None, threshold=0.8):
        """Multi-scale, multi-method template matching.

        Accuracy roughly 75-85%; the fastest of the three algorithms.

        Args:
            template: template image (BGR or grayscale ndarray).
            image: search image (BGR or grayscale ndarray).
            scales: template scale factors to try (default 0.8-1.2).
            methods: cv2.matchTemplate score methods to try.
            threshold: minimum score required to report a match.

        Returns:
            (center [x, y] or None, best score, info string).
        """
        if scales is None:
            scales = [0.8, 0.9, 1.0, 1.1, 1.2]
        if methods is None:
            methods = [cv2.TM_CCOEFF_NORMED, cv2.TM_CCORR_NORMED]

        template_gray = self._to_gray(template)
        image_gray = self._to_gray(image)

        best_match = None
        best_val = 0
        best_scale = 1.0
        # Track the resized template dims so the reported centre uses the
        # exact match size instead of re-rounding shape * scale.
        best_size = (template_gray.shape[1], template_gray.shape[0])

        for scale in scales:
            width = int(template_gray.shape[1] * scale)
            height = int(template_gray.shape[0] * scale)

            # Skip degenerate scales and templates larger than the image.
            if width < 10 or height < 10 or width > image_gray.shape[1] or height > image_gray.shape[0]:
                continue

            scaled_template = cv2.resize(template_gray, (width, height))

            for method in methods:
                try:
                    result = cv2.matchTemplate(image_gray, scaled_template, method)
                    _, max_val, _, max_loc = cv2.minMaxLoc(result)
                except cv2.error:
                    # Was a bare `except:`; only OpenCV errors are expected here.
                    continue

                if max_val > best_val:
                    best_val = max_val
                    best_match = max_loc
                    best_scale = scale
                    best_size = (width, height)

        if best_val > threshold:
            center_x = best_match[0] + best_size[0] // 2
            center_y = best_match[1] + best_size[1] // 2
            return [center_x, center_y], best_val, f"scale:{best_scale:.2f}"

        return None, best_val, "no_match"

    def sift_matching(self, template, image, ratio_threshold=0.7, min_matches=3):
        """SIFT feature matching -- high accuracy, medium cost.

        Accuracy roughly 85-95%; good for complex scenes.

        Args:
            ratio_threshold: Lowe's ratio-test cutoff.
            min_matches: minimum number of good matches to report a hit.

        Returns:
            (center [x, y] or None, match rate, info string).
        """
        template_gray = self._to_gray(template)
        image_gray = self._to_gray(image)

        kp1, des1 = self.sift.detectAndCompute(template_gray, None)
        kp2, des2 = self.sift.detectAndCompute(image_gray, None)

        # knnMatch with k=2 needs at least a handful of descriptors.
        if des1 is None or des2 is None or len(des1) < 4:
            return None, 0, "no_features"

        try:
            matches = self.flann.knnMatch(des1, des2, k=2)
        except cv2.error as e:
            print(f"❌ SIFT匹配失败: {e}")
            return None, 0, "match_failed"

        # Lowe's ratio test: keep a match only when it is clearly better
        # than the runner-up candidate.
        good_matches = []
        for match_pair in matches:
            if len(match_pair) == 2:
                m, n = match_pair
                if m.distance < ratio_threshold * n.distance:
                    good_matches.append(m)

        if len(good_matches) < min_matches:
            return None, len(good_matches) / len(kp1) if len(kp1) > 0 else 0, f"few_matches:{len(good_matches)}"

        # Fraction of template keypoints with a confident counterpart.
        match_rate = len(good_matches) / len(kp1)

        # With >= 4 matches, try a RANSAC homography: project the template
        # corners into the image and take their centroid.
        if len(good_matches) >= 4:
            src_pts = np.float32([kp1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
            dst_pts = np.float32([kp2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)

            try:
                M, _mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
                if M is not None:
                    h, w = template_gray.shape
                    pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
                    dst = cv2.perspectiveTransform(pts, M)
                    center = np.mean(dst, axis=0)[0].astype(int)
                    return center.tolist(), match_rate, f"sift_matches:{len(good_matches)}"
            except cv2.error:
                # Degenerate point configuration; fall through to the
                # average-of-matches estimate below.
                pass

        # No usable homography: average the matched keypoint positions.
        center_x = np.mean([kp2[m.trainIdx].pt[0] for m in good_matches])
        center_y = np.mean([kp2[m.trainIdx].pt[1] for m in good_matches])

        return [int(center_x), int(center_y)], match_rate, f"sift_matches:{len(good_matches)}"

    def orb_matching(self, template, image, max_features=1000, match_threshold=30):
        """ORB feature matching -- fastest option, suited to real-time use.

        Accuracy roughly 75-85%.

        Args:
            max_features: ORB feature budget.
            match_threshold: maximum Hamming distance for a "good" match.

        Returns:
            (center [x, y] or None, match rate, info string).
        """
        template_gray = self._to_gray(template)
        image_gray = self._to_gray(image)

        # Reuse the detector built in __init__ unless the caller asked for
        # a different feature budget (building one per call is wasteful).
        orb = self.orb if max_features == 1000 else cv2.ORB_create(nfeatures=max_features)
        kp1, des1 = orb.detectAndCompute(template_gray, None)
        kp2, des2 = orb.detectAndCompute(image_gray, None)

        if des1 is None or des2 is None:
            return None, 0, "no_features"

        # Brute-force Hamming matching, best matches first.
        matches = sorted(self.bf.match(des1, des2), key=lambda m: m.distance)

        # Keep only matches below the distance cutoff.
        good_matches = [m for m in matches if m.distance < match_threshold]

        if len(good_matches) < 5:
            return None, len(good_matches) / len(kp1) if len(kp1) > 0 else 0, f"few_matches:{len(good_matches)}"

        match_rate = len(good_matches) / len(kp1)

        # Target centre: mean position of the matched keypoints in the image.
        center_x = np.mean([kp2[m.trainIdx].pt[0] for m in good_matches])
        center_y = np.mean([kp2[m.trainIdx].pt[1] for m in good_matches])

        return [int(center_x), int(center_y)], match_rate, f"orb_matches:{len(good_matches)}"

    def adaptive_matching(self, template, image, confidence_threshold=0.7):
        """Try template -> ORB -> SIFT, returning the first confident hit."""
        # Fast path: multi-scale template matching with a stricter 0.8 bar.
        center, conf, info = self.enhanced_template_matching(template, image, threshold=confidence_threshold)
        if center is not None and conf > 0.8:
            return center, conf, f"template_{info}"

        # Template matching failed: try ORB (still fast).
        center, conf, info = self.orb_matching(template, image)
        if center is not None and conf > confidence_threshold:
            return center, conf, f"orb_{info}"

        # Last resort: SIFT (slowest, most accurate) with a slightly
        # lower bar since its match rate is computed differently.
        center, conf, info = self.sift_matching(template, image)
        if center is not None and conf > confidence_threshold * 0.8:
            return center, conf, f"sift_{info}"

        return None, 0, "all_failed"

def improved_find_image_from_image(template_path_or_image, frame, threshold=0.7, method='adaptive'):
    """Improved image lookup -- drop-in replacement for find_image_from_image.

    Args:
        template_path_or_image: template image path (str) or ndarray.
        frame: target image (ndarray).
        threshold: confidence threshold.
        method: matching method ('template', 'sift', 'orb', 'adaptive');
            any other value falls back to template matching.

    Returns:
        center: match centre coordinates [x, y] (or None on failure).
        confidence: match confidence.
        info: detail string describing how the match was found.
    """
    # Cache a single matcher on the function itself: constructing the
    # SIFT/FLANN/ORB detectors on every call is expensive and was the
    # hidden cost of the original per-call WoWImageMatcher().
    matcher = getattr(improved_find_image_from_image, "_matcher", None)
    if matcher is None:
        matcher = WoWImageMatcher()
        improved_find_image_from_image._matcher = matcher

    # Accept either a path or an already-loaded image.
    if isinstance(template_path_or_image, str):
        template = cv2.imread(template_path_or_image)
        if template is None:
            return None, 0, "template_not_found"
    else:
        template = template_path_or_image

    # Dispatch to the requested algorithm.
    if method == 'sift':
        return matcher.sift_matching(template, frame)
    if method == 'orb':
        return matcher.orb_matching(template, frame)
    if method == 'adaptive':
        return matcher.adaptive_matching(template, frame, threshold)
    # 'template' and any unrecognised value use enhanced template matching.
    return matcher.enhanced_template_matching(template, frame, threshold=threshold)

# Performance benchmarking helper
def benchmark_algorithms(template_path, image_path):
    """Benchmark every matching method on one template/image pair.

    Prints confidence, elapsed time and match centre for each method,
    annotates the frame, and shows it in an OpenCV window (blocks until
    a key is pressed).
    """
    if not os.path.exists(template_path) or not os.path.exists(image_path):
        print("测试文件不存在")
        return

    frame = cv2.imread(image_path)
    if frame is None:
        # The file exists but could not be decoded as an image.
        print("测试文件不存在")
        return

    methods = ['template', 'orb', 'sift', 'adaptive']

    print("图像匹配算法性能测试:")
    print("-" * 50)

    for method in methods:
        start_time = time.time()
        center, confidence, info = improved_find_image_from_image(template_path, frame, method=method)
        end_time = time.time()

        print(f"{method.upper():10} | 置信度: {confidence:.3f} | 耗时: {(end_time-start_time)*1000:.1f}ms | {info}")
        # Single check (the original tested `center` twice in a row):
        # annotate the frame and report the centre.
        if center:
            cv2.circle(frame, (center[0], center[1]), 5, (0, 255, 0), -1)
            cv2.putText(frame, f"{method}", (center[0] + 10, center[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1)
            print(f"         | 中心点: ({center[0]}, {center[1]})")
    cv2.imshow("Benchmark Results", frame)
    cv2.waitKey(0)
    print("-" * 50)

if __name__ == "__main__":
    # Example: benchmark all matching algorithms on the sample images.
    benchmark_algorithms("tmp/fishing.png", "tmp/app.png")