import cv2
import numpy as np
import os
import time

# Optional deep-learning imports - skipped gracefully if TensorFlow is not installed
try:
    from tensorflow.keras.applications import VGG16
    from tensorflow.keras.preprocessing import image
    from tensorflow.keras.applications.vgg16 import preprocess_input
    from sklearn.metrics.pairwise import cosine_similarity
    DEEP_LEARNING_AVAILABLE = True
except ImportError:
    DEEP_LEARNING_AVAILABLE = False
    print("TensorFlow或sklearn未安装，深度学习功能不可用")

class AdvancedImageMatcher:
    """High-accuracy image matching via several complementary strategies.

    Provides classical feature matching (SIFT/ORB), deep CNN feature
    similarity (VGG16, only when TensorFlow is installed) and multi-scale
    template matching.  All image arguments are OpenCV-style numpy arrays
    (BGR color or single-channel grayscale).
    """

    def __init__(self):
        # VGG16 backbone used by deep_feature_matching(); stays None when
        # TensorFlow is unavailable or the ImageNet weights cannot be loaded.
        self.vgg_model = None
        if DEEP_LEARNING_AVAILABLE:
            try:
                self.vgg_model = VGG16(weights='imagenet', include_top=False, pooling='avg')
            except Exception:
                # Weight download / backend initialization can fail at
                # runtime; degrade gracefully instead of crashing the caller.
                # (Was a bare `except:`, which would also swallow
                # KeyboardInterrupt/SystemExit.)
                self.vgg_model = None

    def sift_matching(self, img1, img2, threshold=0.75):
        """Locate img1 inside img2 using SIFT features (high accuracy).

        Args:
            img1: template image (BGR or grayscale numpy array).
            img2: scene image to search in.
            threshold: Lowe's ratio-test threshold.

        Returns:
            (center, match_rate, good_matches): center is the [x, y] integer
            center of the template projected into img2 (None when fewer than
            4 good matches exist or no homography was found); match_rate is
            len(good_matches) / min(#keypoints); good_matches is the list of
            cv2.DMatch objects that passed the ratio test.
        """
        # Work on grayscale; SIFT ignores color information anyway.
        gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY) if len(img1.shape) == 3 else img1
        gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY) if len(img2.shape) == 3 else img2

        sift = cv2.SIFT_create()
        kp1, des1 = sift.detectAndCompute(gray1, None)
        kp2, des2 = sift.detectAndCompute(gray2, None)

        if des1 is None or des2 is None:
            return None, 0, []

        # FLANN (KD-tree) matcher: fast approximate nearest neighbours.
        FLANN_INDEX_KDTREE = 1
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)
        flann = cv2.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(des1, des2, k=2)

        # Lowe's ratio test: keep a match only when it is clearly better
        # than the second-best candidate.
        good_matches = []
        for match_pair in matches:
            if len(match_pair) == 2:
                m, n = match_pair
                if m.distance < threshold * n.distance:
                    good_matches.append(m)

        denom = min(len(kp1), len(kp2))
        match_rate = len(good_matches) / denom if denom > 0 else 0

        # With at least 4 correspondences a homography can be estimated;
        # project the template corners into img2 and use their mean as the
        # match center.
        center = None
        if len(good_matches) >= 4:
            src_pts = np.float32([kp1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
            dst_pts = np.float32([kp2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)

            M, _mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
            if M is not None:
                h, w = gray1.shape
                pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
                dst = cv2.perspectiveTransform(pts, M)
                center = np.mean(dst, axis=0)[0].astype(int)

        return center, match_rate, good_matches

    def orb_matching(self, img1, img2, threshold=0.75):
        """Score img1 against img2 using ORB features (fast, decent accuracy).

        Args:
            img1: template image (BGR or grayscale numpy array).
            img2: scene image.
            threshold: scaled by 100 to form the maximum accepted Hamming
                distance for a "good" match.

        Returns:
            (None, match_rate): this method only produces a similarity score,
            no location; match_rate is good matches / min(#keypoints), or
            (None, 0) when either image yields no descriptors.
        """
        gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY) if len(img1.shape) == 3 else img1
        gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY) if len(img2.shape) == 3 else img2

        orb = cv2.ORB_create(nfeatures=1000)
        kp1, des1 = orb.detectAndCompute(gray1, None)
        kp2, des2 = orb.detectAndCompute(gray2, None)

        if des1 is None or des2 is None:
            return None, 0

        # Brute-force Hamming matcher; crossCheck keeps mutual best matches.
        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
        matches = sorted(bf.match(des1, des2), key=lambda m: m.distance)

        # Heuristic: distances below threshold*100 count as good matches.
        good_matches = [m for m in matches if m.distance < threshold * 100]
        denom = min(len(kp1), len(kp2))
        match_rate = len(good_matches) / denom if denom > 0 else 0

        return None, match_rate

    def deep_feature_matching(self, img1, img2):
        """Compare whole images with VGG16 features (highest accuracy).

        Returns:
            (None, similarity): cosine similarity between the average-pooled
            VGG16 feature vectors of the two images, or (None, 0) when the
            deep-learning stack is unavailable.
        """
        if not DEEP_LEARNING_AVAILABLE or self.vgg_model is None:
            return None, 0

        def extract_features(img):
            # VGG16 expects 224x224 RGB input.  OpenCV images are BGR, so
            # convert before Keras' preprocess_input, which assumes RGB
            # (it performs the RGB->BGR flip itself in 'caffe' mode).
            img_resized = cv2.resize(img, (224, 224))
            if len(img_resized.shape) == 3:
                img_resized = cv2.cvtColor(img_resized, cv2.COLOR_BGR2RGB)
            img_array = image.img_to_array(img_resized)
            img_array = np.expand_dims(img_array, axis=0)
            img_array = preprocess_input(img_array)

            features = self.vgg_model.predict(img_array, verbose=0)
            return features.flatten()

        features1 = extract_features(img1)
        features2 = extract_features(img2)

        similarity = cosine_similarity([features1], [features2])[0][0]
        return None, similarity

    def multi_scale_template_matching(self, template, image, scales=None, threshold=0.8):
        """Template matching at several scales (robust to resized targets).

        Args:
            template: template image.
            image: scene image.
            scales: iterable of template scale factors (default 0.5-1.5).
            threshold: minimum TM_CCOEFF_NORMED score to accept a match.

        Returns:
            ([cx, cy], score) for the best match above threshold, otherwise
            (None, best_score).
        """
        if scales is None:
            scales = [0.5, 0.75, 1.0, 1.25, 1.5]

        template_gray = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY) if len(template.shape) == 3 else template
        image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) if len(image.shape) == 3 else image

        best_loc = None
        best_val = 0
        best_size = (0, 0)  # (width, height) of the winning scaled template

        for scale in scales:
            width = int(template_gray.shape[1] * scale)
            height = int(template_gray.shape[0] * scale)

            # Skip degenerate sizes and templates larger than the scene.
            if width < 10 or height < 10 or width > image_gray.shape[1] or height > image_gray.shape[0]:
                continue

            scaled_template = cv2.resize(template_gray, (width, height))
            result = cv2.matchTemplate(image_gray, scaled_template, cv2.TM_CCOEFF_NORMED)
            _, max_val, _, max_loc = cv2.minMaxLoc(result)

            if max_val > best_val:
                best_val = max_val
                best_loc = max_loc
                best_size = (width, height)

        if best_loc is not None and best_val > threshold:
            # Center of the matched region using the actual scaled template
            # size (avoids the 1px rounding drift of recomputing the size
            # from `shape * best_scale / 2`).
            center_x = best_loc[0] + best_size[0] // 2
            center_y = best_loc[1] + best_size[1] // 2
            return [center_x, center_y], best_val

        return None, best_val

def enhanced_find_image(template_path, frame, method='sift', threshold=0.75):
    """Locate a template image inside a frame with a selectable algorithm.

    Args:
        template_path: path to the template image file.
        frame: scene image (numpy array) to search in.
        method: 'sift', 'orb', 'deep', 'multi_scale'; any other value falls
            back to plain single-scale template matching.
        threshold: matching threshold forwarded to the chosen algorithm
            ('deep' ignores it).

    Returns:
        (center, confidence): center is [x, y] or None when no confident
        match exists (or the template cannot be read); confidence is the
        algorithm-specific score.
    """
    # Reuse one matcher across calls: constructing it may load the VGG16
    # ImageNet weights, which is far too expensive to repeat per lookup.
    matcher = getattr(enhanced_find_image, '_matcher', None)
    if matcher is None:
        matcher = enhanced_find_image._matcher = AdvancedImageMatcher()

    template = cv2.imread(template_path)
    if template is None:
        return None, 0

    if method == 'sift':
        center, confidence, _ = matcher.sift_matching(template, frame, threshold)
        return center, confidence
    if method == 'orb':
        return matcher.orb_matching(template, frame, threshold)
    if method == 'deep':
        return matcher.deep_feature_matching(template, frame)
    if method == 'multi_scale':
        return matcher.multi_scale_template_matching(template, frame, threshold=threshold)

    # Fallback: single-scale normalized cross-correlation template matching.
    template_gray = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY) if len(template.shape) == 3 else template
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) if len(frame.shape) == 3 else frame

    result = cv2.matchTemplate(frame_gray, template_gray, cv2.TM_CCOEFF_NORMED)
    _min_val, max_val, _min_loc, max_loc = cv2.minMaxLoc(result)

    if max_val > threshold:
        center_x = max_loc[0] + template_gray.shape[1] // 2
        center_y = max_loc[1] + template_gray.shape[0] // 2
        return [center_x, center_y], max_val

    return None, max_val

# Usage example
if __name__ == "__main__":
    # Compare the accuracy of the different matching algorithms on a pair
    # of local test images.
    template_path = "tmp/fish.png"
    frame_path = "tmp/app.jpg"

    if os.path.exists(template_path) and os.path.exists(frame_path):
        frame = cv2.imread(frame_path)

        for method in ('sift', 'orb', 'multi_scale'):
            center, confidence = enhanced_find_image(template_path, frame, method=method)
            print(f"{method.upper()}: 中心点={center}, 置信度={confidence:.3f}")
    else:
        # Report missing fixtures instead of exiting silently.
        print(f"Test images not found: {template_path}, {frame_path}")