import cv2
import requests
import numpy as np
import pytesseract
import json


# Source image URLs (placeholders — fill in before running).
# picUrl1: the large "scene" image to search inside.
# picUrl2: the smaller "template" image to locate within picUrl1.
picUrl1="https://"
picUrl2="https://"
# Path where the annotated result image is written on a successful match.
output_path_result='findresult.png'


def find_image(url1, url2, output_path):
    """Locate the image at *url2* inside the image at *url1*.

    Downloads both images, matches SIFT keypoints with a FLANN matcher,
    estimates a homography from image 2 into image 1's coordinate frame,
    and compares the warped image 2 against image 1. On a match (normalized
    correlation > 0.8) a green bounding rectangle is drawn on image 1 and
    the annotated image is written to *output_path*.

    Prints 'true' on a match and 'false' otherwise (kept for callers that
    parse stdout); also returns the corresponding bool.
    """
    # Download the images and decode them into BGR arrays.
    response1 = requests.get(url1)
    image1 = np.asarray(bytearray(response1.content), dtype="uint8")
    img1 = cv2.imdecode(image1, cv2.IMREAD_COLOR)
    response2 = requests.get(url2)
    image2 = np.asarray(bytearray(response2.content), dtype="uint8")
    img2 = cv2.imdecode(image2, cv2.IMREAD_COLOR)

    # Detect keypoints and descriptors with SIFT.
    # NOTE: SIFT moved from cv2.xfeatures2d into the main module in
    # OpenCV >= 4.4; cv2.xfeatures2d.SIFT_create() raises there.
    sift = cv2.SIFT_create()
    keypoints1, descriptors1 = sift.detectAndCompute(img1, None)
    keypoints2, descriptors2 = sift.detectAndCompute(img2, None)

    # Match descriptors with FLANN and keep matches that pass
    # Lowe's ratio test.
    matcher = cv2.FlannBasedMatcher()
    matches = matcher.knnMatch(descriptors1, descriptors2, k=2)
    good_matches = [m for m, n in matches if m.distance < 0.7 * n.distance]

    # findHomography needs at least 4 correspondences; bail out early
    # instead of crashing on images that barely match.
    if len(good_matches) < 4:
        print('false')
        return False

    # Coordinates of the surviving matches in each image.
    src_pts = np.float32([keypoints1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
    dst_pts = np.float32([keypoints2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)

    # Homography mapping image-2 coordinates into image-1 coordinates.
    M, mask = cv2.findHomography(dst_pts, src_pts, cv2.RANSAC, 5.0)
    if M is None:
        # RANSAC failed to find a consistent transform.
        print('false')
        return False

    # Warp image 2 into image 1's frame. The canvas must be image 1's
    # size: M maps into img1 coordinates, and matchTemplate below needs
    # both operands the same size (the original code used img2's size,
    # which fails whenever the two images differ in dimensions).
    h1, w1 = img1.shape[:2]
    transformed_img2 = cv2.warpPerspective(img2, M, (w1, h1))

    # Equal-size matchTemplate yields a single 1x1 normalized-correlation score.
    similarity = cv2.matchTemplate(img1, transformed_img2, cv2.TM_CCOEFF_NORMED)[0][0]

    # Match threshold 0.8 (an earlier comment said 0.7; the code always checked 0.8).
    if similarity > 0.8:
        # Project image 2's original corners through M to locate it in image 1.
        h2, w2 = img2.shape[:2]
        corners = np.float32([[0, 0, 1], [w2, h2, 1]])
        projected = (M @ corners.T).T
        projected = projected[:, :2] / projected[:, 2:3]  # de-homogenize
        top_left = (int(projected[0][0]), int(projected[0][1]))
        bottom_right = (int(projected[1][0]), int(projected[1][1]))
        cv2.rectangle(img1, top_left, bottom_right, (0, 255, 0), 2)

        cv2.imwrite(output_path, img1)
        print('true')
        return True

    print('false')
    return False

# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    find_image(picUrl1, picUrl2, output_path_result)
