from fastapi import FastAPI, Form, HTTPException
import cv2
import numpy as np
from PIL import Image
import io
import base64

# FastAPI application instance; title/description feed the auto-generated OpenAPI docs.
app = FastAPI(title="图片匹配服务", description="基于OpenCV的模板匹配与特征匹配接口")

def image_to_array(img_bytes: bytes) -> np.ndarray:
    """Decode raw image bytes into a grayscale numpy array for OpenCV.

    The bytes are decoded with PIL and converted to single-channel
    luminance ("L") so that both matching paths operate on uniform
    2-D grayscale input.
    """
    buffer = io.BytesIO(img_bytes)
    grayscale = Image.open(buffer).convert("L")
    return np.array(grayscale)

def template_matching(main_img: np.ndarray, template_img: np.ndarray,
                      min_similarity: float = 0.5) -> dict:
    """Run normalized cross-correlation template matching.

    Args:
        main_img: Grayscale main image (2-D uint8 array).
        template_img: Grayscale template; must fit inside ``main_img``.
        min_similarity: Minimum TM_CCOEFF_NORMED score (default 0.5,
            matching the previous hard-coded threshold) below which the
            result is reported as not matched.

    Returns:
        dict with ``matched`` and ``similarity``; when matched, also a
        ``coordinates`` dict holding the four corners of the best match.

    Raises:
        HTTPException: 400 if the template is larger than the main image.
    """
    main_h, main_w = main_img.shape[:2]
    tpl_h, tpl_w = template_img.shape[:2]
    if tpl_h > main_h or tpl_w > main_w:
        raise HTTPException(status_code=400, detail="模板图尺寸不能大于主图")

    result = cv2.matchTemplate(main_img, template_img, cv2.TM_CCOEFF_NORMED)
    # Only the maximum matters for TM_CCOEFF_NORMED (best correlation).
    _, max_val, _, max_loc = cv2.minMaxLoc(result)
    similarity = float(max_val)

    if similarity < min_similarity:
        return {"matched": False, "similarity": similarity}

    top_left = max_loc
    bottom_right = (top_left[0] + tpl_w, top_left[1] + tpl_h)
    return {
        "matched": True,
        "similarity": similarity,
        "coordinates": {
            "top_left": top_left,
            "top_right": (bottom_right[0], top_left[1]),
            "bottom_right": bottom_right,
            "bottom_left": (top_left[0], bottom_right[1]),
        },
    }

def feature_matching(main_img: np.ndarray, template_img: np.ndarray,
                     min_similarity: float = 0.2) -> dict:
    """Match images with SIFT features and Lowe's ratio test.

    Args:
        main_img: Grayscale main image.
        template_img: Grayscale template image.
        min_similarity: Minimum ratio of good matches to keypoints
            (default 0.2, matching the previous hard-coded threshold).

    Returns:
        dict with ``matched`` flag and ``similarity`` score.
    """
    sift = cv2.SIFT_create()
    kp1, des1 = sift.detectAndCompute(main_img, None)
    kp2, des2 = sift.detectAndCompute(template_img, None)

    # No descriptors in either image -> nothing to match.
    if des1 is None or des2 is None:
        return {"matched": False, "similarity": 0}

    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)

    # Lowe's ratio test. knnMatch(k=2) can return fewer than 2 neighbors
    # per query when the train set is tiny, so guard the pair length —
    # the original `for m, n in matches` raised ValueError in that case.
    good_matches = [
        pair[0]
        for pair in matches
        if len(pair) == 2 and pair[0].distance < 0.75 * pair[1].distance
    ]

    # Normalize by the larger keypoint count so similarity stays in [0, 1].
    similarity = len(good_matches) / max(len(kp1), len(kp2))

    if similarity < min_similarity:
        return {"matched": False, "similarity": similarity}

    return {
        "matched": True,
        "similarity": similarity
    }

@app.post("/match/base64", description="通过Base64编码传入主图和子图，进行模板匹配和特征匹配")
async def match_images_base64(
    main_base64: str = Form(..., description="主图Base64数据（含Data URI前缀）"),
    template_base64: str = Form(..., description="子图Base64数据（含Data URI前缀）"),
    threshold: float = 0.8
):
    """Run both template and feature matching on Base64-encoded images.

    Accepts the main image and template as Base64 strings (with or
    without a ``data:image/...;base64,`` Data URI prefix) plus an
    optional similarity ``threshold`` applied to both matchers.
    Returns a JSON payload with both per-matcher results and a combined
    ``final_matched`` verdict.
    """
    try:
        def parse_base64(b64_str: str) -> bytes:
            # Strip the Data URI prefix (e.g. "data:image/png;base64,") if present.
            if "data:image/" in b64_str:
                b64_str = b64_str.split(",", 1)[1]
            return base64.b64decode(b64_str)

        main_bytes = parse_base64(main_base64)
        template_bytes = parse_base64(template_base64)
        main_array = image_to_array(main_bytes)
        template_array = image_to_array(template_bytes)

        template_result = template_matching(main_array, template_array)
        feature_result = feature_matching(main_array, template_array)

        # Final verdict: both matchers must match AND both similarities
        # must clear the caller-supplied threshold.
        final_matched = (
            template_result["matched"]
            and feature_result["matched"]
            and template_result["similarity"] >= threshold
            and feature_result["similarity"] >= threshold
        )

        return {
            "status": "success",
            "template_result": template_result,
            "feature_result": feature_result,
            "final_matched": final_matched,
            "threshold": threshold
        }

    except HTTPException:
        # Bug fix: deliberate HTTP errors (e.g. the 400 raised when the
        # template is larger than the main image) were previously swallowed
        # by the generic handler below and returned as a 200 "error" body.
        raise
    except Exception as e:
        return {"status": "error", "message": f"Base64解析或匹配失败: {str(e)}"}

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(
        # Import string "module:app" (rather than the app object) is what
        # uvicorn requires for reload/workers support.
        "match_http_server:app",  # assumes this file is named match_http_server.py
        host="0.0.0.0",
        port=8002,
        reload=False,  # set to True for hot reload during development
        # workers=4,  # enable as needed in production
    )