from fastapi import FastAPI, UploadFile, File, Form, HTTPException
from PIL import Image
import torch
import numpy as np
from torchvision import models, transforms
from sentence_transformers import SentenceTransformer
from elasticsearch7 import Elasticsearch
import io

# ---------------------- Global configuration (adjust sizes here) ----------------------
TARGET_SIZE = (224, 224)  # Global target size; must match the preprocessing used by the indexing script
MODEL_IMAGE_DIMS = 2048    # ResNet50 pooled-feature dimension (fixed)
MODEL_TEXT_DIMS = 768      # Sentence-BERT output dimension (fixed)
INDEX = "trademarks"

# ---------------------- Service initialization ----------------------
app = FastAPI(title="Trademark Search API")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# ---------------------- Image preprocessing (driven entirely by the global size) ----------------------
def process_image(image_bytes):
    """Decode raw image bytes into a normalized, model-ready tensor.

    Pipeline: decode -> trim background margins -> aspect-preserving resize
    to fit TARGET_SIZE -> center-pad to exactly TARGET_SIZE -> ToTensor +
    ImageNet normalization.  Returns a float tensor of shape (1, 3, H, W)
    on `device`.  Propagates PIL's decode errors (callers wrap in HTTP 400).
    """
    # 1. Decode.  Keep RGBA (the alpha channel drives cropping and pasting
    # below); convert every other mode (L, P, CMYK, ...) to RGB so the
    # 3-channel normalization at the end always gets 3 channels.  The
    # original only converted 'L', which let 'P'/'CMYK' images crash later.
    image = Image.open(io.BytesIO(image_bytes))
    if image.mode not in ('RGB', 'RGBA'):
        image = image.convert('RGB')

    # 2. Trim background margins.  RGBA: keep pixels with alpha > 10.
    # NOTE(review): for opaque images the luminance test `> 10` trims *black*
    # margins, not white ones.  Kept as-is because query-side preprocessing
    # must stay identical to the index-time pipeline (see TARGET_SIZE
    # comment) — confirm against the ingestion script before changing.
    mask = image.getchannel('A') if image.mode == 'RGBA' else image.convert('L')
    mask_np = np.array(mask)
    non_bg = np.where(mask_np > 10)
    if non_bg[0].size:
        image = image.crop((non_bg[1].min(), non_bg[0].min(),
                            non_bg[1].max() + 1, non_bg[0].max() + 1))

    # 3. Scale so the longest edge matches the target's longest edge,
    # preserving aspect ratio.  Clamp to >= 1 px so extremely thin images
    # cannot round a dimension down to 0 (PIL raises on zero-size resize).
    target_max_edge = max(TARGET_SIZE)
    scale = target_max_edge / max(image.size)
    new_size = (max(1, int(image.size[0] * scale)),
                max(1, int(image.size[1] * scale)))
    image = image.resize(new_size, Image.Resampling.LANCZOS)

    # 4. Center-pad onto a TARGET_SIZE canvas (transparent for RGBA, white
    # for RGB), using the alpha channel as the paste mask when present.
    pad_left = (TARGET_SIZE[0] - image.size[0]) // 2
    pad_top = (TARGET_SIZE[1] - image.size[1]) // 2
    padded_image = Image.new(
        image.mode,
        TARGET_SIZE,
        (0, 0, 0, 0) if image.mode == 'RGBA' else (255, 255, 255)
    )
    padded_image.paste(
        image,
        (pad_left, pad_top),
        mask=image if image.mode == 'RGBA' else None
    )

    # 5. Bug fix: flatten any remaining alpha before tensor conversion.
    # ToTensor on an RGBA image yields 4 channels and the 3-channel
    # Normalize below would raise.  Composite onto white to match the
    # padding color used for RGB inputs.
    if padded_image.mode == 'RGBA':
        flattened = Image.new('RGB', TARGET_SIZE, (255, 255, 255))
        flattened.paste(padded_image, mask=padded_image.getchannel('A'))
        padded_image = flattened

    # 6. ToTensor + ImageNet mean/std normalization; add a batch dimension.
    tensor = transforms.ToTensor()(padded_image).unsqueeze(0).to(device)
    tensor = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(tensor)
    return tensor

# ---------------------- Model initialization ----------------------
# Image encoder: ResNet50 with the classification head removed so the model
# emits the 2048-d pooled feature vector (MODEL_IMAGE_DIMS) instead of
# 1000-d class logits.  Bug fix: the original kept the `fc` layer, so
# features were 1000-d and could never match the 2048-d index mapping.
image_model = models.resnet50(pretrained=True)
image_model.fc = torch.nn.Identity()
image_model = image_model.eval().to(device)
# Chinese Sentence-BERT used for trademark-name embeddings (768-d, MODEL_TEXT_DIMS).
text_model = SentenceTransformer('hfl/chinese-sentence-bert-wwm-ext')
es = Elasticsearch(["http://localhost:9200"])

# ---------------------- Feature extraction ----------------------
def get_image_feat(image_bytes):
    """Embed raw image bytes into an L2-normalized feature vector.

    Any failure (undecodable bytes, model error) surfaces as an HTTP 400.
    """
    try:
        with torch.no_grad():
            raw = image_model(process_image(image_bytes))
        vec = raw.squeeze().cpu().numpy()
        return vec / np.linalg.norm(vec)
    except Exception as exc:
        raise HTTPException(400, f"Image error: {str(exc)}")

def get_text_feat(text):
    """Embed a trademark name into an L2-normalized text feature vector.

    Any encoder failure surfaces as an HTTP 400.
    """
    try:
        embedding = text_model.encode(text)
        return embedding / np.linalg.norm(embedding)
    except Exception as exc:
        raise HTTPException(400, f"Text error: {str(exc)}")

# ---------------------- Query endpoint ----------------------
@app.post("/search")
async def search(
    image: UploadFile = File(None),
    name: str = Form(None),
    top_k: int = Form(10)
):
    """Search trademarks by image similarity, name similarity, or both.

    At least one of `image` / `name` must be provided, otherwise HTTP 400.
    Each provided modality appends a kNN clause on the corresponding
    dense-vector field; when both are given, hits must match both clauses.
    Returns {"count": N, "results": [...]} with per-hit ES scores.

    NOTE(review): a `knn` clause inside bool/must is not accepted by stock
    Elasticsearch 7.x (the imported client is elasticsearch7) — confirm the
    target cluster (e.g. OpenSearch, or ES 8.4+) supports this query shape.
    """
    if not image and not name:
        raise HTTPException(400, "Need image or name")

    query = {"bool": {"must": []}}

    # Image query: embed the uploaded file and add an image-vector kNN clause.
    if image:
        img_bytes = await image.read()
        feat = get_image_feat(img_bytes)
        query["bool"]["must"].append({
            "knn": {
                "field": "image_vector",
                "query_vector": feat.tolist(),
                "k": top_k,
                "num_candidates": 1000
            }
        })

    # Text query: embed the name and add a text-vector kNN clause.
    if name:
        feat = get_text_feat(name)
        query["bool"]["must"].append({
            "knn": {
                "field": "text_vector",
                "query_vector": feat.tolist(),
                "k": top_k,
                "num_candidates": 1000
            }
        })

    # Execute the search; only the listed _source fields come back.
    try:
        result = es.search(
            index=INDEX,
            body={
                "query": query,
                "_source": ["trademark_id", "name", "image_path","brand_info"],
                "size": top_k
            }
        )
    except Exception as e:
        raise HTTPException(500, f"Search error: {str(e)}")

    # Flatten hits: merge each document's _source with its relevance score.
    results = [
        {**hit["_source"], "score": hit["_score"]}
        for hit in result["hits"]["hits"]
    ]
    return {"count": len(results), "results": results}

# ---------------------- Service entry point ----------------------
if __name__ == "__main__":
    # Development entry point: serve on all interfaces, port 8000.
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)