from requests.auth import parse_dict_header
from transformers import pipeline
from threading import local
import torch
from transformers import AutoModel, Siglip2Processor,AutoProcessor
from transformers.image_utils import load_image
import numpy as np
from fastapi import FastAPI, UploadFile, File, Form, HTTPException,Body
from fastapi.responses import JSONResponse
import io
from io import BytesIO
from PIL import Image
import uvicorn
import requests
import os 


# Model: SigLIP 2, shape-optimized 400M-parameter variant, patch size 14,
# 384px input resolution.
model_name = "google/siglip2-so400m-patch14-384"

# Load model weights (float32, eval mode — inference only) and the paired
# SigLIP2 processor, which handles both image preprocessing and tokenization.
print(f"正在加载模型: {model_name}")
model = AutoModel.from_pretrained(model_name, torch_dtype=torch.float32).eval()
processor = Siglip2Processor.from_pretrained(model_name)
print(f"模型加载成功，词汇表大小: {model.config.text_config.vocab_size}")


# NOTE(review): despite the name, this is a local filesystem path, not a URL —
# it is opened directly with PIL below. Name kept for backward compatibility.
image_url = "/Users/kcross/Desktop/mepai/data/v/02.jpg"
texts = ["a chinese girl", "a english girl", "美女", "a man", "a dog", "a cat"]  # candidate captions

# Load and decode the image from disk.
image = Image.open(image_url)

# Inference: encode the image and all candidate texts, then compare.
with torch.no_grad():
    # Encode the image alone (text is not required for image features).
    image_inputs = processor(images=image, return_tensors="pt")
    image_features = model.get_image_features(**image_inputs)

    # Encode all captions in one batch. SigLIP-family tokenizers expect
    # padding="max_length" (fixed-length sequences, matching training);
    # without padding, variable-length captions cannot be stacked into a
    # single tensor and the processor call fails.
    text_inputs = processor(text=texts, padding="max_length", return_tensors="pt")
    text_features = model.get_text_features(**text_inputs)

    # L2-normalize both sides so the dot product below is exactly the
    # cosine similarity.
    image_embeds = image_features / image_features.norm(dim=-1, keepdim=True)
    text_embeds = text_features / text_features.norm(dim=-1, keepdim=True)

# image_embeds: [1, dim], text_embeds: [num_texts, dim] -> one score per text.
similarities = (image_embeds @ text_embeds.T).squeeze(0).tolist()

# Report each caption with its cosine similarity to the image.
print("文本相似度比较结果:")
print("=" * 50)
for text, similarity in zip(texts, similarities):
    print(f"{text:<35} {similarity:<10.6f}")