import numpy as np  # 导入 NumPy
import torch
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode

from BLIP.BLIP.models.blip_retrieval import blip_retrieval

# Run on the GPU when one is available; fall back to CPU otherwise.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


def load_image(image_path, image_size, device):
    """Load one image and preprocess it into a (1, 3, H, W) batch tensor.

    Args:
        image_path: Path to the image file on disk.
        image_size: Target side length; the image is resized to a square.
        device: torch.device the resulting tensor is moved to.

    Returns:
        A float tensor of shape (1, 3, image_size, image_size) on `device`.
    """
    transform = transforms.Compose([
        transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
        transforms.ToTensor(),
        # CLIP-style mean/std used by BLIP pretrained checkpoints.
        transforms.Normalize((0.48145466, 0.4578275, 0.40821073),
                             (0.26862954, 0.26130258, 0.27577711)),
    ])
    # PIL opens files lazily; use a context manager so the file handle is
    # closed instead of leaking. convert() forces the pixel data to load.
    with Image.open(image_path) as raw_image:
        rgb_image = raw_image.convert('RGB')
    return transform(rgb_image).unsqueeze(0).to(device)


def extract_image_features(model, image_paths, image_size, device):
    """Encode each image into an L2-normalized embedding.

    Args:
        model: BLIP retrieval model with `visual_encoder` and `vision_proj`.
        image_paths: Iterable of image file paths to encode.
        image_size: Square side length used for preprocessing.
        device: torch.device inference runs on.

    Returns:
        NumPy array of shape (len(image_paths), embed_dim), one row per image.
    """
    def _encode(path):
        # Preprocess, then project the [CLS] token and L2-normalize so a
        # plain dot product against text features is a cosine similarity.
        img = load_image(path, image_size, device)
        with torch.no_grad():
            tokens = model.visual_encoder(img)
            feat = torch.nn.functional.normalize(model.vision_proj(tokens[:, 0, :]), dim=-1)
        return feat.cpu().numpy()

    return np.concatenate([_encode(p) for p in image_paths], axis=0)


def extract_text_features(model, text, device):
    """Tokenize `text` and return its L2-normalized embedding as NumPy.

    Args:
        model: BLIP retrieval model exposing `tokenizer`, `text_encoder`
            and `text_proj`.
        text: Query string (or list of strings) to encode.
        device: torch.device inference runs on.

    Returns:
        NumPy array of shape (batch, embed_dim), rows L2-normalized.
    """
    with torch.no_grad():
        tokens = model.tokenizer(
            text,
            padding='max_length',
            truncation=True,
            max_length=35,
            return_tensors="pt",
        ).to(device)
        encoded = model.text_encoder(
            tokens.input_ids,
            attention_mask=tokens.attention_mask,
            return_dict=True,
            mode='text',
        )
        # Project the [CLS] embedding and L2-normalize it so dot products
        # against image features act as cosine similarities.
        cls_embedding = encoded.last_hidden_state[:, 0, :]
        text_feat = torch.nn.functional.normalize(model.text_proj(cls_embedding), dim=-1)
    return text_feat.cpu().numpy()


def calculate_similarity(text_feat, image_features):
    """Score text features against image features with a dot product.

    Both inputs are expected to be L2-normalized, so the dot product is
    the cosine similarity.

    Args:
        text_feat: NumPy array of shape (n_queries, embed_dim).
        image_features: NumPy array of shape (n_images, embed_dim).

    Returns:
        NumPy array of shape (n_queries, n_images) of similarity scores.
    """
    return text_feat @ image_features.T


def retrieve_images(model, query_text, image_paths, image_size, device, top_k=1):
    """Return the `top_k` image paths most similar to `query_text`.

    Args:
        model: BLIP retrieval model.
        query_text: Natural-language query string.
        image_paths: Candidate image file paths (the gallery).
        image_size: Square side length used for image preprocessing.
        device: torch.device inference runs on.
        top_k: Number of best-matching paths to return.

    Returns:
        List of up to `top_k` paths, ordered from most to least similar.
    """
    # Build the gallery index, encode the query, then score the two.
    gallery_feats = extract_image_features(model, image_paths, image_size, device)
    query_feat = extract_text_features(model, query_text, device)
    scores = calculate_similarity(query_feat, gallery_feats)[0]

    # Rank gallery indices by descending similarity and keep the best top_k.
    ranking = np.argsort(scores)[::-1][:top_k]
    return [image_paths[i] for i in ranking]


def main():
    """Load a BLIP retrieval checkpoint and run a demo text->image query."""
    image_size = 384
    # Path (or URL) of the pretrained BLIP retrieval checkpoint; replace
    # with your own location.
    checkpoint = '/Volumes/For_Mac/Download/model_base_retrieval_coco.pth'
    model = blip_retrieval(pretrained=checkpoint, image_size=image_size, vit='base')
    model.eval()
    model = model.to(device)

    # Example gallery and query; replace with your own data.
    image_paths = ['image_test.jpg', 'image_test-1.png', 'img_test2.jpeg']
    query_text = "A woman is playing the piano"

    retrieved_images = retrieve_images(model, query_text, image_paths, image_size, device)

    print(f"Query: {query_text}")
    print("Retrieved Images:")
    for image_path in retrieved_images:
        print(image_path)


# Guard the demo so importing this module does not trigger a model load
# and inference as a side effect.
if __name__ == "__main__":
    main()
