import torch
import torchvision.transforms as transforms
from PIL import Image
import numpy as np
import cv2
import os

if __name__ == '__main__':
    # Load a local YOLOv5s checkpoint for vehicle detection
    # (expects a yolov5 repo checkout one directory up).
    model = torch.hub.load('../yolov5'
                           , 'yolov5s'
                           , trust_repo=True
                           , source='local')

    pic1_path = 'PIC1.jpg'
    img = Image.open(pic1_path)
    results = model(img)
    # xyxy[0]: detections for the single input image,
    # each row = (x1, y1, x2, y2, confidence, class_id).
    predictions = results.xyxy[0].numpy()
    original_img = cv2.imread(pic1_path)
    img_h, img_w = original_img.shape[:2]
    vehicle_boxes = []
    vehicle_images = []
    os.makedirs('vehicle_crops', exist_ok=True)
    # COCO class ids for vehicles: 2=car, 3=motorcycle, 5=bus, 7=truck.
    vehicle_class_ids = {2, 3, 5, 7}
    for idx, (*box, conf, cls) in enumerate(predictions):
        if int(cls) not in vehicle_class_ids:
            continue
        x_min, y_min, x_max, y_max = map(int, box)
        # Clamp to image bounds: detector boxes can slightly overflow the frame,
        # which would otherwise produce empty crops that cv2.imwrite rejects.
        x_min, y_min = max(0, x_min), max(0, y_min)
        x_max, y_max = min(img_w, x_max), min(img_h, y_max)
        if x_max <= x_min or y_max <= y_min:
            continue  # degenerate box after clamping — nothing to crop
        vehicle_img = original_img[y_min:y_max, x_min:x_max]
        # Include the detection index so two boxes sharing the same top-left
        # corner cannot silently overwrite each other's crop file.
        crop_filename = f'vehicle_crops/vehicle_{idx}_{x_min}_{y_min}.jpg'
        cv2.imwrite(crop_filename, vehicle_img)
        vehicle_images.append(crop_filename)
        vehicle_boxes.append((x_min, y_min, x_max, y_max))
    print(f"检测到 {len(vehicle_images)} 辆车并保存了它们的图像。")

    # Pretrained ResNet-18 from torch hub, put into eval mode so batch-norm
    # and dropout behave deterministically at inference time.
    model = torch.hub.load('pytorch/vision:v0.10.0', 'resnet18', pretrained=True)
    model.eval()

    # Standard ImageNet inference preprocessing: resize the short side to 256,
    # center-crop to 224x224, convert to tensor, normalize with the usual
    # ImageNet per-channel statistics.
    imagenet_mean = [0.485, 0.456, 0.406]
    imagenet_std = [0.229, 0.224, 0.225]
    preprocess = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=imagenet_mean, std=imagenet_std),
    ])


    def extract_features(image_path):
        """Return the ResNet output vector for the image at *image_path*.

        Relies on the script-level ``preprocess`` pipeline and ``model``;
        the raw classification output is used as the feature embedding.
        """
        rgb_image = Image.open(image_path).convert("RGB")
        batch = preprocess(rgb_image).unsqueeze(0)  # add a batch dimension
        with torch.no_grad():  # inference only — skip autograd bookkeeping
            output = model(batch)
        return output.squeeze().numpy()


    # Query image: the specific vehicle we want to locate inside PIC1.
    pic2_path = 'PIC2.jpg'
    target_vehicle_feature = extract_features(pic2_path)
    # Hoisted out of the loop: the target's norm is loop-invariant.
    target_norm = np.linalg.norm(target_vehicle_feature)
    best_match_index = -1
    best_similarity = -1
    for i, vehicle_image in enumerate(vehicle_images):
        feature = extract_features(vehicle_image)
        denom = target_norm * np.linalg.norm(feature)
        if denom == 0:
            continue  # degenerate (all-zero) feature — cosine similarity undefined
        # Cosine similarity between the crop's features and the target's.
        similarity = np.dot(target_vehicle_feature, feature) / denom
        if similarity > best_similarity:
            best_similarity = similarity
            best_match_index = i
    # Single branch replaces the previously duplicated `!= -1` checks.
    if best_match_index != -1:
        print(f"最佳匹配车辆位于PIC1中的坐标: {vehicle_boxes[best_match_index]}")
        # Draw the matched box and a similarity label on a fresh copy of PIC1.
        x_min, y_min, x_max, y_max = vehicle_boxes[best_match_index]
        original_img = cv2.imread(pic1_path)
        color = (0, 255, 0)
        thickness = 2
        cv2.rectangle(original_img, (x_min, y_min), (x_max, y_max), color, thickness)
        label = f'Matched Vehicle {best_similarity:.2f}'
        font = cv2.FONT_HERSHEY_SIMPLEX
        font_scale = 0.5
        font_thickness = 1
        text_size = cv2.getTextSize(label, font, font_scale, font_thickness)[0]
        # Filled background rectangle behind the label, then the label text.
        cv2.rectangle(original_img, (x_min, y_min - 20), (x_min + text_size[0], y_min), color, -1)
        cv2.putText(original_img, label, (x_min, y_min - 5), font, font_scale, (255, 255, 255), font_thickness)
        # Save BEFORE showing: on a headless machine cv2.imshow raises, and
        # saving first guarantees the annotated result is still written out.
        cv2.imwrite('matched_vehicle.jpg', original_img)
        cv2.imshow('Detected and Matched Vehicle', original_img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    else:
        print("未找到匹配的车辆")
        print("未找到匹配的车辆，无法标记。")
