import torch
from PIL import Image
import cv2
import numpy as np
import os
import shutil
import tempfile
import torchvision.transforms as transforms
import torchvision.models as models

# Load a YOLOv5s detector from a local clone of the yolov5 repository
# ('source=local' avoids a GitHub download; trust_repo silences the prompt).
model = torch.hub.load('../yolov5'
                       , 'yolov5s'
                       , trust_repo=True
                       , source='local')

# resnet_model = torch.hub.load('pytorch/vision:v0.10.0', 'resnet18', pretrained=True)
# ResNet-18 serves as the appearance feature extractor for vehicle matching.
# NOTE(review): `pretrained=True` is deprecated in recent torchvision;
# `weights=models.ResNet18_Weights.DEFAULT` is the modern equivalent — confirm
# the installed torchvision version before switching.
resnet_model = models.resnet18(pretrained=True)
# Persists the full model object (not just its state_dict) to disk.
torch.save(resnet_model, 'resnet18.pth')

# Inference mode: freezes batch-norm statistics and disables dropout.
resnet_model.eval()

# Standard preprocessing pipeline for the ResNet feature extractor:
# resize, center-crop to 224x224, convert to tensor, then normalize.
# The mean/std values match the widely used ImageNet statistics.
preprocess = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])


def extract_features(image_path):
    """Return the ResNet-18 output for the image at *image_path*.

    The image is loaded, forced to RGB, run through the module-level
    ``preprocess`` pipeline, and pushed through ``resnet_model`` with
    gradients disabled.  The result is squeezed to a 1-D numpy array.
    """
    image = Image.open(image_path).convert("RGB")
    batch = preprocess(image).unsqueeze(0)  # add the batch dimension

    with torch.no_grad():
        features = resnet_model(batch)

    return features.squeeze().numpy()


# Reference photo of the vehicle to track; its feature vector is compared
# against every detected vehicle crop in the video.
# NOTE: this runs at import time and requires 'PIC3.jpg' to exist.
pic3_path = 'PIC3.jpg'
target_vehicle_feature = extract_features(pic3_path)


def detect_vehicles(frame, vehicle_classes=(2, 3, 5, 7)):
    """Detect vehicles in *frame* using the module-level YOLOv5 model.

    Parameters
    ----------
    frame : numpy.ndarray
        BGR image as returned by ``cv2.VideoCapture.read``.
    vehicle_classes : iterable of int, optional
        Class ids to keep.  The default (2, 3, 5, 7) corresponds to the
        COCO classes car, motorcycle, bus, truck.

    Returns
    -------
    list of tuple
        ``(x_min, y_min, x_max, y_max)`` integer boxes, one per kept
        detection.  Coordinates may fall slightly outside the frame.
    """
    wanted = set(vehicle_classes)  # O(1) membership instead of a list scan
    results = model(frame)
    predictions = results.xyxy[0].numpy()
    vehicle_boxes = []
    for *box, conf, cls in predictions:
        if int(cls) in wanted:
            x_min, y_min, x_max, y_max = map(int, box)
            vehicle_boxes.append((x_min, y_min, x_max, y_max))
    return vehicle_boxes


def find_best_match(frame, vehicle_boxes, temp_dir):
    """Return the index and cosine similarity of the box most similar to the target.

    Each box is cropped out of *frame*, written to *temp_dir* as a JPEG,
    re-read through ``extract_features``, and compared to the module-level
    ``target_vehicle_feature`` by cosine similarity.

    Parameters
    ----------
    frame : numpy.ndarray
        BGR image the boxes refer to.
    vehicle_boxes : list of tuple
        ``(x_min, y_min, x_max, y_max)`` boxes from ``detect_vehicles``.
    temp_dir : str
        Directory for the temporary crop files.

    Returns
    -------
    (int, float)
        Index of the best box (-1 if no valid box) and its similarity.
    """
    best_match_index = -1
    best_similarity = -1
    frame_h, frame_w = frame.shape[:2]
    target_norm = np.linalg.norm(target_vehicle_feature)  # loop-invariant
    for i, (x_min, y_min, x_max, y_max) in enumerate(vehicle_boxes):
        # Clamp to the frame: detector boxes can extend past the edges, and
        # negative indices would silently wrap around in numpy slicing.
        x_min = max(0, x_min)
        y_min = max(0, y_min)
        x_max = min(frame_w, x_max)
        y_max = min(frame_h, y_max)
        if x_max <= x_min or y_max <= y_min:
            continue  # degenerate crop — nothing to compare
        vehicle_img = frame[y_min:y_max, x_min:x_max]
        temp_filename = os.path.join(temp_dir, f'temp_vehicle_{x_min}_{y_min}.jpg')
        cv2.imwrite(temp_filename, vehicle_img)
        feature = extract_features(temp_filename)
        denom = target_norm * np.linalg.norm(feature)
        if denom == 0:
            continue  # zero-norm feature — cosine similarity undefined
        similarity = np.dot(target_vehicle_feature, feature) / denom
        if similarity > best_similarity:
            best_similarity = similarity
            best_match_index = i
    return best_match_index, best_similarity


if __name__ == '__main__':
    video_path = 'video.mp4'
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        raise SystemExit(f'Cannot open video: {video_path}')
    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    center_x = frame_width // 2
    center_y = frame_height // 2
    output_video_path = 'output_video.mp4'
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    # Use the source FPS so the output plays at the original speed;
    # some containers report 0 — fall back to 30 in that case.
    fps = cap.get(cv2.CAP_PROP_FPS) or 30.0
    out = cv2.VideoWriter(output_video_path, fourcc, fps, (frame_width, frame_height))
    temp_dir = tempfile.mkdtemp()
    detect_interval = 5  # run detection every N frames; reuse boxes in between
    frame_count = 0
    # Explicit initialization: between detection frames the last result is reused.
    vehicle_boxes = []
    best_match_index = -1
    best_similarity = -1
    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            frame_count += 1
            if frame_count % detect_interval == 0:
                vehicle_boxes = detect_vehicles(frame)
                best_match_index, best_similarity = find_best_match(frame, vehicle_boxes, temp_dir)
            if best_match_index != -1:
                x_min, y_min, x_max, y_max = vehicle_boxes[best_match_index]
                vehicle_center_x = (x_min + x_max) // 2
                vehicle_center_y = (y_min + y_max) // 2
                # Offset of the frame center from the matched vehicle's center.
                offset_x = center_x - vehicle_center_x
                offset_y = center_y - vehicle_center_y
                color = (0, 255, 0)
                thickness = 2
                cv2.rectangle(frame, (x_min, y_min), (x_max, y_max), color, thickness)
                label = f'Matched Vehicle {best_similarity:.2f}'
                font = cv2.FONT_HERSHEY_SIMPLEX
                font_scale = 0.5
                font_thickness = 2
                # Filled background behind the label so it stays readable.
                text_size = cv2.getTextSize(label, font, font_scale, font_thickness)[0]
                cv2.rectangle(frame, (x_min, y_min - 20), (x_min + text_size[0], y_min), color, -1)
                cv2.putText(frame, label, (x_min, y_min - 5), font, font_scale, (255, 255, 255), font_thickness)
                # NOTE(review): the unit "pm" in these labels looks like a typo
                # for "px" — confirm intent before changing the displayed text.
                if offset_x > 0:
                    x_direction = f"LEFT {abs(offset_x)} pm"
                else:
                    x_direction = f"RIGHT {abs(offset_x)} pm"

                if offset_y > 0:
                    y_direction = f"UP {abs(offset_y)} pm"
                else:
                    y_direction = f"DOWN {abs(offset_y)} pm"
                camera_pose_text = f'{x_direction}, {y_direction}'

                text_size, _ = cv2.getTextSize(camera_pose_text, font, font_scale, font_thickness)
                text_x = (frame_width - text_size[0]) // 2
                text_y = 30
                font_color = (255, 0, 0)
                cv2.putText(frame, camera_pose_text, (text_x, text_y), font, font_scale, font_color, font_thickness)
            out.write(frame)
    finally:
        # Release resources and delete the crop directory even on error.
        cap.release()
        out.release()
        cv2.destroyAllWindows()
        shutil.rmtree(temp_dir, ignore_errors=True)
