import os
import uuid
from typing import Optional

import cv2
import numpy as np
from insightface.app import FaceAnalysis
from qdrant_client import QdrantClient
from qdrant_client.models import CollectionStatus, Distance, PointStruct, VectorParams


class InsightfaceModel:
    """Detect faces with InsightFace and index/search their embeddings in Qdrant.

    Each detected face is represented by a 512-d embedding (InsightFace
    outputs L2-normalized vectors) stored in a Qdrant collection using
    cosine distance, together with the source image name, the face's index
    within the image, and its bounding box in original-image coordinates.
    """

    def __init__(self, model_name: str = "buffalo_l",
                 providers: Optional[list] = None, ctx_id: int = -1,
                 qdrant_host: str = "192.168.2.56", qdrant_port: int = 6333,
                 collection_name: str = "faces"):
        """Load the face model and connect to the Qdrant server.

        Args:
            model_name: InsightFace model pack name ("buffalo_l" is a good default).
            providers: ONNX Runtime execution providers; defaults to CPU only.
            ctx_id: Device context id (0 = use GPU if available, -1 = CPU).
            qdrant_host: Hostname of the Qdrant server.
            qdrant_port: Port of the Qdrant server.
            collection_name: Name of the Qdrant collection holding the faces.
        """
        self.model_name = model_name
        # NOTE: a literal list default (["CPUExecutionProvider"]) would be a
        # mutable default argument shared across every instance; use None as
        # the sentinel and build a fresh list per instance instead.
        self.providers = list(providers) if providers is not None else ["CPUExecutionProvider"]
        self.ctx_id = ctx_id
        self.qdrant_host = qdrant_host
        self.qdrant_port = qdrant_port
        self.collection_name = collection_name

        self.__init_model()
        self.__init_qdrant()

    def extract_embeddings_from_directory(self, image_folder: str):
        """Index every .jpg/.jpeg/.png image found directly in *image_folder*."""
        # sorted() makes the ingestion order deterministic across filesystems.
        for filename in sorted(os.listdir(image_folder)):
            if filename.lower().endswith((".jpg", ".jpeg", ".png")):
                self.extract_embeddings_from_file(os.path.join(image_folder, filename))

    def extract_embeddings_from_file(self, image_path: str):
        """Detect all faces in one image and upsert their embeddings into Qdrant.

        The image is downscaled to at most 640 px on its longest side before
        detection; bounding boxes are mapped back to original-image
        coordinates before being stored in the payload.
        """
        original_image = cv2.imread(image_path)
        if original_image is None:
            # cv2.imread silently returns None for missing/unreadable files
            # instead of raising — guard before touching .shape.
            print(f"Could not read image: {image_path}")
            return
        original_h, original_w = original_image.shape[:2]

        resized_image = self.__resize_image(original_image)
        resized_h, resized_w = resized_image.shape[:2]

        faces = self.face_app.get(resized_image)
        points = []

        for i, face in enumerate(faces):
            # 512-d vector, already L2-normalized by InsightFace
            embedding = face.embedding

            # (left, top, right, bottom) in resized-image coordinates
            bbox = face.bbox.astype(int).tolist()

            original_bbox = self.__rescale_bbox(
                bbox, (original_h, original_w), (resized_h, resized_w))

            # Unique ID for the Qdrant point
            point_id = str(uuid.uuid4())

            # Metadata stored alongside the vector
            payload = {
                "image_name": os.path.basename(image_path),
                "face_index": i,
                "bbox": original_bbox,
            }

            points.append(PointStruct(
                id=point_id,
                vector=embedding.tolist(),
                payload=payload,
            ))

        # Upload all faces from this image in a single request
        if points:
            self.qdrant.upsert(
                collection_name=self.collection_name, points=points)
            print(f"Uploaded {len(points)} faces from {image_path}")
        else:
            print(f"No faces found in {image_path}")

    def search_face(self, image_path: str, top_k: int = 5, score_threshold: float = 0.4):
        """Search the collection for faces similar to each face in *image_path*.

        Args:
            image_path: Path of the query image.
            top_k: Maximum number of matches returned per detected face.
            score_threshold: Minimum cosine similarity (in [0, 1]) for a match.

        Returns:
            A list with one ``(original_bbox, matches)`` tuple per detected
            face, where ``matches`` is the list of Qdrant scored points, or
            None if the image cannot be read or contains no faces.
        """
        original_image = cv2.imread(image_path)
        if original_image is None:
            # Same cv2.imread pitfall as in extract_embeddings_from_file.
            print(f"Could not read image: {image_path}")
            return None
        original_h, original_w = original_image.shape[:2]

        resized_image = self.__resize_image(original_image)
        resized_h, resized_w = resized_image.shape[:2]

        faces = self.face_app.get(resized_image)

        if not faces:
            print("No faces found in the image.")
            return None

        all_matches = []
        for face in faces:
            embedding = face.embedding.tolist()

            # (left, top, right, bottom)
            bbox = face.bbox.astype(int).tolist()

            original_bbox = self.__rescale_bbox(
                bbox, (original_h, original_w), (resized_h, resized_w))

            # Nearest-neighbour search in Qdrant on the query embedding
            results = self.qdrant.search(
                collection_name=self.collection_name,
                query_vector=embedding,
                limit=top_k,
                with_payload=True,
                score_threshold=score_threshold,
            )

            if not results:
                print("No similar faces found.")
            else:
                for r in results:
                    print(
                        f"ID: {r.id}, Score: {r.score:.4f}, Payload: {r.payload}, bbox: {original_bbox}")

            all_matches.append((original_bbox, results))

        return all_matches

    def __init_model(self):
        # Load the InsightFace face detection + embedding model pack
        # ("buffalo_l" is a good default).
        self.face_app = FaceAnalysis(
            name=self.model_name, providers=self.providers)

        # ctx_id: 0 = use GPU if available, -1 = use CPU
        self.face_app.prepare(ctx_id=self.ctx_id)

    def __init_qdrant(self):
        """Connect to Qdrant and make sure the target collection exists."""
        self.qdrant = QdrantClient(host=self.qdrant_host, port=self.qdrant_port)

        existing_names = [
            col.name for col in self.qdrant.get_collections().collections]

        # Distance.COSINE: InsightFace already outputs L2-normalized
        # embeddings and Qdrant's cosine metric assumes normalized vectors,
        # so no manual normalization is needed. Search scores fall in
        # the range [0.0, 1.0].
        if self.collection_name not in existing_names:
            # create_collection (not the deprecated recreate_collection):
            # this branch only runs when the collection is absent, so there
            # is nothing to drop and recreate.
            self.qdrant.create_collection(
                collection_name=self.collection_name,
                vectors_config=VectorParams(size=512, distance=Distance.COSINE),
            )

    def __resize_image(self, image: np.ndarray, target_size: int = 640):
        """Downscale *image* so its longest side is at most *target_size* px.

        Aspect ratio is preserved; images already small enough are returned
        unchanged.
        """
        h, w = image.shape[:2]

        if max(h, w) > target_size:
            scale = target_size / max(h, w)
            new_w, new_h = int(w * scale), int(h * scale)
            image = cv2.resize(image, (new_w, new_h))

        return image

    def __rescale_bbox(self, bbox: list, original_shape: tuple, resized_shape: tuple):
        """Convert bbox from resized-image coordinates back to original-image coordinates.

        Args:
            bbox: (left, top, right, bottom) in resized-image pixels.
            original_shape: (height, width) of the original image.
            resized_shape: (height, width) of the resized image.
        """
        orig_h, orig_w = original_shape[:2]
        resized_h, resized_w = resized_shape[:2]

        scale_x = orig_w / resized_w
        scale_y = orig_h / resized_h

        left, top, right, bottom = bbox

        return [
            int(left * scale_x),
            int(top * scale_y),
            int(right * scale_x),
            int(bottom * scale_y),
        ]
