"""
Feature Extraction Module using DINOv2

Extracts deep visual features from skeleton images using
Meta's DINOv2 Vision Transformer model.
"""

import logging
import os
from typing import List, Union

import numpy as np
import torch
import torch.nn as nn
from PIL import Image
from transformers import AutoImageProcessor, AutoModel

# Module-level logger shared by all classes in this module.
logger = logging.getLogger(__name__)


class FeatureExtractor:
    """
    Extracts deep features from skeleton images using DINOv2.

    Uses facebook/dinov2-base (ViT-B/14) to extract 768-dimensional
    feature vectors from rendered skeleton images.
    """

    def __init__(self,
                 model_name: str = "facebook/dinov2-base",
                 device: str = "mps",
                 batch_size: int = 16):
        """
        Initialize the feature extractor.

        Args:
            model_name: HuggingFace model identifier
            device: "cuda", "mps", or "cpu"; falls back to "cpu" with a
                warning when the requested accelerator is unavailable
            batch_size: Batch size for processing
        """
        self.model_name = model_name
        self.batch_size = batch_size

        # Fall back to CPU when the requested accelerator is not present so
        # the extractor still works on machines without CUDA/MPS.
        if device == "cuda" and not torch.cuda.is_available():
            logger.warning("CUDA not available, falling back to CPU")
            device = "cpu"
        elif device == "mps" and not torch.backends.mps.is_available():
            logger.warning("MPS not available, falling back to CPU")
            device = "cpu"
        self.device = torch.device(device)

        # Load model and processor; eval() disables dropout etc. so
        # inference is deterministic.
        logger.info(f"Loading {model_name} on {device}...")
        self.processor = AutoImageProcessor.from_pretrained(model_name)
        self.model = AutoModel.from_pretrained(model_name)
        self.model.to(self.device)
        self.model.eval()

        # Output dimensionality of the backbone (768 for ViT-B/14).
        self.feature_dim = self.model.config.hidden_size
        logger.info(f"Feature extractor initialized: dim={self.feature_dim}")

    @staticmethod
    def _as_pil(image: Union[np.ndarray, Image.Image]) -> Image.Image:
        """Convert a numpy RGB array to a PIL Image; pass PIL Images through."""
        if isinstance(image, np.ndarray):
            return Image.fromarray(image)
        return image

    @torch.no_grad()
    def extract_from_image(self, image: Union[np.ndarray, Image.Image]) -> np.ndarray:
        """
        Extract features from a single image.

        Args:
            image: RGB image as numpy array (H, W, 3) or PIL Image

        Returns:
            Feature vector of shape (feature_dim,)
        """
        # Preprocess (resize/normalize) and move tensors to the model device
        inputs = self.processor(images=self._as_pil(image), return_tensors="pt")
        inputs = {k: v.to(self.device) for k, v in inputs.items()}

        outputs = self.model(**inputs)

        # Use [CLS] token embedding as the global image feature
        features = outputs.last_hidden_state[:, 0, :].cpu().numpy()

        return features.squeeze()

    @torch.no_grad()
    def extract_from_images(self, images: List[Union[np.ndarray, Image.Image]]) -> np.ndarray:
        """
        Extract features from multiple images in batches.

        Args:
            images: List of RGB images

        Returns:
            Feature matrix of shape (num_images, feature_dim)
        """
        # Guard: np.concatenate raises ValueError on an empty list, so return
        # an empty (0, feature_dim) matrix that callers can handle uniformly.
        if len(images) == 0:
            return np.empty((0, self.feature_dim), dtype=np.float32)

        all_features = []

        # Process in batches to bound peak memory
        for start in range(0, len(images), self.batch_size):
            batch = [self._as_pil(img) for img in images[start:start + self.batch_size]]

            # Preprocess batch and move tensors to the model device
            inputs = self.processor(images=batch, return_tensors="pt")
            inputs = {k: v.to(self.device) for k, v in inputs.items()}

            outputs = self.model(**inputs)
            # [CLS] token embedding per image
            features = outputs.last_hidden_state[:, 0, :].cpu().numpy()

            all_features.append(features)

        # Concatenate all batches
        all_features = np.concatenate(all_features, axis=0)

        logger.info(f"Extracted features from {len(images)} images: shape={all_features.shape}")

        return all_features

    def extract_from_sequence(self,
                             skeleton_images: List[np.ndarray],
                             filter_none: bool = True) -> np.ndarray:
        """
        Extract features from a sequence of skeleton images.

        Args:
            skeleton_images: List of rendered skeleton images (H, W, 3);
                may contain None entries for frames with no detection
            filter_none: Whether to skip None values

        Returns:
            Feature sequence of shape (num_frames, feature_dim); empty input
            yields a (0, feature_dim) matrix
        """
        if filter_none:
            # Drop frames with no rendered skeleton
            valid_images = [img for img in skeleton_images if img is not None]
        else:
            valid_images = skeleton_images

        if len(valid_images) == 0:
            logger.warning("No valid images in sequence")
            # Return (0, feature_dim) so the shape contract above holds even
            # for empty sequences (previously returned a 1-D empty array).
            return np.empty((0, self.feature_dim), dtype=np.float32)

        return self.extract_from_images(valid_images)

    def compute_similarity(self,
                          features1: np.ndarray,
                          features2: np.ndarray,
                          metric: str = "cosine") -> Union[float, np.ndarray]:
        """
        Compute similarity between two feature vectors.

        Args:
            features1: Feature vector or matrix
            features2: Feature vector or matrix
            metric: "cosine" or "euclidean"

        Returns:
            Similarity score: a float for vector inputs, an array for matrix
            inputs. For "euclidean" the score is the negated distance so that
            higher always means more similar.

        Raises:
            ValueError: If metric is not "cosine" or "euclidean"
        """
        if metric == "cosine":
            # Normalize each vector; epsilon avoids division by zero for
            # all-zero feature vectors
            norm1 = np.linalg.norm(features1, axis=-1, keepdims=True)
            norm2 = np.linalg.norm(features2, axis=-1, keepdims=True)

            features1_normalized = features1 / (norm1 + 1e-8)
            features2_normalized = features2 / (norm2 + 1e-8)

            similarity = np.sum(features1_normalized * features2_normalized, axis=-1)

            return float(similarity) if similarity.ndim == 0 else similarity

        elif metric == "euclidean":
            # Negative Euclidean distance (higher = more similar)
            distance = np.linalg.norm(features1 - features2, axis=-1)
            return -float(distance) if distance.ndim == 0 else -distance

        else:
            raise ValueError(f"Unknown metric: {metric}")

    def compute_pairwise_similarity(self,
                                   features1: np.ndarray,
                                   features2: np.ndarray,
                                   metric: str = "cosine") -> np.ndarray:
        """
        Compute pairwise similarity matrix between two feature sequences.

        Args:
            features1: Feature matrix (n1, feature_dim)
            features2: Feature matrix (n2, feature_dim)
            metric: "cosine" or "euclidean"

        Returns:
            Similarity matrix of shape (n1, n2); for "euclidean" the entries
            are negated distances (higher = more similar)

        Raises:
            ValueError: If metric is not "cosine" or "euclidean"
        """
        if metric == "cosine":
            # Normalize rows (epsilon guards all-zero rows)
            norm1 = np.linalg.norm(features1, axis=1, keepdims=True)
            norm2 = np.linalg.norm(features2, axis=1, keepdims=True)

            features1_normalized = features1 / (norm1 + 1e-8)
            features2_normalized = features2 / (norm2 + 1e-8)

            # Cosine similarity matrix via a single matmul
            return features1_normalized @ features2_normalized.T

        elif metric == "euclidean":
            # Distance(i, j) = ||features1[i] - features2[j]|| via broadcasting
            f1_expanded = features1[:, np.newaxis, :]  # (n1, 1, dim)
            f2_expanded = features2[np.newaxis, :, :]  # (1, n2, dim)

            diff = f1_expanded - f2_expanded  # (n1, n2, dim)
            distances = np.linalg.norm(diff, axis=2)  # (n1, n2)

            # Negative distance (higher = more similar)
            return -distances

        else:
            raise ValueError(f"Unknown metric: {metric}")


class FeatureCacheManager:
    """
    Manages caching of extracted features to avoid recomputation.

    Features are stored on disk as ``<cache_key>.npy`` files inside
    ``cache_dir``.
    """

    def __init__(self, cache_dir: str = "./cache/features"):
        """
        Initialize the cache manager and ensure the cache directory exists.

        Args:
            cache_dir: Directory to store cached features
        """
        self.cache_dir = cache_dir
        # Create the directory up front so save_features never fails on a
        # missing path.
        os.makedirs(cache_dir, exist_ok=True)

    def _cache_path(self, cache_key: str) -> str:
        """Return the on-disk .npy path for the given cache key."""
        return os.path.join(self.cache_dir, f"{cache_key}.npy")

    def save_features(self, features: np.ndarray, cache_key: str):
        """
        Save features to cache.

        Args:
            features: Feature array
            cache_key: Unique identifier for the features
        """
        np.save(self._cache_path(cache_key), features)
        logger.info(f"Saved features to cache: {cache_key}")

    def load_features(self, cache_key: str) -> Union[np.ndarray, None]:
        """
        Load features from cache.

        Args:
            cache_key: Unique identifier for the features

        Returns:
            Cached features or None if not found
        """
        cache_path = self._cache_path(cache_key)

        if os.path.exists(cache_path):
            features = np.load(cache_path)
            logger.info(f"Loaded features from cache: {cache_key}")
            return features

        return None

    def has_cache(self, cache_key: str) -> bool:
        """
        Check if cache exists for given key.

        Args:
            cache_key: Unique identifier

        Returns:
            True if cache exists
        """
        return os.path.exists(self._cache_path(cache_key))