# -*- coding: utf-8 -*-
"""
FAISS HNSW Index Retriever

This module implements a FAISS HNSW (Hierarchical Navigable Small World)
index retriever that provides very fast approximate search using graph-based indexing.
"""

import faiss
import numpy as np
from typing import List, Optional, Tuple
from loguru import logger

from ...core.base_faiss_retriever import BaseFaissRetriever


class HNSWIndexRetriever(BaseFaissRetriever):
    """
    FAISS HNSW Index Retriever for very fast approximate similarity search.
    
    This retriever uses FAISS IndexHNSWFlat for approximate search based on
    Hierarchical Navigable Small World graphs. It provides excellent speed
    and good accuracy for high-dimensional data.
    
    Features:
    - Very fast approximate search
    - Good accuracy even with high dimensions
    - Memory efficient for large datasets
    - Configurable graph connectivity (M parameter)
    - Tunable search parameters (efSearch)
    """
    
    def __init__(self, 
                 model_name: str = 'all-MiniLM-L6-v2',
                 dimension: Optional[int] = None,
                 normalize_vectors: bool = True,
                 metric: str = 'cosine',
                 M: int = 16,
                 efConstruction: int = 200,
                 efSearch: int = 50,
                 device: str = 'cpu'):
        """
        Initialize the HNSW Index Retriever.
        
        Args:
            model_name: Name of the sentence transformer model
            dimension: Vector dimension (auto-detected if None)
            normalize_vectors: Whether to normalize vectors
            metric: Distance metric ('cosine' or 'l2')
            M: Number of bi-directional links for each node (4-64, default 16)
            efConstruction: Size of dynamic candidate list during construction (100-800)
            efSearch: Size of dynamic candidate list during search (10-500)
            device: Device to run the model on
            
        Raises:
            ValueError: If metric is not 'cosine' or 'l2'.
        """
        super().__init__(model_name, dimension, normalize_vectors, device)
        
        if metric not in ('cosine', 'l2'):
            raise ValueError("Metric must be 'cosine' or 'l2'")
            
        self.metric = metric
        self.M = M
        self.efConstruction = efConstruction
        self.efSearch = efSearch
        
        # Out-of-range values are allowed but warned about: they usually
        # indicate a configuration mistake rather than deliberate tuning.
        if not 4 <= M <= 64:
            logger.warning(f"M={M} is outside recommended range [4, 64]")
        if efConstruction < 100:
            logger.warning(f"efConstruction={efConstruction} is quite low, consider >= 100")
        if efSearch < 10:
            logger.warning(f"efSearch={efSearch} is quite low, consider >= 10")
            
        # Cosine similarity is implemented as inner product over unit vectors
        # (see _build_index), so normalization is mandatory for that metric.
        if metric == 'cosine' and not normalize_vectors:
            logger.warning("Cosine similarity requires normalized vectors. Setting normalize_vectors=True")
            self.normalize_vectors = True
            
        logger.info(f"Initialized HNSW Index Retriever with {metric} metric, M={M}, efConstruction={efConstruction}, efSearch={efSearch}")
    
    def _build_index(self):
        """
        Build the FAISS HNSW index.
        
        Creates IndexHNSWFlat with the configured M/efConstruction/efSearch
        parameters and adds ``self.vectors``; HNSW builds its graph
        incrementally as vectors are added.
        """
        logger.info(f"Building HNSW index with M={self.M}, efConstruction={self.efConstruction}...")
        
        if self.metric == 'cosine':
            # Inner product over normalized vectors is equivalent to cosine similarity.
            self.index = faiss.IndexHNSWFlat(self.dimension, self.M, faiss.METRIC_INNER_PRODUCT)
        else:
            # Plain Euclidean (L2) distance.
            self.index = faiss.IndexHNSWFlat(self.dimension, self.M, faiss.METRIC_L2)
        
        # Construction-time candidate list size (graph build quality).
        self.index.hnsw.efConstruction = self.efConstruction
        
        # Query-time candidate list size (speed/accuracy tradeoff).
        self.index.hnsw.efSearch = self.efSearch
        
        # Add vectors to index (HNSW builds the graph incrementally)
        logger.info("Adding vectors and building HNSW graph...")
        self.index.add(self.vectors)
        
        logger.info(f"HNSW index built successfully with {self.index.ntotal} vectors")
        # BUGFIX: _get_graph_stats() may return a status/error dict that has no
        # 'total_connections' key; use .get() so logging never raises KeyError.
        logger.info(f"Graph has {self._get_graph_stats().get('total_connections', 'unknown')} connections")
    
    def set_search_params(self, efSearch: int):
        """
        Update search parameters for the index.
        
        Args:
            efSearch: Size of dynamic candidate list during search (10-500)
            
        Raises:
            ValueError: If efSearch is less than 1.
        """
        if efSearch < 1:
            raise ValueError("efSearch must be >= 1")
            
        self.efSearch = efSearch
        # The stored value always updates; the live index only if it exists,
        # so the new value also applies to an index built later.
        if self.index is not None:
            self.index.hnsw.efSearch = efSearch
            logger.info(f"Updated search parameter: efSearch={efSearch}")
    
    def recommend(self, query_question: str, top_k: int = 5, efSearch: Optional[int] = None, **kwargs) -> List[Tuple]:
        """
        Recommend similar questions using HNSW search.
        
        Args:
            query_question: The input question to find recommendations for
            top_k: Number of recommendations to return
            efSearch: Size of candidate list during search (overrides default if provided)
            **kwargs: Additional search parameters forwarded to the base class
            
        Returns:
            List of tuples containing (question, similarity_score, original_score)
            
        Raises:
            ValueError: If the index has not been built yet.
        """
        if self.index is None:
            raise ValueError("Index not built. Call load_candidates() first.")
        
        # Temporarily override efSearch for this call only; always restored
        # in the finally block, even if the search raises.
        original_efSearch = self.index.hnsw.efSearch
        if efSearch is not None:
            self.index.hnsw.efSearch = efSearch
            
        try:
            # Delegate the actual encode + search to the base class.
            results = super().recommend(query_question, top_k, **kwargs)
        finally:
            self.index.hnsw.efSearch = original_efSearch
            
        return results
    
    def get_index_info(self) -> dict:
        """
        Get detailed information about the HNSW index.
        
        Returns:
            Dictionary containing base index info plus HNSW-specific fields
            (metric, M, efConstruction, efSearch, memory estimate, graph stats).
        """
        base_info = super().get_index_info()
        
        if self.index is not None:
            graph_stats = self._get_graph_stats()
            base_info.update({
                "metric": self.metric,
                "index_description": "HNSW index for very fast approximate search",
                "M": self.M,
                "efConstruction": self.efConstruction,
                "efSearch": self.efSearch,
                "memory_usage_mb": self._estimate_memory_usage(),
                "search_complexity": f"O(log(n) * M * efSearch) where n={self.index.ntotal}, M={self.M}, efSearch={self.efSearch}",
                "graph_stats": graph_stats
            })
            
        return base_info
    
    def _estimate_memory_usage(self) -> float:
        """
        Estimate memory usage of the HNSW index in MB.
        
        Returns:
            Estimated memory usage in megabytes (0.0 if no index is built).
        """
        if self.index is None:
            return 0.0
            
        # Raw vector storage: ntotal float32 vectors of `dimension` components.
        vector_memory = self.index.ntotal * self.dimension * 4
        
        # Graph structure memory (rough estimate): each node has ~M links at
        # level 0 plus fewer at higher levels; 1.5x covers the upper levels.
        avg_connections_per_node = self.M * 1.5
        graph_memory = self.index.ntotal * avg_connections_per_node * 4  # 4 bytes per connection ID
        
        # 20% overhead for the remaining HNSW bookkeeping structures.
        overhead = (vector_memory + graph_memory) * 0.2
        
        total_memory = vector_memory + graph_memory + overhead
        
        return total_memory / (1024 * 1024)  # Convert to MB
    
    def _get_graph_stats(self) -> dict:
        """
        Get approximate statistics about the HNSW graph structure.
        
        Samples up to the first 1000 nodes; counts are therefore partial for
        larger indexes. Returns a status dict if the graph is unavailable.
        
        Returns:
            Dictionary with graph statistics, or {"status": ...} on failure.
        """
        if self.index is None or not hasattr(self.index, 'hnsw'):
            return {"status": "not_available"}
            
        try:
            hnsw = self.index.hnsw
            
            stats = {
                "max_level": hnsw.max_level,
                "entry_point": hnsw.entry_point,
                "total_connections": 0,
                "levels_distribution": {}
            }
            
            level_counts = {}
            total_connections = 0
            sample_size = min(1000, self.index.ntotal)
            
            for i in range(sample_size):
                node_level = 0
                # Walk up the levels until this node has no neighbors there.
                # NOTE(review): hnsw.get_neighbors may not exist in all FAISS
                # Python builds — the except below falls back gracefully.
                while node_level <= hnsw.max_level:
                    try:
                        connections = hnsw.get_neighbors(i, node_level)
                        if len(connections) == 0:
                            break
                        total_connections += len(connections)
                        node_level += 1
                    # BUGFIX: was a bare `except:`, which also swallowed
                    # KeyboardInterrupt/SystemExit. Narrowed to Exception.
                    except Exception:
                        break
                        
                level_counts[node_level] = level_counts.get(node_level, 0) + 1
            
            stats["total_connections"] = total_connections
            stats["levels_distribution"] = level_counts
            stats["avg_connections_per_node"] = total_connections / sample_size if sample_size > 0 else 0
            
            return stats
            
        except Exception as e:
            logger.warning(f"Could not get graph statistics: {e}")
            return {"status": "error", "message": str(e)}
    
    def benchmark_search_params(self, query_questions: List[str], 
                               efSearch_values: Optional[List[int]] = None,
                               top_k: int = 5) -> dict:
        """
        Benchmark different efSearch values to find optimal speed-accuracy tradeoff.
        
        Args:
            query_questions: List of query questions for benchmarking (non-empty)
            efSearch_values: List of efSearch values to test (default: [10, 20, 50, 100, 200])
            top_k: Number of results to retrieve per query
            
        Returns:
            Dictionary mapping each efSearch value to its timing results.
            
        Raises:
            ValueError: If the index is not built or query_questions is empty.
        """
        import time
        
        if self.index is None:
            raise ValueError("Index not built. Call load_candidates() first.")
        # BUGFIX: an empty query list previously caused ZeroDivisionError below.
        if not query_questions:
            raise ValueError("query_questions must not be empty")
            
        if efSearch_values is None:
            efSearch_values = [10, 20, 50, 100, 200]
            
        logger.info(f"Benchmarking HNSW index with {len(query_questions)} queries and efSearch values: {efSearch_values}")
        
        # Encode queries once; reuse the vectors for every efSearch setting.
        query_vectors = self.encode_texts(query_questions)
        
        results = {}
        original_efSearch = self.index.hnsw.efSearch
        
        try:
            for efSearch in efSearch_values:
                self.index.hnsw.efSearch = efSearch
                
                # BUGFIX: use the monotonic high-resolution clock for timing
                # (time.time() is wall-clock and coarse on some platforms),
                # and clamp to avoid division by zero on very fast searches.
                start_time = time.perf_counter()
                similarities, indices = self.index.search(query_vectors, top_k)
                search_time = max(time.perf_counter() - start_time, 1e-9)
                
                results[efSearch] = {
                    "efSearch": efSearch,
                    "search_time_ms": search_time * 1000,
                    "queries_per_second": len(query_questions) / search_time,
                    "avg_time_per_query_ms": (search_time * 1000) / len(query_questions)
                }
                
        finally:
            # Restore the caller's efSearch even if a search raised.
            self.index.hnsw.efSearch = original_efSearch
            
        logger.info(f"Benchmark completed for {len(results)} efSearch values")
        return results
    
    def get_optimal_efSearch(self, target_recall: float = 0.9, 
                            query_questions: Optional[List[str]] = None) -> int:
        """
        Find optimal efSearch value for a target recall rate.
        
        Ground truth is computed with efSearch=500, then increasing efSearch
        values are tried until the top-5 recall meets the target.
        
        Args:
            target_recall: Target recall rate (0.0 to 1.0)
            query_questions: Sample queries for testing (random sample of the
                loaded questions if None)
            
        Returns:
            Optimal efSearch value.
            
        Raises:
            ValueError: If the index is not built or no queries are available.
        """
        # BUGFIX: previously accessed self.index.hnsw without checking,
        # producing an obscure AttributeError when the index was missing.
        if self.index is None:
            raise ValueError("Index not built. Call load_candidates() first.")
            
        if query_questions is None:
            # BUGFIX: with fewer than 10 candidates, len(questions)//10 was 0,
            # yielding an empty sample and a ZeroDivisionError later. Always
            # sample at least one question.
            num_samples = min(100, max(1, len(self.questions) // 10))
            indices = np.random.choice(len(self.questions), num_samples, replace=False)
            query_questions = [self.questions[i] for i in indices]
            
        if not query_questions:
            raise ValueError("query_questions must not be empty")
            
        logger.info(f"Finding optimal efSearch for target recall {target_recall}")
        
        original_efSearch = self.index.hnsw.efSearch
        
        best_efSearch = 10
        best_recall = 0.0
        
        # BUGFIX: the ground-truth computation now lives inside try/finally,
        # so efSearch is restored even if encoding or searching raises.
        try:
            # High efSearch gives a near-exhaustive result to compare against.
            self.index.hnsw.efSearch = 500
            query_vectors = self.encode_texts(query_questions)
            ground_truth_similarities, ground_truth_indices = self.index.search(query_vectors, 10)
            
            for efSearch in [10, 20, 30, 50, 70, 100, 150, 200, 300]:
                self.index.hnsw.efSearch = efSearch
                similarities, indices = self.index.search(query_vectors, 10)
                
                # Average top-5 recall against the ground truth.
                total_recall = 0.0
                for i in range(len(query_questions)):
                    gt_set = set(ground_truth_indices[i][:5])
                    pred_set = set(indices[i][:5])
                    recall = len(gt_set.intersection(pred_set)) / len(gt_set)
                    total_recall += recall
                    
                avg_recall = total_recall / len(query_questions)
                
                # First value meeting the target wins (smallest = fastest).
                if avg_recall >= target_recall:
                    best_efSearch = efSearch
                    best_recall = avg_recall
                    break
                    
                # Otherwise keep the best recall seen so far.
                if avg_recall > best_recall:
                    best_efSearch = efSearch
                    best_recall = avg_recall
                    
        finally:
            self.index.hnsw.efSearch = original_efSearch
            
        logger.info(f"Optimal efSearch: {best_efSearch} (recall: {best_recall:.3f})")
        return best_efSearch
    
    def rebuild_with_params(self, M: Optional[int] = None, efConstruction: Optional[int] = None):
        """
        Rebuild the index with new construction parameters.
        
        Args:
            M: New M parameter (number of connections); unchanged if None
            efConstruction: New efConstruction parameter; unchanged if None
            
        Raises:
            ValueError: If no vectors have been loaded yet.
        """
        if self.vectors is None:
            raise ValueError("No vectors loaded. Call load_candidates() first.")
            
        if M is not None:
            self.M = M
        if efConstruction is not None:
            self.efConstruction = efConstruction
            
        logger.info(f"Rebuilding HNSW index with M={self.M}, efConstruction={self.efConstruction}")
        self._build_index()