# -*- coding: utf-8 -*-
"""
FAISS IVF Index Retriever

This module implements a FAISS IVF (Inverted File) index retriever
that provides fast approximate search using clustering-based indexing.
"""

import faiss
import numpy as np
from typing import List, Optional, Tuple
from loguru import logger

from ...core.base_faiss_retriever import BaseFaissRetriever


class IVFIndexRetriever(BaseFaissRetriever):
    """
    FAISS IVF Index Retriever for fast approximate similarity search.
    
    This retriever uses FAISS IndexIVFFlat for approximate search with
    configurable speed-accuracy tradeoffs. It clusters the vector space
    (coarse quantization into ``nlist`` cells) and searches only the
    ``nprobe`` most promising clusters per query.
    
    Features:
    - Fast approximate search
    - Configurable speed-accuracy tradeoff
    - Good for large datasets
    - Supports both cosine similarity and L2 distance
    - Tunable search parameters (nprobe)
    """
    
    def __init__(self, 
                 model_name: str = 'all-MiniLM-L6-v2',
                 dimension: Optional[int] = None,
                 normalize_vectors: bool = True,
                 metric: str = 'cosine',
                 nlist: int = 100,
                 nprobe: int = 10,
                 device: str = 'cpu'):
        """
        Initialize the IVF Index Retriever.
        
        Args:
            model_name: Name of the sentence transformer model
            dimension: Vector dimension (auto-detected if None)
            normalize_vectors: Whether to normalize vectors
            metric: Distance metric ('cosine' or 'l2')
            nlist: Number of clusters for IVF (more clusters = more memory, faster search)
            nprobe: Number of clusters to search (more probes = better recall, slower search)
            device: Device to run the model on
            
        Raises:
            ValueError: If ``metric`` is unsupported or ``nlist``/``nprobe``
                are not positive integers.
        """
        super().__init__(model_name, dimension, normalize_vectors, device)
        
        if metric not in ('cosine', 'l2'):
            raise ValueError("Metric must be 'cosine' or 'l2'")
        if nlist < 1:
            raise ValueError("nlist must be a positive integer")
        if nprobe < 1:
            raise ValueError("nprobe must be a positive integer")
            
        self.metric = metric
        self.nlist = nlist
        self.nprobe = nprobe
        
        # Cosine similarity via inner product is only correct on unit vectors,
        # so force normalization on if the caller disabled it.
        if metric == 'cosine' and not normalize_vectors:
            logger.warning("Cosine similarity requires normalized vectors. Setting normalize_vectors=True")
            self.normalize_vectors = True
            
        logger.info(f"Initialized IVF Index Retriever with {metric} metric, nlist={nlist}, nprobe={nprobe}")
    
    def _build_index(self):
        """
        Build the FAISS IVF index.
        
        Creates IndexIVFFlat with the specified number of clusters, trains
        it on ``self.vectors`` (the k-means clustering step) and then adds
        the vectors. If there are fewer candidate vectors than ``nlist``,
        the cluster count is clamped down, since FAISS requires at least
        ``nlist`` training points.
        """
        # FAISS training fails when the number of training points is below
        # the number of clusters; clamp nlist to the dataset size.
        num_vectors = len(self.vectors)
        if num_vectors < self.nlist:
            logger.warning(
                f"Only {num_vectors} vectors available; reducing nlist from {self.nlist} to {num_vectors}"
            )
            self.nlist = max(1, num_vectors)
            self.nprobe = min(self.nprobe, self.nlist)
        
        logger.info(f"Building IVF index with nlist={self.nlist}...")
        
        # Create quantizer (used for assigning vectors to clusters).
        # Inner product on normalized vectors == cosine similarity.
        if self.metric == 'cosine':
            quantizer = faiss.IndexFlatIP(self.dimension)
            self.index = faiss.IndexIVFFlat(quantizer, self.dimension, self.nlist, faiss.METRIC_INNER_PRODUCT)
        else:
            quantizer = faiss.IndexFlatL2(self.dimension)
            self.index = faiss.IndexIVFFlat(quantizer, self.dimension, self.nlist, faiss.METRIC_L2)
        
        # Set search parameters
        self.index.nprobe = self.nprobe
        
        # Train the index (clustering step)
        logger.info("Training IVF index (clustering vectors)...")
        self.index.train(self.vectors)
        
        # Add vectors to index
        logger.info("Adding vectors to trained index...")
        self.index.add(self.vectors)
        
        logger.info(f"IVF index built successfully with {self.index.ntotal} vectors in {self.nlist} clusters")
    
    def set_search_params(self, nprobe: int):
        """
        Update search parameters for the index.
        
        Args:
            nprobe: Number of clusters to search (1 to nlist)
            
        Raises:
            ValueError: If ``nprobe`` is outside [1, nlist].
        """
        if nprobe < 1 or nprobe > self.nlist:
            raise ValueError(f"nprobe must be between 1 and {self.nlist}")
            
        self.nprobe = nprobe
        # The live index only exists after load_candidates(); keep the
        # stored value in sync either way so _build_index picks it up.
        if self.index is not None:
            self.index.nprobe = nprobe
            logger.info(f"Updated search parameter: nprobe={nprobe}")
    
    def recommend(self, query_question: str, top_k: int = 5, nprobe: Optional[int] = None, **kwargs) -> List[Tuple]:
        """
        Recommend similar questions using IVF search.
        
        Args:
            query_question: The input question to find recommendations for
            top_k: Number of recommendations to return
            nprobe: Number of clusters to search (overrides default if provided)
            **kwargs: Additional search parameters
            
        Returns:
            List of tuples containing (question, similarity_score, original_score)
            
        Raises:
            ValueError: If the index has not been built yet.
        """
        if self.index is None:
            raise ValueError("Index not built. Call load_candidates() first.")
        
        # Temporarily override nprobe if provided; always restore afterwards
        # so a failing search cannot leave the index misconfigured.
        original_nprobe = self.index.nprobe
        if nprobe is not None:
            self.index.nprobe = nprobe
            
        try:
            # Use parent class method for actual search
            results = super().recommend(query_question, top_k, **kwargs)
        finally:
            # Restore original nprobe
            self.index.nprobe = original_nprobe
            
        return results
    
    def get_index_info(self) -> dict:
        """
        Get detailed information about the IVF index.
        
        Returns:
            Dictionary containing the base index information, extended with
            IVF-specific fields (metric, nlist, nprobe, estimated memory,
            search complexity, cluster distribution) when an index exists.
        """
        base_info = super().get_index_info()
        
        if self.index is not None:
            base_info.update({
                "metric": self.metric,
                "index_description": "IVF index for fast approximate search",
                "nlist": self.nlist,
                "nprobe": self.nprobe,
                "memory_usage_mb": self._estimate_memory_usage(),
                "search_complexity": f"O(nprobe * k * d) where nprobe={self.nprobe}, k=top_k, d=dimension",
                "cluster_distribution": self._get_cluster_distribution()
            })
            
        return base_info
    
    def _estimate_memory_usage(self) -> float:
        """
        Estimate memory usage of the IVF index in MB.
        
        The estimate covers the raw float32 vector storage, the cluster
        centroids, and an approximate per-vector ID overhead for the
        inverted lists, with a 20% slack factor.
        
        Returns:
            Estimated memory usage in megabytes (0.0 if no index exists).
        """
        if self.index is None:
            return 0.0
            
        # Vector storage: float32 = 4 bytes per dimension
        vector_memory = self.index.ntotal * self.dimension * 4
        
        # Cluster centroids (also float32)
        centroid_memory = self.nlist * self.dimension * 4
        
        # Inverted lists overhead (approximate): 8 bytes per vector ID
        inverted_lists_overhead = self.index.ntotal * 8
        
        # Total memory with 20% overhead slack
        total_memory = (vector_memory + centroid_memory + inverted_lists_overhead) * 1.2
        
        return total_memory / (1024 * 1024)  # Convert to MB
    
    def _get_cluster_distribution(self) -> dict:
        """
        Get information about how vectors are distributed across clusters.
        
        Returns:
            Dictionary with cluster distribution statistics, or a status
            dictionary when the inverted lists are not accessible.
        """
        if self.index is None or not hasattr(self.index, 'invlists'):
            return {"status": "not_available"}
            
        try:
            cluster_sizes = np.array(
                [self.index.invlists.list_size(i) for i in range(self.nlist)]
            )
            
            return {
                "total_clusters": self.nlist,
                "min_cluster_size": int(cluster_sizes.min()),
                "max_cluster_size": int(cluster_sizes.max()),
                "avg_cluster_size": float(cluster_sizes.mean()),
                "std_cluster_size": float(cluster_sizes.std()),
                "empty_clusters": int(np.sum(cluster_sizes == 0))
            }
        except Exception as e:
            # Best-effort diagnostics only; never let introspection break callers.
            logger.warning(f"Could not get cluster distribution: {e}")
            return {"status": "error", "message": str(e)}
    
    def benchmark_search_params(self, query_questions: List[str], 
                               nprobe_values: Optional[List[int]] = None,
                               top_k: int = 5) -> dict:
        """
        Benchmark different nprobe values to find optimal speed-accuracy tradeoff.
        
        Args:
            query_questions: List of query questions for benchmarking
            nprobe_values: List of nprobe values to test (a default sweep is
                used when None)
            top_k: Number of results to retrieve per query
            
        Returns:
            Dictionary mapping each tested nprobe value to its timing stats.
            
        Raises:
            ValueError: If the index has not been built yet.
        """
        import time
        
        if self.index is None:
            raise ValueError("Index not built. Call load_candidates() first.")
            
        if nprobe_values is None:
            nprobe_values = [1, 5, 10, 20, min(50, self.nlist)]
            
        logger.info(f"Benchmarking IVF index with {len(query_questions)} queries and nprobe values: {nprobe_values}")
        
        # Encode queries once; only the FAISS search is timed below.
        query_vectors = self.encode_texts(query_questions)
        
        results = {}
        original_nprobe = self.index.nprobe
        
        try:
            for nprobe in nprobe_values:
                if nprobe > self.nlist:
                    logger.warning(f"Skipping nprobe={nprobe} (> nlist={self.nlist})")
                    continue
                    
                self.index.nprobe = nprobe
                
                # Time the batched search with a monotonic high-resolution clock.
                start_time = time.perf_counter()
                similarities, indices = self.index.search(query_vectors, top_k)
                search_time = time.perf_counter() - start_time
                
                results[nprobe] = {
                    "nprobe": nprobe,
                    "search_time_ms": search_time * 1000,
                    # Guard against a sub-resolution timing of 0.0 seconds.
                    "queries_per_second": (len(query_questions) / search_time
                                           if search_time > 0 else float('inf')),
                    "avg_time_per_query_ms": (search_time * 1000) / len(query_questions)
                }
                
        finally:
            # Restore original nprobe
            self.index.nprobe = original_nprobe
            
        logger.info(f"Benchmark completed for {len(results)} nprobe values")
        return results
    
    def get_optimal_nprobe(self, target_recall: float = 0.9, 
                          query_questions: Optional[List[str]] = None) -> int:
        """
        Find optimal nprobe value for a target recall rate.
        
        Ground truth is obtained by searching with nprobe = nlist (i.e.
        exhaustive over all clusters); recall of each candidate nprobe is
        measured against the top-5 of that baseline.
        
        Args:
            target_recall: Target recall rate (0.0 to 1.0)
            query_questions: Sample queries for testing (a random sample of
                the loaded questions is used if None)
            
        Returns:
            Smallest tested nprobe meeting the target recall, or the best
            one found if the target is never reached.
            
        Raises:
            ValueError: If the index has not been built yet.
        """
        if self.index is None:
            raise ValueError("Index not built. Call load_candidates() first.")
            
        if query_questions is None:
            # Use a sample of existing questions as queries (at least one).
            num_samples = max(1, min(100, len(self.questions) // 10))
            indices = np.random.choice(len(self.questions), num_samples, replace=False)
            query_questions = [self.questions[i] for i in indices]
            
        logger.info(f"Finding optimal nprobe for target recall {target_recall}")
        
        original_nprobe = self.index.nprobe
        best_nprobe = 1
        best_recall = 0.0
        
        # Everything that mutates index.nprobe stays inside try/finally so
        # the original setting is restored even if encoding/search raises.
        try:
            # Ground truth with exhaustive search (nprobe = nlist)
            self.index.nprobe = self.nlist
            query_vectors = self.encode_texts(query_questions)
            ground_truth_similarities, ground_truth_indices = self.index.search(query_vectors, 10)
            
            for nprobe in range(1, self.nlist + 1, max(1, self.nlist // 20)):
                self.index.nprobe = nprobe
                similarities, indices = self.index.search(query_vectors, 10)
                
                # Calculate recall@5 against the exhaustive baseline
                total_recall = 0.0
                for i in range(len(query_questions)):
                    gt_set = set(ground_truth_indices[i][:5])  # Top 5 ground truth
                    gt_set.discard(-1)  # FAISS pads missing neighbors with -1
                    if not gt_set:
                        # No valid ground truth for this query; count as perfect
                        total_recall += 1.0
                        continue
                    pred_set = set(indices[i][:5])  # Top 5 predictions
                    recall = len(gt_set.intersection(pred_set)) / len(gt_set)
                    total_recall += recall
                    
                avg_recall = total_recall / len(query_questions)
                
                # First nprobe reaching the target wins (smallest = fastest)
                if avg_recall >= target_recall:
                    best_nprobe = nprobe
                    best_recall = avg_recall
                    break
                    
                if avg_recall > best_recall:
                    best_nprobe = nprobe
                    best_recall = avg_recall
                    
        finally:
            self.index.nprobe = original_nprobe
            
        logger.info(f"Optimal nprobe: {best_nprobe} (recall: {best_recall:.3f})")
        return best_nprobe