# -*- coding: utf-8 -*-
"""
FAISS Flat Index Retriever

This module implements a FAISS flat index retriever that performs
exact brute-force search for maximum accuracy.
"""

import time
from typing import List, Optional

import faiss
import numpy as np
from loguru import logger

from ...core.base_faiss_retriever import BaseFaissRetriever


class FlatIndexRetriever(BaseFaissRetriever):
    """
    FAISS Flat Index Retriever for exact similarity search.
    
    This retriever uses FAISS IndexFlatIP (Inner Product) or IndexFlatL2
    for exact brute-force search. It provides 100% accuracy but may be
    slower for large datasets.
    
    Features:
    - Exact search results (100% recall)
    - Simple and reliable
    - Good for small to medium datasets
    - Supports both cosine similarity and L2 distance
    """
    
    def __init__(self, 
                 model_name: str = 'all-MiniLM-L6-v2',
                 dimension: Optional[int] = None,
                 normalize_vectors: bool = True,
                 metric: str = 'cosine',
                 device: str = 'cpu'):
        """
        Initialize the Flat Index Retriever.
        
        Args:
            model_name: Name of the sentence transformer model
            dimension: Vector dimension (auto-detected if None)
            normalize_vectors: Whether to normalize vectors
            metric: Distance metric ('cosine' or 'l2')
            device: Device to run the model on
        
        Raises:
            ValueError: If `metric` is not 'cosine' or 'l2'.
        """
        super().__init__(model_name, dimension, normalize_vectors, device)
        
        if metric not in ('cosine', 'l2'):
            raise ValueError("Metric must be 'cosine' or 'l2'")
            
        self.metric = metric
        
        # Inner-product search equals cosine similarity only on unit-length
        # vectors, so normalization is mandatory for the cosine metric.
        if metric == 'cosine' and not normalize_vectors:
            logger.warning("Cosine similarity requires normalized vectors. Setting normalize_vectors=True")
            self.normalize_vectors = True
            
        logger.info(f"Initialized Flat Index Retriever with {metric} metric")
    
    def _build_index(self):
        """
        Build the FAISS flat index.
        
        Creates either IndexFlatIP for cosine similarity or
        IndexFlatL2 for L2 distance, then adds all candidate vectors.
        """
        logger.info(f"Building flat index with {self.metric} metric...")
        
        if self.metric == 'cosine':
            # Use Inner Product for cosine similarity (with normalized vectors)
            self.index = faiss.IndexFlatIP(self.dimension)
        else:
            # Use L2 distance
            self.index = faiss.IndexFlatL2(self.dimension)
            
        # FAISS requires C-contiguous float32 input; this is a no-op copy
        # guard when the vectors are already in that layout.
        self.index.add(np.ascontiguousarray(self.vectors, dtype=np.float32))
        
        logger.info(f"Flat index built successfully with {self.index.ntotal} vectors")
    
    def get_index_info(self) -> dict:
        """
        Get detailed information about the flat index.
        
        Returns:
            Dictionary containing index information (base fields plus
            metric, description, memory estimate, and search complexity).
        """
        base_info = super().get_index_info()
        
        if self.index is not None:
            base_info.update({
                "metric": self.metric,
                "index_description": "Flat index for exact search",
                "memory_usage_mb": self._estimate_memory_usage(),
                "search_complexity": "O(n*d) where n=num_vectors, d=dimension"
            })
            
        return base_info
    
    def _estimate_memory_usage(self) -> float:
        """
        Estimate memory usage of the index in MB.
        
        Returns:
            Estimated memory usage in megabytes (0.0 if no index is built).
        """
        if self.index is None:
            return 0.0
            
        # Each vector is stored as float32 (4 bytes per dimension)
        vector_memory = self.index.ntotal * self.dimension * 4
        
        # Add ~10% overhead for the index structure itself
        total_memory = vector_memory * 1.1
        
        return total_memory / (1024 * 1024)  # Convert to MB
    
    def benchmark_search(self, query_questions: List[str], top_k: int = 5) -> dict:
        """
        Benchmark search performance on a set of queries.
        
        Args:
            query_questions: List of query questions for benchmarking
            top_k: Number of results to retrieve per query
            
        Returns:
            Dictionary containing benchmark results
        
        Raises:
            ValueError: If the index has not been built or the query
                list is empty.
        """
        if self.index is None:
            raise ValueError("Index not built. Call load_candidates() first.")
        if not query_questions:
            # Guard against ZeroDivisionError in the throughput stats below.
            raise ValueError("query_questions must not be empty")
            
        logger.info(f"Benchmarking flat index with {len(query_questions)} queries...")
        
        # Encode all queries at once for fair timing. perf_counter() is
        # monotonic and high-resolution, unlike wall-clock time.time().
        start_time = time.perf_counter()
        query_vectors = self.encode_texts(query_questions)
        encoding_time = time.perf_counter() - start_time
        
        # Perform searches
        start_time = time.perf_counter()
        similarities, indices = self.index.search(query_vectors, top_k)
        search_time = time.perf_counter() - start_time
        
        total_time = encoding_time + search_time
        # Avoid division by zero if the whole run measured below timer
        # resolution (tiny query sets on fast hardware).
        queries_per_second = (
            len(query_questions) / total_time if total_time > 0 else float('inf')
        )
        
        results = {
            "index_type": "Flat",
            "metric": self.metric,
            "num_queries": len(query_questions),
            "top_k": top_k,
            "total_vectors": self.index.ntotal,
            "dimension": self.dimension,
            "encoding_time_ms": encoding_time * 1000,
            "search_time_ms": search_time * 1000,
            "total_time_ms": total_time * 1000,
            "queries_per_second": queries_per_second,
            "avg_time_per_query_ms": (total_time * 1000) / len(query_questions),
            "memory_usage_mb": self._estimate_memory_usage()
        }
        
        logger.info(f"Benchmark completed: {results['queries_per_second']:.2f} queries/sec")
        return results
    
    def get_vector_by_index(self, idx: int) -> np.ndarray:
        """
        Get the vector for a specific question index.
        
        Args:
            idx: Index of the question
            
        Returns:
            Vector for the specified question
        
        Raises:
            IndexError: If `idx` is outside [0, len(questions)).
        """
        if idx < 0 or idx >= len(self.questions):
            # Half-open bracket: the upper bound itself is not a valid index.
            raise IndexError(f"Index {idx} out of range [0, {len(self.questions)})")
            
        return self.vectors[idx]
    
    def find_similar_vectors(self, vector: np.ndarray, top_k: int = 5) -> List[tuple]:
        """
        Find similar vectors given a specific vector.
        
        Args:
            vector: Input vector to search for (1-D or a single-row 2-D array)
            top_k: Number of similar vectors to return
            
        Returns:
            List of tuples containing (question, similarity, original_score)
        
        Raises:
            ValueError: If the index has not been built.
        """
        if self.index is None:
            raise ValueError("Index not built. Call load_candidates() first.")
            
        # Ensure vector is the right shape and type for FAISS (1, dim) float32
        if vector.ndim == 1:
            vector = vector.reshape(1, -1)
        vector = np.ascontiguousarray(vector, dtype=np.float32)
        
        if self.normalize_vectors:
            # Epsilon avoids division by zero for an all-zero query vector
            norm = np.linalg.norm(vector, axis=1, keepdims=True)
            vector = vector / (norm + 1e-8)
            
        # Search
        similarities, indices = self.index.search(vector, top_k)
        
        # Prepare results; FAISS returns -1 indices when fewer than
        # top_k candidates exist, so skip those slots.
        results = []
        for similarity, idx in zip(similarities[0], indices[0]):
            if idx != -1:
                question = self.questions[idx]
                original_score = self.scores[idx]
                results.append((question, float(similarity), original_score))
                
        return results