# -*- coding: utf-8 -*-
"""
Base FAISS retriever implementation

This module provides the abstract base class for all FAISS-based retrievers,
including common functionality for vector encoding and index management.
"""

import os
import faiss
import numpy as np
from abc import abstractmethod
from typing import List, Tuple, Optional, Union
from sentence_transformers import SentenceTransformer
from loguru import logger

from .base import BaseRetriever

# Pull in the project config so the HuggingFace mirror settings take effect.
try:
    from ..config import HF_MIRROR_ENDPOINT, SENTENCE_TRANSFORMERS_CACHE
except ImportError:
    # Fallback defaults when the package-level config is unavailable.
    HF_MIRROR_ENDPOINT = "https://hf-mirror.com"
    model_cache_dir = os.path.expanduser("~/.cache/sentence_transformers")
else:
    model_cache_dir = SENTENCE_TRANSFORMERS_CACHE


class BaseFaissRetriever(BaseRetriever):
    """
    Abstract base class for FAISS-based retrievers.

    Provides common functionality for vector encoding, index management,
    similarity search, and index persistence. Subclasses implement
    ``_build_index()`` to choose the concrete FAISS index type.
    """

    def __init__(self,
                 model_name: str = 'all-MiniLM-L6-v2',
                 dimension: Optional[int] = None,
                 normalize_vectors: bool = True,
                 device: str = 'cpu'):
        """
        Initialize the FAISS retriever.

        Args:
            model_name: Name of the sentence transformer model
            dimension: Vector dimension (auto-detected if None)
            normalize_vectors: Whether to normalize vectors for cosine similarity
            device: Device to run the model on ('cpu' or 'cuda')
        """
        self.model_name = model_name
        self.normalize_vectors = normalize_vectors
        self.device = device

        # Load the sentence-transformer encoder, with local-cache / mirror
        # support and a backup-model fallback on failure.
        self.encoder = self._load_encoder(model_name, device)

        # Auto-detect the embedding dimension if not provided, by encoding
        # a throwaway sample and reading the output width.
        if dimension is None:
            test_embedding = self.encoder.encode(["test"])
            self.dimension = test_embedding.shape[1]
            logger.info(f"Auto-detected embedding dimension: {self.dimension}")
        else:
            self.dimension = dimension

        # Candidate storage; populated by load_candidates().
        self.questions: List[str] = []
        self.scores: List[float] = []
        self.vectors: Optional[np.ndarray] = None
        self.index = None

    def _load_encoder(self, model_name: str, device: str) -> SentenceTransformer:
        """
        Load the requested model, falling back to a backup model on failure.

        Args:
            model_name: Primary model to load
            device: Device to run the model on

        Returns:
            A ready-to-use SentenceTransformer instance.

        Raises:
            Exception: Whatever the backup-model load raises if it also fails
                (the primary model's failure is caught and logged).
        """
        try:
            encoder = self._load_cached_model(model_name, device, announce=True)
            logger.info(f"✅ 成功加载模型: {model_name}")
            return encoder
        except Exception as e:
            logger.warning(f"模型加载失败: {e}")
            logger.info("尝试使用备用模型...")
            # NOTE(review): the backup model name is identical to the default
            # primary model, so this only helps when a *different* primary
            # was requested — confirm whether a smaller model was intended.
            backup_model = 'all-MiniLM-L6-v2'
            encoder = self._load_cached_model(backup_model, device, announce=False)
            logger.info(f"使用备用模型: {backup_model}")
            return encoder

    def _load_cached_model(self, model_name: str, device: str,
                           announce: bool) -> SentenceTransformer:
        """
        Load a model from the local cache if present, else download and cache it.

        Args:
            model_name: Model to load
            device: Device to run the model on
            announce: Whether to emit progress log lines

        Returns:
            A SentenceTransformer loaded from cache or freshly downloaded.
        """
        # '/' in hub model names is flattened so the cache path is a single dir.
        cached_model_path = os.path.join(model_cache_dir, model_name.replace('/', '_'))
        if os.path.exists(cached_model_path):
            if announce:
                logger.info(f"🔄 从本地缓存加载模型: {model_name}")
            return SentenceTransformer(cached_model_path, device=device)
        if announce:
            logger.info(f"📥 首次下载模型: {model_name}，将缓存到 {model_cache_dir}")
            logger.info(f"🚀 使用HuggingFace国内镜像: {HF_MIRROR_ENDPOINT}")
        encoder = SentenceTransformer(model_name, cache_folder=model_cache_dir, device=device)
        # Persist a copy so subsequent constructions skip the download path.
        encoder.save(cached_model_path)
        return encoder

    def encode_texts(self, texts: List[str]) -> np.ndarray:
        """
        Encode texts into float32 vectors using the sentence transformer.

        Args:
            texts: List of texts to encode

        Returns:
            Numpy float32 array of shape (len(texts), dimension); rows are
            L2-normalized when ``normalize_vectors`` is True.
        """
        # Guard the empty case: a 1-D empty encode result would break the
        # axis=1 normalization below, and FAISS requires a 2-D input anyway.
        if not texts:
            return np.zeros((0, self.dimension), dtype=np.float32)

        logger.info(f"Encoding {len(texts)} texts...")
        vectors = self.encoder.encode(texts, convert_to_numpy=True)

        if self.normalize_vectors:
            # Normalize so inner-product search equals cosine similarity;
            # the epsilon avoids division by zero for all-zero vectors.
            norms = np.linalg.norm(vectors, axis=1, keepdims=True)
            vectors = vectors / (norms + 1e-8)

        return vectors.astype(np.float32)

    def load_candidates(self, questions: List[str],
                        scores: Optional[List[float]] = None):
        """
        Load candidate questions and build the FAISS index.

        Args:
            questions: List of candidate questions
            scores: Optional scores for each question (defaults to 1.0 each)

        Raises:
            ValueError: If ``scores`` is given but its length differs from
                ``questions``.
        """
        if scores is not None and len(scores) != len(questions):
            raise ValueError(
                f"scores length ({len(scores)}) must match "
                f"questions length ({len(questions)})"
            )

        logger.info(f"Loading {len(questions)} candidate questions...")

        self.questions = questions
        self.scores = scores if scores is not None else [1.0] * len(questions)

        # Encode questions to vectors
        self.vectors = self.encode_texts(questions)

        # Build FAISS index
        self._build_index()

        logger.info(f"Successfully built FAISS index with {len(questions)} vectors")

    @abstractmethod
    def _build_index(self):
        """
        Build the specific FAISS index.

        This method should be implemented by subclasses to create
        the appropriate FAISS index type.
        """
        pass

    def recommend(self, query_question: str, top_k: int = 5, **kwargs) -> List[Tuple]:
        """
        Recommend similar questions using FAISS search.

        Args:
            query_question: The input question to find recommendations for
            top_k: Number of recommendations to return
            **kwargs: Additional search parameters (unused here; kept for
                subclass extensions)

        Returns:
            List of tuples containing (question, similarity_score, original_score)

        Raises:
            ValueError: If the index has not been built yet.
        """
        if self.index is None:
            raise ValueError("Index not built. Call load_candidates() first.")
        if top_k <= 0:
            return []

        # Encode query
        query_vector = self.encode_texts([query_question])

        # Search in FAISS index
        similarities, indices = self.index.search(query_vector, top_k)

        # FAISS pads missing results with index -1; skip those slots.
        results = []
        for similarity, idx in zip(similarities[0], indices[0]):
            if idx != -1:
                results.append(
                    (self.questions[idx], float(similarity), self.scores[idx])
                )

        logger.info(f"Found {len(results)} recommendations for query")
        return results

    def get_index_info(self) -> dict:
        """
        Get information about the current FAISS index.

        Returns:
            Dictionary containing index information; ``{"status": "not_built"}``
            when no index exists yet.
        """
        if self.index is None:
            return {"status": "not_built"}

        return {
            "status": "built",
            "total_vectors": self.index.ntotal,
            "dimension": self.dimension,
            "index_type": type(self.index).__name__,
            # Flat indexes have no is_trained attribute; treat them as trained.
            "is_trained": getattr(self.index, 'is_trained', True),
            "normalize_vectors": self.normalize_vectors
        }

    def save_index(self, filepath: str):
        """
        Save the FAISS index to disk.

        Note: only the FAISS index is persisted — the candidate questions
        and scores are NOT saved and must be reloaded separately.

        Args:
            filepath: Path to save the index

        Raises:
            ValueError: If no index has been built.
        """
        if self.index is None:
            raise ValueError("No index to save. Build index first.")

        faiss.write_index(self.index, filepath)
        logger.info(f"Index saved to {filepath}")

    def load_index(self, filepath: str):
        """
        Load a FAISS index from disk.

        The index alone cannot map search results back to question text;
        ``questions``/``scores`` must still be populated (via
        load_candidates()) before recommend() will work.

        Args:
            filepath: Path to the saved index
        """
        self.index = faiss.read_index(filepath)
        if not self.questions:
            # Without candidate metadata, recommend() would raise IndexError.
            logger.warning(
                "Index loaded but no candidate questions are set; "
                "call load_candidates() before recommend()."
            )
        logger.info(f"Index loaded from {filepath}")