# -*- coding: utf-8 -*-
"""
向量检索器
基于语义向量的检索方法
"""

import os
import pickle
from typing import Any, Dict, List, Optional, Tuple

import numpy as np
from loguru import logger
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

from .base_retriever import BaseRetriever, RetrievalResult
from ..ollama_client import ollama_client


class VectorRetriever(BaseRetriever):
    """Vector-based retriever.

    Answers a query by finding the most similar documents under cosine
    similarity, using either semantic embeddings from Ollama or a TF-IDF
    bag-of-words model.  Low-similarity matches are refined by asking the
    LLM with the retrieved documents as context.
    """

    # Dimension of the character-frequency fallback vector.
    _SIMPLE_DIM = 50

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """
        Initialize the vector retriever.

        Args:
            config: Configuration mapping; recognized keys (with defaults)
                are ``embedding_method`` ("ollama" or "tfidf", default
                "ollama"), ``similarity_threshold`` (0.1), ``max_features``
                (5000) and ``cache_path`` ("data/vector_cache.pkl").
        """
        super().__init__("VectorRetriever", config)

        # Configuration parameters
        self.embedding_method = self.config.get("embedding_method", "ollama")  # "ollama" or "tfidf"
        self.similarity_threshold = self.config.get("similarity_threshold", 0.1)
        self.max_features = self.config.get("max_features", 5000)
        self.cache_path = self.config.get("cache_path", "data/vector_cache.pkl")

        # Vectorizer and corpus state
        self.vectorizer = None  # TfidfVectorizer, used in "tfidf" mode only
        self.document_vectors = None  # rectangular matrix, one row per document
        self.documents: List[str] = []
        self.document_metadata: List[Dict[str, Any]] = []
        # In-memory only: keys come from hash(), which is salted per
        # process, so this cache must never be persisted.
        self.embedding_cache: Dict[int, np.ndarray] = {}
        self.embedding_dim: Optional[int] = None  # fixed by the first Ollama embedding

        logger.info(f"🔍 向量检索器初始化完成，使用 {self.embedding_method} 方法")

    def initialize(self, data: List[Dict[str, Any]]) -> bool:
        """
        Build (or load from cache) the document vector index.

        Args:
            data: Training items; each needs a ``question`` and may carry
                ``answer``, ``id``, ``category``, ``keywords`` and
                ``difficulty``.

        Returns:
            bool: True when the index is ready for retrieval.
        """
        try:
            logger.info(f"📊 开始初始化向量检索器，数据量: {len(data)}")

            # Prepare document texts and per-document metadata.
            self.documents = []
            self.document_metadata = []

            for item in data:
                # Index the concatenation of question and answer.
                doc_text = f"{item['question']} {item.get('answer', '')}"
                self.documents.append(doc_text)
                self.document_metadata.append({
                    'id': item.get('id'),
                    'question': item['question'],
                    'answer': item.get('answer', ''),
                    'category': item.get('category'),
                    'keywords': item.get('keywords', []),
                    'difficulty': item.get('difficulty', 'medium')
                })

            # Reuse cached vectors when they match this corpus/method.
            if self._load_cache():
                logger.info("✅ 从缓存加载向量数据")
                self.is_initialized = True
                return True

            # Build vectors from scratch.
            if self.embedding_method == "ollama":
                success = self._build_ollama_vectors()
            elif self.embedding_method == "tfidf":
                success = self._build_tfidf_vectors()
            else:
                logger.error(f"❌ 不支持的向量化方法: {self.embedding_method}")
                return False

            if success:
                self._save_cache()
                self.is_initialized = True
                logger.info("✅ 向量检索器初始化成功")
                return True
            else:
                logger.error("❌ 向量检索器初始化失败")
                return False

        except Exception as e:
            logger.error(f"❌ 向量检索器初始化异常: {e}")
            return False

    def _build_tfidf_vectors(self) -> bool:
        """
        Build the document matrix with a TF-IDF vectorizer.

        Returns:
            bool: True when the matrix was built successfully.
        """
        try:
            logger.info("🔧 构建TF-IDF向量...")

            self.vectorizer = TfidfVectorizer(
                max_features=self.max_features,
                stop_words=None,  # no English stop words: the corpus is mainly Chinese
                ngram_range=(1, 2),  # unigrams and bigrams
                min_df=1,  # minimum document frequency
                max_df=0.95  # maximum document frequency
            )

            # Fit on the corpus and transform it in one pass.
            self.document_vectors = self.vectorizer.fit_transform(self.documents)

            logger.info(f"✅ TF-IDF向量构建完成，向量维度: {self.document_vectors.shape}")
            return True

        except Exception as e:
            logger.error(f"❌ 构建TF-IDF向量失败: {e}")
            return False

    def _build_ollama_vectors(self) -> bool:
        """
        Build the document matrix with Ollama embeddings.

        Documents for which Ollama fails fall back to a simple
        character-frequency vector; every row is then coerced to one common
        dimension.  (BUG FIX: the old code could mix 50-dim fallback
        vectors with full-size embeddings into a ragged array, and
        hard-coded a 1024-dim zero vector.)

        Returns:
            bool: True when the matrix was built successfully.
        """
        try:
            logger.info("🤖 使用Ollama构建语义向量...")

            raw_vectors: List[Optional[np.ndarray]] = []
            for i, doc in enumerate(self.documents):
                logger.info(f"📝 处理文档 {i+1}/{len(self.documents)}")

                # Per-process embedding cache keyed on hash(text).
                cache_key = hash(doc)
                vector = self.embedding_cache.get(cache_key)
                if vector is None:
                    vector = self._get_ollama_embedding(doc)
                    if vector is not None:
                        self.embedding_cache[cache_key] = vector
                if vector is not None and self.embedding_dim is None:
                    # First successful embedding fixes the target dimension.
                    self.embedding_dim = int(len(vector))
                raw_vectors.append(vector)

            if not raw_vectors:
                logger.error("❌ 没有成功生成任何向量")
                return False

            # Replace failures with fallback vectors and normalize widths
            # so np.array produces a rectangular matrix.
            target_dim = self.embedding_dim or self._SIMPLE_DIM
            rows = []
            for i, vector in enumerate(raw_vectors):
                if vector is None:
                    logger.warning(f"文档 {i} 的embedding为空")
                    vector = self._get_simple_vector(self.documents[i])
                rows.append(self._fit_dim(vector, target_dim))

            self.document_vectors = np.array(rows)
            logger.info(f"✅ Ollama向量构建完成，向量维度: {self.document_vectors.shape}")
            return True

        except Exception as e:
            logger.error(f"❌ 构建Ollama向量失败: {e}")
            return False

    @staticmethod
    def _fit_dim(vector: np.ndarray, dim: int) -> np.ndarray:
        """Pad with zeros or truncate *vector* to exactly *dim* entries."""
        vector = np.asarray(vector, dtype=float).ravel()
        if vector.shape[0] == dim:
            return vector
        if vector.shape[0] > dim:
            return vector[:dim]
        return np.pad(vector, (0, dim - vector.shape[0]))

    def _get_ollama_embedding(self, text: str) -> Optional[np.ndarray]:
        """
        Request an embedding for *text* from Ollama.

        Args:
            text: Input text.

        Returns:
            Optional[np.ndarray]: The embedding vector, or None when
                Ollama returns nothing or raises.
        """
        try:
            embedding = ollama_client.get_embedding(text)
            if embedding:
                return np.array(embedding)

            return None

        except Exception as e:
            logger.warning(f"⚠️ Ollama embedding生成失败: {e}")
            return None

    def _get_simple_vector(self, text: str) -> np.ndarray:
        """
        Build a crude character-frequency vector as a last resort.

        NOTE(review): only ASCII letters 'a'-'z' and digits contribute to
        the output, so purely non-ASCII (e.g. Chinese) text yields an
        all-zero vector — similarity against it is meaningless.

        Args:
            text: Input text.

        Returns:
            np.ndarray: Fixed-length (_SIMPLE_DIM) frequency vector.
        """
        # Count alphanumeric characters.
        char_counts: Dict[str, int] = {}
        for char in text:
            if char.isalnum():
                char_counts[char] = char_counts.get(char, 0) + 1

        # Positions 0-25 map to 'a'-'z'; the remaining slots cycle digits.
        vector = []
        for i in range(self._SIMPLE_DIM):
            char = chr(ord('a') + (i % 26)) if i < 26 else chr(ord('0') + (i % 10))
            vector.append(char_counts.get(char, 0) / len(text) if text else 0)

        return np.array(vector)

    def retrieve(self, question: str, top_k: int = 5) -> RetrievalResult:
        """
        Retrieve the best-matching documents and produce an answer.

        Args:
            question: Query text.
            top_k: Maximum number of documents to return.

        Returns:
            RetrievalResult: Answer, confidence and retrieved documents;
                an empty result when uninitialized or on failure.
        """
        if not self.is_initialized:
            logger.error("❌ 向量检索器未初始化")
            return RetrievalResult(question, "", 0.0, 0.0)

        try:
            # Wrapped so _measure_time can report the retrieval latency.
            def _retrieve_internal():
                # Vectorize the query with the same method as the corpus.
                if self.embedding_method == "ollama":
                    query_vector = self._get_ollama_embedding(question)
                    if query_vector is None:
                        query_vector = self._get_simple_vector(question)
                    # BUG FIX: coerce the query to the corpus dimension so
                    # a 50-dim fallback query can still be compared against
                    # full-size embeddings.
                    query_vector = self._fit_dim(query_vector, self.document_vectors.shape[1])
                    similarities = cosine_similarity([query_vector], self.document_vectors).flatten()
                elif self.embedding_method == "tfidf":
                    query_vector = self.vectorizer.transform([question])
                    similarities = cosine_similarity(query_vector, self.document_vectors).flatten()
                else:
                    raise ValueError(f"不支持的向量化方法: {self.embedding_method}")

                # Rank documents by similarity, best first.
                top_indices = np.argsort(similarities)[::-1][:top_k]
                top_similarities = similarities[top_indices]

                # Drop results below the similarity threshold ...
                filtered_results = [
                    (idx, sim)
                    for idx, sim in zip(top_indices, top_similarities)
                    if sim >= self.similarity_threshold
                ]
                # ... but always keep the single best match if everything
                # fell below the threshold.
                if not filtered_results and len(top_indices) > 0:
                    filtered_results = [(top_indices[0], top_similarities[0])]

                # Assemble the retrieved-document payload.
                retrieved_docs = []
                for idx, sim in filtered_results:
                    doc_meta = self.document_metadata[idx]
                    retrieved_docs.append({
                        'id': doc_meta['id'],
                        'question': doc_meta['question'],
                        'answer': doc_meta['answer'],
                        'similarity': float(sim),
                        'metadata': doc_meta
                    })

                # Answer from the best match; ask the LLM when confidence
                # is low (or when nothing was retrieved at all).
                if retrieved_docs:
                    best_doc = retrieved_docs[0]
                    answer = best_doc['answer']
                    confidence = best_doc['similarity']

                    if confidence < 0.5:
                        context = "\n".join([doc['answer'] for doc in retrieved_docs[:3]])
                        llm_answer = ollama_client.answer_question(question, context)
                        if llm_answer:
                            answer = llm_answer
                            confidence = min(confidence + 0.2, 1.0)  # small boost for LLM refinement
                else:
                    answer = ollama_client.answer_question(question)
                    confidence = 0.3  # low confidence: pure LLM answer
                    retrieved_docs = []

                return answer, confidence, retrieved_docs

            result_data, response_time = self._measure_time(_retrieve_internal)
            answer, confidence, retrieved_docs = result_data

            # Count non-empty answers as successes.
            if answer:
                self.success_count += 1

            return RetrievalResult(
                question=question,
                answer=answer,
                confidence=confidence,
                response_time=response_time,
                retrieved_docs=retrieved_docs,
                metadata={
                    'method': 'vector',
                    'embedding_method': self.embedding_method,
                    'num_retrieved': len(retrieved_docs)
                }
            )

        except Exception as e:
            logger.error(f"❌ 向量检索失败: {e}")
            return RetrievalResult(question, "", 0.0, 0.0)

    def _load_cache(self) -> bool:
        """
        Load cached vector data if it is valid for the current corpus.

        The cache is rejected when it was built with a different embedding
        method or for a different number of documents.  (BUG FIX: the old
        check unconditionally required a vectorizer, so caches built in
        "ollama" mode could never load, while stale caches from another
        corpus were accepted silently.)

        Returns:
            bool: True when cached vectors were loaded.
        """
        try:
            if not os.path.exists(self.cache_path):
                return False

            with open(self.cache_path, 'rb') as f:
                cache_data = pickle.load(f)

            # Validity metadata (absent in old cache files -> rebuild).
            if cache_data.get('embedding_method') != self.embedding_method:
                return False
            if cache_data.get('num_documents') != len(self.documents):
                return False

            vectorizer = cache_data.get('vectorizer')
            document_vectors = cache_data.get('document_vectors')

            if document_vectors is None:
                return False
            # A vectorizer is only required in tfidf mode.
            if self.embedding_method == "tfidf" and vectorizer is None:
                return False

            self.vectorizer = vectorizer
            self.document_vectors = document_vectors
            if self.embedding_method == "ollama":
                self.embedding_dim = int(self.document_vectors.shape[1])
            return True

        except Exception as e:
            logger.warning(f"⚠️ 加载向量缓存失败: {e}")
            return False

    def _save_cache(self) -> None:
        """Persist vector data plus the metadata used to validate it."""
        try:
            cache_dir = os.path.dirname(self.cache_path)
            if cache_dir:  # BUG FIX: makedirs("") raises for bare filenames
                os.makedirs(cache_dir, exist_ok=True)

            cache_data = {
                'vectorizer': self.vectorizer,
                'document_vectors': self.document_vectors,
                'embedding_method': self.embedding_method,
                'num_documents': len(self.documents)
            }

            with open(self.cache_path, 'wb') as f:
                pickle.dump(cache_data, f)

            logger.info(f"💾 向量缓存已保存到 {self.cache_path}")

        except Exception as e:
            logger.warning(f"⚠️ 保存向量缓存失败: {e}")


if __name__ == "__main__":
    # Manual smoke test: build a tiny index and run one query.
    demo_config = {
        "embedding_method": "ollama",
        "similarity_threshold": 0.1,
        "max_features": 1000,
    }

    demo_retriever = VectorRetriever(demo_config)

    # Two sample Q&A items to index.
    sample_items = [
        {
            "id": 1,
            "question": "什么是机器学习？",
            "answer": "机器学习是人工智能的一个分支，通过算法让计算机从数据中学习。",
            "category": "AI",
        },
        {
            "id": 2,
            "question": "深度学习的原理是什么？",
            "answer": "深度学习使用多层神经网络来模拟人脑的学习过程。",
            "category": "AI",
        },
    ]

    # Only query if the index came up successfully.
    if demo_retriever.initialize(sample_items):
        demo_result = demo_retriever.retrieve("机器学习是什么？")
        print(f"检索结果: {demo_result.to_dict()}")

        demo_stats = demo_retriever.get_performance_stats()
        print(f"性能统计: {demo_stats}")