"""
Demo Search: FAISS向量检索
使用FAISS进行高效的向量相似度搜索
"""

import numpy as np
try:
    import faiss
except ImportError:
    print("请先安装faiss: pip install faiss-cpu")
    faiss = None
from demo_01_raw import RawVectorizer
from demo_02_stats import StatisticalVectorizer
import time


class FAISSSearchEngine:
    """FAISS-based vector similarity search engine.

    Wraps a FAISS index plus an optional per-vector metadata list.
    Supports cosine similarity (L2-normalized inner product), raw L2
    distance, and IVF/HNSW index types.
    """

    def __init__(self, dimension, index_type='FlatIP', normalize=True):
        """
        Initialize the search engine.

        Args:
            dimension: Vector dimensionality.
            index_type: Index type ('FlatIP', 'FlatL2', 'IVFFlat', 'HNSW').
            normalize: Whether to L2-normalize vectors (turns inner
                product into cosine similarity).
        """
        self.dimension = dimension
        self.index_type = index_type
        self.normalize = normalize
        self.index = None      # created lazily on first add_vectors()
        self.vectors = None    # most recently added batch (float32)
        self.metadata = []     # one entry per indexed vector, in row order

    def create_index(self):
        """Create the FAISS index according to ``self.index_type``.

        Raises:
            ImportError: If FAISS is not installed.
            ValueError: For an unknown index type.
        """
        if faiss is None:
            raise ImportError("FAISS未安装")

        if self.index_type == 'FlatIP':
            # Inner-product index (equals cosine similarity after normalization)
            self.index = faiss.IndexFlatIP(self.dimension)
        elif self.index_type == 'FlatL2':
            # Exact L2-distance index
            self.index = faiss.IndexFlatL2(self.dimension)
        elif self.index_type == 'IVFFlat':
            # Inverted-file index (suited to large-scale data); needs training
            quantizer = faiss.IndexFlatIP(self.dimension)
            self.index = faiss.IndexIVFFlat(quantizer, self.dimension,
                                            min(100, self.dimension))
        elif self.index_type == 'HNSW':
            # Hierarchical Navigable Small World graph, 32 links per node
            self.index = faiss.IndexHNSWFlat(self.dimension, 32)
        else:
            raise ValueError(f"不支持的索引类型: {self.index_type}")

    def add_vectors(self, vectors, metadata=None):
        """
        Add vectors to the index.

        Args:
            vectors: Vector matrix of shape (n_vectors, dimension).
            metadata: Optional metadata list, one entry per vector.
        """
        if self.index is None:
            self.create_index()

        # FAISS requires float32.  astype() also copies, so the in-place
        # normalization below never mutates the caller's array.
        vectors = vectors.astype('float32')

        if self.normalize:
            faiss.normalize_L2(vectors)

        # Train only when the index actually needs it (e.g. IVFFlat).
        # Every FAISS index exposes .train(), so a hasattr() check would
        # always pass and retrain an already-trained index on each call.
        if not self.index.is_trained:
            self.index.train(vectors)

        # Record where this batch starts so default metadata stays aligned
        # with FAISS row ids across multiple add_vectors() calls.
        offset = self.index.ntotal
        self.index.add(vectors)
        self.vectors = vectors  # NOTE: only the most recent batch is kept

        if metadata is not None:
            self.metadata.extend(metadata)
        else:
            self.metadata.extend(range(offset, offset + len(vectors)))

    def search(self, query_vectors, k=5):
        """
        Search for the most similar vectors.

        Args:
            query_vectors: Query matrix of shape (n_queries, dimension).
            k: Number of nearest neighbors to return per query.

        Returns:
            tuple: (distances, indices), each of shape (n_queries, k).

        Raises:
            ValueError: If the index is empty.
        """
        if self.index is None or self.index.ntotal == 0:
            raise ValueError("索引为空，请先添加向量")

        # Copy + convert so normalization never touches the caller's data.
        query_vectors = query_vectors.astype('float32')

        if self.normalize:
            faiss.normalize_L2(query_vectors)

        distances, indices = self.index.search(query_vectors, k)

        return distances, indices

    def search_with_metadata(self, query_vectors, k=5):
        """
        Search and attach metadata to each hit.

        Args:
            query_vectors: Query matrix of shape (n_queries, dimension).
            k: Number of nearest neighbors to return per query.

        Returns:
            list: One list of result dicts per query, each dict carrying
            'index', 'distance', and 'metadata' keys.
        """
        distances, indices = self.search(query_vectors, k)

        results = []
        for i in range(len(query_vectors)):
            query_results = []
            for j in range(k):
                idx = indices[i, j]
                # FAISS pads with -1 when fewer than k hits exist.
                if idx >= 0:
                    query_results.append({
                        'index': idx,
                        'distance': distances[i, j],
                        'metadata': self.metadata[idx] if self.metadata else idx
                    })
            results.append(query_results)

        return results

    def remove_vectors(self, indices):
        """
        Remove vectors from the index (only some index types support this).

        Args:
            indices: Iterable of FAISS row ids to remove.
        """
        try:
            # FAISS expects 64-bit integer ids.
            self.index.remove_ids(np.asarray(indices, dtype='int64'))
        except RuntimeError:
            # All index classes expose remove_ids(); unsupported types
            # (e.g. HNSW) raise at call time, so hasattr() cannot detect it.
            print("当前索引类型不支持移除操作")

    def save_index(self, filepath):
        """
        Persist the index to a file.

        Args:
            filepath: Destination file path.
        """
        if self.index is not None:
            faiss.write_index(self.index, filepath)

    def load_index(self, filepath):
        """
        Load an index from a file.

        Args:
            filepath: Source file path.
        """
        self.index = faiss.read_index(filepath)

    def get_statistics(self):
        """Return index statistics as a dict (empty dict if no index)."""
        if self.index is None:
            return {}

        stats = {
            'total_vectors': self.index.ntotal,
            'dimension': self.dimension,
            'index_type': self.index_type,
            'is_trained': self.index.is_trained if hasattr(self.index, 'is_trained') else True
        }

        return stats


def benchmark_search_performance(search_engine, query_vectors, k_values=(1, 5, 10, 50)):
    """
    Benchmark search performance across several values of k.

    Args:
        search_engine: Object exposing ``search(query_vectors, k)``.
        query_vectors: Query matrix of shape (n_queries, dimension).
        k_values: Iterable of k values to benchmark (immutable default —
            a mutable list default is a Python anti-pattern).

    Returns:
        dict: Per-k stats keyed 'k=<k>': elapsed time (s), queries per
        second, and mean returned distance.
    """
    results = {}

    for k in k_values:
        # perf_counter() is monotonic and high-resolution; time.time()
        # can jump and has coarse resolution on some platforms.
        start_time = time.perf_counter()
        distances, _indices = search_engine.search(query_vectors, k)
        elapsed_time = time.perf_counter() - start_time

        results[f'k={k}'] = {
            'time': elapsed_time,
            # Guard against a zero reading from extremely fast searches.
            'queries_per_second': (len(query_vectors) / elapsed_time
                                   if elapsed_time > 0 else float('inf')),
            'avg_distance': np.mean(distances)
        }

    return results


def demo_similarity_search():
    """Walk through FAISS similarity search end to end.

    Builds statistical window vectors for AAPL, indexes them with an
    inner-product (cosine) index, runs single and batch queries, and
    benchmarks search latency.

    Returns:
        tuple: (search engine, statistical feature matrix).
    """
    banner = "=" * 60
    print(banner)
    print("Demo: FAISS向量相似度搜索")
    print(banner)

    # Build windowed statistical feature vectors from price data.
    print("\n1. 准备数据...")
    stats_vectorizer = StatisticalVectorizer(window_size=60, step_size=5)
    X_raw, X_stats = stats_vectorizer.fit_transform(
        ticker="AAPL",
        start="2020-01-01",
        end="2024-12-31",
    )

    print(f"   数据shape: {X_stats.shape}")

    # Index the vectors with cosine similarity (normalized inner product).
    print("\n2. 创建FAISS索引...")
    engine = FAISSSearchEngine(
        dimension=X_stats.shape[1],
        index_type='FlatIP',
        normalize=True,
    )

    window_labels = [f"Window_{i}" for i in range(len(X_stats))]
    engine.add_vectors(X_stats, window_labels)

    stats = engine.get_statistics()
    print(f"   索引统计: {stats}")

    # Single-query search: use one existing window as the probe.
    print("\n3. 执行相似度搜索...")
    query_idx = 10
    probe = X_stats[query_idx:query_idx + 1]

    hits = engine.search_with_metadata(probe, k=5)

    print(f"\n查询窗口: {window_labels[query_idx]}")
    print("最相似的5个窗口:")
    for hit in hits[0]:
        print(f"  {hit['metadata']}: 相似度={hit['distance']:.4f}")

    # Batch search: first five windows as simultaneous probes.
    print("\n4. 批量搜索...")
    batch_hits = engine.search_with_metadata(X_stats[:5], k=3)

    for query_no, per_query in enumerate(batch_hits):
        print(f"\n查询 {query_no}:")
        for hit in per_query:
            print(f"  {hit['metadata']}: 相似度={hit['distance']:.4f}")

    # Latency benchmark over 20 queries at increasing k.
    print("\n5. 性能基准测试...")
    perf = benchmark_search_performance(
        engine,
        X_stats[:20],
        k_values=[1, 5, 10],
    )

    for label, metrics in perf.items():
        print(f"\n{label}:")
        print(f"  时间: {metrics['time']:.4f}秒")
        print(f"  QPS: {metrics['queries_per_second']:.1f}")
        print(f"  平均距离: {metrics['avg_distance']:.4f}")

    return engine, X_stats


def demo_anomaly_detection():
    """Flag anomalous windows via k-nearest-neighbor distances.

    A window whose mean distance to its nearest neighbors sits above the
    95th percentile is reported as an anomaly.

    Returns:
        tuple: (indices of anomalous windows, per-window anomaly scores).
    """
    banner = "=" * 60
    print("\n" + banner)
    print("应用示例: 基于相似度的异常检测")
    print(banner)

    # Build the statistical window vectors.
    stats_vectorizer = StatisticalVectorizer(window_size=60, step_size=5)
    X_raw, X_stats = stats_vectorizer.fit_transform()

    # Raw L2 distance (no normalization) so magnitude differences count.
    engine = FAISSSearchEngine(
        dimension=X_stats.shape[1],
        index_type='FlatL2',
        normalize=False,
    )
    engine.add_vectors(X_stats)

    # For every window, query k+1 neighbors (the closest hit is itself).
    k_neighbors = 5
    distances, indices = engine.search(X_stats, k=k_neighbors + 1)

    # Drop column 0 (self-match) and average the remaining distances.
    scores = distances[:, 1:].mean(axis=1)

    # Larger mean distance = more isolated = more anomalous.
    threshold = np.percentile(scores, 95)
    flagged = np.where(scores > threshold)[0]

    print(f"\n异常检测结果:")
    print(f"  阈值: {threshold:.4f}")
    print(f"  异常数量: {len(flagged)}")
    print(f"  异常比例: {len(flagged)/len(X_stats):.2%}")

    if len(flagged) > 0:
        print(f"\n前5个异常窗口:")
        for window_idx in flagged[:5]:
            print(f"  窗口{window_idx}: 异常分数={scores[window_idx]:.4f}")

    return flagged, scores


def main():
    """Run the similarity-search demo, then the anomaly-detection demo.

    Returns:
        tuple: (search_engine, vectors) on success, or (None, None) when
        FAISS is not installed.
    """
    # Bail out early when FAISS is missing: demo_similarity_search() would
    # raise ImportError from create_index() before the original per-demo
    # guard could ever take effect, making that guard dead code.
    if faiss is None:
        print("请先安装faiss: pip install faiss-cpu")
        return None, None

    # Basic similarity search demo
    search_engine, vectors = demo_similarity_search()

    # Anomaly-detection application (faiss known to be available here)
    anomaly_indices, anomaly_scores = demo_anomaly_detection()

    print("\n" + "=" * 60)
    print("演示完成！")
    print("=" * 60)

    return search_engine, vectors


# Run the full demo when executed as a script; keep the results around
# for interactive inspection (e.g. `python -i`).
if __name__ == "__main__":
    search_engine, vectors = main()