#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
文档质量优化数据集向量库检索器

提供多种方式读取和使用构建好的embedding向量库：
1. 相似性检索 - 根据查询文本找到最相似的样本
2. 标签内检索 - 在特定标签内进行相似性搜索
3. 数据浏览 - 浏览和统计向量库内容
4. 批量检索 - 批量处理多个查询
"""

import json
import os
from pathlib import Path
from typing import Dict, List, Tuple, Any, Optional

import faiss
import numpy as np
from sentence_transformers import SentenceTransformer
from config import Config


class EmbeddingRetriever:
    """Retriever over a prebuilt, per-label embedding vector store.

    Expected store layout under ``embedding_dir`` (produced by the build step):

        global_metadata.json            # corpus-wide stats (optional)
        label_<id>/metadata.json        # per-label stats
        label_<id>/data.jsonl           # raw samples, one JSON object per line
        label_<id>/embeddings.npy       # dense vectors for the samples
        label_<id>/faiss_index.bin      # serialized FAISS index over the vectors

    Supports similarity search within one label, search across labels,
    sample browsing, and statistics lookup.
    """

    def __init__(self,
                 embedding_dir: str = None,
                 model_name: str = None):
        """Initialize the retriever.

        Args:
            embedding_dir: Vector store directory; falls back to
                ``Config.get_embedding_dir()`` when None.
            model_name: Name of the model used to encode queries (must match
                the model used when the store was built); falls back to
                ``Config.get_embedding_model()`` when None.

        Raises:
            FileNotFoundError: If the resolved embedding directory is missing.
        """
        self.embedding_dir = Path(embedding_dir or Config.get_embedding_dir())
        self.model_name = model_name or Config.get_embedding_model()

        if not self.embedding_dir.exists():
            # Bug fix: report the *resolved* directory. The original used the
            # raw argument, which prints "None" when the Config default is used.
            raise FileNotFoundError(f"向量库目录不存在: {self.embedding_dir}")

        # Global metadata is optional; default to an empty dict when absent.
        global_metadata_file = self.embedding_dir / "global_metadata.json"
        if global_metadata_file.exists():
            with open(global_metadata_file, 'r', encoding='utf-8') as f:
                self.global_metadata = json.load(f)
        else:
            self.global_metadata = {}

        # The encoder is loaded lazily (only when a query must be encoded).
        self._model = None
        # Cache of per-label data already loaded from disk, keyed by label id.
        self._label_caches = {}

        print(f"向量库检索器初始化完成")
        # Bug fix: print the resolved path, not the raw (possibly None) argument.
        print(f"向量库目录: {self.embedding_dir}")
        print(f"总样本数: {self.global_metadata.get('total_samples', 'unknown')}")
        print(f"标签类别数: {self.global_metadata.get('total_labels', 'unknown')}")

    @property
    def model(self):
        """The query encoder, instantiated on first access (lazy load)."""
        if self._model is not None:
            return self._model
        print(f"正在加载编码模型: {self.model_name}")
        self._model = SentenceTransformer(self.model_name)
        return self._model

    def get_available_labels(self) -> List[str]:
        """Return all label IDs found in the store, sorted numerically.

        Scans for subdirectories named ``label_<id>``. Numeric IDs sort by
        value; non-numeric IDs sort last (key maps them to +inf).
        """
        labels = [
            entry.name.replace('label_', '')
            for entry in self.embedding_dir.iterdir()
            if entry.is_dir() and entry.name.startswith('label_')
        ]
        return sorted(labels, key=lambda s: int(s) if s.isdigit() else float('inf'))

    def load_label_data(self, label_id: str) -> Dict[str, Any]:
        """Load (and cache) all on-disk artifacts for one label.

        Args:
            label_id: Label ID.

        Returns:
            Dict with keys ``metadata``, ``data_items``, ``embeddings``,
            ``faiss_index``.

        Raises:
            FileNotFoundError: If the label's data directory does not exist.
        """
        cached = self._label_caches.get(label_id)
        if cached is not None:
            return cached

        label_dir = self.embedding_dir / f"label_{label_id}"
        if not label_dir.exists():
            raise FileNotFoundError(f"标签{label_id}的数据目录不存在: {label_dir}")

        # Per-label metadata.
        with open(label_dir / "metadata.json", 'r', encoding='utf-8') as f:
            metadata = json.load(f)

        # Raw samples: one JSON object per line.
        with open(label_dir / "data.jsonl", 'r', encoding='utf-8') as f:
            data_items = [json.loads(line.strip()) for line in f]

        label_data = {
            'metadata': metadata,
            'data_items': data_items,
            # Dense vectors and the prebuilt FAISS index for this label.
            'embeddings': np.load(label_dir / "embeddings.npy"),
            'faiss_index': faiss.read_index(str(label_dir / "faiss_index.bin")),
        }
        self._label_caches[label_id] = label_data

        print(f"标签{label_id} ({metadata.get('label_name', '')}) 数据加载完成，样本数: {len(data_items)}")
        return label_data

    def search_in_label(self,
                       query_text: str,
                       label_id: str,
                       top_k: int = 5) -> List[Tuple[float, Dict[str, Any]]]:
        """Search the most similar samples within one label.

        Args:
            query_text: Query text.
            label_id: Label ID to search in.
            top_k: Maximum number of results.

        Returns:
            List of (similarity score, sample) tuples, best match first.
        """
        label_data = self.load_label_data(label_id)

        # Encode and L2-normalize the query so the index's inner-product
        # search behaves like cosine similarity (matches build-time setup).
        query_embedding = self.model.encode([query_text], convert_to_numpy=True)
        faiss.normalize_L2(query_embedding)

        scores, indices = label_data['faiss_index'].search(query_embedding, top_k)

        items = label_data['data_items']
        # FAISS pads missing neighbours with index -1; skip those slots.
        return [
            (float(score), items[idx])
            for score, idx in zip(scores[0], indices[0])
            if idx != -1
        ]

    def search_across_all_labels(self,
                                query_text: str,
                                top_k_per_label: int = 3,
                                min_labels: int = 5) -> Dict[str, List[Tuple[float, Dict[str, Any]]]]:
        """Search several labels for samples similar to the query.

        NOTE(review): despite its name, ``min_labels`` acts as an upper
        bound — only the first ``min_labels`` labels (in sorted ID order)
        are searched. Kept as-is for backward compatibility; confirm the
        intended semantics with the original author.

        Args:
            query_text: Query text.
            top_k_per_label: Maximum results returned per label.
            min_labels: Number of labels (from the front of the sorted list)
                to search.

        Returns:
            Dict mapping label ID -> [(similarity score, sample), ...];
            labels with no results (or load errors) are omitted.
        """
        available_labels = self.get_available_labels()

        # Encode once, reuse the normalized vector for every label's index.
        query_embedding = self.model.encode([query_text], convert_to_numpy=True)
        faiss.normalize_L2(query_embedding)

        all_results = {}
        # Equivalent to the original conditional slice: at most min_labels labels.
        for label_id in available_labels[:min_labels]:
            try:
                label_data = self.load_label_data(label_id)
                scores, indices = label_data['faiss_index'].search(query_embedding, top_k_per_label)

                items = label_data['data_items']
                label_results = [
                    (float(score), items[idx])
                    for score, idx in zip(scores[0], indices[0])
                    if idx != -1
                ]

                # Only keep labels that produced at least one hit.
                if label_results:
                    all_results[label_id] = label_results

            except Exception as e:
                # Best-effort across labels: a broken label must not abort the rest.
                print(f"搜索标签{label_id}时出错: {e}")
                continue

        return all_results

    def get_label_statistics(self, label_id: Optional[str] = None) -> Dict[str, Any]:
        """Return statistics for one label, or global statistics.

        Args:
            label_id: Label ID; when None, global statistics are returned.

        Returns:
            The label's metadata dict, or a dict with ``global_metadata``,
            ``available_labels`` and ``label_distribution``.
        """
        if label_id is not None:
            return self.load_label_data(label_id)['metadata']
        return {
            'global_metadata': self.global_metadata,
            'available_labels': self.get_available_labels(),
            'label_distribution': self.global_metadata.get('label_distribution', {}),
        }

    def browse_samples(self,
                      label_id: str,
                      start_idx: int = 0,
                      count: int = 10) -> List[Dict[str, Any]]:
        """Return a slice of the samples stored for one label.

        Args:
            label_id: Label ID.
            start_idx: Start index of the slice.
            count: Number of samples to return.

        Returns:
            Up to ``count`` samples; slicing clamps out-of-range bounds.
        """
        data_items = self.load_label_data(label_id)['data_items']
        return data_items[start_idx:start_idx + count]

    def find_similar_samples(self,
                            reference_item: Dict[str, Any],
                            label_id: Optional[str] = None,
                            top_k: int = 5) -> List[Tuple[float, Dict[str, Any]]]:
        """Find samples similar to a given reference sample.

        Args:
            reference_item: Reference sample dict.
            label_id: Search within this label only; search across labels
                when None.
            top_k: Number of similar samples to return.

        Returns:
            List of (similarity score, sample) tuples, best match first.
        """
        # Reuse the same text construction as at build time.
        reference_text = self._extract_text_for_embedding(reference_item)

        if label_id:
            return self.search_in_label(reference_text, label_id, top_k)

        # Cross-label search: merge per-label hits, then keep the global top-k.
        all_results = self.search_across_all_labels(reference_text, top_k_per_label=top_k)
        combined_results = []
        for label_results in all_results.values():
            combined_results.extend(label_results)
        combined_results.sort(key=lambda x: x[0], reverse=True)
        return combined_results[:top_k]

    def _extract_text_for_embedding(self, data_item: Dict[str, Any]) -> str:
        """Build the text used for embedding (must match build-time logic).

        Concatenates the context/removed/added fields with their prefixes,
        falling back to the file path when all fields are empty.

        Args:
            data_item: Sample dict; relevant keys may be absent or None.

        Returns:
            Non-empty text suitable for encoding.
        """
        def _clean(key: str) -> str:
            # Bug fix: tolerate explicit None values — the original called
            # .strip() on dict.get(key, '') and crashed when the key existed
            # with a None value.
            return (data_item.get(key) or '').strip()

        add_content = _clean('add_content')
        remove_content = _clean('remove_content')
        context_before = _clean('context_before')
        context_after = _clean('context_after')

        text_parts = []

        if context_before:
            text_parts.append(f"修改前上下文: {context_before}")

        if remove_content:
            text_parts.append(f"修改前内容: {remove_content}")

        if add_content:
            text_parts.append(f"修改后内容: {add_content}")

        if context_after:
            text_parts.append(f"修改后上下文: {context_after}")

        combined_text = " ".join(text_parts)

        if not combined_text.strip():
            # Bug fix: also guard against file_path being present but None,
            # so the method always returns a str.
            combined_text = data_item.get('file_path') or 'unknown_file'

        return combined_text


def demo_usage():
    """Walk through the retriever's main features with console output."""
    banner = "=" * 60
    print(banner)
    print("向量库检索器使用演示")
    print(banner)

    retriever = EmbeddingRetriever()

    # 1. List the available labels (first 10 only).
    print("\n1. 可用标签列表:")
    for label_id in retriever.get_available_labels()[:10]:
        stats = retriever.get_label_statistics(label_id)
        print(f"  标签{label_id}: {stats.get('label_name', '')} ({stats.get('sample_count', 0)}条)")

    # 2. Similarity search restricted to a single label.
    print("\n2. 在标签19（术语与技术准确性）内搜索相似样本:")
    hits = retriever.search_in_label("修改配置文件路径错误", "19", top_k=3)
    for rank, (score, item) in enumerate(hits, 1):
        print(f"\n  结果{rank} (相似度: {score:.4f}):")
        print(f"    文件: {item.get('file_path', '')}")
        print(f"    修改前: {item.get('remove_content', '')[:100]}...")
        print(f"    修改后: {item.get('add_content', '')[:100]}...")

    # 3. Search across labels; show the first three labels with hits.
    print("\n3. 跨所有标签搜索:")
    per_label = retriever.search_across_all_labels("空格使用不规范", top_k_per_label=2, min_labels=5)
    for label_id, label_hits in list(per_label.items())[:3]:
        stats = retriever.get_label_statistics(label_id)
        print(f"\n  标签{label_id} ({stats.get('label_name', '')}):")
        for rank, (score, item) in enumerate(label_hits, 1):
            print(f"    结果{rank} (相似度: {score:.4f}): {item.get('file_path', '')}")

    # 4. Browse a few raw samples of one label.
    print("\n4. 浏览标签5（空格错误）的样本:")
    for rank, sample in enumerate(retriever.browse_samples("5", start_idx=0, count=3), 1):
        print(f"\n  样本{rank}:")
        print(f"    文件: {sample.get('file_path', '')}")
        print(f"    标签原因: {sample.get('label_reason', '')}")

    print("\n" + "=" * 60)
    print("演示完成")

# Script entry point: run the end-to-end demonstration when executed directly.
if __name__ == "__main__":
    demo_usage()
