# Semantic retrieval module. So far only the parts used by test_system have been updated.

import json
import os
from typing import List, Dict
import faiss
import numpy as np
from sentence_transformers import SentenceTransformer

# Install dependencies: pip install faiss-cpu numpy sentence-transformers

class SemanticRetriever:
    def init(self, kb_path='rust_knowledge_base/rust_docs_sample.json'):
        self.kb_path = kb_path
        self.knowledge_base = self._load_knowledge_base()

        # 初始化嵌入模型和FAISS索引
        self.model = SentenceTransformer('D:/all-MiniLM-L6-v2')  # 暂存在这里
        self.index = None
        self._build_faiss_index()
    
    def _load_knowledge_base(self) -> List[Dict]:
        """加载知识库并转换为统一格式"""
        if os.path.exists(self.kb_path):
            with open(self.kb_path, 'r', encoding='utf-8') as f:
                raw_docs = json.load(f)
    
            # 转换格式以匹配代码期望的结构
            converted_docs = []
            for i, doc in enumerate(raw_docs):
                converted_doc = {
                    "id": f"k{i + 1:03d}",
                    "topic": doc.get("title", ""),  # 使用title作为topic
                    "content": doc.get("content", ""),
                    "tags": [],  # 您的JSON中没有tags，设为空列表
                    "category": "unknown",  # 默认分类
                    "code": doc.get("code", "")
                }
                converted_docs.append(converted_doc)
    
            return converted_docs
        else:
            # 默认知识库（保持原样）
            return [
                {
                    "id": "k001",
                    "topic": "生命周期",
                    "content": "Rust的生命周期用于确保引用始终有效。生命周期是引用保持有效的作用域。",
                    "tags": ["生命周期", "引用", "作用域"],
                    "category": "definition",
                    "code": "fn longest<'a>(x: &'a str, y: &'a str) -> &'a str {\n    if x.len() > y.len() { x } else { y }\n}"
                },
                {
                    "id": "k002",
                    "topic": "所有权",
                    "content": "Rust的所有权系统是内存安全的核心。每个值都有一个所有者，当所有者离开作用域时，值会被丢弃。",
                    "tags": ["所有权", "内存管理", "作用域"],
                    "category": "definition",
                    "code": "let s1 = String::from(\"hello\");\nlet s2 = s1; // s1的所有权移动到s2"
                },
                {
                    "id": "k003",
                    "topic": "可变变量",
                    "content": "使用mut关键字声明可变变量，允许修改变量的值。",
                    "tags": ["变量", "mut", "可变性"],
                    "category": "usage",
                    "code": "let mut x = 5;\nx = 10; // 可以修改"
                },
                {
                    "id": "k004",
                    "topic": "借用检查器错误",
                    "content": "借用检查器错误通常是由于违反了Rust的借用规则导致的。常见错误包括同时存在可变和不可变借用。",
                    "tags": ["借用检查器", "错误", "调试"],
                    "category": "error_debug",
                    "code": "let mut v = vec![1, 2, 3];\nlet first = &v[0];\nv.push(4); // 错误：同时存在可变和不可变借用"
                },
                {
                    "id": "k005",
                    "topic": "Rust vs C++",
                    "content": "Rust和C++都是系统编程语言，但Rust通过所有权系统提供内存安全，而C++需要手动管理内存。",
                    "tags": ["Rust", "C++", "比较"],
                    "category": "comparison",
                    "code": "// Rust: 自动内存管理\nlet s = String::from(\"hello\");\n// C++: 手动内存管理\n// std::string s = \"hello\";"
                }
            ]
    
    def _build_faiss_index(self):
        """构建FAISS向量索引"""
        if not self.knowledge_base:
            return
    
        # 优化文本组合策略
        texts = []
        for doc in self.knowledge_base:
            # 多种文本组合方式，增加信息密度
            combined_texts = [
                # 方式1：标题+内容
                f"{doc['topic']} {doc['content']}",
                # 方式2：标题重复强调 + 内容
                f"{doc['topic']} {doc['topic']} {doc['content']}",
                # 方式3：纯内容
                doc['content']
            ]
    
            # 选择最长的文本（通常包含最多信息）
            best_text = max(combined_texts, key=len)
            texts.append(best_text)
    
        # 生成嵌入向量
        embeddings = self.model.encode(texts, normalize_embeddings=True)
        print(f"生成嵌入向量类型: {type(embeddings)}, 形状: {embeddings.shape}")
    
        # 创建FAISS索引
        dimension = embeddings.shape[1]
        self.index = faiss.IndexFlatIP(dimension)  # 使用内积相似度
    
        # 添加向量到索引
        self.index.add(embeddings.astype(np.float32))
        print(f"FAISS索引构建完成，共 {len(self.knowledge_base)} 个文档")
    
    def _calculate_comprehensive_score(self, vector_score: float, doc: Dict, question: str,
                                       question_type: str) -> float:
        """计算综合评分"""
        # 将向量相似度分数归一化到[0,1]范围
        normalized_vector_score = max(0, min(1, vector_score))
    
        # 关键词匹配
        question_words = set(question.lower().split())
        content_words = set(doc['content'].lower().split())
        tag_words = set([tag.lower() for tag in doc.get('tags', [])])
    
        # 计算匹配度
        content_match = len(question_words & content_words) / len(question_words) if question_words else 0
        tag_match = len(question_words & tag_words) / len(question_words) if question_words else 0
    
        # 计算问题类型匹配
        type_match = 0.15 if question_type and doc.get('category') == question_type else 0
    
        # 综合评分 = 向量相似度(0.7) + 内容匹配(0.1) + 标签匹配(0.05) + 类型匹配(0.15)
        # 总加和不超过1.0
        score = (
                normalized_vector_score * 0.7 +  # 向量相似度占比最大
                content_match * 0.1 +  # 内容匹配
                tag_match * 0.05 +  # 标签匹配
                type_match * 0.15 # 类型匹配
        )
    
        return min(1.0, score)  # 确保不超过1.0
    
    def retrieve(self, question: str, question_type: str = None, top_k: int = 3) -> List[Dict]:
        """语义检索相关文档 - 使用向量检索"""
        if not self.index:
            print("FAISS索引未初始化")
            return []
    
        # 将问题转换为向量
        query_embedding = self.model.encode([question], normalize_embeddings=True).astype(np.float32)
    
        # 使用FAISS进行相似度搜索 - 增加搜索范围
        search_k = min(top_k * 3, len(self.knowledge_base))
        scores, indices = self.index.search(query_embedding, search_k)
    
        print(f"检索问题: '{question}'")
        print(f"搜索到 {len(indices[0])} 个候选结果")
    
        results = []
        for i, (score, idx) in enumerate(zip(scores[0], indices[0])):
            if idx < len(self.knowledge_base):
                doc = self.knowledge_base[idx]
    
                # 计算综合评分
                final_score = self._calculate_comprehensive_score(score, doc, question, question_type)
    
                # 基于问题类型的过滤和最低分数阈值
                if (final_score > 0.3 and
                        (not question_type or doc.get('category') == question_type or question_type == "unknown")):
                    results.append({
                        **doc,
                        'score': float(final_score)
                    })
    
                # 如果已经收集到足够的文档，提前结束
                if len(results) >= top_k:
                    break
    
        # 按分数排序
        results.sort(key=lambda x: x['score'], reverse=True)
        print(f"返回 {len(results)} 个最终结果")
        return results
    
    def add_document(self, doc: Dict):
        """添加新文档到知识库"""
        if 'id' not in doc:
            doc['id'] = f"k{len(self.knowledge_base) + 1:03d}"
        self.knowledge_base.append(doc)
    
        # 保存到文件
        with open(self.kb_path, 'w', encoding='utf-8') as f:
            json.dump(self.knowledge_base, f, ensure_ascii=False, indent=2)
    
        # 重新构建索引
        self._build_faiss_index()

if name == "main":
    print("开始测试语义检索模块...")
    retriever = SemanticRetriever()

    # 更全面的测试问题
    test_questions = [
        "如何声明变量？",
        "mut关键字有什么用？",
        "怎样定义一个函数？",
        "Rust中的所有权是什么？",
        "生命周期怎么用？"
    ]
    
    for question in test_questions:
        print(f"\n{'=' * 50}")
        results = retriever.retrieve(question, top_k=2)
    
        print(f"=== 检索分析: {question} ===")
        if not results:
            print("未找到相关结果")
            continue
    
        print(f"找到 {len(results)} 个相关文档:")
        for i, result in enumerate(results):
            print(f"{i + 1}. [{result['score']:.3f}] {result['topic']}")
    
        # 分析分数分布
        scores = [r['score'] for r in results]
        if scores:
            print(f"分数范围: {min(scores):.3f} - {max(scores):.3f}")
            print(f"平均分数: {sum(scores) / len(scores):.3f}")
    
        for i, result in enumerate(results):
            print(f"\n{i + 1}. 主题: {result['topic']}")
            print(f"   分数: {result['score']:.4f}")
            print(f"   内容: {result['content'][:100]}...")
            if result.get('code'):
                print(f"   代码: {result['code'][:50]}...")
