"""
知识库管理模块
"""
import os
from typing import List, Dict
import numpy as np
import json
import config
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

class KnowledgeBase:
    """TF-IDF-backed knowledge base.

    Loads JSON document files from the domain directories declared in
    ``config.KNOWLEDGE_BASE`` and retrieves the entries most similar to a
    query via cosine similarity over a TF-IDF index.
    """

    def __init__(self):
        """Build the vectorizer and load all configured knowledge.

        Never raises: on any failure the instance falls back to a safe
        empty state (``vectors is None``, ``documents == []``), in which
        searches simply return no results.
        """
        # Assign safe defaults *before* the risky work so a failure inside
        # load_knowledge() can never leave the instance half-initialized.
        self.vectorizer = TfidfVectorizer()
        self.vectors = None           # TF-IDF matrix, row-aligned with self.documents
        self.documents: List[Dict] = []
        try:
            print("初始化知识库...")
            self.load_knowledge()
            print("知识库初始化完成")
        except Exception as e:
            print(f"知识库初始化失败: {str(e)}")
            self.vectors = None
            self.documents = []

    def validate_document(self, doc: Dict) -> bool:
        """Return True iff ``doc`` has all required fields and a non-blank
        ``content`` string.

        Required fields: id, content, category, tags.
        Any malformed input (non-mapping, non-string content, ...) yields
        False rather than raising.
        """
        required_fields = ("id", "content", "category", "tags")
        try:
            # Presence check once per field; the content-emptiness check is
            # done once (the original re-checked content for every field).
            if not all(field in doc for field in required_fields):
                return False
            return bool(doc["content"].strip())
        except (TypeError, AttributeError):
            # TypeError: doc does not support `in`; AttributeError:
            # content is not a string. Both mean "invalid document".
            return False

    def load_knowledge(self):
        """Load every valid document from all domain directories and build
        the TF-IDF index over their ``content`` fields.

        Missing directories are created (so users know where to drop
        files) and skipped. Unreadable/invalid files are logged and
        skipped; they never abort the whole load.
        """
        all_docs: List[Dict] = []
        print("开始加载知识库...")

        # Walk every configured knowledge-base domain directory.
        for domain, info in config.KNOWLEDGE_BASE.items():
            path = info["path"]
            print(f"检查目录: {path}")

            if not os.path.exists(path):
                # First run: create the directory, nothing to load yet.
                os.makedirs(path, exist_ok=True)
                print(f"创建知识目录: {path}")
                continue

            files = [f for f in os.listdir(path) if f.endswith('.json')]
            print(f"在 {path} 中找到的JSON文件: {files}")

            for file in files:
                file_path = os.path.join(path, file)
                try:
                    print(f"正在读取: {file_path}")
                    with open(file_path, 'r', encoding='utf-8') as f:
                        docs = json.load(f)
                    # Each file must hold a JSON array of document objects;
                    # any other top-level type contains no valid entries.
                    if not isinstance(docs, list):
                        docs = []
                    valid_docs = [doc for doc in docs if self.validate_document(doc)]
                    print(f"文件 {file} 中有效词条数: {len(valid_docs)}")
                    all_docs.extend(valid_docs)
                except (OSError, json.JSONDecodeError) as e:
                    print(f"读取文件 {file_path} 失败: {str(e)}")

        print(f"总共找到词条数: {len(all_docs)}")

        if not all_docs:
            print("警告：未找到任何有效知识词条")
            return

        try:
            self.documents = all_docs
            contents = [doc["content"] for doc in all_docs]
            self.vectors = self.vectorizer.fit_transform(contents)
            print(f"成功创建向量索引，词条数: {len(all_docs)}")
        except Exception as e:
            # fit_transform can fail (e.g. every document reduces to an
            # empty vocabulary); reset to the safe empty state.
            print(f"创建向量索引失败: {str(e)}")
            self.vectors = None
            self.documents = []

    def search_relevant_knowledge(self, query: str, top_k: int = 3) -> List[Dict]:
        """Return up to ``top_k`` documents most similar to ``query``,
        best match first.

        Zero-similarity documents (no vocabulary overlap with the query)
        are excluded, so the result may be shorter than ``top_k``.
        Returns [] when the index is not built or on any search error.
        """
        if self.vectors is None or not self.documents:
            print(f"知识库未正确初始化 - vectors: {self.vectors is not None}, documents: {len(self.documents)}")
            return []

        try:
            query_vector = self.vectorizer.transform([query])
            similarities = cosine_similarity(query_vector, self.vectors)[0]
            # Highest scores first; drop zero-similarity indices so
            # completely unrelated entries are never reported as relevant
            # (the original returned top_k rows unconditionally).
            top_indices = np.argsort(similarities)[-top_k:][::-1]
            return [self.documents[i] for i in top_indices if similarities[i] > 0]
        except Exception as e:
            print(f"搜索相关知识失败: {str(e)}")
            return []

    def enhance_prompt(self, base_prompt: str, query: str) -> str:
        """Prepend knowledge relevant to ``query`` to ``base_prompt``.

        Falls back to the unmodified ``base_prompt`` when no relevant
        documents are found or on any formatting error.
        """
        relevant_docs = self.search_relevant_knowledge(query)
        if not relevant_docs:
            return base_prompt

        try:
            knowledge_context = "\n".join(doc["content"] for doc in relevant_docs)
            return f"""基于以下领域知识:
{knowledge_context}

{base_prompt}"""
        except Exception as e:
            print(f"增强提示词失败: {str(e)}")
            return base_prompt






