import os
import jieba
import numpy as np
from typing import List, Dict, Any
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

# Document loading and processing (langchain loaders/splitters)
from langchain_community.document_loaders import PyPDFLoader, Docx2txtLoader, TextLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_core.documents import Document

class TfidfKnowledgeBase:
    """TF-IDF based knowledge base for searching Chinese documents.

    Loads PDF/DOCX/TXT files from a directory, splits them into overlapping
    chunks, tokenizes them with jieba, builds a TF-IDF index, and answers
    queries by cosine similarity.
    """

    def __init__(self, documents_dir: str):
        self.documents_dir = documents_dir  # directory scanned for source files
        self.vectorizer = None              # fitted TfidfVectorizer after build_index()
        self.document_vectors = None        # sparse TF-IDF matrix, one row per chunk
        self.documents: List[Document] = [] # split chunks backing the index

    def load_documents(self) -> List[Document]:
        """Load all supported documents (.pdf/.docx/.txt) from documents_dir.

        Returns a list of langchain Documents, each with its source file name
        recorded in metadata["source"]. Unreadable files are skipped with a
        warning rather than aborting the whole load.
        """
        documents: List[Document] = []

        # Create the directory on first run so the user has a place to drop files.
        if not os.path.exists(self.documents_dir):
            os.makedirs(self.documents_dir)
            print(f"创建了文档目录: {self.documents_dir}")
            print("请在此目录中放置您的文档后再运行")
            return documents

        files = os.listdir(self.documents_dir)
        if not files:
            print(f"文档目录 {self.documents_dir} 为空，请添加一些文档")
            return documents

        for file in files:
            file_path = os.path.join(self.documents_dir, file)
            # Skip subdirectories and other non-regular entries; listdir
            # returns them too and the loaders would fail on them.
            if not os.path.isfile(file_path):
                continue

            # Lower-cased extension so .PDF / .Txt etc. are also picked up.
            ext = os.path.splitext(file)[1].lower()
            try:
                if ext == ".pdf":
                    loader = PyPDFLoader(file_path)
                elif ext == ".docx":
                    loader = Docx2txtLoader(file_path)
                elif ext == ".txt":
                    # Explicit UTF-8: the platform default encoding may not
                    # decode Chinese text (e.g. GBK/cp1252 locales).
                    loader = TextLoader(file_path, encoding="utf-8")
                else:
                    continue  # unsupported file type

                docs = loader.load()
                for doc in docs:
                    doc.metadata["source"] = file
                documents.extend(docs)
                print(f"已加载{ext[1:].upper()}: {file}")
            except Exception as e:
                # Best-effort load: report and continue with the other files.
                print(f"加载文件 {file} 时出错: {e}")

        return documents

    def split_documents(self, documents: List[Document]) -> List[Document]:
        """Split documents into ~500-char chunks with 50-char overlap.

        Separators include Chinese sentence punctuation so splits land on
        natural boundaries. Each chunk gets a sequential metadata chunk_id.
        """
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=500,
            chunk_overlap=50,
            separators=["\n\n", "\n", "。", "！", "？", "；", "，", " ", ""]
        )

        splits = text_splitter.split_documents(documents)

        # Stable chunk ids so search results can be traced back to a chunk.
        for i, split in enumerate(splits):
            split.metadata["chunk_id"] = i

        return splits

    def preprocess_text(self, text: str) -> str:
        """Tokenize Chinese text with jieba and re-join with spaces.

        TfidfVectorizer expects whitespace-delimited tokens, which raw
        Chinese text does not have.
        """
        return " ".join(jieba.cut(text))

    def build_index(self):
        """Load, split, tokenize and vectorize the corpus into a TF-IDF index."""
        print("开始加载文档...")
        documents = self.load_documents()

        if not documents:
            print("没有成功加载任何文档")
            return

        print(f"成功加载了 {len(documents)} 个文档段落")

        print("正在分割文档...")
        self.documents = self.split_documents(documents)
        print(f"文档已分割为 {len(self.documents)} 个文本块")

        print("正在处理文本...")
        preprocessed_texts = [
            self.preprocess_text(doc.page_content) for doc in self.documents
        ]

        print("正在创建TF-IDF索引...")
        # min_df=2 raises ValueError on tiny corpora (max_df*n < min_df),
        # so relax it when there are only a handful of chunks.
        min_df = 2 if len(preprocessed_texts) >= 20 else 1
        self.vectorizer = TfidfVectorizer(
            analyzer='word',
            # The sklearn default token_pattern (\b\w\w+\b) discards
            # single-character tokens, i.e. every one-character Chinese
            # word jieba emits. Keep 1-char tokens.
            token_pattern=r"(?u)\b\w+\b",
            min_df=min_df,   # term must appear in at least min_df chunks
            max_df=0.9       # drop terms appearing in >90% of chunks
        )

        self.document_vectors = self.vectorizer.fit_transform(preprocessed_texts)
        print("TF-IDF索引创建完成！")

    def search(self, query: str, n_results: int = 5):
        """Return up to n_results chunks most similar to the query.

        Each result is a dict with content, source, page (or None) and a
        float cosine similarity. Zero-similarity chunks are excluded.
        Returns [] if the index has not been built yet.
        """
        if not self.vectorizer or self.document_vectors is None:
            print("请先运行build_index()创建索引")
            return []

        # Tokenize the query the same way the corpus was tokenized.
        processed_query = self.preprocess_text(query)
        query_vector = self.vectorizer.transform([processed_query])

        similarities = cosine_similarity(query_vector, self.document_vectors).flatten()

        # Indices of the n_results highest-similarity chunks, best first.
        top_indices = similarities.argsort()[::-1][:n_results]

        results = []
        for idx in top_indices:
            if similarities[idx] > 0:  # keep only chunks sharing vocabulary with the query
                doc = self.documents[idx]
                results.append({
                    "content": doc.page_content,
                    "source": doc.metadata.get("source", "未知来源"),
                    "page": doc.metadata.get("page", None),
                    # Cast numpy scalar to a plain Python float for callers/serialization.
                    "similarity": float(similarities[idx])
                })

        return results

# Usage example: build the index and run one sample query.
if __name__ == "__main__":
    kb = TfidfKnowledgeBase(documents_dir="./public_health_docs")
    kb.build_index()

    print("\n=== 搜索测试 ===")
    query = "新冠肺炎的症状"
    results = kb.search(query)

    print(f"查询: {query}")
    print(f"找到 {len(results)} 个相关结果:\n")

    for i, result in enumerate(results, start=1):
        print(f"结果 {i}:")
        print(f"来源: {result['source']}")
        # Bug fix: PDF pages are 0-indexed, so page 0 is valid but falsy —
        # test against None explicitly instead of truthiness.
        if result['page'] is not None:
            print(f"页码: {result['page']}")
        print(f"相似度: {result['similarity']:.4f}")
        print(f"内容: {result['content'][:200]}...\n")