import os
from typing import List, Dict, Any
from dotenv import load_dotenv

# 加载环境变量
load_dotenv()

# 文档加载与处理
from langchain_community.document_loaders import PyPDFLoader, Docx2txtLoader, TextLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_core.documents import Document

# 向量化与存储 - 使用更简单的嵌入模型
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma

# Smaller LLM (NOTE: HuggingFacePipeline is imported but never used in this file)
from langchain_community.llms import HuggingFacePipeline

class SimpleKnowledgeBase:
    """Minimal local RAG knowledge base over a directory of documents.

    Loads .pdf/.docx/.txt files (extensions matched case-insensitively),
    splits them into overlapping chunks, embeds them with a HuggingFace
    sentence-transformer and indexes them in a persistent Chroma store.
    """

    def __init__(self, documents_dir: str):
        # Directory the user drops source documents into.
        self.documents_dir = documents_dir
        # Built by process_documents(); stays None until the index exists.
        self.vectorstore = None

    def _load_documents(self, files):
        """Load every supported file from ``self.documents_dir``.

        ``files`` is the list of entry names inside ``self.documents_dir``.
        Returns a list of loaded Document objects. Unsupported extensions
        and non-file entries are skipped; per-file load errors are printed
        but do not abort the run (best-effort, as in the original loop).
        """
        # Dispatch table: lowercased extension -> (loader class, log label).
        # Built inside the method so the class definition itself does not
        # need the loader names resolved until load time.
        loaders = {
            ".pdf": (PyPDFLoader, "PDF"),
            ".docx": (Docx2txtLoader, "DOCX"),
            ".txt": (TextLoader, "TXT"),
        }

        documents = []
        for file in files:
            file_path = os.path.join(self.documents_dir, file)
            # Skip sub-directories and other non-regular entries.
            if not os.path.isfile(file_path):
                continue

            # Case-insensitive match so "REPORT.PDF" is not silently skipped.
            ext = os.path.splitext(file)[1].lower()
            if ext not in loaders:
                continue  # unsupported type: silently ignored, as before

            loader_cls, label = loaders[ext]
            try:
                docs = loader_cls(file_path).load()
                # Tag each chunk with its originating file name so search
                # results can point back to the source document.
                for doc in docs:
                    doc.metadata["source"] = file
                documents.extend(docs)
                print(f"已加载{label}: {file}")
            except Exception as e:
                # Best effort: one corrupt file must not kill indexing.
                print(f"加载文件 {file} 时出错: {e}")
        return documents

    def _build_embeddings(self):
        """Create the embedding model, falling back to a smaller one.

        Tries a multilingual MiniLM first (needed for non-English queries);
        on any failure falls back to the English-centric all-MiniLM-L6-v2.
        """
        try:
            embeddings = HuggingFaceEmbeddings(
                model_name="sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2",
                model_kwargs={'device': 'cpu'},
                encode_kwargs={'normalize_embeddings': True}
            )
            print("成功加载嵌入模型")
        except Exception as e:
            print(f"加载嵌入模型时出错: {e}")
            print("尝试使用备用简单模型...")
            # Fallback: smaller English model, no embedding normalisation.
            embeddings = HuggingFaceEmbeddings(
                model_name="all-MiniLM-L6-v2",
                model_kwargs={'device': 'cpu'}
            )
            print("成功加载备用嵌入模型")
        return embeddings

    def process_documents(self):
        """Build (or rebuild) the vector store from ``documents_dir``.

        Creates the directory if missing, loads and splits all supported
        documents and persists the resulting Chroma index under
        ``./simple_chroma_db``. Prints progress; returns nothing.
        """
        print("开始加载文档...")

        # Create the directory on first run so the user has somewhere to
        # drop documents, then bail out until it is populated.
        if not os.path.exists(self.documents_dir):
            os.makedirs(self.documents_dir)
            print(f"创建了文档目录: {self.documents_dir}")
            print("请在此目录中放置您的文档后再运行")
            return

        files = os.listdir(self.documents_dir)
        if not files:
            print(f"文档目录 {self.documents_dir} 为空，请添加一些文档")
            return

        documents = self._load_documents(files)
        if not documents:
            print("没有成功加载任何文档，请检查文件格式")
            return

        print(f"成功加载了 {len(documents)} 个文档段落")

        # Split into small overlapping chunks; the separator list includes
        # Chinese punctuation so sentences are not cut mid-clause.
        print("正在分割文档...")
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=500,
            chunk_overlap=50,
            separators=["\n\n", "\n", "。", "！", "？", "；", "，", " ", ""]
        )
        splits = text_splitter.split_documents(documents)
        print(f"文档已分割为 {len(splits)} 个文本块")

        print("正在创建向量存储...")
        embeddings = self._build_embeddings()

        self.vectorstore = Chroma.from_documents(
            documents=splits,
            embedding=embeddings,
            persist_directory="./simple_chroma_db"
        )
        # NOTE(review): persist() is deprecated/automatic on Chroma >= 0.4;
        # kept for compatibility with older chromadb versions.
        self.vectorstore.persist()
        print("向量存储创建完成！")

    def search(self, query: str, n_results: int = 5) -> List[Dict[str, Any]]:
        """Return up to ``n_results`` chunks most relevant to ``query``.

        Each result dict carries ``content``, ``source``, ``page`` (None
        for sources without page metadata) and ``similarity``. The
        ``similarity`` value is the raw Chroma distance score — lower
        means more similar for the default metric; it is NOT a bounded
        [0, 1] similarity.

        Returns an empty list (after printing a hint) when the vector
        store has not been built yet.
        """
        if not self.vectorstore:
            print("请先运行process_documents()创建向量存储")
            return []

        results = self.vectorstore.similarity_search_with_score(query, k=n_results)
        return [
            {
                "content": doc.page_content,
                "source": doc.metadata.get("source", "未知来源"),
                "page": doc.metadata.get("page", None),
                "similarity": score,
            }
            for doc, score in results
        ]
        
# Usage example / manual smoke test
if __name__ == "__main__":
    # Build the knowledge base over the local document directory.
    kb = SimpleKnowledgeBase(documents_dir="./public_health_docs")

    # Load, split, embed and index the documents.
    kb.process_documents()

    # Only run the search demo when an index was actually built
    # (process_documents leaves vectorstore as None on failure).
    if kb.vectorstore:
        print("\n=== 搜索测试 ===")
        query = "新冠肺炎的症状"
        results = kb.search(query)

        print(f"查询: {query}")
        print(f"找到 {len(results)} 个相关结果:\n")

        for i, result in enumerate(results):
            print(f"结果 {i+1}:")
            print(f"来源: {result['source']}")
            # BUG FIX: page 0 is falsy, so the first page of a PDF was
            # never printed; compare against None explicitly.
            if result['page'] is not None:
                print(f"页码: {result['page']}")
            # NOTE(review): `1 - score` assumes a cosine-style distance in
            # [0, 2]; with Chroma's default L2 metric this can be negative
            # — confirm the collection's distance function.
            print(f"相似度: {1 - result['similarity']:.2f}")
            print(f"内容: {result['content'][:200]}...\n")