"""
将公共卫生专业TXT文本转化为向量数据库
使用LangChain进行文本处理，使用FAISS进行向量存储
"""

# 导入所需的库
import os
import re
from langchain_community.document_loaders import TextLoader, DirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import HuggingFaceEmbeddings
import logging

# 设置日志
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

class PublicHealthKnowledgeBase:
    """Build and query a FAISS vector knowledge base from public-health TXT files.

    Pipeline: load .txt documents -> split into overlapping chunks ->
    clean chunk text -> embed with a HuggingFace model -> persist/load a
    local FAISS index and run similarity search against it.
    """

    def __init__(self,
                 txt_dir_path,
                 vector_db_path="public_health_faiss_index",
                 chunk_size=1000,
                 chunk_overlap=200,
                 model_name="shibing624/text2vec-base-chinese"):
        """Initialize the knowledge-base builder.

        Args:
            txt_dir_path (str): Path to a single .txt file or a directory
                containing .txt files.
            vector_db_path (str): Directory where the FAISS index is
                saved to / loaded from.
            chunk_size (int): Maximum characters per text chunk.
            chunk_overlap (int): Characters of overlap between adjacent chunks.
            model_name (str): HuggingFace embedding model. The default supports
                Chinese text; choose another model for other languages.
        """
        self.txt_dir_path = txt_dir_path
        self.vector_db_path = vector_db_path
        self.chunk_size = chunk_size
        self.chunk_overlap = chunk_overlap
        self.model_name = model_name

        # CPU inference keeps the builder usable on machines without a GPU.
        self.embeddings = HuggingFaceEmbeddings(
            model_name=model_name,
            model_kwargs={'device': 'cpu'}
        )

        # Lazy %-style args: the message is only formatted if INFO is enabled.
        logger.info("初始化完成，使用模型: %s", model_name)

    def load_documents(self):
        """Load documents from a .txt file or every .txt under a directory.

        Returns:
            list: LangChain Document objects (one per loaded file).

        Raises:
            ValueError: If the path is neither a .txt file nor a directory.
        """
        logger.info("开始加载文档: %s", self.txt_dir_path)

        if os.path.isfile(self.txt_dir_path) and self.txt_dir_path.endswith('.txt'):
            # Single file case.
            loader = TextLoader(self.txt_dir_path, encoding='utf-8')
            documents = loader.load()
        elif os.path.isdir(self.txt_dir_path):
            # Directory case: recursively load every .txt file.
            loader = DirectoryLoader(
                self.txt_dir_path,
                glob="**/*.txt",
                loader_cls=TextLoader,
                loader_kwargs={'encoding': 'utf-8'}
            )
            documents = loader.load()
        else:
            raise ValueError(f"路径 {self.txt_dir_path} 必须是一个.txt文件或包含.txt文件的目录")

        logger.info("文档加载完成，共 %d 个文档", len(documents))
        return documents

    def split_text(self, documents):
        """Split documents into chunks sized for embedding.

        Args:
            documents (list): LangChain Document objects.

        Returns:
            list: Chunked Document objects.
        """
        logger.info("开始文本分割，块大小: %d，重叠大小: %d",
                    self.chunk_size, self.chunk_overlap)

        # Separators ordered from strongest break (paragraph) down to
        # Chinese/ASCII sentence punctuation, then whitespace, then any char.
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=self.chunk_size,
            chunk_overlap=self.chunk_overlap,
            length_function=len,
            separators=["\n\n", "\n", "。", "！", "？", ".", " ", ""]
        )

        chunks = text_splitter.split_documents(documents)
        logger.info("文本分割完成，共 %d 个文本块", len(chunks))
        return chunks

    def clean_text(self, chunks):
        """Normalize whitespace and strip unsupported characters from chunks.

        Chunks that become empty after cleaning are dropped (they carry no
        information and would otherwise be embedded anyway), so the returned
        list may be shorter than the input.

        Args:
            chunks (list): Chunked Document objects.

        Returns:
            list: Cleaned, non-empty Document objects.
        """
        logger.info("开始文本清洗...")

        cleaned_chunks = []
        for chunk in chunks:
            text = chunk.page_content

            # Collapse any run of whitespace into a single space.
            text = re.sub(r'\s+', ' ', text)
            # Keep word chars, whitespace, CJK ideographs and basic
            # ASCII/Chinese punctuation; drop everything else.
            text = re.sub(r'[^\w\s\u4e00-\u9fff.,;:!?，。；：！？]', '', text)
            text = text.strip()

            # Fix: previously empty chunks were kept despite the "valid
            # chunks" log message; skip them so only useful text is embedded.
            if text:
                chunk.page_content = text
                cleaned_chunks.append(chunk)

        logger.info("文本清洗完成，保留 %d 个有效文本块", len(cleaned_chunks))
        return cleaned_chunks

    def create_vector_db(self, chunks):
        """Embed chunks into a FAISS index and persist it locally.

        Args:
            chunks (list): Cleaned Document objects.

        Returns:
            FAISS: The in-memory vector store (also saved to disk).
        """
        logger.info("开始创建FAISS向量数据库...")

        vector_db = FAISS.from_documents(chunks, self.embeddings)
        vector_db.save_local(self.vector_db_path)

        logger.info("向量数据库创建完成，已保存至: %s", self.vector_db_path)
        return vector_db

    def load_vector_db(self):
        """Load a previously saved FAISS index from disk.

        Returns:
            FAISS | None: The loaded store, or None if the path is missing.
        """
        logger.info("加载向量数据库: %s", self.vector_db_path)

        if os.path.exists(self.vector_db_path):
            # FAISS.load_local deserializes a pickle; recent
            # langchain-community versions refuse to load it without this
            # explicit opt-in flag. Safe here because we only load indexes
            # this class created itself.
            vector_db = FAISS.load_local(
                self.vector_db_path,
                self.embeddings,
                allow_dangerous_deserialization=True
            )
            logger.info("向量数据库加载成功")
            return vector_db

        logger.error("向量数据库路径 %s 不存在", self.vector_db_path)
        return None

    def search(self, query, top_k=5):
        """Run a similarity search against the stored index.

        Args:
            query (str): Natural-language query text.
            top_k (int): Number of results to return.

        Returns:
            list: (Document, score) pairs — score is an L2 distance for the
            default FAISS index (smaller = more similar) — or [] on failure.
        """
        vector_db = self.load_vector_db()
        if vector_db:
            return vector_db.similarity_search_with_score(query, k=top_k)

        logger.error("无法执行搜索，向量数据库未加载")
        return []

    def build(self):
        """Run the full pipeline: load -> split -> clean -> index.

        Returns:
            FAISS: The freshly built vector store.
        """
        logger.info("开始构建知识库流程...")

        documents = self.load_documents()
        chunks = self.split_text(documents)
        cleaned_chunks = self.clean_text(chunks)
        vector_db = self.create_vector_db(cleaned_chunks)

        logger.info("知识库构建完成!")
        return vector_db


# Usage example
if __name__ == "__main__":
    # Path to a TXT file, or a directory containing TXT files.
    txt_path = "./data/documents"
    # Directory where the FAISS index will be persisted.
    vector_db_path = "public_health_faiss_index"

    kb_builder = PublicHealthKnowledgeBase(
        txt_dir_path=txt_path,
        vector_db_path=vector_db_path,
        chunk_size=1000,   # chunk size; tune to the document structure
        chunk_overlap=200  # overlap; tune to the desired context carry-over
    )

    # Build the knowledge base (load -> split -> clean -> index).
    kb_builder.build()

    # Smoke-test the retrieval path.
    query = "公共卫生紧急事件的应对措施"
    results = kb_builder.search(query, top_k=3)

    print("\n搜索结果:")
    for i, (doc, score) in enumerate(results):
        # Fix: similarity_search_with_score on a default FAISS index returns
        # an L2 distance (smaller = more similar), so "1 - score" is not a
        # similarity and can go negative. Report the raw distance instead.
        print(f"\n--- 结果 {i+1} (距离: {score:.4f}) ---")
        snippet = doc.page_content
        if len(snippet) > 300:
            snippet = snippet[:300] + "..."
        print(snippet)