import os
import sys
import yaml
import numpy as np
from langchain.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS
from loguru import logger

# 添加项目根目录到路径以便导入backend模块
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from backend.llm import llm_manager
from backend.common.config import config

# Logging configuration: drop loguru's default stderr sink, then log INFO+
# to stdout and to a size-rotated file next to the script.
logger.remove()
logger.add(sys.stdout, level="INFO")
logger.add("import_documents.log", level="INFO", rotation="10 MB")

def load_config():
    """Load the project configuration from <project_root>/configs/config.yaml.

    Returns:
        dict: The parsed YAML configuration. If the file cannot be read or
        parsed, a built-in default RAG configuration is returned instead so
        the importer can still run.
    """
    project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    config_path = os.path.join(project_root, "configs", "config.yaml")
    try:
        with open(config_path, "r", encoding="utf-8") as f:
            # Use a local name distinct from the module-level `config` object
            # imported from backend.common.config to avoid shadowing confusion.
            loaded_config = yaml.safe_load(f)
        return loaded_config
    except (OSError, yaml.YAMLError) as e:
        # Only file-access and YAML-parse failures trigger the fallback;
        # programming errors are allowed to propagate.
        logger.error(f"加载配置文件失败: {str(e)}")
        # Fall back to built-in defaults.
        return {
            "rag": {
                "chunk_size": 800,
                "chunk_overlap": 100,
                "embedding_model": "sentence-transformers/all-mpnet-base-v2"
            }
        }

class _MockEmbeddings:
    """Fallback embedding model returning random 4096-dim vectors.

    Used only when no real LLM provider can be obtained, so the import
    pipeline can still be exercised end-to-end.
    """

    def embed_documents(self, texts):
        return [np.random.rand(4096).tolist() for _ in texts]

    def embed_query(self, text):
        return np.random.rand(4096).tolist()


class _LLMProviderEmbeddings:
    """LangChain-compatible embedding adapter over an arbitrary LLM provider.

    Dispatches to whichever embedding API the wrapped provider exposes
    (project-style `generate_embeddings`/`generate_embedding`, or
    LangChain-style `embed_documents`/`embed_query`); when neither exists,
    random 4096-dim vectors keep the pipeline running.
    """

    def __init__(self, llm_provider):
        self.llm_provider = llm_provider

    def embed_documents(self, texts):
        provider = self.llm_provider
        if hasattr(provider, 'generate_embeddings'):
            # Project providers return array-likes; convert to plain lists.
            return [embedding.tolist() for embedding in provider.generate_embeddings(texts)]
        if hasattr(provider, 'embed_documents'):
            return provider.embed_documents(texts)
        # Last resort: random vectors.
        return [np.random.rand(4096).tolist() for _ in texts]

    def embed_query(self, text):
        provider = self.llm_provider
        if hasattr(provider, 'generate_embedding'):
            return provider.generate_embedding(text).tolist()
        if hasattr(provider, 'embed_query'):
            return provider.embed_query(text)
        # Last resort: random vector.
        return np.random.rand(4096).tolist()


def _get_llm_provider():
    """Return the active LLM provider instance, or a mock on failure."""
    active_provider = config.llm_providers.active_provider
    logger.info(f"使用LLM提供商生成嵌入: {active_provider}")
    try:
        provider = llm_manager.get_provider(active_provider)
        logger.info(f"成功获取LLM提供商实例")
        return provider
    except Exception as e:
        logger.error(f"获取LLM提供商实例失败: {str(e)}")
        logger.warning("使用mock嵌入模型")
        return _MockEmbeddings()


def _collect_documents(directory_path, text_splitter):
    """Walk `directory_path` and return split document chunks.

    Only .txt and .md files are processed; each chunk gets `source` and
    `filename` metadata. Per-file failures are logged and skipped so one bad
    file does not abort the whole import.
    """
    documents = []
    supported_formats = ('.txt', '.md')
    for root, _dirs, files in os.walk(directory_path):
        for file in files:
            file_path = os.path.join(root, file)
            if os.path.splitext(file)[1].lower() not in supported_formats:
                continue
            try:
                logger.info(f"加载文档: {file_path}")
                loader = TextLoader(file_path, encoding="utf-8")
                loaded_docs = loader.load()
                split_docs = text_splitter.split_documents(loaded_docs)
                for doc in split_docs:
                    doc.metadata["source"] = file_path
                    doc.metadata["filename"] = file
                documents.extend(split_docs)
                logger.info(f"成功处理文档: {file_path}, 生成 {len(split_docs)} 个片段")
            except Exception as e:
                logger.error(f"处理文档失败: {file_path}, 错误: {str(e)}")
    return documents


def import_documents(directory_path):
    """Import every supported document under `directory_path` into FAISS.

    Loads RAG chunking parameters from the project config, splits all .txt/.md
    files into chunks, embeds them via the active LLM provider (or a mock
    fallback), and saves the resulting FAISS index to
    <project_root>/data/vector_db.

    Args:
        directory_path: Root directory to scan recursively for documents.

    Returns:
        None. Failures are logged rather than raised (top-level script
        boundary).
    """
    try:
        config_data = load_config()
        rag_config = config_data.get("rag", {})
        chunk_size = rag_config.get("chunk_size", 800)
        chunk_overlap = rag_config.get("chunk_overlap", 100)
        logger.info(f"开始导入文档，配置: chunk_size={chunk_size}, chunk_overlap={chunk_overlap}")

        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=chunk_size,
            chunk_overlap=chunk_overlap,
            separators=["\n\n", "\n", " ", ""]
        )

        embeddings = _LLMProviderEmbeddings(_get_llm_provider())

        documents = _collect_documents(directory_path, text_splitter)
        if not documents:
            logger.warning("没有找到可处理的文档")
            return

        logger.info(f"创建向量数据库，共 {len(documents)} 个文档片段")
        db = FAISS.from_documents(documents, embeddings)

        # Persist the index to <project_root>/data/vector_db.
        db_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "data", "vector_db")
        os.makedirs(db_path, exist_ok=True)
        db.save_local(db_path)

        logger.info(f"文档导入完成，向量数据库已保存到: {db_path}")
        logger.info(f"共导入 {len(documents)} 个文档片段")
    except Exception as e:
        logger.error(f"导入过程失败: {str(e)}")

if __name__ == "__main__":
    # Resolve the docs directory: a CLI argument wins, otherwise fall back
    # to <project_root>/data/docs.
    project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    docs_dir = sys.argv[1] if len(sys.argv) > 1 else os.path.join(project_root, "data", "docs")

    logger.info(f"开始从目录导入文档: {docs_dir}")
    import_documents(docs_dir)
    logger.info("文档导入过程已完成")