import os
import os
from langchain_community.document_loaders import TextLoader, DirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
import numpy as np
from typing import List, Dict, Optional
from backend.common.vector_db_manager import vector_db_manager
from backend.common.config import config
from backend.settings import CHUNK_SIZE, CHUNK_OVERLAP, DOCS_DIR
from backend.logger_setup import get_logger

# Module-level logger for the ingestion pipeline (shared backend logging setup).
logger = get_logger('ingest')

class DocumentIngester:
    """Ingests markdown documents into the shared vector database.

    Pipeline: load text files -> split into overlapping chunks -> generate
    one embedding per chunk -> batch-store vectors with provenance metadata.
    Embedding generation and storage are delegated to the project-wide
    ``vector_db_manager`` singleton.
    """

    def __init__(self) -> None:
        # Shared singleton; initialization failures are logged but not
        # re-raised so importing this module never crashes the app.
        self.vector_db = vector_db_manager
        try:
            self.vector_db.initialize()
            logger.info("向量数据库管理器初始化成功")
        except Exception as e:
            logger.error(f"向量数据库管理器初始化失败: {str(e)}")

        # Currently active LLM provider, used when generating embeddings.
        self.active_provider = config.llm_providers.active_provider
        logger.info(f"使用LLM提供商生成嵌入: {self.active_provider}")

        # Recursive splitter: prefers paragraph breaks, then line breaks,
        # then spaces, then hard character cuts.
        self.text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=CHUNK_SIZE,
            chunk_overlap=CHUNK_OVERLAP,
            separators=["\n\n", "\n", " ", ""]
        )

    def _create_mock_embedding_model(self):
        """Return a fallback model whose ``encode`` yields random vectors.

        The 4096-dim size matches Ollama's default embedding dimension,
        so downstream storage keeps a consistent vector width.
        """
        class MockEmbeddingModel:
            def encode(self, texts, **kwargs):
                # Mirror the usual encode() contract: a single vector for a
                # str input, a list of vectors for a sequence.
                if isinstance(texts, str):
                    return np.random.rand(4096)
                return [np.random.rand(4096) for _ in texts]

        logger.warning("使用mock嵌入模型")
        return MockEmbeddingModel()

    def load_document(self, file_path: str) -> List[Dict]:
        """Load a single UTF-8 text document; return ``[]`` on failure."""
        try:
            loader = TextLoader(file_path, encoding='utf-8')
            documents = loader.load()
            logger.info(f"成功加载文档: {file_path}")
            return documents
        except Exception as e:
            logger.error(f"加载文档失败 {file_path}: {str(e)}")
            return []

    def load_documents_from_directory(self, directory_path: str) -> List[Dict]:
        """Recursively load every ``*.md`` file under *directory_path*.

        Returns the loaded documents, or ``[]`` if loading fails.
        """
        try:
            loader = DirectoryLoader(
                directory_path,
                glob="**/*.md",  # markdown files only
                loader_cls=TextLoader,
                show_progress=True,
                loader_kwargs={'encoding': 'utf-8'}
            )
            documents = loader.load()
            logger.info(f"从目录 {directory_path} 成功加载 {len(documents)} 个文档")
            return documents
        except Exception as e:
            logger.error(f"从目录加载文档失败 {directory_path}: {str(e)}")
            return []

    def split_documents(self, documents: List[Dict]) -> List[Dict]:
        """Split loaded documents into overlapping chunks; ``[]`` on failure."""
        try:
            split_docs = self.text_splitter.split_documents(documents)
            logger.info(f"成功分割文档为 {len(split_docs)} 个块")
            return split_docs
        except Exception as e:
            logger.error(f"分割文档失败: {str(e)}")
            return []

    def generate_embeddings(self, texts: List[str]) -> List[np.ndarray]:
        """Generate one embedding vector per input text.

        Delegates to the vector-DB manager; if that fails, falls back to
        random 4096-dim vectors (Ollama's default embedding dimension) so
        ingestion can proceed in a degraded mode.
        """
        try:
            embeddings = [self.vector_db.generate_embedding(text) for text in texts]
            logger.info(f"成功生成 {len(embeddings)} 个嵌入向量")
            return embeddings
        except Exception as e:
            logger.error(f"生成嵌入向量失败: {str(e)}")
            # Degraded fallback: keep going with random vectors.
            return [np.random.rand(4096) for _ in texts]

    def add_document_to_vector_db(self, file_path: str) -> bool:
        """Load, chunk, embed and store a single document.

        Returns ``True`` only when every chunk vector was stored
        successfully; any load/split/store failure yields ``False``.
        """
        try:
            documents = self.load_document(file_path)
            if not documents:
                return False

            split_docs = self.split_documents(documents)
            if not split_docs:
                return False

            file_name = os.path.basename(file_path)

            # One embedding per chunk.
            texts = [doc.page_content for doc in split_docs]
            embeddings = self.generate_embeddings(texts)

            # Per-chunk payload: chunk text plus provenance metadata.
            metadatas = []
            for i, doc_chunk in enumerate(split_docs):
                metadatas.append({
                    'content': doc_chunk.page_content,
                    'metadata': {
                        'file_name': file_name,
                        'file_path': file_path,
                        'chunk_index': i,
                        'source': file_name,
                        'total_chunks': len(split_docs)
                    }
                })

            vector_ids = self.vector_db.batch_store_vectors(embeddings, metadatas)

            # Treat an empty result or any -1 id as a storage failure.
            if not vector_ids or any(vid == -1 for vid in vector_ids):
                logger.error(f"存储文档向量失败: {file_name}")
                return False

            logger.info(f"成功将文档添加到向量数据库: {file_name}, 存储了 {len(vector_ids)} 个向量")
            return True
        except Exception as e:
            logger.error(f"添加文档到向量数据库失败 {file_path}: {str(e)}")
            return False

    def add_documents_from_directory(self, directory_path: str) -> Dict:
        """Ingest every ``*.md`` file under *directory_path*.

        Returns a summary dict with ``total``/``success``/``failed`` counts
        and the list of ``failed_files``; on unexpected errors an ``error``
        key is added with the exception message.
        """
        results = {
            'total': 0,
            'success': 0,
            'failed': 0,
            'failed_files': []
        }

        try:
            # Walk the tree ourselves so per-file failures are tallied
            # instead of aborting the whole directory.
            for root, _, files in os.walk(directory_path):
                for file in files:
                    if file.endswith('.md'):
                        file_path = os.path.join(root, file)
                        results['total'] += 1

                        if self.add_document_to_vector_db(file_path):
                            results['success'] += 1
                        else:
                            results['failed'] += 1
                            results['failed_files'].append(file_path)

            logger.info(f"从目录添加文档结果: 总数={results['total']}, 成功={results['success']}, 失败={results['failed']}")
            return results
        except Exception as e:
            logger.error(f"从目录添加文档到向量数据库失败 {directory_path}: {str(e)}")
            results['error'] = str(e)
            return results

    def initialize_default_documents(self) -> Dict:
        """Ingest the default docs from ``DOCS_DIR`` if the directory exists.

        Returns the ingestion summary dict, or an ``{'error': ...}`` dict
        when ``DOCS_DIR`` is missing.
        """
        if os.path.exists(DOCS_DIR):
            logger.info(f"初始化默认文档，从目录: {DOCS_DIR}")
            return self.add_documents_from_directory(DOCS_DIR)
        logger.warning(f"默认文档目录不存在: {DOCS_DIR}")
        return {'error': '默认文档目录不存在'}

# Global document-ingester singleton. Constructed at import time, so
# importing this module triggers __init__'s vector-DB initialization
# as a side effect.
doc_ingester = DocumentIngester()