import os
import logging
from typing import List, Dict, Any, Optional
from pathlib import Path
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import (
    TextLoader,
    PyPDFLoader,
    UnstructuredWordDocumentLoader,
    CSVLoader
)
from langchain.schema import Document
from langchain_community.vectorstores import Chroma
from config import Config
from .embedding_manager import ArkEmbeddings
from .chroma_manager import chroma_manager

logger = logging.getLogger(__name__)

class DocumentProcessor:
    """Document processor: loads, splits, and vectorizes documents.

    All vector-store access is routed through the global ChromaDB manager
    (``chroma_manager``) so that a single persistent client/embedding model
    is shared process-wide.
    """

    def __init__(self, vector_db_path: Optional[str] = None):
        """Initialize the splitter, embeddings, and storage directory.

        Args:
            vector_db_path: Directory for the persistent vector database;
                falls back to ``Config.VECTOR_DB_PATH`` when omitted.
        """
        self.vector_db_path = vector_db_path or Config.VECTOR_DB_PATH
        # Reuse the embeddings owned by the global ChromaDB manager so every
        # component embeds with the same model instance.
        self.embeddings = chroma_manager._embeddings
        self.text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=Config.CHUNK_SIZE,
            chunk_overlap=Config.CHUNK_OVERLAP,
            length_function=len,
            # Prefer paragraph, then sentence boundaries (both CJK and Latin
            # punctuation) before falling back to whitespace/characters.
            separators=["\n\n", "\n", "。", "！", "？", ".", "!", "?", " ", ""]
        )

        # Ensure the vector database directory exists.
        os.makedirs(self.vector_db_path, exist_ok=True)

    def _get_chroma_client(self):
        """Build a raw ChromaDB persistent client with telemetry disabled.

        ``chromadb`` is imported lazily so the dependency is only touched
        when these admin-style helpers are actually used. Shared by
        :meth:`get_collection_info` and :meth:`list_collections`.
        """
        import chromadb
        from chromadb.config import Settings

        return chromadb.PersistentClient(
            path=self.vector_db_path,
            settings=Settings(anonymized_telemetry=False)
        )

    def load_document(self, file_path: str) -> List[Document]:
        """Load a single document from disk.

        Args:
            file_path: Path to a ``.txt``/``.pdf``/``.docx``/``.doc``/
                ``.md``/``.markdown``/``.csv`` file.

        Returns:
            The loaded pages/rows as LangChain ``Document`` objects, each
            tagged with ``source``/``file_type``/``file_name`` metadata.

        Raises:
            ValueError: If the file extension is not supported.
        """
        try:
            path = Path(file_path)
            file_extension = path.suffix.lower()

            logger.info(f"开始加载文档: {path}")

            # Select a loader based on the file extension.
            if file_extension == '.txt':
                loader = TextLoader(str(path), encoding='utf-8')
            elif file_extension == '.pdf':
                loader = PyPDFLoader(str(path))
            elif file_extension in ['.docx', '.doc']:
                loader = UnstructuredWordDocumentLoader(str(path))
            elif file_extension in ['.md', '.markdown']:
                # Plain TextLoader handles markdown fine and avoids the
                # heavy `unstructured` dependency.
                loader = TextLoader(str(path), encoding='utf-8')
            elif file_extension == '.csv':
                loader = CSVLoader(str(path))
            else:
                raise ValueError(f"不支持的文件类型: {file_extension}")

            documents = loader.load()
            logger.info(f"成功加载文档，共 {len(documents)} 页")

            # Attach provenance metadata to every loaded page/row.
            for doc in documents:
                doc.metadata.update({
                    "source": str(path),
                    "file_type": file_extension,
                    "file_name": path.name
                })

            return documents

        except Exception as e:
            logger.error(f"加载文档失败: {str(e)}")
            raise

    def split_documents(self, documents: List[Document]) -> List[Document]:
        """Split documents into overlapping text chunks via the configured splitter."""
        try:
            logger.info(f"开始拆分文档，共 {len(documents)} 个文档")

            split_docs = self.text_splitter.split_documents(documents)

            logger.info(f"文档拆分完成，共生成 {len(split_docs)} 个文本块")

            return split_docs

        except Exception as e:
            logger.error(f"拆分文档失败: {str(e)}")
            raise

    def create_vector_store(self, documents: List[Document], collection_name: str = "default") -> Chroma:
        """Create (or reuse) a vector store and add *documents* to it.

        Args:
            documents: Pre-split document chunks to embed and store.
            collection_name: Target ChromaDB collection.

        Returns:
            The Chroma instance holding the collection.
        """
        try:
            logger.info(f"开始创建向量存储，集合名称: {collection_name}")

            # The global manager returns a (possibly cached) Chroma instance.
            vector_store = chroma_manager.get_chroma_instance(collection_name)

            vector_store.add_documents(documents)

            logger.info(f"向量存储创建完成，共存储 {len(documents)} 个文档块")

            return vector_store

        except Exception as e:
            logger.error(f"创建向量存储失败: {str(e)}")
            raise

    def load_vector_store(self, collection_name: str = "default") -> Chroma:
        """Return the Chroma instance for *collection_name* from the global manager."""
        try:
            logger.info(f"加载向量存储，集合名称: {collection_name}")

            vector_store = chroma_manager.get_chroma_instance(collection_name)

            return vector_store

        except Exception as e:
            logger.error(f"加载向量存储失败: {str(e)}")
            raise

    def add_documents_to_store(self, documents: List[Document], collection_name: str = "default") -> Chroma:
        """Append pre-split document chunks to an existing vector store.

        Args:
            documents: Pre-split document chunks to embed and store.
            collection_name: Target ChromaDB collection.

        Returns:
            The Chroma instance holding the collection.
        """
        try:
            logger.info(f"向向量存储添加文档，集合名称: {collection_name}")

            vector_store = chroma_manager.get_chroma_instance(collection_name)

            vector_store.add_documents(documents)

            logger.info(f"成功添加 {len(documents)} 个文档块到向量存储")

            return vector_store

        except Exception as e:
            logger.error(f"添加文档到向量存储失败: {str(e)}")
            raise

    def process_document(self, file_path: str, collection_name: str = "default") -> Chroma:
        """Full pipeline for one file: load -> split -> vectorize.

        Args:
            file_path: Path to the source document.
            collection_name: Target ChromaDB collection.

        Returns:
            The Chroma instance the chunks were stored in.
        """
        try:
            logger.info(f"开始处理文档: {file_path}")

            # 1. Load the document from disk.
            documents = self.load_document(file_path)

            # 2. Split it into chunks.
            split_docs = self.split_documents(documents)

            # 3. Add to the existing collection; create it on failure.
            # Bug fix: this was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit and hid the fallback reason.
            try:
                vector_store = self.add_documents_to_store(split_docs, collection_name)
            except Exception as e:
                logger.warning(f"添加到现有集合失败，尝试创建新集合: {str(e)}")
                vector_store = self.create_vector_store(split_docs, collection_name)

            logger.info(f"文档处理完成: {file_path}")

            return vector_store

        except Exception as e:
            logger.error(f"文档处理失败: {str(e)}")
            raise

    def search_similar(self, query: str, collection_name: str = "default", top_k: Optional[int] = None) -> List[Document]:
        """Return the *top_k* most similar documents for *query*.

        Args:
            query: Natural-language query text.
            collection_name: Collection to search.
            top_k: Result count; defaults to ``Config.TOP_K_RETRIEVAL``.
        """
        try:
            top_k = top_k or Config.TOP_K_RETRIEVAL

            vector_store = self.load_vector_store(collection_name)

            results = vector_store.similarity_search(query, k=top_k)

            logger.info(f"搜索完成，找到 {len(results)} 个相关文档")

            return results

        except Exception as e:
            logger.error(f"搜索失败: {str(e)}")
            raise

    def search_similar_with_score(self, query: str, collection_name: str = "default", top_k: Optional[int] = None) -> List[tuple]:
        """Like :meth:`search_similar`, but returns ``(Document, score)`` tuples."""
        try:
            top_k = top_k or Config.TOP_K_RETRIEVAL

            vector_store = self.load_vector_store(collection_name)

            results = vector_store.similarity_search_with_score(query, k=top_k)

            logger.info(f"搜索完成，找到 {len(results)} 个相关文档")

            return results

        except Exception as e:
            logger.error(f"搜索失败: {str(e)}")
            raise

    def get_collection_info(self, collection_name: str = "default") -> Dict[str, Any]:
        """Return name/count/embedding-model info for a collection.

        Uses a raw ChromaDB client (not the LangChain wrapper) to avoid
        instance conflicts. On any failure a zero-count default is returned
        instead of raising, so callers can treat this as best-effort.
        """
        try:
            client = self._get_chroma_client()

            collection = client.get_collection(name=collection_name)
            count = collection.count()

            return {
                "collection_name": collection_name,
                "document_count": count,
                "embedding_model": Config.EMBEDDING_MODEL_NAME
            }

        except Exception as e:
            logger.error(f"获取集合信息失败: {str(e)}")
            # Deliberate best-effort: return default info instead of raising.
            return {
                "collection_name": collection_name,
                "document_count": 0,
                "embedding_model": Config.EMBEDDING_MODEL_NAME
            }

    def list_collections(self) -> List[str]:
        """Return the names of all collections; empty list on failure (best-effort)."""
        try:
            client = self._get_chroma_client()

            collections = client.list_collections()
            collection_names = [col.name for col in collections]

            logger.info(f"发现 {len(collection_names)} 个集合: {collection_names}")
            return collection_names

        except Exception as e:
            logger.error(f"列出集合失败: {str(e)}")
            return []