# app/processors/base.py

from abc import ABC, abstractmethod
from typing import Dict, Any, List
from config.settings import settings
from config.model_config import ModelConfig

class BaseProcessor(ABC):
    """Abstract base class for file processors.

    Subclasses implement `process` / `extract_text` / `extract_images` for a
    concrete file type; this base provides shared text splitting and
    vector-storage plumbing (embedding + Milvus insert).
    """

    def __init__(self):
        # File extensions/types the concrete processor accepts (filled by subclass).
        self.supported_types: List[str] = []
        # Chunking parameters shared by all splitters.
        self.chunk_size = ModelConfig.CHUNK_SIZE
        self.chunk_overlap = ModelConfig.CHUNK_OVERLAP

    @abstractmethod
    def process(self, file_path: str, file_type: str, knowledge_base: str = "default") -> Dict[str, Any]:
        """Process a file end-to-end; returns a processor-specific result dict."""
        pass

    def store_vectors(self, text_chunks: List[str], file_path: str, file_type: str, knowledge_base: str = "default", oss_url: str = None) -> int:
        """Embed text chunks and store them in Milvus.

        Args:
            text_chunks: list of plain strings, or dicts with at least a
                'text' key (optionally 'page_num'/'slide_num'/'paragraph_num'/'source').
            file_path: local path of the source file (fallback `source`).
            file_type: stored alongside each vector.
            knowledge_base: logical KB name; non-default KBs map to a partition.
            oss_url: if given, preferred over any per-chunk/local source path.

        Returns:
            Number of vectors stored (0 on failure or timeout).

        Note: the work runs on a daemon thread with a 120s join timeout; on
        timeout we return 0 but the worker thread may still be running.
        """
        from app.core.logging import get_logger
        logger = get_logger(__name__)

        if not text_chunks:
            logger.warning("没有文本块需要向量化")
            return 0

        import threading
        import time

        def vectorize_with_timeout():
            nonlocal result
            try:
                from app.core.database import get_text_collection
                from app.api.rag.utils import call_aliyun_embedding_api

                # Drop chunks whose text is empty/whitespace-only.
                filtered_chunks = []
                for i, chunk in enumerate(text_chunks):
                    text = chunk.get('text', '') if isinstance(chunk, dict) else chunk
                    if text and text.strip():
                        filtered_chunks.append(chunk)
                    else:
                        logger.warning(f"跳过空的文本块 {i+1}")

                logger.info(f"过滤后剩余 {len(filtered_chunks)} 个有效文本块（原始 {len(text_chunks)} 个）")

                if not filtered_chunks:
                    logger.warning("没有有效的文本块，跳过向量化")
                    result = 0
                    return

                logger.info(f"开始向量化 {len(filtered_chunks)} 个文本块...")

                # Resolve the target collection for this knowledge base.
                from app.core.knowledge_base_manager import get_kb_manager
                kb_manager = get_kb_manager()
                collection_names = kb_manager.get_collection_names(knowledge_base)
                text_collection_name = collection_names["text_collection"]

                # Auto-create the knowledge base if it does not exist yet.
                if not kb_manager.get_knowledge_base(knowledge_base):
                    logger.info(f"知识库 {knowledge_base} 不存在，自动创建")
                    kb_manager.create_knowledge_base(knowledge_base, f"自动创建的知识库: {knowledge_base}")

                collection = get_text_collection(text_collection_name)

                # Embed in concurrent batches to speed things up.
                chunk_vectors = []
                batch_size = 10   # chunks per concurrent batch
                max_retries = 2   # kept low to avoid blowing the overall timeout

                for i in range(0, len(filtered_chunks), batch_size):
                    batch_chunks = filtered_chunks[i:i + batch_size]
                    logger.info(f"并发处理第 {i+1}-{min(i+batch_size, len(filtered_chunks))}/{len(filtered_chunks)} 个文本块...")

                    import concurrent.futures
                    batch_vectors = []

                    def process_chunk(chunk, index):
                        try:
                            text = chunk.get('text', '') if isinstance(chunk, dict) else chunk
                            vector = call_aliyun_embedding_api(text)
                            logger.info(f"  - 第 {i+index+1} 个文本块向量化成功")
                            return vector
                        except Exception as e:
                            logger.error(f"  - 第 {i+index+1} 个文本块向量化失败: {e}")
                            # Degrade to a random vector so the pipeline keeps going.
                            import numpy as np
                            return np.random.rand(settings.VECTOR_DIMENSION).astype(np.float32)

                    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
                        futures = [executor.submit(process_chunk, chunk, idx) for idx, chunk in enumerate(batch_chunks)]
                        # BUGFIX: iterate in SUBMISSION order (not as_completed),
                        # otherwise vectors are paired with the wrong chunks below.
                        for future in futures:
                            try:
                                batch_vectors.append(future.result())
                            except Exception as e:
                                logger.error(f"并发处理异常: {e}")
                                import numpy as np
                                batch_vectors.append(
                                    np.random.rand(settings.VECTOR_DIMENSION).astype(np.float32)
                                )

                    chunk_vectors.extend(batch_vectors)
                    logger.info(f"  - 并发处理 {len(batch_chunks)} 个文本块完成")

                    # Brief pause between batches to stay under API rate limits.
                    time.sleep(0.2)

                logger.info(f"批量向量化完成，成功生成 {len(chunk_vectors)} 个向量")

                # Build Milvus entities; chunks and vectors are now index-aligned.
                entities = []
                for chunk_data, vector in zip(filtered_chunks[:len(chunk_vectors)], chunk_vectors):
                    if isinstance(chunk_data, dict):
                        text = chunk_data.get('text', '')
                        page_num = chunk_data.get('page_num', 0)
                        slide_num = chunk_data.get('slide_num', 0)
                        paragraph_num = chunk_data.get('paragraph_num', 0)
                        # Prefer the OSS URL; fall back to chunk source / local path.
                        source = oss_url if oss_url else chunk_data.get('source', file_path)
                    else:
                        # Legacy plain-string chunk format.
                        text = chunk_data
                        page_num = 0
                        slide_num = 0
                        paragraph_num = 0
                        source = oss_url if oss_url else file_path

                    entities.append({
                        "text": text,
                        "source": source,
                        "file_type": file_type,
                        "vector": vector.tolist() if hasattr(vector, 'tolist') else vector,
                        "page_num": page_num,
                        "slide_num": slide_num,
                        "paragraph_num": paragraph_num,
                    })

                if not entities:
                    logger.warning("没有有效的向量数据")
                    result = 0
                    return

                logger.info(f"插入 {len(entities)} 个向量到Milvus集合 {text_collection_name}...")

                # Non-default knowledge bases live in their own partition.
                if knowledge_base != "default":
                    partition_name = knowledge_base
                    if not collection.has_partition(partition_name):
                        logger.info(f"创建分区: {partition_name}")
                        collection.create_partition(partition_name)

                    logger.info(f"插入到分区: {partition_name}")
                    collection.insert(entities, partition_name=partition_name)
                else:
                    collection.insert(entities)

                collection.flush()

                logger.info(f"成功存储 {len(entities)} 个向量")
                result = len(entities)

            except Exception as e:
                logger.error(f"向量化存储失败: {str(e)}")
                import traceback
                logger.error(traceback.format_exc())
                result = 0

        result = None
        thread = threading.Thread(target=vectorize_with_timeout)
        thread.daemon = True
        thread.start()
        thread.join(timeout=120)  # overall embedding + insert budget

        if thread.is_alive():
            logger.error("向量化处理超时")
            return 0

        return result if result is not None else 0

    @abstractmethod
    def extract_text(self, file_path: str) -> List[str]:
        """Extract text content from the file."""
        pass

    @abstractmethod
    def extract_images(self, file_path: str) -> List[str]:
        """Extract image content from the file."""
        pass

    def split_text(self, text: str) -> List[str]:
        """Split text into chunks.

        Prefers the chatchat recursive Chinese splitter, falls back to the
        plain Chinese splitter, and finally to the built-in simple splitter.
        (Fixes the original's broken nested-try indentation, which made the
        module unparseable.)
        """
        try:
            from chatchat.server.file_rag.text_splitter.chinese_recursive_text_splitter import ChineseRecursiveTextSplitter
            splitter = ChineseRecursiveTextSplitter(
                chunk_size=self.chunk_size,
                chunk_overlap=self.chunk_overlap,
                separators=[
                    "\n\n",
                    "\n",
                    "。|！|？",
                    r"\.\s|\!\s|\?\s",
                    r"；|;\s",
                    r"，|,\s",
                ],
                keep_separator=True,
                is_separator_regex=True
            )
            return splitter.split_text(text)
        except ImportError:
            try:
                # Fallback: simpler Chinese text splitter.
                from chatchat.server.file_rag.text_splitter.chinese_text_splitter import ChineseTextSplitter
                splitter = ChineseTextSplitter(
                    chunk_size=self.chunk_size,
                    chunk_overlap=self.chunk_overlap
                )
                return splitter.split_text(text)
            except ImportError:
                # Last resort: built-in boundary-aware splitter.
                return self._improved_simple_split(text)

    def _improved_simple_split(self, text: str) -> List[str]:
        """Boundary-aware fallback splitter.

        Splits `text` into chunks of at most `chunk_size`, preferring to cut
        at (in priority order) a paragraph break, a sentence terminator, or a
        comma within the last 200 characters of the window, with
        `chunk_overlap` characters of overlap between consecutive chunks.
        Chunks of 10 characters or fewer (after strip) are dropped.

        Fixes vs the original (besides the mangled indentation):
        - paragraph detection now actually checks for two consecutive
          newlines (`text[i] in '\\n\\n'` only ever matched a single '\\n');
        - guaranteed forward progress, so overlap >= advance can no longer
          loop forever;
        - the loop stops once the window reaches the end of the text instead
          of emitting a duplicate overlap-tail chunk.
        """
        if not text or len(text) <= self.chunk_size:
            return [text] if text.strip() else []

        text = self._preprocess_text(text)

        chunks: List[str] = []
        start = 0
        text_len = len(text)

        while start < text_len:
            end = start + self.chunk_size

            if end < text_len:
                # Walk back up to 200 chars looking for the best boundary:
                # paragraph > sentence end > comma.
                for i in range(end, max(start, end - 200), -1):
                    if text[i:i + 2] == '\n\n':      # paragraph break
                        end = i + 2
                        break
                    elif text[i] in '。！？；\n':     # sentence end
                        end = i + 1
                        break
                    elif text[i] in '，,':           # comma
                        end = i + 1
                        break

            chunk = text[start:end].strip()
            if chunk and len(chunk) > 10:  # drop very short fragments
                chunks.append(chunk)

            if end >= text_len:
                break

            # Advance with overlap, but always make forward progress.
            next_start = end - self.chunk_overlap
            start = next_start if next_start > start else end

        return chunks

    def _preprocess_text(self, text: str) -> str:
        """Normalize whitespace and insert newlines after sentence enders."""
        import re

        # Collapse excessive blank lines and runs of spaces.
        text = re.sub(r'\n{3,}', '\n\n', text)
        text = re.sub(r' {2,}', ' ', text)

        # Break after sentence punctuation (unless followed by a quote),
        # after six-dot ellipses, and after double '…'.
        text = re.sub(r'([;；.!?。！？\?])([^"\'\"])', r'\1\n\2', text)
        text = re.sub(r'(\.{6})([^"\'\"])', r'\1\n\2', text)
        text = re.sub(r'(\…{2})([^"\'\"])', r'\1\n\2', text)

        return text.strip()

    def validate_file(self, file_path: str, file_type: str) -> bool:
        """Validate that the file exists and its type is supported.

        Raises:
            FileNotFoundError: if `file_path` does not exist.
            ValueError: if `file_type` is not in `self.supported_types`.
        """
        import os
        if not os.path.exists(file_path):
            raise FileNotFoundError(f"文件不存在: {file_path}")

        if file_type not in self.supported_types:
            raise ValueError(f"不支持的文件类型: {file_type}")

        return True