# app/processors/document_processor.py

import os
from io import BytesIO
from typing import Any, Dict, List, Optional

import fitz  # PyMuPDF
from docx import Document
from PIL import Image
from pptx import Presentation

from .base import BaseProcessor

class DocumentProcessor(BaseProcessor):
    """Document file processor.

    Extracts text (with positional metadata) and embedded images from
    PDF/DOCX/PPTX files, splits the text into chunks, and stores both text
    and image vectors into Milvus collections managed per knowledge base.
    """

    def __init__(self):
        super().__init__()
        # File extensions (without dot) this processor accepts.
        self.supported_types = ['pdf', 'docx', 'pptx']

    def process(self, file_path: str, file_type: str, knowledge_base: str = "default",
                oss_url: Optional[str] = None) -> Dict[str, Any]:
        """Process a document end-to-end: extract, chunk, vectorize, store.

        Args:
            file_path: Local path of the document to process.
            file_type: File type/extension used for validation.
            knowledge_base: Target knowledge base (maps to Milvus partition).
            oss_url: Optional OSS URL recorded alongside the text vectors.

        Returns:
            Summary dict with chunk/image counts, the chunks/images
            themselves, and how many vectors were actually stored.
        """
        from app.core.logging import get_logger
        logger = get_logger(__name__)

        self.validate_file(file_path, file_type)
        logger.info(f"开始处理文档文件: {file_path}, 知识库: {knowledge_base}")

        # Extract raw text segments and embedded images.
        logger.info("提取文本内容...")
        texts = self.extract_text(file_path)
        logger.info(f"提取到 {len(texts)} 个文本段落")

        logger.info("提取图片内容...")
        images = self.extract_images(file_path)
        logger.info(f"提取到 {len(images)} 个图片")

        # Split each text segment into chunks, carrying the segment's
        # metadata (page/paragraph/slide number, source) onto every chunk.
        logger.info("分割文本...")
        text_chunks = []
        for text_data in texts:
            if isinstance(text_data, dict):
                text = text_data['text']
                metadata = {k: v for k, v in text_data.items() if k != 'text'}
            else:
                # Defensive: extractors normally return dicts, but accept
                # bare strings too.
                text = text_data
                metadata = {}

            for chunk in self.split_text(text):
                text_chunks.append({'text': chunk, **metadata})

        logger.info(f"分割得到 {len(text_chunks)} 个文本块")

        # Vectorize and store the text chunks.
        logger.info("开始向量化存储文本...")
        stored_text_count = self.store_vectors(text_chunks, file_path, file_type, knowledge_base, oss_url)
        logger.info(f"成功存储 {stored_text_count} 个文本向量到知识库 {knowledge_base}")

        # Vectorize and store images, if any were extracted.
        stored_image_count = 0
        if images:
            logger.info("开始向量化存储图片...")
            stored_image_count = self.store_image_vectors(images, file_path, file_type, knowledge_base)
            logger.info(f"成功存储 {stored_image_count} 个图片向量到知识库 {knowledge_base}")

        return {
            'text_chunks_count': len(text_chunks),
            'images_count': len(images),
            'text_chunks': text_chunks,
            'images': images,
            'stored_vectors': stored_text_count + stored_image_count,
            'stored_text_vectors': stored_text_count,
            'stored_image_vectors': stored_image_count,
            'knowledge_base': knowledge_base
        }

    def extract_text(self, file_path: str) -> List[Dict[str, Any]]:
        """Extract text segments (with metadata) based on file extension.

        Returns an empty list for unsupported extensions.
        """
        file_type = os.path.splitext(file_path)[1].lower()

        if file_type == '.pdf':
            return self._extract_pdf_text(file_path)
        elif file_type == '.docx':
            return self._extract_docx_text(file_path)
        elif file_type == '.pptx':
            return self._extract_pptx_text(file_path)
        else:
            return []

    def extract_images(self, file_path: str) -> List[str]:
        """Extract embedded images based on file extension.

        Returns the local paths of the extracted image files. DOCX image
        extraction is not implemented; unsupported types yield [].
        """
        file_type = os.path.splitext(file_path)[1].lower()

        if file_type == '.pdf':
            return self._extract_pdf_images(file_path)
        elif file_type == '.pptx':
            return self._extract_pptx_images(file_path)
        else:
            return []

    @staticmethod
    def _metadata_value(doc: Any, key: str, default: Any = 0) -> Any:
        """Read a metadata field from a loader Document.

        Loader documents (langchain-style) carry metadata as a dict; this
        also tolerates attribute-style metadata objects. The original code
        used getattr() on the metadata dict, which never finds dict keys
        and therefore always returned the default — this fixes that.
        """
        meta = getattr(doc, 'metadata', None)
        if isinstance(meta, dict):
            return meta.get(key, default)
        if meta is not None:
            return getattr(meta, key, default)
        return default

    @staticmethod
    def _save_image_bytes(image_bytes: bytes, img_filename: str) -> str:
        """Persist raw image bytes under data/processed/frames; return the path."""
        img_path = os.path.join("data/processed/frames", img_filename)
        os.makedirs(os.path.dirname(img_path), exist_ok=True)
        with open(img_path, "wb") as img_file:
            img_file.write(image_bytes)
        return img_path

    def _extract_pdf_text(self, file_path: str) -> List[Dict[str, Any]]:
        """Extract PDF text with page numbers.

        Prefers the chatchat project's PDF loader; falls back to PyMuPDF
        when chatchat is not installed.
        """
        try:
            from chatchat.server.file_rag.document_loaders.mypdfloader import MyPDFLoader
            docs = MyPDFLoader(file_path).load()
            return [
                {
                    'text': doc.page_content,
                    'page_num': self._metadata_value(doc, 'page'),
                    'source': file_path,
                }
                for doc in docs
                if doc.page_content.strip()
            ]
        except ImportError:
            # Fallback: PyMuPDF, one entry per non-empty page.
            pdf = fitz.open(file_path)
            try:
                pages = []
                for page_index, page in enumerate(pdf):
                    page_text = page.get_text()
                    if page_text.strip():
                        pages.append({
                            'text': page_text,
                            'page_num': page_index + 1,  # page numbers are 1-based
                            'source': file_path,
                        })
                return pages
            finally:
                # Close even if extraction raises mid-loop (was leaked before).
                pdf.close()

    def _extract_pdf_images(self, file_path: str) -> List[str]:
        """Extract embedded PDF images to local files; return their paths."""
        stem = os.path.splitext(os.path.basename(file_path))[0]
        pdf = fitz.open(file_path)
        image_paths = []
        try:
            for page_num in range(len(pdf)):
                for img_index, img in enumerate(pdf[page_num].get_images()):
                    xref = img[0]  # first tuple element is the image xref
                    image_bytes = pdf.extract_image(xref)["image"]
                    # NOTE(review): files are always named .png although the
                    # embedded image may be another format — downstream code
                    # appears to use only the path, confirm before renaming.
                    img_filename = f"{stem}_page{page_num}_img{img_index}.png"
                    image_paths.append(self._save_image_bytes(image_bytes, img_filename))
        finally:
            pdf.close()
        return image_paths

    def _extract_docx_text(self, file_path: str) -> List[Dict[str, Any]]:
        """Extract DOCX text with paragraph numbers.

        Prefers the chatchat project's DOCX loader; falls back to
        python-docx when chatchat is not installed.
        """
        try:
            from chatchat.server.file_rag.document_loaders.mydocloader import MyDocLoader
            docs = MyDocLoader(file_path).load()
            return [
                {
                    'text': doc.page_content,
                    'paragraph_num': self._metadata_value(doc, 'paragraph'),
                    'source': file_path,
                }
                for doc in docs
                if doc.page_content.strip()
            ]
        except ImportError:
            # Fallback: python-docx, one entry per non-empty paragraph.
            docx_doc = Document(file_path)
            paragraphs = []
            for para_index, paragraph in enumerate(docx_doc.paragraphs):
                if paragraph.text.strip():
                    paragraphs.append({
                        'text': paragraph.text,
                        'paragraph_num': para_index + 1,  # paragraph numbers are 1-based
                        'source': file_path,
                    })
            return paragraphs

    def _extract_pptx_text(self, file_path: str) -> List[Dict[str, Any]]:
        """Extract PPTX text with slide numbers.

        Prefers the chatchat project's PPTX loader; falls back to
        python-pptx (concatenating all text shapes per slide) when chatchat
        is not installed.
        """
        try:
            from chatchat.server.file_rag.document_loaders.mypptloader import MyPPTLoader
            docs = MyPPTLoader(file_path).load()
            return [
                {
                    'text': doc.page_content,
                    'slide_num': self._metadata_value(doc, 'slide'),
                    'source': file_path,
                }
                for doc in docs
                if doc.page_content.strip()
            ]
        except ImportError:
            # Fallback: python-pptx, one entry per slide with any text.
            prs = Presentation(file_path)
            slides = []
            for slide_index, slide in enumerate(prs.slides):
                slide_text = ""
                for shape in slide.shapes:
                    if hasattr(shape, "text") and shape.text.strip():
                        slide_text += shape.text + "\n"
                if slide_text.strip():
                    slides.append({
                        'text': slide_text.strip(),
                        'slide_num': slide_index + 1,  # slide numbers are 1-based
                        'source': file_path,
                    })
            return slides

    def _extract_pptx_images(self, file_path: str) -> List[str]:
        """Extract PPTX picture shapes to local files; return their paths."""
        stem = os.path.splitext(os.path.basename(file_path))[0]
        prs = Presentation(file_path)
        image_paths = []
        for slide_num, slide in enumerate(prs.slides):
            for shape_num, shape in enumerate(slide.shapes):
                # Only picture shapes expose an `.image` attribute.
                if hasattr(shape, 'image'):
                    img_filename = f"{stem}_slide{slide_num}_img{shape_num}.png"
                    image_paths.append(self._save_image_bytes(shape.image.blob, img_filename))
        return image_paths

    def _cleanup_local_images(self, image_paths: List[str]) -> None:
        """Best-effort deletion of locally saved image files.

        Called after image vectors are stored (or after storage fails) so
        the frames directory does not accumulate files.
        """
        from app.core.logging import get_logger
        logger = get_logger(__name__)
        cleaned_count = 0
        for image_path in image_paths:
            try:
                if os.path.exists(image_path):
                    os.remove(image_path)
                    cleaned_count += 1
                    logger.debug(f"删除本地图片文件: {image_path}")
            except Exception as e:
                logger.error(f"删除本地图片文件 {image_path} 时出错: {e}")
        logger.info(f"清理完成，删除了 {cleaned_count}/{len(image_paths)} 个本地图片文件")

    def store_image_vectors(self, image_paths: List[str], file_path: str, file_type: str,
                            knowledge_base: str = "default") -> int:
        """Vectorize images and store them in the Milvus image collection.

        Runs the whole pipeline in a daemon worker thread with a 120s
        timeout. Local image files are deleted afterwards whether storage
        succeeded or failed.

        Returns:
            Number of image vectors stored (0 on failure or timeout).
        """
        from app.core.logging import get_logger
        import threading

        logger = get_logger(__name__)

        # BUGFIX: the worker previously assigned a plain local `result`,
        # which never reached the outer scope, so this method always
        # returned 0. A mutable holder lets the worker hand the count back.
        result_holder = {"count": 0}

        def vectorize_with_timeout():
            from app.core.database import get_image_collection
            from app.core.knowledge_base_manager import get_kb_manager
            from app.api.rag.utils import call_aliyun_image_embedding_api
            from config.settings import settings
            import time
            import concurrent.futures

            try:
                # Resolve the image collection for this knowledge base.
                kb_manager = get_kb_manager()
                collection_names = kb_manager.get_collection_names(knowledge_base)
                image_collection_name = collection_names["image_collection"]
                collection = get_image_collection(image_collection_name)

                logger.info(f"开始向量化 {len(image_paths)} 个文档图片...")
                chunk_vectors = []

                # Small batches + low concurrency: image embedding is slow
                # and the API is rate-limited.
                batch_size = 5
                for i in range(0, len(image_paths), batch_size):
                    batch_paths = image_paths[i:i + batch_size]
                    logger.info(f"并发处理第 {i+1}-{min(i+batch_size, len(image_paths))}/{len(image_paths)} 个文档图片...")

                    batch_vectors = []

                    def process_chunk(image_path, index):
                        try:
                            vector = call_aliyun_image_embedding_api(image_path)
                            logger.info(f"  - 第 {index + 1} 个文档图片向量化成功")
                            return vector
                        except Exception as e:
                            logger.error(f"  - 第 {index + 1} 个文档图片向量化失败: {e}")
                            # Degrade gracefully with a random vector so the
                            # batch still completes.
                            import numpy as np
                            return np.random.rand(settings.IMAGE_VECTOR_DIMENSION).astype(np.float32)

                    with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor:
                        futures = [executor.submit(process_chunk, path, idx) for idx, path in enumerate(batch_paths)]
                        for future in concurrent.futures.as_completed(futures):
                            try:
                                batch_vectors.append(future.result())
                            except Exception as e:
                                logger.error(f"并发处理异常: {e}")
                                import numpy as np
                                batch_vectors.append(
                                    np.random.rand(settings.IMAGE_VECTOR_DIMENSION).astype(np.float32)
                                )

                    chunk_vectors.extend(batch_vectors)
                    logger.info(f"  - 并发处理 {len(batch_paths)} 个文档图片完成")
                    # Brief pause between batches to stay under API limits.
                    time.sleep(0.5)

                logger.info(f"批量向量化完成，成功生成 {len(chunk_vectors)} 个文档图片向量")

                # Build Milvus entities; zip() stops at the shorter sequence,
                # so only paths with a vector are included.
                entities = [
                    {
                        "image_path": image_path,
                        "source": file_path,
                        "file_type": file_type,
                        "vector": vector.tolist() if hasattr(vector, 'tolist') else vector,
                    }
                    for image_path, vector in zip(image_paths, chunk_vectors)
                ]

                if not entities:
                    logger.warning("没有有效的文档图片向量数据")
                    return

                logger.info(f"插入 {len(entities)} 个文档图片向量到Milvus集合 {image_collection_name}...")

                # Non-default knowledge bases go into their own partition.
                if knowledge_base != "default":
                    partition_name = knowledge_base
                    if not collection.has_partition(partition_name):
                        logger.info(f"创建文档图片分区: {partition_name}")
                        collection.create_partition(partition_name)
                    logger.info(f"插入文档图片到分区: {partition_name}")
                    collection.insert(entities, partition_name=partition_name)
                else:
                    collection.insert(entities)

                collection.flush()
                logger.info(f"成功存储 {len(entities)} 个文档图片向量")

                logger.info("开始清理本地图片文件...")
                self._cleanup_local_images(image_paths)
                result_holder["count"] = len(entities)

            except Exception as e:
                logger.error(f"文档图片向量化存储失败: {str(e)}")
                import traceback
                logger.error(traceback.format_exc())
                # Clean up local files even on failure.
                logger.info("向量化失败，开始清理本地图片文件...")
                self._cleanup_local_images(image_paths)

        thread = threading.Thread(target=vectorize_with_timeout, daemon=True)
        thread.start()
        thread.join(timeout=120)  # generous timeout: image embedding is slow

        if thread.is_alive():
            # Worker is still running; report failure. The daemon thread may
            # still finish in the background, but its count is discarded.
            logger.error("文档图片向量化处理超时")
            return 0

        return result_holder["count"]