# app/processors/image_processor.py

import os
from typing import Dict, Any, List
from .base import BaseProcessor

class ImageProcessor(BaseProcessor):
    """Processor for standalone image files.

    Treats the input file itself as the single image, generates a short
    descriptive text for it, embeds the image via the Aliyun embedding API,
    and stores the vector in the knowledge base's Milvus image collection.
    """

    def __init__(self):
        super().__init__()
        # Extensions accepted by this processor.
        # NOTE(review): presumably checked by BaseProcessor.validate_file —
        # confirm against the base class.
        self.supported_types = ['jpg', 'jpeg', 'png', 'bmp', 'gif']

    def process(self, file_path: str, file_type: str, knowledge_base: str = "default") -> Dict[str, Any]:
        """Process one image file end-to-end.

        Args:
            file_path: Path to the image file on disk.
            file_type: File extension/type used for validation and metadata.
            knowledge_base: Target knowledge base (maps to a Milvus
                collection/partition); defaults to "default".

        Returns:
            Summary dict with description chunks, image paths, the number of
            vectors actually stored, and the knowledge base name.
        """
        # Imported lazily to avoid import cycles at module load time.
        from app.core.logging import get_logger
        logger = get_logger(__name__)

        self.validate_file(file_path, file_type)
        logger.info(f"开始处理图片文件: {file_path}, 知识库: {knowledge_base}")

        # An image file is handled directly as a single image.
        images = self.extract_images(file_path)
        logger.info(f"提取到 {len(images)} 个图片")

        # Generate descriptive text per image (used as the text-chunk payload).
        image_descriptions = self.generate_image_descriptions(images)
        logger.info(f"生成 {len(image_descriptions)} 个图片描述")

        # Vectorize and persist the images.
        logger.info("开始向量化存储图片...")
        stored_count = self.store_image_vectors(images, file_path, file_type, knowledge_base)
        logger.info(f"成功存储 {stored_count} 个图片向量到知识库 {knowledge_base}")

        return {
            'text_chunks_count': len(image_descriptions),
            'images_count': len(images),
            'text_chunks': image_descriptions,
            'images': images,
            'stored_vectors': stored_count,
            'knowledge_base': knowledge_base
        }

    def extract_text(self, file_path: str) -> List[str]:
        """Image files carry no extractable text; always return []."""
        return []

    def extract_images(self, file_path: str) -> List[str]:
        """Return the file's own path as the image list.

        Kept deliberately simple (no image-library dependency): the file path
        itself is the image if it exists, otherwise an empty list.
        """
        return [file_path] if os.path.exists(file_path) else []

    def generate_image_descriptions(self, image_paths: List[str]) -> List[str]:
        """Build a short descriptive text for each image path.

        The description embeds the file's base name so the text chunk is
        searchable by filename.
        """
        descriptions = []
        for image_path in image_paths:
            filename = os.path.basename(image_path)
            # BUG FIX: the original f-string never interpolated `filename`
            # (the variable was computed but unused), so every description
            # was identical. Include the base name in the text.
            description = f"图片文件: {filename}，包含图像内容，可用于图像检索和识别。"
            descriptions.append(description)
        return descriptions

    def store_image_vectors(self, image_paths: List[str], file_path: str, file_type: str, knowledge_base: str = "default") -> int:
        """Embed each image and insert the vectors into the Milvus image collection.

        Runs the whole pipeline in a daemon thread with a 120s timeout so a
        hung embedding API cannot block the caller indefinitely.

        Args:
            image_paths: Image files to vectorize.
            file_path: Originating source file (stored as metadata).
            file_type: File type (stored as metadata).
            knowledge_base: Non-"default" values are stored in a same-named
                Milvus partition (created on demand).

        Returns:
            Number of vectors successfully inserted; 0 on failure or timeout.
        """
        # Imported lazily to avoid import cycles at module load time.
        from app.core.logging import get_logger
        from app.core.database import get_image_collection
        from app.core.knowledge_base_manager import get_kb_manager
        from app.api.rag.utils import call_aliyun_image_embedding_api
        from config.settings import settings
        import threading
        import time
        import concurrent.futures

        logger = get_logger(__name__)

        # Written by the worker thread; None means "worker never finished".
        result = None

        def vectorize_with_timeout():
            # BUG FIX: the original assigned `result` as a closure-local
            # variable, so the outer `result` stayed None and the method
            # unconditionally returned 0. `nonlocal` propagates the outcome.
            nonlocal result
            try:
                # Resolve the image collection for this knowledge base.
                kb_manager = get_kb_manager()
                collection_names = kb_manager.get_collection_names(knowledge_base)
                image_collection_name = collection_names["image_collection"]
                collection = get_image_collection(image_collection_name)

                logger.info(f"开始向量化 {len(image_paths)} 个图片...")
                chunk_vectors = []

                # Small batches: image embedding is slow and API rate-limited.
                batch_size = 5
                for i in range(0, len(image_paths), batch_size):
                    batch_paths = image_paths[i:i + batch_size]
                    logger.info(f"并发处理第 {i+1}-{min(i+batch_size, len(image_paths))}/{len(image_paths)} 个图片...")

                    def process_chunk(image_path, index):
                        try:
                            vector = call_aliyun_image_embedding_api(image_path)
                            logger.info(f"  - 第 {index + 1} 个图片向量化成功")
                            return vector
                        except Exception as e:
                            logger.error(f"  - 第 {index + 1} 个图片向量化失败: {e}")
                            # Degrade to a random vector rather than dropping
                            # the image entirely.
                            import numpy as np
                            return np.random.rand(settings.IMAGE_VECTOR_DIMENSION).astype(np.float32)

                    # BUG FIX: the original collected vectors in as_completed()
                    # completion order and later zipped them with image_paths in
                    # submission order, so vectors could be paired with the
                    # wrong image_path. Track results by submission index.
                    indexed_vectors = {}
                    with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor:
                        future_to_index = {
                            executor.submit(process_chunk, path, idx): idx
                            for idx, path in enumerate(batch_paths)
                        }
                        for future in concurrent.futures.as_completed(future_to_index):
                            idx = future_to_index[future]
                            try:
                                indexed_vectors[idx] = future.result()
                            except Exception as e:
                                logger.error(f"并发处理异常: {e}")
                                # Same degradation as inside process_chunk.
                                import numpy as np
                                indexed_vectors[idx] = np.random.rand(settings.IMAGE_VECTOR_DIMENSION).astype(np.float32)

                    chunk_vectors.extend(indexed_vectors[idx] for idx in range(len(batch_paths)))
                    logger.info(f"  - 并发处理 {len(batch_paths)} 个图片完成")

                    # Brief pause between batches to stay under API limits.
                    time.sleep(0.5)

                logger.info(f"批量向量化完成，成功生成 {len(chunk_vectors)} 个图片向量")

                # Build Milvus entities; vectors now align 1:1 with their paths.
                entities = []
                for image_path, vector in zip(image_paths, chunk_vectors):
                    entities.append({
                        "image_path": image_path,
                        "source": file_path,
                        "file_type": file_type,
                        "vector": vector.tolist() if hasattr(vector, 'tolist') else vector
                    })

                if not entities:
                    logger.warning("没有有效的图片向量数据")
                    result = 0
                    return

                logger.info(f"插入 {len(entities)} 个图片向量到Milvus集合 {image_collection_name}...")

                if knowledge_base != "default":
                    # Non-default knowledge bases live in their own partition;
                    # create it on first use.
                    partition_name = knowledge_base
                    if not collection.has_partition(partition_name):
                        logger.info(f"创建图片分区: {partition_name}")
                        collection.create_partition(partition_name)
                    logger.info(f"插入图片到分区: {partition_name}")
                    collection.insert(entities, partition_name=partition_name)
                else:
                    # The default knowledge base uses the default partition.
                    collection.insert(entities)

                collection.flush()
                logger.info(f"成功存储 {len(entities)} 个图片向量")
                result = len(entities)

            except Exception as e:
                # Best-effort: log the failure and report 0 stored vectors.
                logger.error(f"图片向量化存储失败: {str(e)}")
                import traceback
                logger.error(traceback.format_exc())
                result = 0

        thread = threading.Thread(target=vectorize_with_timeout)
        thread.daemon = True
        thread.start()
        thread.join(timeout=120)  # hard cap so a stuck API call can't hang callers

        if thread.is_alive():
            logger.error("图片向量化处理超时")
            return 0

        return result if result is not None else 0