# tasks/vector_tasks.py

import time
import traceback
from typing import List, Dict, Any
from celery import current_task

from config.settings import settings
from config.model_config import ModelConfig
from tasks.celery_app import celery_app
from app.core.database import connect_milvus, get_text_collection, get_image_collection
from app.core.logging import get_logger
from sentence_transformers import SentenceTransformer

logger = get_logger(__name__)

# Global model instances (lazy-loaded, one per worker process)
_text_model = None
_multimodal_model = None

def get_text_model():
    """Return the process-wide text embedding model, loading it on first use.

    The loaded ``SentenceTransformer`` is cached in a module-level global so
    each worker process pays the model-load cost only once.
    """
    global _text_model
    if _text_model is not None:
        return _text_model
    _text_model = SentenceTransformer(ModelConfig.TEXT_EMBEDDING_MODEL)
    return _text_model

def get_multimodal_model():
    """Return the process-wide multimodal embedding model, loading it on first use.

    NOTE: this is currently backed by a plain ``SentenceTransformer`` as a
    placeholder; swap in the real multimodal embedding API when available.
    The instance is cached in a module-level global per worker process.
    """
    global _multimodal_model
    if _multimodal_model is not None:
        return _multimodal_model
    _multimodal_model = SentenceTransformer(ModelConfig.MULTIMODAL_EMBEDDING_MODEL)
    return _multimodal_model

@celery_app.task(bind=True, queue='vector_processing')
def vectorize_text_task(self, text_chunks: List[str], source: str, file_type: str):
    """Embed text chunks and insert the vectors into the Milvus text collection.

    Args:
        text_chunks: Pre-split text fragments to embed.
        source: Identifier of the originating document, stored on each entity.
        file_type: File type of the source document, stored on each entity.

    Returns:
        A result dict with ``status``, chunk/vector counts, ``processing_time``
        and a human-readable ``message``. Failures are reported through the
        returned dict and task meta instead of re-raising.
    """
    start_time = time.time()

    try:
        logger.info(f"开始向量化文本: {source} ({len(text_chunks)} 个文本块)")

        total = len(text_chunks)
        self.update_state(
            state=settings.TASK_STATUS_PROCESSING,
            meta={'current': 0, 'total': total, 'status': '开始文本向量化...'}
        )

        # Empty input: skip model loading and Milvus entirely rather than
        # inserting an empty entity list (rejected by some Milvus versions).
        if not text_chunks:
            result = {
                'status': settings.TASK_STATUS_COMPLETED,
                'text_chunks_count': 0,
                'vectors_count': 0,
                'processing_time': time.time() - start_time,
                'message': '文本向量化完成'
            }
            self.update_state(state=settings.TASK_STATUS_COMPLETED, meta=result)
            return result

        # Connect to Milvus and obtain the text collection handle.
        connect_milvus()
        collection = get_text_collection()

        # Lazily-cached embedding model (loaded once per worker process).
        model = get_text_model()

        # Batch-encode all chunks; embeddings are normalized so inner-product
        # search behaves like cosine similarity.
        vectors = model.encode(
            text_chunks, 
            show_progress_bar=False, 
            normalize_embeddings=True,
            batch_size=ModelConfig.BATCH_SIZE
        )

        # Build the insert payload, reporting progress every 10 entities.
        entities = []
        for i, (text, vector) in enumerate(zip(text_chunks, vectors)):
            entities.append({
                "text": text,
                "source": source,
                "file_type": file_type,
                "vector": vector.tolist()
            })

            if (i + 1) % 10 == 0:
                progress = int(((i + 1) / total) * 100)
                self.update_state(
                    state=settings.TASK_STATUS_PROCESSING,
                    meta={
                        'current': i + 1,
                        'total': total,
                        'progress': progress,
                        'status': f'已向量化 {i + 1}/{total} 个文本块'
                    }
                )

        # Persist the vectors; flush makes them durable and searchable.
        collection.insert(entities)
        collection.flush()

        processing_time = time.time() - start_time

        result = {
            'status': settings.TASK_STATUS_COMPLETED,
            'text_chunks_count': len(text_chunks),
            'vectors_count': len(vectors),
            'processing_time': processing_time,
            'message': '文本向量化完成'
        }

        self.update_state(
            state=settings.TASK_STATUS_COMPLETED,
            meta=result
        )

        logger.info(f"文本向量化完成: {source} - 耗时: {processing_time:.2f}s")

        return result

    except Exception as e:
        error_msg = f"文本向量化失败: {str(e)}"
        logger.error(error_msg)
        logger.error(traceback.format_exc())

        result = {
            'status': settings.TASK_STATUS_FAILED,
            'error': str(e),
            'message': error_msg,
            'processing_time': time.time() - start_time
        }

        # NOTE(review): returning normally after update_state(FAILED) makes
        # Celery mark the task SUCCESS with this dict as its result; callers
        # must inspect result['status'], not the Celery state — confirm this
        # is intentional.
        self.update_state(
            state=settings.TASK_STATUS_FAILED,
            meta=result
        )

        return result

    finally:
        # Drop the Milvus connection so worker processes don't accumulate
        # stale connections between tasks.
        from pymilvus import connections
        # NOTE(review): in pymilvus 2.x list_connections() returns
        # (alias, conn) tuples, so this string membership test may never
        # match — verify against the installed pymilvus version
        # (connections.has_connection("default") may be the intended check).
        if "default" in connections.list_connections():
            connections.disconnect("default")

@celery_app.task(bind=True, queue='vector_processing')
def vectorize_image_task(self, image_paths: List[str], source: str, file_type: str):
    """Embed images and insert the vectors into the Milvus image collection.

    NOTE: the actual multimodal embedding call is not wired up yet — random
    vectors are generated as placeholders (so results are NOT deterministic).

    Args:
        image_paths: Filesystem paths of the images to embed.
        source: Identifier of the originating document, stored on each entity.
        file_type: File type of the source document, stored on each entity.

    Returns:
        A result dict with ``status``, image/vector counts, ``processing_time``
        and a human-readable ``message``. Failures are reported through the
        returned dict and task meta instead of re-raising.
    """
    # Hoisted out of the per-image loop (was re-imported on every iteration).
    import numpy as np

    start_time = time.time()

    try:
        logger.info(f"开始向量化图片: {source} ({len(image_paths)} 个图片)")

        total = len(image_paths)
        self.update_state(
            state=settings.TASK_STATUS_PROCESSING,
            meta={'current': 0, 'total': total, 'status': '开始图片向量化...'}
        )

        # Empty input: skip model loading and Milvus entirely rather than
        # inserting an empty entity list (rejected by some Milvus versions).
        if not image_paths:
            result = {
                'status': settings.TASK_STATUS_COMPLETED,
                'images_count': 0,
                'vectors_count': 0,
                'processing_time': time.time() - start_time,
                'message': '图片向量化完成'
            }
            self.update_state(state=settings.TASK_STATUS_COMPLETED, meta=result)
            return result

        # Connect to Milvus and obtain the image collection handle.
        connect_milvus()
        collection = get_image_collection()

        # Loaded and cached for when the real multimodal API is integrated;
        # currently unused because the vectors below are placeholders.
        model = get_multimodal_model()

        # Generate one placeholder vector per image, reporting progress.
        vectors = []
        for i, image_path in enumerate(image_paths):
            # TODO: replace with the real multimodal embedding call.
            vector = np.random.rand(ModelConfig.IMAGE_VECTOR_DIMENSION).astype(np.float32)
            vectors.append(vector)

            progress = int(((i + 1) / total) * 100)
            self.update_state(
                state=settings.TASK_STATUS_PROCESSING,
                meta={
                    'current': i + 1,
                    'total': total,
                    'progress': progress,
                    'status': f'已向量化 {i + 1}/{total} 个图片'
                }
            )

        # Build the insert payload.
        entities = [
            {
                "image_path": image_path,
                "source": source,
                "file_type": file_type,
                "vector": vector.tolist()
            }
            for image_path, vector in zip(image_paths, vectors)
        ]

        # Persist the vectors; flush makes them durable and searchable.
        collection.insert(entities)
        collection.flush()

        processing_time = time.time() - start_time

        result = {
            'status': settings.TASK_STATUS_COMPLETED,
            'images_count': len(image_paths),
            'vectors_count': len(vectors),
            'processing_time': processing_time,
            'message': '图片向量化完成'
        }

        self.update_state(
            state=settings.TASK_STATUS_COMPLETED,
            meta=result
        )

        logger.info(f"图片向量化完成: {source} - 耗时: {processing_time:.2f}s")

        return result

    except Exception as e:
        error_msg = f"图片向量化失败: {str(e)}"
        logger.error(error_msg)
        logger.error(traceback.format_exc())

        result = {
            'status': settings.TASK_STATUS_FAILED,
            'error': str(e),
            'message': error_msg,
            'processing_time': time.time() - start_time
        }

        # NOTE(review): returning normally after update_state(FAILED) makes
        # Celery mark the task SUCCESS with this dict as its result; callers
        # must inspect result['status'], not the Celery state — confirm this
        # is intentional.
        self.update_state(
            state=settings.TASK_STATUS_FAILED,
            meta=result
        )

        return result

    finally:
        # Drop the Milvus connection so worker processes don't accumulate
        # stale connections between tasks.
        from pymilvus import connections
        # NOTE(review): in pymilvus 2.x list_connections() returns
        # (alias, conn) tuples, so this string membership test may never
        # match — verify against the installed pymilvus version
        # (connections.has_connection("default") may be the intended check).
        if "default" in connections.list_connections():
            connections.disconnect("default")