from celery import Task
from fulltext.Document import Document
from fulltext.SearchDocument import SearchDocument
from dataprocess.elastic.ElasticsearchClient import ElasticsearchClient
from Logger import Logger
from .BaseSinker import BaseSinker
import time

logger = Logger.get_logger(__name__)

class EsSinker(Task, BaseSinker):
    """Elasticsearch implementation of document sinker.

    Runs as a Celery task; indexes each page of a parsed Document as a
    separate search document in Elasticsearch.
    """

    def __init__(self):
        # Cooperative init: EsSinker inherits from both Task and
        # BaseSinker, so super().__init__() must run so both bases
        # initialize along the MRO.
        super().__init__()
        self.es_client = ElasticsearchClient()
        # Ensure the target index exists before any indexing happens.
        self.es_client.create_index()

    def index_document(self, document: Document) -> bool:
        """Index every page of ``document`` into Elasticsearch.

        Args:
            document: Parsed document whose ``text_content`` maps
                page_number -> page text.

        Returns:
            True when all pages were indexed (pages whose SearchDocument
            could not be built are logged and skipped); False on the
            first indexing failure or on any unexpected exception.
        """
        try:
            for page_number, content in document.text_content.items():
                search_doc = SearchDocument.create(
                    file_path=document.file_path,
                    page_number=page_number,
                    text_content=content,
                    file_format=document.file_format,
                    size=document.file_size,
                )
                if not search_doc:
                    # Skip (do not abort) pages that fail to build.
                    logger.error(f"无法为页面 {page_number} 创建SearchDocument")
                    continue
                # The embedding endpoint is rate-limited, so throttle
                # between pages. Placed after the validity check so
                # failed pages do not pay the delay.
                time.sleep(0.5)
                # Convert to a plain dict for the ES client.
                doc_dict = search_doc.model_dump()

                if not self.es_client.index_document(doc_dict):
                    logger.error(f"索引页面 {page_number} 失败")
                    return False
            return True
        except Exception as e:
            logger.error(f"索引文档失败: {str(e)}")
            return False
