from celery import Task
from dataprocess.elastic.EsSinker import EsSinker
from fulltext.Document import Document
from Logger import Logger
from utils.TextChunker import TextChunker

logger = Logger.get_logger(__name__)

class TxtProcessor(EsSinker):
    """Process TXT files and index them in Elasticsearch."""

    def run(self, messages: dict) -> bool:
        """
        Read a UTF-8 text file, chunk its contents, and index the
        resulting document in Elasticsearch.

        Args:
            messages: message payload; must contain a 'file_path' key
                pointing at the text file to process.

        Returns:
            bool: True if the document was indexed successfully,
            False on any error (the error is logged, not re-raised).
        """
        try:
            file_path = messages['file_path']
            # Read inside the context manager; chunking and indexing
            # below do not need the file handle held open.
            with open(file_path, 'r', encoding='utf-8') as f:
                content = f.read()
            chunks = TextChunker().chunk_text(content)
            # Map 1-based chunk position (as string key) -> chunk text.
            merged_dict = {
                str(position): chunk
                for position, chunk in enumerate(chunks, start=1)
            }
            document = Document.create(file_path=file_path, text_content=merged_dict)
            # Bug fix: model_dump_json is a method — without the call the
            # bound-method repr was logged instead of the document JSON.
            logger.info(f'start to process doc: {document.model_dump_json()}')
            return self.index_document(document)
        except Exception as e:
            # logger.exception records the full traceback with the message.
            logger.exception(f"处理文档失败: {str(e)}")
            return False
