from typing import List
from Logger import Logger

# Module-level logger bound to this module's name (project Logger wrapper).
logger = Logger.get_logger(__name__)

class TextPaddingCleaner:
    """Utilities for removing padding overlap between adjacent text chunks."""

    @staticmethod
    def clean_text_padding(texts: List[str], min_padding_length: int = 20) -> List[str]:
        """
        Remove padding overlaps between adjacent text chunks and merge them.

        Two consecutive chunks are merged when a suffix of the first equals a
        prefix of the second for at least ``min_padding_length`` characters;
        the duplicated region is kept only once. Chunks with no sufficient
        overlap are emitted unchanged, in order.

        Args:
            texts: List of text chunks that may contain padding overlaps
            min_padding_length: Minimum length of padding to be considered for removal

        Returns:
            List[str]: Merged text chunks with padding removed
        """
        if not texts or len(texts) < 2:
            return texts

        result: List[str] = []
        merged_text = texts[0]

        # Guard against min_padding_length <= 0: length 0 would compare
        # merged_text[-0:] (the whole string) against "", which is degenerate.
        shortest = max(min_padding_length, 1)

        for current_text in texts[1:]:
            max_check_len = min(len(merged_text), len(current_text))

            # Scan candidate overlap lengths from longest to shortest and stop
            # at the first hit: that IS the longest suffix/prefix match, so we
            # avoid testing every remaining length (same result, fewer
            # string comparisons than an ascending full scan).
            padding_len = 0
            for length in range(max_check_len, shortest - 1, -1):
                if merged_text[-length:] == current_text[:length]:
                    padding_len = length
                    break

            if padding_len >= min_padding_length:
                # Drop the duplicated prefix of the current chunk and merge.
                merged_text = merged_text + current_text[padding_len:]
                logger.debug(f"Found padding of length {padding_len}, merged texts")
            else:
                # No significant padding found: flush the accumulated chunk
                # and start accumulating from the current one.
                result.append(merged_text)
                merged_text = current_text

        # Flush the final accumulated chunk.
        result.append(merged_text)
        return result

    @staticmethod
    def merge_for_context(texts: List[str], max_context_length: int = 10000) -> List[str]:
        """
        Select text chunks for an LLM context window, prioritizing longer chunks.

        Chunks are considered longest-first and added greedily while the
        combined length stays within ``max_context_length``. A chunk that does
        not fit the remaining budget is skipped — shorter chunks later in the
        order may still fit. Any single chunk longer than the limit is dropped
        with a warning.

        Args:
            texts: List of text chunks to be merged
            max_context_length: Maximum length of context window for LLM

        Returns:
            List[str]: Merged text chunks optimized for context
        """
        if not texts:
            return []

        # Longest-first so the most substantial chunks are preferred.
        sorted_texts = sorted(texts, key=len, reverse=True)

        current_chunk: List[str] = []
        current_length = 0

        for text in sorted_texts:
            text_length = len(text)
            # A single chunk that can never fit is dropped outright.
            if text_length > max_context_length:
                logger.warning(f"Skipping text chunk exceeding max length: {text_length}")
                continue

            if current_length + text_length <= max_context_length:
                current_chunk.append(text)
                current_length += text_length
            # else: this chunk doesn't fit the remaining budget — keep
            # scanning instead of returning, since shorter chunks may still
            # fit. (An early return here used to discard them.)

        return current_chunk
