"""
Text processing utilities - text chunking and preprocessing
"""
import re
import logging
from typing import List, Dict, Any, Optional
from pathlib import Path
import json

from ..config.settings import FILE_CONFIG

logger = logging.getLogger(__name__)

class TextProcessor:
    """Text processor: cleans raw document text and splits it into chunks.

    Chunking parameters (``chunk_size``, ``chunk_overlap``,
    ``max_chunks_per_file``) are read once from the project ``FILE_CONFIG``.
    """

    def __init__(self):
        # Chunking parameters, loaded once from project configuration.
        self.chunk_size = FILE_CONFIG["chunk_size"]
        self.chunk_overlap = FILE_CONFIG["chunk_overlap"]
        self.max_chunks_per_file = FILE_CONFIG["max_chunks_per_file"]

    def clean_text(self, text: str) -> str:
        """Normalize whitespace and strip unsupported characters.

        Collapses runs of horizontal whitespace to a single space, removes
        characters other than CJK, word characters, whitespace and common
        punctuation, strips each line, and collapses runs of blank lines to
        exactly one blank line so paragraph boundaries survive for
        split_text_by_paragraphs().
        """
        if not text:
            return ""

        # Collapse horizontal whitespace only. The previous r'\s+' also
        # consumed newlines, which made the blank-line handling below dead
        # code and left split_text_by_paragraphs() nothing to split on.
        text = re.sub(r'[^\S\n]+', ' ', text)

        # Remove special characters (keep CJK, word characters, whitespace
        # and common punctuation).
        text = re.sub(r'[^\u4e00-\u9fff\w\s.,!?;:()\[\]{}"\'-]', '', text)

        # Strip each line, then collapse runs of blank lines to a single
        # blank line so paragraph breaks remain detectable downstream.
        lines = [line.strip() for line in text.split('\n')]
        text = re.sub(r'\n{3,}', '\n\n', '\n'.join(lines))

        return text.strip()

    def split_text_by_sentences(self, text: str) -> List[str]:
        """Split *text* into sentences on CJK terminators and newlines."""
        if not text:
            return []

        # Chinese sentence terminators plus newline act as boundaries.
        sentences = re.split(r'[。！？；\n]', text)
        return [s.strip() for s in sentences if s.strip()]

    def split_text_by_paragraphs(self, text: str) -> List[str]:
        """Split *text* into paragraphs on blank-line separators."""
        if not text:
            return []

        # A blank line (possibly with whitespace) separates paragraphs.
        paragraphs = re.split(r'\n\s*\n', text)
        return [p.strip() for p in paragraphs if p.strip()]

    def create_chunks_with_overlap(self, text: str) -> List[Dict[str, Any]]:
        """Split *text* into fixed-size chunks with configurable overlap.

        Chunk boundaries are snapped to a nearby sentence terminator when
        one falls in the last 30% of the chunk. Returns at most
        ``max_chunks_per_file`` chunk dicts with positional metadata.
        """
        if not text:
            return []

        chunks: List[Dict[str, Any]] = []
        start = 0
        chunk_index = 0

        while start < len(text) and chunk_index < self.max_chunks_per_file:
            end = start + self.chunk_size

            # If this is not the final chunk, try to end on a sentence
            # boundary near the target position.
            if end < len(text):
                boundary_chars = ['。', '！', '？', '；', '\n', '.', '!', '?', ';']
                for char in boundary_chars:
                    pos = text.rfind(char, start, end)
                    # Only accept a boundary in the last 30% of the chunk
                    # so chunks do not become too short.
                    if pos > start + self.chunk_size * 0.7:
                        end = pos + 1
                        break

            chunk_text = text[start:end].strip()
            if chunk_text:
                chunks.append({
                    "chunk_id": f"chunk_{chunk_index:06d}",
                    "chunk_index": chunk_index,
                    "content": chunk_text,
                    "content_length": len(chunk_text),
                    "start_position": start,
                    "end_position": end,
                    "metadata": {
                        "chunk_type": "text",
                        "overlap_size": self.chunk_overlap if chunk_index > 0 else 0
                    }
                })
                chunk_index += 1

            # Advance with overlap. Guard against a non-advancing start
            # (possible when chunk_overlap >= the effective chunk length),
            # which previously caused an infinite loop.
            next_start = end - self.chunk_overlap
            start = next_start if next_start > start else end

        return chunks

    def create_semantic_chunks(self, text: str) -> List[Dict[str, Any]]:
        """Chunk *text* along paragraph boundaries.

        Paragraphs that fit in ``chunk_size`` become one chunk each; longer
        paragraphs are split with create_chunks_with_overlap(). The total
        never exceeds ``max_chunks_per_file`` (the cap is now enforced
        inside the sub-chunk loop as well).
        """
        if not text:
            return []

        paragraphs = self.split_text_by_paragraphs(text)
        chunks: List[Dict[str, Any]] = []
        chunk_index = 0

        for para_index, paragraph in enumerate(paragraphs):
            if chunk_index >= self.max_chunks_per_file:
                break

            if len(paragraph) <= self.chunk_size:
                # Paragraph fits in one chunk; positions are paragraph-local.
                chunks.append({
                    "chunk_id": f"chunk_{chunk_index:06d}",
                    "chunk_index": chunk_index,
                    "content": paragraph,
                    "content_length": len(paragraph),
                    "start_position": 0,
                    "end_position": len(paragraph),
                    "metadata": {
                        "chunk_type": "paragraph",
                        "paragraph_index": para_index
                    }
                })
                chunk_index += 1
            else:
                # Paragraph too long: split it further, renumbering the
                # sub-chunks into the document-wide sequence.
                for sub_chunk in self.create_chunks_with_overlap(paragraph):
                    if chunk_index >= self.max_chunks_per_file:
                        break
                    sub_chunk["chunk_id"] = f"chunk_{chunk_index:06d}"
                    sub_chunk["chunk_index"] = chunk_index
                    sub_chunk["metadata"]["paragraph_index"] = para_index
                    chunks.append(sub_chunk)
                    chunk_index += 1

        return chunks

    def process_document(self, text: str, metadata: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Clean *text*, chunk it semantically, and return the result.

        Returns a dict with ``chunks``, ``metadata``, ``success`` and, on
        failure, ``error``. The caller's *metadata* dict is no longer
        mutated in place; an augmented copy is returned instead.
        """
        if not text:
            error_msg = "空文本"
            logger.warning(error_msg)
            return {
                "chunks": [],
                "metadata": metadata or {},
                "success": False,
                "error": error_msg
            }

        try:
            logger.info("开始处理文档文本")

            cleaned_text = self.clean_text(text)
            logger.debug(f"原始文本长度: {len(text)}, 清理后文本长度: {len(cleaned_text)}")

            if not cleaned_text:
                error_msg = "清理后文本为空"
                logger.warning(error_msg)
                return {
                    "chunks": [],
                    "metadata": metadata or {},
                    "success": False,
                    "error": error_msg
                }

            chunks = self.create_semantic_chunks(cleaned_text)
            logger.info(f"文档分块完成，共生成 {len(chunks)} 个分块")

            # Log per-chunk details for debugging.
            for i, chunk in enumerate(chunks):
                logger.debug(f"分块 {i}: 内容长度={chunk['content_length']}, 内容预览='{chunk['content'][:100]}...'")

            # Build an augmented copy instead of mutating the caller's dict.
            result_metadata = dict(metadata) if metadata else {}
            result_metadata.update({
                "original_length": len(text),
                "cleaned_length": len(cleaned_text),
                "chunk_count": len(chunks),
                "processing_method": "semantic_chunking"
            })

            logger.info("文档处理完成")
            return {
                "chunks": chunks,
                "metadata": result_metadata,
                "success": True
            }

        except Exception as e:
            error_msg = f"文档处理失败: {e}"
            logger.error(error_msg)
            return {
                "chunks": [],
                "metadata": metadata or {},
                "success": False,
                "error": error_msg
            }

    def merge_chunks(self, chunks: List[Dict[str, Any]], max_length: Optional[int] = None) -> str:
        """Concatenate chunk contents (separated by blank lines) up to
        *max_length* characters; defaults to ``2 * chunk_size``."""
        if not chunks:
            return ""

        if max_length is None:
            max_length = self.chunk_size * 2

        merged_text = ""
        for chunk in chunks:
            # Stop at the first chunk that would push past the limit.
            if len(merged_text) + len(chunk["content"]) <= max_length:
                merged_text += chunk["content"] + "\n\n"
            else:
                break

        return merged_text.strip()

class DocumentProcessor:
    """High-level document processor.

    Extracts text from files via the project's FileHandlerFactory and
    hands it to a TextProcessor for cleaning and chunking.
    """

    def __init__(self):
        # One shared text processor, reused for every file.
        self.text_processor = TextProcessor()

    def process_file(self, file_path: str, file_metadata: Dict[str, Any] = None) -> Dict[str, Any]:
        """Extract text from a single file and chunk it.

        Returns the chunking result dict; on any failure, a dict with
        ``success=False`` and an ``error`` message.
        """
        from .file_handlers import FileHandlerFactory

        try:
            extraction = FileHandlerFactory.extract_text(file_path)

            if not extraction["success"]:
                return {
                    "chunks": [],
                    "metadata": file_metadata or {},
                    "success": False,
                    "error": extraction.get("error", "文件处理失败"),
                }

            # Fall back to handler-provided metadata when none was given.
            if file_metadata is None:
                file_metadata = FileHandlerFactory.get_metadata(file_path)

            # Extraction metadata wins on key collisions.
            merged = {**file_metadata, **extraction.get("metadata", {})}

            return self.text_processor.process_document(extraction["text"], merged)

        except Exception as e:
            logger.error(f"文件处理失败: {file_path}, 错误: {e}")
            return {
                "chunks": [],
                "metadata": file_metadata or {},
                "success": False,
                "error": str(e),
            }

    def batch_process_files(self, file_paths: List[str]) -> List[Dict[str, Any]]:
        """Process each file in turn, producing one result entry per path."""
        results: List[Dict[str, Any]] = []

        for file_path in file_paths:
            try:
                entry = {"file_path": file_path, "result": self.process_file(file_path)}
            except Exception as e:
                logger.error(f"批量处理文件失败: {file_path}, 错误: {e}")
                entry = {
                    "file_path": file_path,
                    "result": {
                        "chunks": [],
                        "metadata": {},
                        "success": False,
                        "error": str(e),
                    },
                }
            results.append(entry)

        return results

# Module-level shared instances (imported and reused by callers).
text_processor = TextProcessor()
document_processor = DocumentProcessor()
