import logging
import re
import unicodedata
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional

import openpyxl
from docx import Document
from pypdf import PdfReader
from xmindparser import xmind_to_dict

# Module-level logger, following the standard ``getLogger(__name__)`` convention.
logger = logging.getLogger(__name__)


class DocumentProcessor:
    def __init__(self, chunk_size: int = 500, chunk_overlap: int = 50):
        self.chunk_size = chunk_size
        self.chunk_overlap = chunk_overlap
        self.separators = ["\n\n", "\n", "。", "？", "！"]

    def _normalize_text(self, text: str) -> str:
        """实现文本预处理规则"""
        # 替换连续空白字符
        text = re.sub(r'\s+', ' ', text)
        # 统一Unicode格式
        return unicodedata.normalize('NFKC', text).strip()

    def _split_text(self, text: str) -> List[str]:
        """递归文本分块算法"""
        chunks = []
        start_idx = 0

        while start_idx < len(text):
            end_idx = min(start_idx + self.chunk_size, len(text))
            split_pos = -1

            # 查找最佳分割点
            for sep in self.separators:
                pos = text.rfind(sep, start_idx, end_idx)
                split_pos = max(split_pos, pos)

            if split_pos == -1:
                split_pos = end_idx

            chunk = text[start_idx:split_pos].strip()
            if chunk:
                chunks.append(chunk)

            start_idx = split_pos - self.chunk_overlap

        return chunks

    def process_file(self, file_path: str) -> List[Dict]:
        """处理文档主方法"""
        file_ext = Path(file_path).suffix.lower()

        try:
            if file_ext == '.pdf':
                text = self._parse_pdf(file_path)
            elif file_ext == '.docx':
                text = self._parse_docx(file_path)
            elif file_ext == '.xlsx':
                text = self._parse_excel(file_path)
            elif file_ext == '.xmind':
                text = self._parse_xmind(file_path)
            else:
                raise ValueError(f"Unsupported file type: {file_ext}")

            return self._process_text(text, file_path)
        except Exception as e:
            logger.error(f"Error processing {file_path}: {str(e)}")
            return []

    def _process_text(self, text: str, source: str) -> List[Dict]:
        """处理文本生成块"""
        normalized = self._normalize_text(text)
        chunks = self._split_text(normalized)

        return [{
            "content": chunk,
            "metadata": {
                "source": source,
                "length": len(chunk),
                "created_at": datetime.now().isoformat()
            }
        } for chunk in chunks]

    # 各格式解析方法（略，保持原有实现）
    # _parse_pdf(), _parse_docx(), _parse_excel(), _parse_xmind()