from pathlib import Path
from typing import List, Tuple

from docx import Document as DocxDocument
from PyPDF2 import PdfReader


def extract_text_from_document(document) -> Tuple[str, dict]:
    """
    Extract and normalize text from a document, dispatching on file type.

    The type comes from ``document.file_type`` when set, otherwise from the
    file suffix; supported types are pdf / docx / txt.

    Args:
        document: object exposing ``file.path`` and ``file_type``
            (presumably a Django model — confirm against caller).

    Returns:
        content (str): normalized text (blank lines removed, lines stripped)
        stats (dict): ``page_count`` (None when unavailable) and
            ``word_count`` (whitespace-separated word count)

    Raises:
        ValueError: for unsupported file types.
    """
    path = Path(document.file.path)
    suffix_type = path.suffix.replace('.', '')
    doc_type = (document.file_type or suffix_type).lower()

    page_count = None
    if doc_type == 'pdf':
        raw_text, page_count = _extract_from_pdf(path)
    elif doc_type == 'docx':
        raw_text, page_count = _extract_from_docx(path)
    elif doc_type == 'txt':
        raw_text = _extract_from_txt(path)
    else:
        raise ValueError(f'暂不支持的文档类型: {doc_type}')

    cleaned = _normalize_text(raw_text)
    stats = {
        'page_count': page_count,
        'word_count': len(cleaned.split()),
    }
    return cleaned, stats


def split_text_into_chunks(
    text: str,
    chunk_size: int,
    overlap: int
) -> List[str]:
    """
    Split text into fixed-size chunks, measured in whitespace-separated words.

    Consecutive chunks share ``overlap`` words so context survives chunk
    boundaries.

    Args:
        text: source text; split on any whitespace.
        chunk_size: maximum words per chunk; must be > 0.
        overlap: words shared between consecutive chunks;
            must satisfy 0 <= overlap < chunk_size.

    Returns:
        List of non-empty chunk strings; empty list for blank input.

    Raises:
        ValueError: if chunk_size <= 0, overlap < 0, or overlap >= chunk_size.
    """
    if chunk_size <= 0:
        raise ValueError('chunk_size 必须大于0')
    # A negative overlap would make the stride exceed chunk_size and
    # silently skip words between chunks (data loss) — reject it explicitly.
    if overlap < 0:
        raise ValueError('chunk_overlap 不能为负数')
    if overlap >= chunk_size:
        raise ValueError('chunk_overlap 需小于 chunk_size')

    words = text.split()
    if not words:
        return []

    chunks: List[str] = []
    step = chunk_size - overlap
    total = len(words)
    for start in range(0, total, step):
        chunk_text = ' '.join(words[start:start + chunk_size])
        if chunk_text:
            chunks.append(chunk_text)
        # Once a chunk reaches the end of the text, stop: any later start
        # would yield a chunk fully contained in this one (pure duplication).
        if start + chunk_size >= total:
            break
    return chunks


def _extract_from_pdf(file_path: Path) -> Tuple[str, int]:
    """
    Extract text from every page of a PDF.

    Args:
        file_path: path to the PDF file.

    Returns:
        Tuple of (page texts joined by newlines, page count).
    """
    pages_text = []
    with file_path.open('rb') as pdf_file:
        reader = PdfReader(pdf_file)
        # Take the page count while the file is still open: PyPDF2 reads
        # lazily, so touching reader.pages after the `with` block relied on
        # data already being cached. (The old getattr default was dead code —
        # `reader` is always bound here.)
        page_count = len(reader.pages)
        for page in reader.pages:
            # extract_text() may return None for pages without a text layer.
            pages_text.append(page.extract_text() or '')
    return '\n'.join(pages_text), page_count


def _extract_from_docx(file_path: Path) -> Tuple[str, int | None]:
    """
    Extract text from a .docx file, one output line per non-blank paragraph.

    Args:
        file_path: path to the .docx file.

    Returns:
        Tuple of (joined paragraph text, None — docx exposes no page count).
    """
    doc = DocxDocument(str(file_path))
    lines = []
    for paragraph in doc.paragraphs:
        if paragraph.text.strip():
            lines.append(paragraph.text)
    return '\n'.join(lines), None


def _extract_from_txt(file_path: Path) -> str:
    try:
        with file_path.open('r', encoding='utf-8') as txt_file:
            return txt_file.read()
    except UnicodeDecodeError:
        with file_path.open('r', encoding='gbk', errors='ignore') as txt_file:
            return txt_file.read()


def _normalize_text(text: str) -> str:
    return '\n'.join(
        line.strip()
        for line in text.splitlines()
        if line.strip()
    )
