import os
import zipfile
from pathlib import Path
from typing import List, Tuple, Optional, Set
from ebooklib import epub
from bs4 import BeautifulSoup
import regex as re

# core/book_parser.py (fully adapted for ebooklib 0.18+)
import os
import zipfile
from pathlib import Path
from typing import List, Tuple, Optional, Set
from ebooklib import epub
from ebooklib.epub import EpubReader
from bs4 import BeautifulSoup
import regex as re


class BookParser:
    """Parse an EPUB file and extract chapters as (title, text) pairs.

    Typical usage::

        parser = BookParser(Path("book.epub"))
        parser.validate_and_load()
        chapters = parser.extract_chapters()
    """

    def __init__(self, epub_path: Path):
        """Store the resolved path; no I/O happens until validate_and_load()."""
        self.epub_path = epub_path.resolve()
        # Populated by validate_and_load(); None until then.
        self.book: Optional[epub.EpubBook] = None

    def validate_and_load(self) -> None:
        """Strictly validate the file, then parse it into ``self.book``.

        Raises:
            FileNotFoundError: if the path does not exist.
            ValueError: if the suffix is not .epub or the file is empty.
            RuntimeError: if ebooklib fails to parse the file.
        """
        self._validate_file()
        self._load_content()

    def _validate_file(self) -> None:
        """Three-step sanity check: exists, .epub suffix, non-empty."""
        if not self.epub_path.exists():
            raise FileNotFoundError(f"EPUB文件不存在: {self.epub_path}")
        if self.epub_path.suffix.lower() != '.epub':
            raise ValueError(f"非EPUB文件: {self.epub_path.suffix}")
        if self.epub_path.stat().st_size == 0:
            raise ValueError("空文件")

    def _load_content(self) -> None:
        """Safely load the file content via ebooklib's high-level API."""
        try:
            # Pass the path string directly to read_epub (ebooklib 0.18+).
            self.book = epub.read_epub(str(self.epub_path))
            # NOTE: options={'ignore_ncx': True} can be passed to read_epub
            # to suppress the NCX deprecation warning in newer ebooklib.
            if not hasattr(self.book, 'get_items'):
                raise AttributeError("无效的EPUB解析结果")
        except Exception as e:
            # Chain the original exception so the root cause stays visible.
            raise RuntimeError(f"EPUB加载失败: {str(e)}") from e

    def extract_chapters(
            self,
            selected: Optional[Set[str]] = None
    ) -> List[Tuple[str, str]]:
        """Extract chapter content as a list of (title, text) tuples.

        Args:
            selected: optional set of filter patterns; each may be a chapter
                index ("3"), an inclusive range ("2-5"), a slash-delimited
                regex ("/intro.*/") matched against the title, or a plain
                case-insensitive substring of the title. None/empty keeps all.

        Returns:
            List of (title, text) tuples for the chapters that passed the
            filter; chapters that fail to process are skipped, not fatal.

        Raises:
            RuntimeError: if called before validate_and_load().
        """
        if self.book is None:
            raise RuntimeError("必须先调用validate_and_load()")

        chapters: List[Tuple[str, str]] = []
        for idx, item in enumerate(self.book.get_items(), 1):
            if self._is_content_item(item):
                try:
                    # _process_item always yields a (title, text) pair.
                    title, text = self._process_item(item, idx)
                    if self._should_include(title, idx, selected):
                        chapters.append((title, text))
                except ValueError as e:
                    # Best-effort: report the failure and keep going.
                    print(f"章节 {idx} 处理失败: {str(e)}")
        return chapters

    def _is_content_item(self, item) -> bool:
        """True for HTML document items that actually carry content."""
        return isinstance(item, epub.EpubHtml) and bool(item.get_content())

    def _process_item(self, item, index: int) -> Tuple[str, str]:
        """Convert one content item into (title, text); never raises.

        Falls back to ("Chapter_<index>", "") if parsing fails, so a single
        malformed chapter cannot abort the whole extraction.
        """
        try:
            soup = BeautifulSoup(item.get_content(), 'html.parser')
            title = self._find_title(soup) or f"Chapter_{index}"
            self._clean_unwanted_elements(soup)
            text = ' '.join(p.get_text() for p in soup.find_all(['p', 'li']))
            return title.strip(), text.strip()
        except Exception:
            # Deliberate best-effort: return a placeholder instead of raising.
            return f"Chapter_{index}", ""

    def _find_title(self, soup) -> str:
        """Probe common title locations; empty string if none is found."""
        for selector in ['h1', 'h2', 'h3', 'title']:
            if tag := soup.select_one(selector):
                return tag.get_text()
        return ""

    def _clean_unwanted_elements(self, soup) -> None:
        """Strip non-content elements from the parse tree in place."""
        for elem in soup(['script', 'style', 'nav', 'footer', 'head', 'meta']):
            elem.decompose()

    def _should_include(
            self,
            title: str,
            index: int,
            selected: Optional[Set[str]]
    ) -> bool:
        """Chapter filter: numeric index, "a-b" range, /regex/, or substring.

        Returns True when `selected` is empty/None, or when ANY pattern in
        `selected` matches this chapter's index or title.
        """
        if not selected:
            return True

        # Pattern classifiers for the three structured filter forms.
        patterns = {
            'numeric': r'^\d+$',
            'range': r'^(\d+)-(\d+)$',
            'regex': r'^/(.*)/$'
        }

        for pattern in selected:
            # Exact chapter number, e.g. "3".
            if re.match(patterns['numeric'], pattern) and index == int(pattern):
                return True

            # Inclusive index range, e.g. "2-5".
            if range_match := re.match(patterns['range'], pattern):
                start, end = map(int, range_match.groups())
                if start <= index <= end:
                    return True

            # Slash-delimited regex, matched case-insensitively on the title.
            if regex_match := re.match(patterns['regex'], pattern):
                if re.search(regex_match.group(1), title, re.IGNORECASE):
                    return True

            # Plain case-insensitive substring match on the title.
            if pattern.lower() in title.lower():
                return True

        return False