import os
import platform
import re
import sys
import tempfile
import xml.etree.ElementTree as ET
import zipfile

from bs4 import BeautifulSoup
from fpdf import FPDF


def find_system_chinese_font():
    """Locate a Chinese-capable font installed on this system.

    Probes a short list of well-known font file locations for the current
    OS and returns the first path that exists, or ``None`` when no known
    candidate is present.
    """
    # Candidate font files keyed by platform.system() value.
    candidates_by_os = {
        "Windows": [
            "C:/Windows/Fonts/simhei.ttf",   # SimHei
            "C:/Windows/Fonts/simsun.ttc",   # SimSun
            "C:/Windows/Fonts/msyh.ttc",     # Microsoft YaHei
        ],
        "Darwin": [  # macOS
            "/Library/Fonts/华文细黑.ttf",
            "/System/Library/Fonts/Supplemental/Songti.ttc",
        ],
        "Linux": [
            "/usr/share/fonts/opentype/noto/NotoSansCJK-Regular.ttc",
            "/usr/share/fonts/truetype/wqy/wqy-microhei.ttc",
        ],
    }

    for candidate in candidates_by_os.get(platform.system(), []):
        if os.path.isfile(candidate):
            return candidate
    return None

def create_pdf(text, output_path, font_path):
    """Render *text* into a PDF file at *output_path*.

    Args:
        text: Plain-text content to write (may contain CJK characters).
        output_path: Destination .pdf path.
        font_path: Path to a Unicode TTF/TTC font, or ``None``/empty if no
            font was found.

    Errors are printed rather than raised: PDF export is best-effort and a
    failure must not abort the surrounding chapter loop.
    """
    # find_system_chinese_font() may return None; bail out with a clear
    # message instead of letting add_font() raise an opaque error.
    if not font_path:
        print("生成PDF失败: 未找到可用的中文字体")
        return

    try:
        pdf = FPDF()
        pdf.add_page()
        pdf.set_auto_page_break(auto=True, margin=15)

        # Register a Unicode font so CJK text renders correctly.
        pdf.add_font("CNFont", "", font_path, uni=True)
        pdf.set_font("CNFont", size=12)

        # Usable width: page width minus 15 mm margins on each side.
        effective_page_width = pdf.w - 2 * 15

        # multi_cell wraps long lines automatically.
        pdf.multi_cell(effective_page_width, 10, txt=text)
        pdf.output(output_path)
    except Exception as e:
        print(f"生成PDF失败: {str(e)}")


def _spine_chapter_paths(temp_dir):
    """Return the unpacked EPUB's spine documents as absolute paths, in reading order."""
    # META-INF/container.xml points at the OPF package document.
    container_path = os.path.join(temp_dir, 'META-INF', 'container.xml')
    tree = ET.parse(container_path)
    ns = {'n': 'urn:oasis:names:tc:opendocument:xmlns:container'}
    rootfile = tree.find('.//n:rootfile', ns)
    content_opf_path = rootfile.attrib['full-path']

    # Parse the OPF; manifest hrefs are relative to the OPF's directory.
    opf_abs_path = os.path.join(temp_dir, content_opf_path)
    opf_dir = os.path.dirname(opf_abs_path)
    tree = ET.parse(opf_abs_path)
    opf_ns = {
        'opf': 'http://www.idpf.org/2007/opf',
        'dc': 'http://purl.org/dc/elements/1.1/'
    }

    # manifest: item id -> absolute file path
    manifest = {}
    for item in tree.findall('.//opf:manifest/opf:item', opf_ns):
        manifest[item.get('id')] = os.path.normpath(
            os.path.join(opf_dir, item.get('href')))

    # The spine lists manifest id references in reading order.
    return [manifest[itemref.get('idref')]
            for itemref in tree.findall('.//opf:spine/opf:itemref', opf_ns)
            if itemref.get('idref') in manifest]


def extract_epub_chapters(epub_path, output_dir, ifpdf):
    """Extract each spine chapter of an EPUB into a .txt file in *output_dir*.

    Args:
        epub_path: Path to the .epub file.
        output_dir: Directory for the extracted chapters (created if absent).
        ifpdf: When true, also render each chapter as a PDF using a system
            Chinese font.

    Individual chapter failures are reported and skipped so one bad file
    does not abort the whole extraction.
    """
    with tempfile.TemporaryDirectory() as temp_dir:
        with zipfile.ZipFile(epub_path, 'r') as epub:
            epub.extractall(temp_dir)

        spine = _spine_chapter_paths(temp_dir)

        os.makedirs(output_dir, exist_ok=True)
        # Resolve the font once, not once per chapter.
        font_path = find_system_chinese_font() if ifpdf else None

        for idx, chapter_path in enumerate(spine, 1):
            try:
                with open(chapter_path, 'r', encoding='utf-8') as f:
                    html_content = f.read()
                soup = BeautifulSoup(html_content, 'html.parser')

                # Drop non-content elements before text extraction.
                for tag in soup(["script", "style", "nav"]):
                    tag.decompose()

                # Prefer a heading for the chapter title, fall back to <title>,
                # then to a plain "Chapter_N" label.
                title = soup.find('h1') or soup.find('h2') or soup.find('title')
                title_text = f'Chapter_{idx}' + title.get_text().strip() if title else f'Chapter_{idx}'

                # Strip characters that are illegal in file names; cap length.
                safe_title = re.sub(r'[\\/*?:"<>|]', '', title_text)
                safe_title = safe_title[:100].strip()

                body = soup.find('body')
                if not body:
                    continue

                # Flatten to text and drop blank lines.
                text = body.get_text(separator='\n', strip=True)
                cleaned_text = '\n'.join(
                    line.strip() for line in text.splitlines() if line.strip())

                output_path = os.path.join(output_dir, f'{safe_title}.txt')
                with open(output_path, 'w', encoding='utf-8') as f_out:
                    f_out.write(cleaned_text)

                if ifpdf:
                    print(f"正在生成 {output_path} 的PDF...")
                    pdf_output = output_path.replace('.txt', '.pdf')
                    create_pdf(cleaned_text, pdf_output, font_path)

            except Exception as e:
                print(f"处理章节 {chapter_path} 时出错: {str(e)}")


if __name__ == "__main__":
    # CLI: python read_epub_new.py <epub path> <output dir> [pdf]
    if len(sys.argv) < 3:
        print("用法: python read_epub_new.py <epub文件路径> <输出目录>")
        sys.exit(1)

    epub_path = sys.argv[1]
    output_dir = sys.argv[2]
    # Optional third argument: pass the literal "pdf" to also emit PDFs.
    # The original read sys.argv[3] unconditionally, which raised IndexError
    # whenever only two arguments were supplied despite passing the guard.
    ifpdf = len(sys.argv) > 3 and sys.argv[3] == "pdf"

    extract_epub_chapters(epub_path, output_dir, ifpdf)
    print(f"成功提取章节到 {output_dir} 目录")