import os
import zipfile
import shutil
from pathlib import Path
import xml.etree.ElementTree as ET
import re
import copy
import sys

# Module-level flag recording whether the BeautifulSoup library could be imported.
BS4_AVAILABLE = False

def check_and_install_dependencies():
    """Probe for BeautifulSoup and attempt a pip install if it is missing.

    Sets the module-level ``BS4_AVAILABLE`` flag.  On success the imported
    ``BeautifulSoup`` class is also published into this module's globals:
    a function-local ``from bs4 import ...`` alone leaves the name unbound
    for the rest of the module, so after a successful runtime install the
    other functions would hit a (silently swallowed) NameError and fall
    back to regex processing anyway.
    """
    global BS4_AVAILABLE

    try:
        from bs4 import BeautifulSoup
        print("BeautifulSoup库已安装")
    except ImportError:
        print("BeautifulSoup库未安装，尝试安装...")
        try:
            import subprocess
            # Install with the current interpreter so the package lands in
            # the same environment this script runs in.
            subprocess.check_call([sys.executable, "-m", "pip", "install", "beautifulsoup4"])
            from bs4 import BeautifulSoup
            print("BeautifulSoup库安装成功！")
        except Exception as e:
            print(f"安装BeautifulSoup库失败: {e}")
            print("将使用正则表达式进行HTML处理，效果可能不如BeautifulSoup")
            BS4_AVAILABLE = False
            return

    BS4_AVAILABLE = True
    # Make the class visible module-wide (see docstring).
    globals()['BeautifulSoup'] = BeautifulSoup

# Run the dependency check once when the module is imported.
check_and_install_dependencies()

# Bind BeautifulSoup at module level: the functions below reference the name
# directly.  On failure check_and_install_dependencies() has already printed
# a fallback notice, so no duplicate warning is emitted here (the original
# code re-checked and printed a second, conflicting message).
try:
    from bs4 import BeautifulSoup
    BS4_AVAILABLE = True
except ImportError:
    BS4_AVAILABLE = False

def remove_empty_lines(html_content):
    """
    Strip blank lines and empty paragraphs from a chunk of HTML.

    Uses a structural cleanup with BeautifulSoup when it is available and
    falls back to a battery of regular expressions otherwise (or when the
    BeautifulSoup pass raises).  In both paths a small <style> block that
    zeroes paragraph margins is injected into <head> so readers do not
    re-introduce visual gaps.

    Parameters:
        html_content: HTML text content.

    Returns:
        The processed HTML content as a string.
    """
    # BS4_AVAILABLE may be flipped to True at runtime by
    # check_and_install_dependencies() without the BeautifulSoup name ever
    # being bound at module level, so verify both before using it.
    use_bs4 = bool(globals().get('BS4_AVAILABLE')) and 'BeautifulSoup' in globals()

    if use_bs4:
        try:
            # Parse the HTML.
            soup = BeautifulSoup(html_content, 'html.parser')

            # 1. Drop paragraph tags that are empty or whitespace-only.
            for p in soup.find_all('p'):
                if not p.get_text(strip=True):
                    p.decompose()
                # Paragraphs holding only a non-breaking space.
                elif p.get_text().strip() == '\xa0' or p.get_text().strip() == '&nbsp;':
                    p.decompose()

            # 2. Drop empty div tags.
            for div in soup.find_all('div'):
                if not div.get_text(strip=True):
                    div.decompose()

            # 3. Collapse runs of consecutive <br> tags down to a single one.
            for br in soup.find_all('br'):
                next_sibling = br.next_sibling
                while next_sibling and (next_sibling.name == 'br' or (isinstance(next_sibling, str) and not next_sibling.strip())):
                    if hasattr(next_sibling, 'decompose'):
                        next_tag = next_sibling.next_sibling
                        next_sibling.decompose()
                        next_sibling = next_tag
                    else:
                        next_sibling = next_sibling.next_sibling

            # 4. Remove blank nodes sitting between consecutive non-empty
            # paragraphs (generic inter-paragraph cleanup).
            paragraphs = []
            for p in soup.find_all('p'):
                if p.get_text(strip=True):
                    paragraphs.append(p)

            for i in range(len(paragraphs) - 1):
                current_p = paragraphs[i]
                next_p = paragraphs[i + 1]

                # Drop empty paragraphs / whitespace text nodes between them.
                sibling = current_p.next_sibling
                while sibling and sibling != next_p:
                    next_sib = sibling.next_sibling
                    if isinstance(sibling, str) and not sibling.strip():
                        # Whitespace-only text node.
                        sibling.extract()
                    elif hasattr(sibling, 'name') and sibling.name == 'p' and not sibling.get_text(strip=True):
                        # Empty paragraph.
                        sibling.decompose()
                    sibling = next_sib

            # 5. Inject CSS that controls paragraph spacing.
            style_tag = soup.new_tag('style')
            style_tag.string = """
            p { margin: 0; padding: 0; }
            div { margin: 0; padding: 0; }
            body { line-height: 1.5; }
            """

            # Append the style tag inside <head>.
            head = soup.find('head')
            if head:
                # Skip injection when a similar rule already exists.
                existing_style = False
                for style in head.find_all('style'):
                    # style.string is None for empty or multi-child <style>
                    # tags; guard it before the substring tests (the original
                    # code raised TypeError here and aborted the BS4 pass).
                    if style.string and ('margin: 0' in style.string or 'margin:0' in style.string):
                        existing_style = True
                        break

                if not existing_style:
                    head.append(style_tag)
            else:
                # No <head>: create one.
                head = soup.new_tag('head')
                head.append(style_tag)
                if soup.html:
                    soup.html.insert(0, head)
                else:
                    # No <html> either: build a full document skeleton.
                    html = soup.new_tag('html')
                    html.append(head)
                    body = soup.find('body')
                    if body:
                        html.append(body)
                    soup.append(html)

            # Return the processed HTML.
            return str(soup)
        except Exception as e:
            print(f"使用BeautifulSoup处理HTML时出错: {e}，将回退到正则表达式处理")
            # Fall through to the regex pipeline below.

    # Regex fallback pipeline.
    # 1. Multiple blank lines between HTML tags.
    pattern1 = r'>\s*\n\s*\n+\s*<'
    result = re.sub(pattern1, '>\n<', html_content)

    # 2. Empty paragraph tags such as <p></p> or <p> </p>.
    pattern2 = r'<p[^>]*>\s*</p>'
    result = re.sub(pattern2, '', result)

    # 3. Paragraphs containing only newlines.
    pattern3 = r'<p[^>]*>\s*\n+\s*</p>'
    result = re.sub(pattern3, '', result)

    # 4. Divs containing only whitespace and newlines.
    pattern4 = r'<div[^>]*>\s*\n*\s*</div>'
    result = re.sub(pattern4, '', result)

    # 5. Runs of consecutive <br> tags.
    pattern5 = r'(<br[^>]*>\s*){2,}'
    result = re.sub(pattern5, '<br/>', result)

    # 6. Multiple consecutive blank lines inside a paragraph.
    pattern6 = r'(\n\s*){2,}'
    result = re.sub(pattern6, '\n', result)

    # 7. Paragraphs containing only &nbsp; entities.
    pattern7 = r'<p[^>]*>(\s*&nbsp;\s*)+</p>'
    result = re.sub(pattern7, '', result)

    # 8. Paragraphs containing only spaces (non-greedy variant).
    pattern8 = r'<p[^>]*?>\s*?</p>'
    result = re.sub(pattern8, '', result)

    # 9. Lines holding nothing but whitespace.
    pattern9 = r'\n\s+\n'
    result = re.sub(pattern9, '\n', result)

    # 10. An empty paragraph immediately followed by a non-empty one.
    pattern10 = r'<p[^>]*>(\s*|(&nbsp;)*)</p>\s*\n*\s*<p[^>]*>'
    result = re.sub(pattern10, '<p>', result)

    # 11. A paragraph close followed by an empty paragraph.
    pattern11 = r'</p>\s*\n*\s*<p[^>]*>\s*\n*\s*</p>'
    result = re.sub(pattern11, '</p>', result)

    # 12. Surplus blank lines between any closing and opening paragraph tags.
    pattern12 = r'(</p>)\s*\n+\s*(<p)'
    result = re.sub(pattern12, r'\1\n\2', result)

    # 13. Same inter-paragraph cleanup, non-capturing variant.
    pattern13 = r'</p>\s*\n+\s*<p'
    result = re.sub(pattern13, '</p>\n<p', result)

    # 14. Trim leading/trailing whitespace on every line.
    lines = result.split('\n')
    trimmed_lines = [line.strip() for line in lines]
    result = '\n'.join(trimmed_lines)

    # 15. Inject CSS controlling paragraph spacing.
    style_tag = '<style>p { margin: 0; padding: 0; } div { margin: 0; padding: 0; } body { line-height: 1.5; }</style>'
    head_pattern = r'<head>(.*?)</head>'
    head_match = re.search(head_pattern, result, re.DOTALL)

    if head_match:
        # <head> exists: splice the style inside it.  Use slicing instead of
        # re.sub so backslashes in the existing head content are not
        # interpreted as replacement escapes (and only this head is touched).
        head_content = head_match.group(1)
        new_head = f'<head>{head_content}{style_tag}</head>'
        result = result[:head_match.start()] + new_head + result[head_match.end():]
    else:
        # No <head>: try to add one just before <body>.
        body_pattern = r'<body'
        if re.search(body_pattern, result):
            result = re.sub(body_pattern, f'<head>{style_tag}</head><body', result, count=1)
        else:
            # No <body> either: try right after the <html> tag.
            html_pattern = r'<html[^>]*>'
            if re.search(html_pattern, result):
                result = re.sub(html_pattern, f'\\g<0><head>{style_tag}</head>', result, count=1)
            else:
                # Bare fragment: prepend the style block.
                result = f'<head>{style_tag}</head>{result}'

    return result

def remove_empty_lines_from_epub(input_epub, output_path):
    """
    Strip blank lines from the HTML documents inside an EPUB file.

    The EPUB is unpacked into a temporary directory next to ``output_path``,
    each HTML/XHTML document is rewritten via remove_empty_lines(), a global
    stylesheet suppressing paragraph margins is registered in the OPF
    manifest and linked from each page, and the result is re-zipped.

    Parameters:
        input_epub: path of the source EPUB file.
        output_path: path of the EPUB file to write.

    Returns:
        True on success, False when any top-level step failed.
    """
    # Make sure BeautifulSoup has been probed (and installed if possible).
    global BS4_AVAILABLE
    if not BS4_AVAILABLE:
        check_and_install_dependencies()
    
    # Scratch directory created alongside the output file.
    temp_dir = os.path.join(os.path.dirname(output_path), "temp_remove_empty_lines")
    
    try:
        # Unpack the EPUB archive.
        with zipfile.ZipFile(input_epub, 'r') as zip_ref:
            zip_ref.extractall(temp_dir)
        
        # Bookkeeping for the HTML pass.
        html_files_count = 0
        css_files = []
        
        # Collect all CSS files first.
        for root, dirs, files in os.walk(temp_dir):
            for file in files:
                if file.endswith('.css'):
                    css_path = os.path.join(root, file)
                    css_files.append(css_path)
        
        # Append margin-suppressing rules to each existing stylesheet.
        for css_path in css_files:
            try:
                with open(css_path, 'r', encoding='utf-8', errors='ignore') as f:
                    css_content = f.read()
                
                # Rules that remove paragraph spacing and hide empty lines.
                css_additions = """
                p { margin: 0 !important; padding: 0 !important; }
                div { margin: 0 !important; padding: 0 !important; }
                body { line-height: 1.5 !important; }
                .empty-line { display: none !important; }
                """
                
                # Only append when no similar rule exists yet.
                if 'margin: 0' not in css_content and 'margin:0' not in css_content:
                    css_content += css_additions
                    
                    with open(css_path, 'w', encoding='utf-8') as f:
                        f.write(css_content)
                    
                    print(f"已更新CSS文件: {os.path.basename(css_path)}")
            except Exception as e:
                print(f"处理CSS文件 {css_path} 时出错: {e}")
        
        # Write a global stylesheet so the rules apply even to pages that
        # link no other CSS.
        global_css_path = os.path.join(temp_dir, "remove_empty_lines.css")
        with open(global_css_path, 'w', encoding='utf-8') as f:
            f.write("""
            p { margin: 0 !important; padding: 0 !important; }
            div { margin: 0 !important; padding: 0 !important; }
            body { line-height: 1.5 !important; }
            .empty-line { display: none !important; }
            """)
        
        # Register the global stylesheet in the OPF manifest.
        container_xml_path = os.path.join(temp_dir, "META-INF", "container.xml")
        if os.path.exists(container_xml_path):
            try:
                # container.xml points at the OPF package document.
                container_tree = ET.parse(container_xml_path)
                container_root = container_tree.getroot()
                ns = {'ns': 'urn:oasis:names:tc:opendocument:xmlns:container'}
                rootfile_element = container_root.find('.//ns:rootfile', ns)
                
                if rootfile_element is not None:
                    opf_path = rootfile_element.get('full-path')
                    opf_full_path = os.path.join(temp_dir, opf_path)
                    opf_dir = os.path.dirname(opf_full_path)
                    
                    if os.path.exists(opf_full_path):
                        # Parse the OPF package document.
                        opf_tree = ET.parse(opf_full_path)
                        opf_root = opf_tree.getroot()
                        opf_ns = {'opf': 'http://www.idpf.org/2007/opf'}
                        
                        # Locate the manifest element.
                        manifest = opf_root.find('.//opf:manifest', opf_ns)
                        if manifest is not None:
                            # Manifest entry for the global stylesheet, with a
                            # path relative to the OPF file.
                            css_rel_path = os.path.relpath(global_css_path, opf_dir)
                            css_id = "remove_empty_lines_css"
                            
                            # Skip if an entry with this id already exists.
                            existing_css = False
                            for item in manifest.findall('.//opf:item', opf_ns):
                                if item.get('id') == css_id:
                                    existing_css = True
                                    break
                            
                            if not existing_css:
                                # Create the new manifest item element.
                                css_item = ET.Element('{http://www.idpf.org/2007/opf}item')
                                css_item.set('id', css_id)
                                css_item.set('href', css_rel_path.replace('\\', '/'))
                                css_item.set('media-type', 'text/css')
                                manifest.append(css_item)
                                
                                # Persist the modified OPF document.
                                ET.register_namespace('', 'http://www.idpf.org/2007/opf')
                                ET.register_namespace('dc', 'http://purl.org/dc/elements/1.1/')
                                opf_tree.write(opf_full_path, encoding='utf-8', xml_declaration=True)
                                
                                print(f"已添加全局CSS到OPF文件")
            except Exception as e:
                print(f"处理OPF文件时出错: {e}")
        
        # Rewrite each HTML document.
        for root, dirs, files in os.walk(temp_dir):
            for file in files:
                if file.endswith(('.html', '.xhtml', '.htm')):
                    file_path = os.path.join(root, file)
                    try:
                        # Read the document.
                        with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                            content = f.read()
                        
                        # Strip the blank lines.
                        processed_content = remove_empty_lines(content)
                        
                        # Link the global stylesheet (BeautifulSoup path only).
                        # NOTE(review): if bs4 was pip-installed at runtime by
                        # check_and_install_dependencies(), the module-level
                        # BeautifulSoup name may still be unbound here; the
                        # resulting NameError is swallowed by the except below.
                        if BS4_AVAILABLE:
                            try:
                                soup = BeautifulSoup(processed_content, 'html.parser')
                                
                                # Add the stylesheet link to <head>.
                                head = soup.find('head')
                                if head:
                                    # Only if it is not linked already.
                                    css_link_exists = False
                                    for link in head.find_all('link', rel='stylesheet'):
                                        if 'remove_empty_lines.css' in link.get('href', ''):
                                            css_link_exists = True
                                            break
                                    
                                    if not css_link_exists:
                                        # Build the <link> element.
                                        css_link = soup.new_tag('link')
                                        css_link['rel'] = 'stylesheet'
                                        css_link['type'] = 'text/css'
                                        css_link['href'] = os.path.relpath(global_css_path, root).replace('\\', '/')
                                        head.append(css_link)
                                
                                # Tag empty paragraphs with the hiding class.
                                for p in soup.find_all('p'):
                                    if not p.get_text(strip=True):
                                        p['class'] = p.get('class', []) + ['empty-line']
                                
                                processed_content = str(soup)
                            except Exception as e:
                                print(f"使用BeautifulSoup处理HTML时出错: {e}")
                        
                        # Write the document back.
                        with open(file_path, 'w', encoding='utf-8') as f:
                            f.write(processed_content)
                        
                        html_files_count += 1
                    except Exception as e:
                        print(f"处理文件 {file_path} 时出错: {e}")
        
        # Repack the EPUB archive.
        with zipfile.ZipFile(output_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
            # 'mimetype' must come first and be stored uncompressed.
            mimetype_path = os.path.join(temp_dir, "mimetype")
            if os.path.exists(mimetype_path):
                zipf.write(mimetype_path, "mimetype", compress_type=zipfile.ZIP_STORED)
            
            # Then every other file, keeping relative paths.
            for root, dirs, files in os.walk(temp_dir):
                for file in files:
                    file_path = os.path.join(root, file)
                    rel_path = os.path.relpath(file_path, temp_dir)
                    if rel_path != "mimetype":  # already added above
                        zipf.write(file_path, rel_path)
        
        print(f"已处理 {html_files_count} 个HTML文件，去除空行")
        return True
    
    except Exception as e:
        print(f"处理EPUB文件时出错: {e}")
        return False
    
    finally:
        # Remove the scratch directory.
        if os.path.exists(temp_dir):
            shutil.rmtree(temp_dir)

def get_epub_chapter_count(opf_root, opf_ns, id_to_item=None):
    """
    Count the chapters of an EPUB from its OPF spine.

    Parameters:
        opf_root: root element of the parsed OPF document.
        opf_ns: namespace mapping used for OPF lookups.
        id_to_item: optional mapping of manifest id -> tuple whose second and
            third entries are the absolute and relative file paths (a trailing
            fourth entry, if present, is ignored).

    Returns:
        (chapter_count, spine_items) where spine_items is a list of
        (idref, file_path, rel_path) tuples; paths are None when the idref
        is not found in id_to_item.
    """
    spine = opf_root.find('.//opf:spine', opf_ns)
    if spine is None:
        return 0, []

    spine_items = []
    for itemref in spine.findall('opf:itemref', opf_ns):
        idref = itemref.get('idref')
        if id_to_item and idref in id_to_item:
            # Entries may be (item, path, rel) or (item, path, rel, extra);
            # slicing tolerates both shapes instead of branching on length.
            _, file_path, rel_path = id_to_item[idref][:3]
            spine_items.append((idref, file_path, rel_path))
        else:
            spine_items.append((idref, None, None))

    return len(spine_items), spine_items

def extract_toc_ncx(opf_root, opf_ns, temp_dir, opf_dir):
    """
    Locate and parse the NCX table of contents of an EPUB.

    Resolution order: the spine's ``toc`` attribute (looked up in the
    manifest by id), then any manifest item with the NCX media type, then
    any item whose id contains 'toc' and whose href ends in '.ncx'.

    Parameters:
        opf_root: root element of the parsed OPF document.
        opf_ns: namespace mapping used for OPF lookups.
        temp_dir: extraction root (kept for interface compatibility, unused).
        opf_dir: directory containing the OPF file; hrefs resolve against it.

    Returns:
        (absolute_toc_path, parsed ElementTree), or (None, None) when no
        TOC file can be located or parsed.
    """
    manifest = opf_root.find('.//opf:manifest', opf_ns)
    spine = opf_root.find('.//opf:spine', opf_ns)
    toc_id = spine.get('toc') if spine is not None else None

    href = None
    if manifest is not None:
        entries = manifest.findall('opf:item', opf_ns)
        if toc_id:
            # The spine names the TOC item explicitly: match on its id.
            for entry in entries:
                if entry.get('id') == toc_id:
                    href = entry.get('href')
                    break
        else:
            # No explicit reference: detect by media type, then by naming.
            for entry in entries:
                if entry.get('media-type') == 'application/x-dtbncx+xml':
                    href = entry.get('href')
                    break
                if 'toc' in entry.get('id', '').lower() and entry.get('href', '').endswith('.ncx'):
                    href = entry.get('href')
                    break

    if href:
        # Resolve the href relative to the OPF directory.
        resolved = os.path.normpath(os.path.join(opf_dir, href.replace('\\', '/')))
        if os.path.exists(resolved):
            try:
                return resolved, ET.parse(resolved)
            except Exception as e:
                print(f"解析TOC文件出错: {e}")

    return None, None

def split_epub_by_size(input_epub, output_dir, max_size_mb=30, remove_empty_lines_flag=False):
    """
    Split an EPUB file into roughly equal-sized parts.

    Metadata files (mimetype, META-INF, .opf, .ncx) are copied into every
    part; content files are distributed greedily so part sizes stay close
    to ``max_size_mb``.

    Parameters:
        input_epub: path of the EPUB file to split.
        output_dir: directory that receives the part files.
        max_size_mb: target maximum size of each part, in MB.
        remove_empty_lines_flag: when True, strip blank lines from every
            HTML document before splitting.
    """
    # Size budget in bytes.
    max_size_bytes = max_size_mb * 1024 * 1024

    # Make sure the output directory exists.
    Path(output_dir).mkdir(parents=True, exist_ok=True)

    # Base name (without extension) used for the part filenames.
    original_name = os.path.splitext(os.path.basename(input_epub))[0]

    # Scratch extraction directory.
    temp_dir = os.path.join(output_dir, f"temp_{original_name}")

    try:
        # Unpack the EPUB archive.
        with zipfile.ZipFile(input_epub, 'r') as zip_ref:
            zip_ref.extractall(temp_dir)

        # Optionally strip blank lines from every HTML document first.
        if remove_empty_lines_flag:
            print("正在处理HTML文件，去除空行...")
            for root, dirs, files in os.walk(temp_dir):
                for file in files:
                    if file.endswith(('.html', '.xhtml', '.htm')):
                        file_path = os.path.join(root, file)
                        try:
                            with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                                content = f.read()
                            processed_content = remove_empty_lines(content)
                            with open(file_path, 'w', encoding='utf-8') as f:
                                f.write(processed_content)
                        except Exception as e:
                            print(f"处理文件 {file_path} 时出错: {e}")

        # Sanity-check the EPUB skeleton.
        mimetype_file = os.path.join(temp_dir, "mimetype")
        container_file = os.path.join(temp_dir, "META-INF", "container.xml")
        if not os.path.exists(mimetype_file) or not os.path.exists(container_file):
            print("警告: EPUB文件结构不完整，可能导致拆分后的文件损坏")

        # Partition files: metadata goes into every part, content is split.
        content_files = []
        metadata_files = []
        for root, dirs, files in os.walk(temp_dir):
            for file in files:
                file_path = os.path.join(root, file)
                rel_path = os.path.relpath(file_path, temp_dir)

                if rel_path == "mimetype" or \
                   rel_path.startswith("META-INF/") or \
                   (rel_path.endswith(".opf") or rel_path.endswith(".ncx")):
                    metadata_files.append((file_path, os.path.getsize(file_path), rel_path))
                else:
                    content_files.append((file_path, os.path.getsize(file_path), rel_path))

        content_size = sum(size for _, size, _ in content_files)
        metadata_size = sum(size for _, size, _ in metadata_files)

        # Guard against the metadata alone exceeding the size budget, which
        # would make the divisor zero (ZeroDivisionError) or negative in the
        # ceiling division below.
        available_size = max(1, max_size_bytes - metadata_size)

        # Number of parts = ceil(content / available), capped at one content
        # file per part and floored at a single part.
        num_parts = max(1, min((content_size + available_size - 1) // available_size,
                            len(content_files)))

        print(f"总大小: {(content_size + metadata_size)/1024/1024:.2f}MB, 将拆分为 {num_parts} 个部分")

        # Greedy bin packing: biggest files first, each into the currently
        # smallest part, which keeps part sizes balanced.
        parts = [[] for _ in range(num_parts)]
        current_sizes = [0] * num_parts
        for file_path, file_size, rel_path in sorted(content_files, key=lambda x: x[1], reverse=True):
            target_part = current_sizes.index(min(current_sizes))
            parts[target_part].append((file_path, rel_path))
            current_sizes[target_part] += file_size

        # Emit one EPUB per non-empty part.
        for i, part_files in enumerate(parts, 1):
            if part_files:
                create_epub_part(temp_dir, metadata_files, part_files, output_dir,
                                original_name, i, num_parts)

    finally:
        # Remove the scratch directory.
        if os.path.exists(temp_dir):
            shutil.rmtree(temp_dir)

def create_epub_part(base_dir, metadata_files, content_files, output_dir, original_name, part_num, total_parts):
    """Write one split part as a structurally valid EPUB archive.

    The ``mimetype`` entry is stored first and uncompressed (as the EPUB
    container format requires); every other metadata and content file is
    added with deflate compression.  ``base_dir`` is accepted for interface
    compatibility but not used here.
    """
    part_name = f"{original_name}-{total_parts}部分-第{part_num}部分.epub"
    output_path = os.path.join(output_dir, part_name)

    with zipfile.ZipFile(output_path, 'w', zipfile.ZIP_DEFLATED) as archive:
        # 'mimetype' must be the first, uncompressed entry.
        mimetype_src = next(
            (src for src, _, rel in metadata_files if rel == "mimetype"), None)
        if mimetype_src is not None:
            archive.write(mimetype_src, "mimetype", compress_type=zipfile.ZIP_STORED)

        # Remaining metadata (OPF, NCX, META-INF/...) keeps its relative path.
        for src, _, rel in metadata_files:
            if rel != "mimetype":
                archive.write(src, rel)

        # Content files selected for this part.
        for src, rel in content_files:
            archive.write(src, rel)

    print(f"已创建: {output_path} ({part_num}/{total_parts})")

def split_epub_by_pages(input_epub, output_dir, page_ranges, remove_empty_lines_flag=False):
    """
    按照章节范围拆分EPUB文件（修复损坏问题）
    
    参数:
        input_epub: 输入的EPUB文件路径
        output_dir: 输出目录
        page_ranges: 章节范围列表，如 [1, 3, 6] 表示拆分为1-2章、3-5章两部分
        remove_empty_lines_flag: 是否去除HTML文件中的空行
    """
    # 确保输出目录存在
    Path(output_dir).mkdir(parents=True, exist_ok=True)
    
    # 获取原始文件名（不带扩展名）
    original_name = os.path.splitext(os.path.basename(input_epub))[0]
    
    # 临时解压目录
    temp_dir = os.path.join(output_dir, f"temp_{original_name}_pages")
    
    try:
        # 解压EPUB文件
        with zipfile.ZipFile(input_epub, 'r') as zip_ref:
            zip_ref.extractall(temp_dir)
        
        # 如果需要去除空行，处理所有HTML文件
        if remove_empty_lines_flag:
            print("正在处理HTML文件，去除空行...")
            for root, dirs, files in os.walk(temp_dir):
                for file in files:
                    if file.endswith(('.html', '.xhtml', '.htm')):
                        file_path = os.path.join(root, file)
                        try:
                            # 读取文件内容
                            with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                                content = f.read()
                            
                            # 去除空行
                            processed_content = remove_empty_lines(content)
                            
                            # 写回文件
                            with open(file_path, 'w', encoding='utf-8') as f:
                                f.write(processed_content)
                        except Exception as e:
                            print(f"处理文件 {file_path} 时出错: {e}")
        
        # 1. 检查关键文件
        mimetype_file = os.path.join(temp_dir, "mimetype")
        container_file = os.path.join(temp_dir, "META-INF", "container.xml")
        
        if not os.path.exists(mimetype_file):
            print("警告: 缺少mimetype文件，将自动创建")
            with open(mimetype_file, 'w', encoding='utf-8') as f:
                f.write("application/epub+zip")
        
        if not os.path.exists(container_file):
            raise Exception("错误: 缺少container.xml文件，无法确定OPF文件位置")
        
        # 2. 解析container.xml获取OPF路径
        container_tree = ET.parse(container_file)
        container_root = container_tree.getroot()
        ns = {'ns': 'urn:oasis:names:tc:opendocument:xmlns:container'}
        rootfile_element = container_root.find('.//ns:rootfile', ns)
        if rootfile_element is None:
            raise Exception("错误: container.xml中没有找到rootfile元素")
        
        opf_path = rootfile_element.get('full-path')
        opf_full_path = os.path.join(temp_dir, opf_path)
        opf_dir = os.path.dirname(opf_full_path)
        
        if not os.path.exists(opf_full_path):
            raise Exception(f"错误: 找不到OPF文件 {opf_full_path}")
        
        # 3. 解析OPF文件
        opf_tree = ET.parse(opf_full_path)
        opf_root = opf_tree.getroot()
        opf_ns = {'opf': 'http://www.idpf.org/2007/opf', 'dc': 'http://purl.org/dc/elements/1.1/'}
        
        # 创建ID到文件路径的映射
        manifest = opf_root.find('.//opf:manifest', opf_ns)
        if manifest is None:
            raise Exception("错误: OPF文件中没有manifest部分")
        
        id_to_item = {}
        href_to_id = {}
        for item in manifest.findall('opf:item', opf_ns):
            item_id = item.get('id')
            item_href = item.get('href')
            if item_id and item_href:
                # 规范化路径
                norm_href = os.path.normpath(os.path.join(opf_dir, item_href.replace('\\', '/')))
                rel_path = os.path.relpath(norm_href, temp_dir)
                id_to_item[item_id] = (item, norm_href, rel_path)
                href_to_id[rel_path] = item_id
        
        # 4. 获取spine顺序和章节总数
        total_chapters, spine_items = get_epub_chapter_count(opf_root, opf_ns, id_to_item)
        print(f"EPUB文件章节总数: {total_chapters}")
        
        # 5. 提取目录信息
        toc_path, toc_tree = extract_toc_ncx(opf_root, opf_ns, temp_dir, opf_dir)
        if toc_path:
            print(f"找到目录文件: {os.path.basename(toc_path)}")
        else:
            print("警告: 未找到目录文件")
        
        # 6. 处理章节范围
        if not page_ranges or len(page_ranges) < 2:
            raise Exception("错误: 章节范围至少需要两个值")
        
        # 确保章节范围是升序的并且起始章节是1
        page_ranges = sorted(page_ranges)
        if page_ranges[0] != 1:
            page_ranges[0] = 1
            
        # 处理范围超出总章节数的情况
        if page_ranges[-1] > total_chapters + 1:
            # 保留原始范围中未超出的部分
            adjusted_ranges = []
            for i, value in enumerate(page_ranges):
                if value <= total_chapters + 1:
                    adjusted_ranges.append(value)
                else:
                    # 只替换超出范围的值
                    # 如果前一个值已经接近总章节数，则直接使用总章节数+1作为最后一个范围
                    if adjusted_ranges and adjusted_ranges[-1] >= total_chapters - 5:
                        adjusted_ranges.append(total_chapters + 1)
                        break
                    
                    # 获取原始范围中未超出部分的最大值
                    last_valid = adjusted_ranges[-1] if adjusted_ranges else 1
                    
                    # 设置最后一个值为总章节数+1
                    if i == len(page_ranges) - 1:
                        adjusted_ranges.append(total_chapters + 1)
                    else:
                        # 保持原始范围的相对间隔
                        # 计算原始间隔和可用间隔
                        original_step = value - last_valid
                        available_space = total_chapters + 1 - last_valid
                        
                        # 根据可用空间和原始间隔计算新的点
                        if available_space <= original_step:
                            # 如果可用空间小于原始间隔，直接添加总章节数+1
                            adjusted_ranges.append(total_chapters + 1)
                            break
                        else:
                            # 否则保持相对间隔
                            next_point = min(last_valid + original_step, total_chapters + 1)
                            adjusted_ranges.append(next_point)
            
            # 确保最后一个点是总章节数+1
            if adjusted_ranges[-1] < total_chapters + 1:
                adjusted_ranges.append(total_chapters + 1)
                
            print(f"章节范围超出总章节数，已调整为: {adjusted_ranges} (仅调整超出部分)")
            page_ranges = adjusted_ranges
        elif page_ranges[-1] <= total_chapters:
            # 确保最后一个范围包含最后一章
            page_ranges.append(total_chapters + 1)
            print(f"已自动添加最后一个章节范围点: {page_ranges}")
        
        # 7. 识别所有资源文件及其引用关系
        resource_files = set()
        resource_references = {}  # 映射资源到引用它的内容文件
        
        # 先标记所有资源类型文件
        for item in manifest.findall('opf:item', opf_ns):
            media_type = item.get('media-type', '')
            if media_type.startswith(('image/', 'text/css', 'application/javascript', 'font/')):
                href = item.get('href')
                norm_href = os.path.normpath(os.path.join(opf_dir, href.replace('\\', '/')))
                rel_path = os.path.relpath(norm_href, temp_dir)
                resource_files.add(rel_path)
                resource_references[rel_path] = set()  # 初始化引用集合
        
        # 分析内容文件中对资源的引用
        for idref, file_path, rel_path in spine_items:
            if file_path and os.path.exists(file_path) and rel_path:
                try:
                    # 读取内容文件
                    with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                        content = f.read()
                    
                    # 检查对资源的引用
                    for resource_path in resource_files:
                        resource_name = os.path.basename(resource_path)
                        if resource_name in content:
                            resource_references[resource_path].add(rel_path)
                except:
                    # 如果无法读取，假定是二进制文件，跳过分析
                    pass
        
        # 8. 按章节范围拆分
        total_parts = len(page_ranges) - 1
        print(f"将拆分为 {total_parts} 个部分")
        
        for i in range(total_parts):
            start_page = page_ranges[i] - 1  # 转为0-based
            end_page = page_ranges[i+1] - 1 if i+1 < len(page_ranges) else total_chapters
            
            # 获取当前部分的spine文件
            part_spine_items = spine_items[start_page:end_page]
            
            if not part_spine_items:
                print(f"警告: 章节范围 {start_page+1}-{end_page} 没有内容")
                continue
            
            # 创建这个部分的临时目录
            part_temp_dir = os.path.join(temp_dir, f"part_{i+1}")
            os.makedirs(part_temp_dir, exist_ok=True)
            
            # 复制必须的元数据文件
            shutil.copy(mimetype_file, os.path.join(part_temp_dir, "mimetype"))
            os.makedirs(os.path.join(part_temp_dir, "META-INF"), exist_ok=True)
            shutil.copy(container_file, os.path.join(part_temp_dir, "META-INF", "container.xml"))
            
            # 创建这个部分的OPF文件
            part_opf = copy.deepcopy(opf_root)
            
            # 更新manifest - 只保留需要的项目
            part_manifest = part_opf.find('.//opf:manifest', opf_ns)
            part_manifest.clear()  # 清空原有manifest
            
            # 添加必须的元数据项目
            def add_manifest_item(part_manifest, item_id, href, media_type):
                item = ET.Element('{http://www.idpf.org/2007/opf}item')
                item.set('id', item_id)
                item.set('href', href)
                item.set('media-type', media_type)
                part_manifest.append(item)
            
            # 添加spine项目到manifest和收集当前部分的内容文件路径
            spine_ids = set()
            spine_hrefs = set()  # 记录所有spine项目的href
            current_content_files = []
            for idref, file_path, rel_path in part_spine_items:
                if idref in id_to_item:
                    original_item, _, _ = id_to_item[idref]
                    add_manifest_item(
                        part_manifest,
                        original_item.get('id'),
                        original_item.get('href'),
                        original_item.get('media-type')
                    )
                    spine_ids.add(idref)
                    if file_path and rel_path:
                        spine_hrefs.add(rel_path)
                        current_content_files.append(rel_path)
            
            # 识别当前部分需要的资源文件
            needed_resources = set()
            for resource_path, referrers in resource_references.items():
                # 如果资源被当前部分的任何内容文件引用，则包含它
                if any(content_file in referrers for content_file in current_content_files):
                    needed_resources.add(resource_path)
                # 或者如果它是CSS或样式表，总是包含它（安全起见）
                elif resource_path.endswith(('.css', '.xpgt', '.js')):
                    needed_resources.add(resource_path)
                    
            # 添加需要的资源文件到manifest
            for rel_path in needed_resources:
                if rel_path in href_to_id:
                    item_id = href_to_id[rel_path]
                    if item_id in id_to_item and item_id not in spine_ids:
                        original_item, _, _ = id_to_item[item_id]
                        add_manifest_item(
                            part_manifest,
                            original_item.get('id'),
                            original_item.get('href'),
                            original_item.get('media-type')
                        )
            
            # 添加TOC文件到manifest（如果存在）
            if toc_path:
                toc_rel_path = os.path.relpath(toc_path, temp_dir)
                if toc_rel_path in href_to_id:
                    toc_id = href_to_id[toc_rel_path]
                    if toc_id in id_to_item and toc_id not in spine_ids:
                        original_item, _, _ = id_to_item[toc_id]
                        add_manifest_item(
                            part_manifest,
                            original_item.get('id'),
                            original_item.get('href'),
                            'application/x-dtbncx+xml'
                        )
            
            # 更新spine - 只保留当前部分
            part_spine = part_opf.find('.//opf:spine', opf_ns)
            part_spine.clear()  # 清空原有spine
            
            # 保持toc属性（如果存在）
            spine = opf_root.find('.//opf:spine', opf_ns)
            if spine is not None and spine.get('toc'):
                part_spine.set('toc', spine.get('toc'))
            
            for idref, _, _ in part_spine_items:
                itemref = ET.Element('{http://www.idpf.org/2007/opf}itemref')
                itemref.set('idref', idref)
                part_spine.append(itemref)
            
            # 保存修改后的OPF文件
            part_opf_path = os.path.join(part_temp_dir, opf_path)
            os.makedirs(os.path.dirname(part_opf_path), exist_ok=True)
            
            # 确保XML格式正确
            ET.register_namespace('', 'http://www.idpf.org/2007/opf')
            ET.register_namespace('dc', 'http://purl.org/dc/elements/1.1/')
            part_opf_tree = ET.ElementTree(part_opf)
            part_opf_tree.write(part_opf_path, encoding='utf-8', xml_declaration=True)
            
            # 处理并修改TOC文件（如果存在）
            if toc_path and toc_tree:
                # 复制TOC文件
                part_toc_path = os.path.join(part_temp_dir, os.path.relpath(toc_path, temp_dir))
                os.makedirs(os.path.dirname(part_toc_path), exist_ok=True)
                
                # 修改TOC只保留当前部分的navPoints
                part_toc_root = copy.deepcopy(toc_tree.getroot())
                toc_ns = {'ncx': 'http://www.daisy.org/z3986/2005/ncx/'}
                
                # 更新navMap
                navMap = part_toc_root.find('.//ncx:navMap', toc_ns)
                if navMap is not None:
                    # 收集所有navPoint
                    navPoints = navMap.findall('.//ncx:navPoint', toc_ns)
                    points_to_keep = []
                    
                    for navPoint in navPoints:
                        # 检查content指向的文件是否在当前部分中
                        content = navPoint.find('.//ncx:content', toc_ns)
                        if content is not None:
                            src = content.get('src', '')
                            if src:
                                # 处理锚点
                                src_parts = src.split('#')
                                src_file = src_parts[0]
                                
                                # 规范化路径
                                norm_href = os.path.normpath(os.path.join(opf_dir, src_file.replace('\\', '/')))
                                rel_path = os.path.relpath(norm_href, temp_dir)
                                
                                # 如果指向的文件在当前部分中，保留这个navPoint
                                if rel_path in spine_hrefs:
                                    points_to_keep.append(navPoint)
                    
                    # 清空navMap并添加保留的navPoint
                    for child in list(navMap):
                        navMap.remove(child)
                    
                    # 重新排序计数
                    for idx, navPoint in enumerate(points_to_keep, 1):
                        navPoint.set('playOrder', str(idx))
                        navMap.append(navPoint)
                
                # 保存修改后的TOC文件
                ET.register_namespace('', 'http://www.daisy.org/z3986/2005/ncx/')
                part_toc_tree = ET.ElementTree(part_toc_root)
                part_toc_tree.write(part_toc_path, encoding='utf-8', xml_declaration=True)
            
            # 复制所有需要的文件到临时目录
            all_files_to_copy = set()
            
            # 添加spine文件
            for _, file_path, rel_path in part_spine_items:
                if file_path and rel_path:
                    all_files_to_copy.add((file_path, rel_path))
            
            # 只添加当前部分需要的资源文件
            for rel_path in needed_resources:
                file_path = os.path.join(temp_dir, rel_path)
                if os.path.exists(file_path):
                    all_files_to_copy.add((file_path, rel_path))
            
            # 始终添加TOC相关的必要文件
            if toc_path:
                all_files_to_copy.add((toc_path, os.path.relpath(toc_path, temp_dir)))
            
            # 实际复制文件
            for file_path, rel_path in all_files_to_copy:
                dest_path = os.path.join(part_temp_dir, rel_path)
                os.makedirs(os.path.dirname(dest_path), exist_ok=True)
                shutil.copy2(file_path, dest_path)
            
            # 创建EPUB文件
            part_name = f"{start_page+1}-{end_page}章"
            output_filename = f"{original_name}-{part_name}.epub"
            output_path = os.path.join(output_dir, output_filename)
            
            # 创建ZIP文件(EPUB)
            with zipfile.ZipFile(output_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
                # 添加mimetype(必须不压缩)
                zipf.write(
                    os.path.join(part_temp_dir, "mimetype"),
                    "mimetype",
                    compress_type=zipfile.ZIP_STORED
                )
                
                # 添加其他文件
                for root, dirs, files in os.walk(part_temp_dir):
                    for file in files:
                        file_path = os.path.join(root, file)
                        rel_path = os.path.relpath(file_path, part_temp_dir)
                        if rel_path != "mimetype":
                            zipf.write(file_path, rel_path)
            
            print(f"已创建: {output_path} ({part_name})")
            
            # 清理这个部分的临时目录
            shutil.rmtree(part_temp_dir)
    
    finally:
        # 清理主临时目录
        if os.path.exists(temp_dir):
            shutil.rmtree(temp_dir)
            
def split_epub_balanced(input_epub: str, output_dir: str, max_size_mb: int = 30, remove_empty_lines_flag: bool = False) -> None:
    """
    Split an EPUB into several smaller EPUBs, balancing the size limit against
    chapter integrity: chapters are never split across output files, so a part
    may exceed the limit when a single chapter (plus its resources) is larger
    than the limit by itself.

    The function extracts the EPUB to a temp directory, analyzes each spine
    chapter's size and the resources (images/CSS/fonts) it references, greedily
    groups consecutive chapters under the size budget, then rebuilds one EPUB
    per group with a pruned OPF manifest/spine and a pruned NCX table of
    contents. The temp directory is always removed, even on error.

    Parameters:
        input_epub: path to the input EPUB file.
        output_dir: directory that receives the split EPUB files (created if
            missing); also hosts the temporary extraction directory.
        max_size_mb: target maximum size of each split file, in MB.
        remove_empty_lines_flag: if True, run ``remove_empty_lines`` over every
            HTML/XHTML file before splitting.

    Raises:
        Exception: when container.xml is missing, the OPF file cannot be
            located, or the OPF has no manifest.

    Relies on module-level helpers ``get_epub_chapter_count``,
    ``extract_toc_ncx`` and ``remove_empty_lines`` defined elsewhere in this
    file.
    """
    # Convert the MB limit to bytes.
    max_size_bytes = max_size_mb * 1024 * 1024
    
    # Make sure the output directory exists.
    Path(output_dir).mkdir(parents=True, exist_ok=True)
    
    # Original file name without the extension (used to name output parts).
    original_name = os.path.splitext(os.path.basename(input_epub))[0]
    
    # Temporary extraction directory (removed in the finally block).
    temp_dir = os.path.join(output_dir, f"temp_{original_name}_balanced")
    
    try:
        # Extract the EPUB archive (an EPUB is a ZIP container).
        with zipfile.ZipFile(input_epub, 'r') as zip_ref:
            zip_ref.extractall(temp_dir)
        
        # Optionally strip empty lines from every HTML file before analysis.
        if remove_empty_lines_flag:
            print("正在处理HTML文件，去除空行...")
            for root, dirs, files in os.walk(temp_dir):
                for file in files:
                    if file.endswith(('.html', '.xhtml', '.htm')):
                        file_path = os.path.join(root, file)
                        try:
                            # Read the file content.
                            with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                                content = f.read()
                            
                            # Strip empty lines.
                            processed_content = remove_empty_lines(content)
                            
                            # Write the processed content back.
                            with open(file_path, 'w', encoding='utf-8') as f:
                                f.write(processed_content)
                        except Exception as e:
                            print(f"处理文件 {file_path} 时出错: {e}")
        
        # 1. Check for the mandatory EPUB container files.
        mimetype_file = os.path.join(temp_dir, "mimetype")
        container_file = os.path.join(temp_dir, "META-INF", "container.xml")
        
        if not os.path.exists(mimetype_file):
            print("警告: 缺少mimetype文件，将自动创建")
            with open(mimetype_file, 'w', encoding='utf-8') as f:
                f.write("application/epub+zip")
        
        if not os.path.exists(container_file):
            raise Exception("错误: 缺少container.xml文件，无法确定OPF文件位置")
        
        # 2. Parse container.xml to locate the OPF package file.
        container_tree = ET.parse(container_file)
        container_root = container_tree.getroot()
        ns = {'ns': 'urn:oasis:names:tc:opendocument:xmlns:container'}
        rootfile_element = container_root.find('.//ns:rootfile', ns)
        if rootfile_element is None:
            raise Exception("错误: container.xml中没有找到rootfile元素")
        
        opf_path = rootfile_element.get('full-path')
        opf_full_path = os.path.join(temp_dir, opf_path)
        opf_dir = os.path.dirname(opf_full_path)
        
        if not os.path.exists(opf_full_path):
            raise Exception(f"错误: 找不到OPF文件 {opf_full_path}")
        
        # 3. Parse the OPF file.
        opf_tree = ET.parse(opf_full_path)
        opf_root = opf_tree.getroot()
        opf_ns = {'opf': 'http://www.idpf.org/2007/opf', 'dc': 'http://purl.org/dc/elements/1.1/'}
        
        # 4. Build id -> manifest item and href -> id lookup tables.
        manifest = opf_root.find('.//opf:manifest', opf_ns)
        if manifest is None:
            raise Exception("错误: OPF文件中没有manifest部分")
        
        id_to_item = {}
        href_to_id = {}
        for item in manifest.findall('opf:item', opf_ns):
            item_id = item.get('id')
            item_href = item.get('href')
            media_type = item.get('media-type', '')
            if item_id and item_href:
                # Normalize the href to a path relative to the temp directory.
                norm_href = os.path.normpath(os.path.join(opf_dir, item_href.replace('\\', '/')))
                rel_path = os.path.relpath(norm_href, temp_dir)
                id_to_item[item_id] = (item, norm_href, rel_path, media_type)
                href_to_id[rel_path] = item_id
        
        # 5. Get the spine order and the total chapter count.
        total_chapters, spine_items = get_epub_chapter_count(opf_root, opf_ns, id_to_item)
        print(f"EPUB文件章节总数: {total_chapters}")
        
        # 6. Extract table-of-contents (NCX) information.
        toc_path, toc_tree = extract_toc_ncx(opf_root, opf_ns, temp_dir, opf_dir)
        if toc_path:
            print(f"找到目录文件: {os.path.basename(toc_path)}")
        
        # 7. Analyze chapter sizes and resource dependencies.
        chapter_sizes = []  # per-chapter record: size and referenced resources
        resource_files = {}  # rel_path -> (full_path, size) for every resource
        metadata_files = []  # (full_path, rel_path) of files copied into every part
        metadata_size = 0    # total size of the metadata files, in bytes
        
        # Count everything under META-INF as metadata.
        for root, dirs, files in os.walk(os.path.join(temp_dir, "META-INF")):
            for file in files:
                file_path = os.path.join(root, file)
                rel_path = os.path.relpath(file_path, temp_dir)
                size = os.path.getsize(file_path)
                metadata_files.append((file_path, rel_path))
                metadata_size += size
        
        # Count the mimetype file as metadata.
        if os.path.exists(mimetype_file):
            size = os.path.getsize(mimetype_file)
            metadata_files.append((mimetype_file, "mimetype"))
            metadata_size += size
            
        # Count the OPF and TOC files as metadata.
        if os.path.exists(opf_full_path):
            size = os.path.getsize(opf_full_path)
            metadata_files.append((opf_full_path, opf_path))
            metadata_size += size
            
        if toc_path and os.path.exists(toc_path):
            size = os.path.getsize(toc_path)
            rel_path = os.path.relpath(toc_path, temp_dir)
            metadata_files.append((toc_path, rel_path))
            metadata_size += size
        
        # Identify the resource files (images, CSS, scripts, fonts).
        for item_id, (item, full_path, rel_path, media_type) in id_to_item.items():
            # Standard resource media types.
            if (media_type.startswith(('image/', 'text/css', 'application/javascript', 'font/')) or 
                media_type in ['image/svg+xml', 'application/vnd.ms-opentype', 'application/x-font-opentype',
                               'application/x-font-truetype', 'application/x-font-ttf']):
                if os.path.exists(full_path):
                    size = os.path.getsize(full_path)
                    resource_files[rel_path] = (full_path, size)
            # Some EPUBs use non-standard media types; fall back to the extension.
            elif rel_path.endswith(('.jpg', '.jpeg', '.png', '.gif', '.svg', '.css', '.js', 
                                   '.ttf', '.otf', '.woff', '.woff2')):
                if os.path.exists(full_path):
                    size = os.path.getsize(full_path)
                    resource_files[rel_path] = (full_path, size)
        
        # Regex alternation used to find resource references in chapter files.
        html_patterns = [
            'href=["\']([^"\']+)["\']',  # matches href="..." or href='...'
            'src=["\']([^"\']+)["\']',   # matches src="..." or src='...'
            'url\\(["\']?([^"\'\\)]+)["\']?\\)',  # matches url(...), url("...") or url('...')
            'xmlns:xlink=["\'][^"\']*["\']\\s+xlink:href=["\']([^"\']+)["\']'  # matches xlink:href inside SVG
        ]
        css_patterns = [
            'url\\(["\']?([^"\'\\)]+)["\']?\\)',  # matches url(...), url("...") or url('...')
            '@import\\s+["\']([^"\']+)["\']',     # matches @import "..." or @import '...'
        ]
        patterns = re.compile('|'.join(html_patterns + css_patterns))
        
        for i, (idref, file_path, rel_path) in enumerate(spine_items):
            if file_path and rel_path and os.path.exists(file_path):
                # Base chapter size (the chapter file itself).
                size = os.path.getsize(file_path)
                
                # Discover the resources this chapter references.
                referenced_resources = set()
                try:
                    with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                        content = f.read()
                    
                    # Find references via the combined regex; each match is a
                    # tuple with one group per alternation branch.
                    file_dir = os.path.dirname(file_path)
                    matches = patterns.findall(content)
                    for match in matches:
                        for m in match:
                            if m:  # non-empty group
                                # Skip data URIs, external links, and anchors.
                                if (m.startswith('data:') or m.startswith('http:') or 
                                    m.startswith('https:') or m.startswith('#')):
                                    continue
                                
                                # Resolve the reference relative to the chapter file.
                                full_res_path = os.path.normpath(os.path.join(file_dir, m.split('#')[0]))
                                try:
                                    res_rel_path = os.path.relpath(full_res_path, temp_dir)
                                    if res_rel_path in resource_files:
                                        referenced_resources.add(res_rel_path)
                                except ValueError:
                                    # Path conversion failed; likely an invalid path.
                                    pass
                    
                    # Fallback: a bare image basename appearing anywhere in the
                    # text also counts as a reference.
                    for res_path in resource_files:
                        res_name = os.path.basename(res_path)
                        if (res_name in content and res_path not in referenced_resources and 
                            (res_path.endswith(('.jpg', '.jpeg', '.png', '.gif', '.svg')))):
                            referenced_resources.add(res_path)
                            
                except UnicodeDecodeError:
                    # Possibly a non-text chapter file; fall back to searching
                    # the raw bytes for resource basenames.
                    try:
                        with open(file_path, 'rb') as f:
                            binary_content = f.read()
                            for res_path in resource_files:
                                res_name = os.path.basename(res_path)
                                if res_name.encode('utf-8') in binary_content:
                                    referenced_resources.add(res_path)
                    except:
                        print(f"警告: 无法分析文件 {rel_path} 中的资源引用")
                except Exception as e:
                    print(f"警告: 分析文件 {rel_path} 时出错: {e}")
                
                # Record this chapter's info.
                chapter_sizes.append({
                    'index': i + 1,  # 1-based index
                    'idref': idref,
                    'file_path': file_path,
                    'rel_path': rel_path,
                    'size': size,
                    'resources': referenced_resources
                })
        
        # Handle cascading references inside CSS files.
        # NOTE(review): "refs" here is actually the (full_path, size) tuple
        # from resource_files — the name is misleading but the value is only
        # re-unpacked below.
        css_files = {path: refs for path, refs in resource_files.items() if path.endswith('.css')}
        css_references = {path: set() for path in css_files}
        
        # Analyze resource references inside each CSS file.
        for css_path, (file_path, _) in css_files.items():
            try:
                with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                    css_content = f.read()
                
                # Find resources referenced via url(...) in the CSS.
                css_dir = os.path.dirname(file_path)
                matches = re.findall('url\\(["\']?([^"\'\\)]+)["\']?\\)', css_content)
                for match in matches:
                    if match.startswith('data:') or match.startswith('http:') or match.startswith('https:'):
                        continue
                    
                    full_res_path = os.path.normpath(os.path.join(css_dir, match))
                    try:
                        res_rel_path = os.path.relpath(full_res_path, temp_dir)
                        if res_rel_path in resource_files:
                            css_references[css_path].add(res_rel_path)
                    except ValueError:
                        pass
            except:
                pass
        
        # Extend each chapter's resource set with the CSS cascade.
        for chapter in chapter_sizes:
            css_to_add = set()
            for res in list(chapter['resources']):
                if res in css_references:
                    # The chapter uses a CSS file: pull in everything the CSS references.
                    css_to_add.update(css_references[res])
            
            # Add the CSS-referenced resources.
            chapter['resources'].update(css_to_add)
        
        # 8. Compute the size budget available to each split part.
        available_size = max_size_bytes - metadata_size
        print(f"元数据大小: {metadata_size/1024/1024:.2f}MB, 每部分可用大小: {available_size/1024/1024:.2f}MB")
        
        # 9. Group chapters greedily under the budget.
        parts = []
        current_part = []
        current_size = 0
        current_resources = set()
        
        # Detect chapters that exceed the budget on their own.
        oversized_chapters = []
        for chapter in chapter_sizes:
            # Total size of the chapter plus all of its resources.
            chapter_total_size = chapter['size']
            for res in chapter['resources']:
                if res in resource_files:
                    chapter_total_size += resource_files[res][1]
            
            # A single chapter above the budget is flagged as oversized.
            if chapter_total_size > available_size:
                oversized_chapters.append((chapter['index'], chapter_total_size / 1024 / 1024))
        
        # Warn the user when oversized chapters exist.
        if oversized_chapters:
            print("警告: 检测到以下章节(包括其引用的资源)大小超过限制:")
            for index, size_mb in oversized_chapters:
                print(f"  - 第{index}章: {size_mb:.2f}MB")
            print(f"这些章节仍会保持完整，但会导致部分拆分文件超过 {max_size_mb}MB 限制")
        
        # Grouping strategy that keeps every chapter intact.
        for chapter in chapter_sizes:
            # Size added by this chapter's resources not yet in the part.
            chapter_res_size = 0
            new_resources = set()
            
            for res in chapter['resources']:
                if res not in current_resources and res in resource_files:
                    new_resources.add(res)
                    chapter_res_size += resource_files[res][1]
            
            # Start a new part if adding this chapter would blow the budget —
            # but only when the current part is non-empty (prevents an
            # infinite loop on oversized chapters).
            if current_part and (current_size + chapter['size'] + chapter_res_size > available_size):
                parts.append(current_part.copy())
                current_part = []
                current_size = 0
                current_resources = set()
                
                # Recompute the added size against the fresh, empty part.
                chapter_res_size = 0
                new_resources = set()
                for res in chapter['resources']:
                    if res in resource_files:
                        new_resources.add(res)
                        chapter_res_size += resource_files[res][1]
            
            # Always append the chapter to the current part (keeps chapters whole).
            current_part.append(chapter)
            current_size += chapter['size'] + chapter_res_size
            current_resources.update(new_resources)
            
            # Log the placement of this chapter.
            print(f"添加章节 {chapter['index']} (大小: {(chapter['size']/1024/1024):.2f}MB + 资源: {(chapter_res_size/1024/1024):.2f}MB) 到部分 {len(parts)+1}")
            if current_size > available_size:
                print(f"  警告: 当前部分大小 {current_size/1024/1024:.2f}MB 超过限制 {available_size/1024/1024:.2f}MB")
        
        # Append the final part.
        if current_part:
            parts.append(current_part)
        
        # 10. Build each split part.
        print(f"将EPUB拆分为 {len(parts)} 个部分，确保章节完整性")
        
        for i, part_chapters in enumerate(parts, 1):
            start_chapter = part_chapters[0]['index']
            end_chapter = part_chapters[-1]['index']
            part_name = f"{start_chapter}-{end_chapter}章"
            
            # Estimate the part size (chapters + unique resources).
            estimated_size = 0
            resources_used = set()
            for chapter in part_chapters:
                estimated_size += chapter['size']
                for res in chapter['resources']:
                    if res not in resources_used and res in resource_files:
                        resources_used.add(res)
                        estimated_size += resource_files[res][1]
            
            print(f"创建部分 {i}/{len(parts)}: {part_name} (包含 {len(part_chapters)} 个章节, 估计大小: {estimated_size/1024/1024:.2f}MB)")
            
            # Create this part's temporary directory.
            part_temp_dir = os.path.join(temp_dir, f"part_{i}")
            os.makedirs(part_temp_dir, exist_ok=True)
            
            # Copy the mandatory metadata files.
            for file_path, rel_path in metadata_files:
                dest_path = os.path.join(part_temp_dir, rel_path)
                os.makedirs(os.path.dirname(dest_path), exist_ok=True)
                shutil.copy2(file_path, dest_path)
            
            # Build this part's OPF document from a deep copy of the original.
            part_opf = copy.deepcopy(opf_root)
            
            # Update the manifest — keep only the items this part needs.
            part_manifest = part_opf.find('.//opf:manifest', opf_ns)
            part_manifest.clear()  # drop the original manifest entries
            
            # Helper: append one <item> to the part manifest.
            # NOTE(review): redefined on every loop iteration; harmless but
            # could be hoisted out of the loop.
            def add_manifest_item(part_manifest, item_id, href, media_type):
                item = ET.Element('{http://www.idpf.org/2007/opf}item')
                item.set('id', item_id)
                item.set('href', href)
                item.set('media-type', media_type)
                part_manifest.append(item)
            
            # Add the spine items to the manifest.
            spine_ids = set()
            spine_hrefs = set()  # rel_paths of all spine items in this part
            needed_resources = set()
            
            for chapter in part_chapters:
                idref = chapter['idref']
                if idref in id_to_item:
                    item, _, _, media_type = id_to_item[idref]
                    add_manifest_item(
                        part_manifest,
                        item.get('id'),
                        item.get('href'),
                        media_type
                    )
                    spine_ids.add(idref)
                    spine_hrefs.add(chapter['rel_path'])
                    needed_resources.update(chapter['resources'])
            
            # Add the required resource files to the manifest.
            for resource_path in needed_resources:
                if resource_path in href_to_id:
                    item_id = href_to_id[resource_path]
                    if item_id in id_to_item and item_id not in spine_ids:
                        item, _, _, media_type = id_to_item[item_id]
                        add_manifest_item(
                            part_manifest,
                            item.get('id'),
                            item.get('href'),
                            media_type
                        )
            
            # Add the TOC file to the manifest (if present).
            if toc_path:
                toc_rel_path = os.path.relpath(toc_path, temp_dir)
                if toc_rel_path in href_to_id:
                    toc_id = href_to_id[toc_rel_path]
                    if toc_id in id_to_item and toc_id not in spine_ids:
                        item, _, _, media_type = id_to_item[toc_id]
                        add_manifest_item(
                            part_manifest,
                            item.get('id'),
                            item.get('href'),
                            media_type
                        )
            
            # Update the spine — keep only the current part's chapters.
            part_spine = part_opf.find('.//opf:spine', opf_ns)
            part_spine.clear()  # drop the original spine entries
            
            # Preserve the spine's toc attribute (if present).
            spine = opf_root.find('.//opf:spine', opf_ns)
            if spine is not None and spine.get('toc'):
                part_spine.set('toc', spine.get('toc'))
            
            # Add chapters to the spine in their original order.
            for chapter in part_chapters:
                itemref = ET.Element('{http://www.idpf.org/2007/opf}itemref')
                itemref.set('idref', chapter['idref'])
                part_spine.append(itemref)
            
            # Save the modified OPF file.
            part_opf_path = os.path.join(part_temp_dir, opf_path)
            os.makedirs(os.path.dirname(part_opf_path), exist_ok=True)
            
            # Register the namespaces so the XML serializes with clean prefixes.
            ET.register_namespace('', 'http://www.idpf.org/2007/opf')
            ET.register_namespace('dc', 'http://purl.org/dc/elements/1.1/')
            part_opf_tree = ET.ElementTree(part_opf)
            part_opf_tree.write(part_opf_path, encoding='utf-8', xml_declaration=True)
            
            # Process and rewrite the TOC file (if present).
            if toc_path and toc_tree:
                # Destination path for this part's TOC copy.
                part_toc_path = os.path.join(part_temp_dir, os.path.relpath(toc_path, temp_dir))
                os.makedirs(os.path.dirname(part_toc_path), exist_ok=True)
                
                # Keep only this part's navPoints in the TOC.
                part_toc_root = copy.deepcopy(toc_tree.getroot())
                toc_ns = {'ncx': 'http://www.daisy.org/z3986/2005/ncx/'}
                
                # Update the navMap.
                navMap = part_toc_root.find('.//ncx:navMap', toc_ns)
                if navMap is not None:
                    # Collect every navPoint (including nested ones).
                    navPoints = navMap.findall('.//ncx:navPoint', toc_ns)
                    points_to_keep = []
                    
                    for navPoint in navPoints:
                        # Check whether the content target lies inside this part.
                        content = navPoint.find('.//ncx:content', toc_ns)
                        if content is not None:
                            src = content.get('src', '')
                            if src:
                                # Strip the fragment anchor.
                                src_parts = src.split('#')
                                src_file = src_parts[0]
                                
                                # Normalize to a temp_dir-relative path.
                                norm_href = os.path.normpath(os.path.join(opf_dir, src_file.replace('\\', '/')))
                                rel_path = os.path.relpath(norm_href, temp_dir)
                                
                                # Keep the navPoint if it targets a file in this part.
                                if rel_path in spine_hrefs:
                                    points_to_keep.append(navPoint)
                    
                    # Clear the navMap and re-append only the kept navPoints.
                    for child in list(navMap):
                        navMap.remove(child)
                    
                    # Renumber playOrder sequentially.
                    for idx, navPoint in enumerate(points_to_keep, 1):
                        navPoint.set('playOrder', str(idx))
                        navMap.append(navPoint)
                
                # Save the modified TOC file.
                ET.register_namespace('', 'http://www.daisy.org/z3986/2005/ncx/')
                part_toc_tree = ET.ElementTree(part_toc_root)
                part_toc_tree.write(part_toc_path, encoding='utf-8', xml_declaration=True)
            
            # Collect every file that must be copied into the part directory.
            all_files_to_copy = set()
            
            # Chapter files.
            for chapter in part_chapters:
                all_files_to_copy.add((chapter['file_path'], chapter['rel_path']))
            
            # Required resource files.
            for resource_path in needed_resources:
                if resource_path in resource_files:
                    file_path, _ = resource_files[resource_path]
                    all_files_to_copy.add((file_path, resource_path))
            
            # Perform the copies.
            for file_path, rel_path in all_files_to_copy:
                dest_path = os.path.join(part_temp_dir, rel_path)
                os.makedirs(os.path.dirname(dest_path), exist_ok=True)
                shutil.copy2(file_path, dest_path)
            
            # Create the output EPUB file.
            output_filename = f"{original_name}-{part_name}.epub"
            output_path = os.path.join(output_dir, output_filename)
            
            # Create the ZIP archive (EPUB).
            with zipfile.ZipFile(output_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
                # Add mimetype first (the spec requires it stored uncompressed).
                mimetype_in_part = os.path.join(part_temp_dir, "mimetype")
                if os.path.exists(mimetype_in_part):
                    zipf.write(
                        mimetype_in_part,
                        "mimetype",
                        compress_type=zipfile.ZIP_STORED
                    )
                else:
                    # No mimetype file on disk; synthesize one.
                    zipf.writestr("mimetype", "application/epub+zip", compress_type=zipfile.ZIP_STORED)
                
                # Add every other file.
                for root, dirs, files in os.walk(part_temp_dir):
                    for file in files:
                        file_path = os.path.join(root, file)
                        rel_path = os.path.relpath(file_path, part_temp_dir)
                        if rel_path != "mimetype":
                            zipf.write(file_path, rel_path)
            
            # Report the actual output size.
            actual_size_mb = os.path.getsize(output_path) / (1024 * 1024)
            print(f"已创建: {output_path} (大小: {actual_size_mb:.2f}MB)")
            
            # Remove this part's temporary directory.
            shutil.rmtree(part_temp_dir)
    
    finally:
        # Always remove the main temporary directory.
        if os.path.exists(temp_dir):
            shutil.rmtree(temp_dir)

def main(input_file="input.epub", output_directory="output_files", max_size_mb=30, split_mode="balanced", chapter_ranges=None, remove_empty_lines_flag=False):
    """Entry point for the EPUB splitting tool.

    Args:
        input_file: path of the EPUB file to process.
        output_directory: directory that receives the generated files.
        max_size_mb: upper size limit (MB) for each split part.
        split_mode: one of "balanced" (even split), "size" (split by size),
            "pages" (split by chapter ranges), or "remove_empty_lines"
            (strip blank lines only, no splitting).
        chapter_ranges: chapter boundaries, used only when
            split_mode == "pages", e.g. [1, 40, 80].
        remove_empty_lines_flag: also strip blank lines from HTML files while
            splitting; ignored when split_mode == "remove_empty_lines".

    Returns:
        False when the input file does not exist; the result of
        remove_empty_lines_from_epub() in "remove_empty_lines" mode;
        True after a split run.
    """
    print("EPUB拆分工具")
    print("-" * 50)

    # Guard clause: bail out immediately if the source file is missing.
    if not os.path.exists(input_file):
        print(f"错误: 找不到文件 '{input_file}'")
        print("请确保文件路径正确")
        return False

    print(f"输入文件: {input_file}")
    print(f"输出目录: {output_directory}")

    # Create the destination directory (including parents) before writing.
    Path(output_directory).mkdir(parents=True, exist_ok=True)

    if split_mode == "remove_empty_lines":
        # Blank-line removal only: rewrite the book in place, no splitting.
        base_name = os.path.splitext(os.path.basename(input_file))[0]
        target_path = os.path.join(output_directory, base_name + "_no_empty_lines.epub")
        print(f"正在处理: 去除EPUB文件中的空行")
        # Make sure the BeautifulSoup dependency is available for HTML cleanup.
        check_and_install_dependencies()
        return remove_empty_lines_from_epub(input_file, target_path)

    print(f"最大大小: {max_size_mb}MB")
    print(f"拆分模式: {split_mode}")
    if remove_empty_lines_flag:
        print("将在拆分过程中去除HTML文件中的空行")
        # Make sure the BeautifulSoup dependency is available for HTML cleanup.
        check_and_install_dependencies()
    print("-" * 50)

    if split_mode == "size":
        # Split purely by target file size.
        split_epub_by_size(input_file, output_directory, max_size_mb, remove_empty_lines_flag)
    elif split_mode == "pages":
        # Split by chapter ranges; fall back to chapters 1-40 when unspecified.
        ranges = chapter_ranges if chapter_ranges is not None else [1, 40]
        split_epub_by_pages(input_file, output_directory, ranges, remove_empty_lines_flag)
    else:
        # Default: balanced split across parts.
        split_epub_balanced(input_file, output_directory, max_size_mb, remove_empty_lines_flag)

    return True

if __name__ == "__main__":
    # Command-line entry point, kept for backward compatibility.
    # Usage: script.py [input_file] [output_dir] [max_size_mb] [split_mode] [remove_empty_lines]
    # NOTE: `os` and `sys` are already imported at the top of the file, so the
    # redundant in-block imports were removed.

    # Defaults used when the corresponding positional argument is omitted.
    input_file = "D:/天星科技发展有限公司/202506moose大模型-中华文化项目/资料与文档/文化历史地理宗教电子书籍/超过30M-未拆分/剑桥中国史（套装全11卷）.epub"  # default input file
    output_directory = "output_files"  # default output directory
    max_size_mb = 30  # default maximum size per part (MB)
    split_mode = "balanced"  # default split mode
    remove_empty_lines_flag = False  # do not strip blank lines by default

    # Override the defaults with positional command-line arguments.
    if len(sys.argv) > 1:
        input_file = sys.argv[1]
    if len(sys.argv) > 2:
        output_directory = sys.argv[2]
    if len(sys.argv) > 3:
        try:
            max_size_mb = int(sys.argv[3])
        except ValueError:
            # Catch only a malformed integer; a bare except would also
            # swallow SystemExit/KeyboardInterrupt.
            print(f"警告: 无法解析大小参数 '{sys.argv[3]}'，使用默认值 30MB")
    if len(sys.argv) > 4:
        split_mode = sys.argv[4]
    # Any accepted truthy token enables blank-line removal during the split.
    if len(sys.argv) > 5 and sys.argv[5].lower() in ['true', '1', 'yes', 'y']:
        remove_empty_lines_flag = True

    # Delegate the actual split logic to main().
    main(input_file, output_directory, max_size_mb, split_mode, None, remove_empty_lines_flag)

