import os
import sys
import argparse
import time
import threading
import importlib.util
import re
import shutil
from typing import Tuple, List, Optional
from pathlib import Path
from colorama import init, Fore, Style, Back
from ebooklib import epub
import warnings
warnings.filterwarnings("ignore", category=FutureWarning, module="ebooklib.epub")
from bs4 import BeautifulSoup, SoupStrainer
import chardet
from functools import lru_cache
import traceback
import zipfile

# Initialize colorama so ANSI color sequences render on Windows consoles too
init()

cxname = '文件分割工具V2.0 BY SHIKEAIXY'  # program title shown in the banner and console title (refactored by deepseek)

class EncodingUtils:
    """Utilities for detecting and decoding text encodings."""

    @staticmethod
    @lru_cache(maxsize=32)
    def detect_encoding(data: bytes, priority_encodings: Tuple[str, ...] = ('utf-8', 'gb18030', 'gbk', 'big5', 'utf-16')) -> str:
        """Detect the encoding of *data* (result cached per input).

        Order of checks:
          1. BOM markers (UTF-8 / UTF-16 LE / UTF-16 BE) — authoritative.
          2. chardet, accepted only when confidence > 0.7 AND a strict
             decode with the reported encoding actually succeeds.
          3. The configured priority list, rejecting candidates whose decode
             contains replacement characters in the first 1000 chars.

        Falls back to 'utf-8' when nothing matches.
        """
        # BOM markers decide immediately.
        if data.startswith(b'\xef\xbb\xbf'):
            return 'utf-8-sig'
        if data.startswith(b'\xff\xfe'):
            return 'utf-16-le'
        if data.startswith(b'\xfe\xff'):
            return 'utf-16-be'

        # Prefer chardet because it is usually more accurate.
        try:
            result = chardet.detect(data)
            detected_encoding = result.get('encoding')
            # chardet may return encoding=None; guard before decoding.
            if detected_encoding and result.get('confidence', 0) > 0.7:
                try:
                    data.decode(detected_encoding)  # verify the guess really works
                    return detected_encoding
                except (UnicodeDecodeError, LookupError):
                    pass
        except Exception:
            # chardet failure is non-fatal; fall through to the priority list.
            pass

        # Try the configured encodings in priority order.
        for enc in priority_encodings:
            try:
                if enc.lower() == 'utf-16':
                    # For bare 'utf-16', probe both byte orders explicitly.
                    try:
                        decoded = data.decode('utf-16-le')
                        if '�' not in decoded[:1000]:  # sample the first 1000 chars
                            return 'utf-16-le'
                    except UnicodeDecodeError:
                        try:
                            decoded = data.decode('utf-16-be')
                            if '�' not in decoded[:1000]:
                                return 'utf-16-be'
                        except UnicodeDecodeError:
                            continue
                else:
                    decoded = data.decode(enc)
                    if '�' not in decoded[:1000]:  # sample the first 1000 chars
                        return enc
            except UnicodeDecodeError:
                continue

        return 'utf-8'  # default fallback

    @staticmethod
    @lru_cache(maxsize=32)
    def try_multiple_encodings(data: bytes, encodings: Tuple[str, ...] = None) -> Tuple[str, str]:
        """Decode *data* with each candidate encoding and return
        ``(best_encoding, decoded_text)``.

        "Best" means the fewest U+FFFD replacement characters; a decode with
        zero replacements wins immediately. Cached, so both arguments must
        stay hashable (bytes / tuple).
        """
        if encodings is None:
            encodings = ('utf-8', 'gb18030', 'gbk', 'big5', 'utf-16-le', 'utf-16-be')

        best_encoding = 'utf-8'
        best_content = ""
        min_replacements = float('inf')

        for encoding in encodings:
            try:
                content = data.decode(encoding, errors='replace')
                # Count replacement characters as a damage metric.
                replacements = content.count('�')

                if replacements < min_replacements:
                    min_replacements = replacements
                    best_encoding = encoding
                    best_content = content

                if replacements == 0:
                    return encoding, content  # perfect decode, stop early

            except UnicodeDecodeError:
                # errors='replace' normally prevents this, but keep the guard.
                continue

        return best_encoding, best_content

class SimpleLogger:
    """Minimal thread-safe console logger with colored status output."""

    def __init__(self, debug_mode=False):
        self.debug_mode = debug_mode      # verbose diagnostics flag
        self.lock = threading.Lock()      # guards processed_count updates
        self.file_count = 0               # total number of files discovered
        self.processed_count = 0          # number of files handled so far

    def print_header(self):
        """Print the program banner and debug-mode status."""
        print(f"{Fore.CYAN}开源软件: https://gitee.com/SHIKEAIXY/Divide_Files{Style.RESET_ALL}")
        print(f"\n{Back.BLUE}{Fore.WHITE} {cxname} {Style.RESET_ALL}")
        print(f"{Fore.CYAN}(调试模式: {'开启' if self.debug_mode else '关闭'}){Style.RESET_ALL}\n")

    def print_file_count(self, count):
        """Print (and remember) the number of files found."""
        self.file_count = count
        print(f"{Back.GREEN}{Fore.WHITE} 找到 {count} 个文件 {Style.RESET_ALL}")

    def print_processing(self, filename):
        """Print a progress line for the file currently being processed."""
        with self.lock:
            self.processed_count += 1
            # BUG FIX: the filename was previously dropped from the message
            # (a literal "(unknown)" was printed instead).
            print(f"[{self.processed_count}/{self.file_count}] 处理中: {filename}")

    def print_success(self, message):
        """Print a green success message."""
        print(f"{Fore.GREEN}✓ {message}{Style.RESET_ALL}")

    def print_warning(self, message):
        """Print a yellow warning message."""
        print(f"{Fore.YELLOW}⚠ {message}{Style.RESET_ALL}")

    def print_error(self, message):
        """Print a red error message."""
        print(f"{Fore.RED}✗ {message}{Style.RESET_ALL}")

    def print_summary(self, elapsed_time):
        """Print the final summary with elapsed wall-clock time."""
        print(f"\n{Back.GREEN}{Fore.WHITE} 处理完成 (耗时: {elapsed_time:.1f}秒) {Style.RESET_ALL}")

class ConfigManager:
    """Creates and loads the user-editable ``config.py`` next to the program."""

    # Template written verbatim to config.py on first run.
    # User-facing content, intentionally kept in Chinese — do not translate.
    DEFAULT_CONFIG = '''# 文件分割工具配置
# 调试选项
debug_mode = False  # 启用详细调试信息
log_file = "日志.log"  # 日志文件路径

# 处理设置
max_split_size = 4 * 1024 * 1024  # 默认分割大小(4MB)
output_dir = "分割文件"  # 输出目录
supported_extensions = (".txt", ".epub", ".fb2", ".pdf", ".mobi", ".rtf")
keep_original = True  # 保留源文件
convert_to_txt = True  # 转换为TXT
encoding_priority = ("utf-8", "gb18030", "gbk", "big5", "utf-16")
clean_spaces = True  # 清理空格空行
max_epub_size = 50  # EPUB最大处理大小(MB)
use_multithreading = True  # 启用多线程
remove_corrupted = False  # 删除乱码文件(调试时建议关闭)
corruption_threshold = 0.05  # 乱码阈值(5%)
preserve_formatting = False  # 保留原始格式
safe_split_chars = "。！？；\\n"  # 安全分割字符

# EPUB处理选项
epub_ignore_missing = True  # 忽略缺失的文件
epub_max_retries = 3       # 最大重试次数

# 自动继续选项
auto_continue = False      # 自动继续处理
'''

    @classmethod
    def initialize(cls):
        """On first run, write config.py and a help file, then exit so the
        user can review them; otherwise load and return the config module."""
        config_path = Path(cls.get_app_path()) / "config.py"
        help_path = Path(cls.get_app_path()) / "教程.txt"
        
        if not config_path.exists():
            try:
                with open(config_path, 'w', encoding='utf-8') as f:
                    f.write(cls.DEFAULT_CONFIG)
                
                # User-facing help text (runtime content, kept in Chinese).
                help_content = '''文件分割工具使用说明
========================
1. 将要处理的文件放在程序目录
2. 支持格式: .txt/.epub/.fb2/.pdf/.mobi/.rtf
3. 分割后文件保存在"分割文件"目录
4. 修改config.py可调整参数
5. 重新运行程序即可'''
                with open(help_path, 'w', encoding='utf-8') as f:
                    f.write(help_content)
                
                # Stop here: the user should adjust config.py and rerun.
                print(f"{Fore.YELLOW}首次运行，已创建配置文件{Style.RESET_ALL}")
                input("按Enter键退出并配置...")
                sys.exit(0)
            except Exception as e:
                print(f"{Fore.RED}创建配置文件失败: {str(e)}{Style.RESET_ALL}")
                sys.exit(1)
        
        return cls.load_config(config_path)

    @staticmethod
    def load_config(config_path):
        """Import *config_path* as a Python module and return it.

        Exits the process when the file cannot be executed.
        """
        try:
            spec = importlib.util.spec_from_file_location("config", config_path)
            config = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(config)
            
            # Backfill attributes that older config files may lack.
            if not hasattr(config, 'auto_continue'):
                config.auto_continue = False
                
            return config
        except Exception as e:
            print(f"{Fore.RED}加载配置文件失败: {str(e)}{Style.RESET_ALL}")
            sys.exit(1)

    @staticmethod
    def get_app_path() -> str:
        """Return the directory of the frozen executable (PyInstaller etc.)
        or of this script when run normally."""
        if getattr(sys, 'frozen', False):
            return os.path.dirname(sys.executable)
        return os.path.dirname(os.path.abspath(__file__))

class TextProcessor:
    """Static helpers for cleaning text content and file names."""

    @staticmethod
    def clean_text_content(text: str, config) -> str:
        """Normalize line endings, drop invisible control characters and,
        when ``config.clean_spaces`` is set, collapse blank lines."""
        # Strip zero-width and bidi control characters.
        text = re.sub(r'[\u200b-\u200f\u202a-\u202e]', '', text)

        # Normalize CRLF / bare CR to LF.
        text = text.replace('\r\n', '\n').replace('\r', '\n')

        if config.clean_spaces:
            # Keep only non-empty lines, each stripped of surrounding space.
            kept = []
            for raw_line in text.split('\n'):
                stripped = raw_line.strip()
                if stripped:
                    kept.append(stripped)
            text = '\n'.join(kept)

        return text

    @staticmethod
    def sanitize_filename(filename: str) -> str:
        """Return *filename* with bracketed segments removed and
        filesystem-unsafe characters replaced by underscores."""
        # Remove bracketed content for every common bracket style.
        for bracket_re in (
            r'（[^）]*）',   # full-width parentheses
            r'\([^)]*\)',   # ASCII parentheses
            r'\[[^]]*\]',   # square brackets
            r'【[^】]*】',   # lenticular brackets
            r'\{[^}]*\}',   # curly braces
        ):
            filename = re.sub(bracket_re, '', filename)

        # Replace characters that are illegal in file names.
        filename = re.sub(r'[<>:"/\\|?*]', '_', filename)

        # Trim, then collapse runs of whitespace to single spaces.
        return re.sub(r'\s+', ' ', filename.strip())

class FileValidator:
    """Checks files for structural validity and garbled-text corruption."""

    @staticmethod
    def is_epub_valid(file_path: str, config) -> bool:
        """Return True when *file_path* looks like a readable EPUB.

        Performs a cheap ZIP-structure check first, then asks ebooklib to
        parse the file with lenient options.
        """
        try:
            # An EPUB is a ZIP container; reject anything that is not.
            if not zipfile.is_zipfile(file_path):
                return False

            # Quick membership check without parsing the whole book.
            with zipfile.ZipFile(file_path, 'r') as zf:
                required_files = ['mimetype', 'META-INF/container.xml']
                if not all(f in zf.namelist() for f in required_files):
                    return False

            # Let ebooklib attempt a tolerant parse (errors ignored).
            epub.read_epub(file_path, options={
                'ignore_ncx': True,
                'ignore_missing': config.epub_ignore_missing,
                'ignore_errors': True,
                'load_chunks': 1024  # read only part of the content
            })
            return True
        except Exception as e:
            # BUG FIX: was a bare `except:` that also swallowed
            # KeyboardInterrupt/SystemExit.
            if config.debug_mode:
                print(f"{Fore.YELLOW}EPUB验证失败: {str(e)}{Style.RESET_ALL}")
            return False

    @staticmethod
    def is_file_corrupted(file_path: str, config) -> bool:
        """Heuristically decide whether *file_path* contains garbled text.

        Returns False immediately when corruption removal is disabled.
        Only the first 1 MiB is inspected; a file counts as corrupted when
        the ratio of U+FFFD replacement characters exceeds
        ``config.corruption_threshold``. An unreadable file counts as
        corrupted.
        """
        if not config.remove_corrupted:
            return False

        # EPUBs get the structural check instead of a text heuristic.
        if file_path.lower().endswith('.epub'):
            return not FileValidator.is_epub_valid(file_path, config)

        try:
            with open(file_path, 'rb') as f:
                data = f.read(1024 * 1024)  # sample at most 1 MiB
            # First try the configured encodings with strict decoding.
            for enc in config.encoding_priority:
                try:
                    content = data.decode(enc, errors='strict')
                    if '�' in content:
                        return (content.count('�') / len(content)) > config.corruption_threshold
                    return False
                except UnicodeDecodeError:
                    continue
            # Fall back to chardet's guess with replacement decoding.
            try:
                result = chardet.detect(data)
                if result['confidence'] > 0.7:
                    content = data.decode(result['encoding'], errors='replace')
                    return (content.count('�') / len(content)) > config.corruption_threshold
            except Exception:
                # A failed guess is not evidence of corruption.
                pass
            return False
        except Exception:
            # Unreadable file: treat as corrupted (was a bare `except:`).
            return True

class FileSplitter:
    """Splits oversized text files into parts at safe sentence boundaries."""

    def __init__(self, config, logger):
        self.config = config
        self.logger = logger
        self.lock = threading.Lock()

    @staticmethod
    def format_file_size(size_bytes: int) -> str:
        """Format a byte count as a human-readable string (B/KB/MB/GB/TB)."""
        for unit in ['B', 'KB', 'MB', 'GB']:
            if size_bytes < 1024:
                return f"{size_bytes:.2f} {unit}"
            size_bytes /= 1024
        return f"{size_bytes:.2f} TB"

    def find_safe_split_position(self, content: str) -> int:
        """Return a character index at which *content* can be split without
        breaking sentences.

        Preference order: a configured safe split character near the size
        target, then the last newline, then the last space, then a hard cut
        (avoiding the middle of a CJK region).
        """
        max_pos = len(content)
        if max_pos == 0:
            return 0  # BUG FIX: empty input previously caused ZeroDivisionError

        # Estimate bytes-per-char from a sample so the target character
        # count roughly corresponds to max_split_size bytes of UTF-8.
        sample_size = min(1000, max_pos)
        avg_bytes_per_char = len(content[:sample_size].encode('utf-8')) / sample_size
        target_chars = int(self.config.max_split_size / avg_bytes_per_char)

        # Whole content fits in one part.
        if max_pos <= target_chars:
            return max_pos

        # Scan backwards (up to 1024 chars) for a safe split character.
        safe_chars = self.config.safe_split_chars
        search_start = min(target_chars, max_pos)
        search_end = max(0, target_chars - 1024)
        for i in range(search_start, search_end, -1):
            if content[i] in safe_chars:
                return i + 1  # include the split character in this part

        # Fall back to the last newline before the target.
        last_newline = content.rfind('\n', 0, target_chars)
        if last_newline > 0:
            return last_newline + 1

        # Then the last space.
        last_space = content.rfind(' ', 0, target_chars)
        if last_space > 0:
            return last_space + 1

        # Hard split as a last resort, stepping back one char when the cut
        # would land on a CJK ideograph.
        if target_chars < max_pos and '\u4e00' <= content[target_chars] <= '\u9fff':
            return target_chars - 1
        return target_chars

    def split_large_file(self, input_path: str, output_dir: str) -> Tuple[int, str]:
        """Split *input_path* into UTF-8 parts under *output_dir*.

        Returns ``(number_of_valid_parts, output_dir)``; ``(0, "")`` when the
        file is already small enough or an error occurs.
        """
        try:
            input_size = os.path.getsize(input_path)
            if input_size <= self.config.max_split_size:
                return 0, ""

            base_name = TextProcessor.sanitize_filename(os.path.splitext(os.path.basename(input_path))[0])
            ext = os.path.splitext(input_path)[1]
            os.makedirs(output_dir, exist_ok=True)

            with open(input_path, 'rb') as f:
                raw_data = f.read()

            # Pick whichever configured encoding decodes with the least damage.
            encoding, content = EncodingUtils.try_multiple_encodings(raw_data, self.config.encoding_priority)

            if self.config.debug_mode:
                self.logger.print_success(f"检测到最佳编码: {encoding}")

            valid_parts = 0
            part_num = 1

            # Estimate the per-part character target once (based on content).
            sample_size = min(1000, len(content))
            if sample_size == 0:
                return 0, ""  # nothing decodable to split
            avg_bytes_per_char = len(content[:sample_size].encode('utf-8')) / sample_size
            target_chars = int(self.config.max_split_size / avg_bytes_per_char)

            if self.config.debug_mode:
                self.logger.print_success(f"平均字节/字符: {avg_bytes_per_char:.2f}, 目标字符数: {target_chars:,}")

            while content:
                # Take the remainder directly when it fits in one part.
                if len(content) <= target_chars:
                    part_content = content
                    content = ""
                else:
                    split_pos = self.find_safe_split_position(content)
                    part_content = content[:split_pos]
                    content = content[split_pos:]

                if part_content:
                    final_path = os.path.join(output_dir, f"{base_name}_{part_num:02d}{ext}")
                    # BUG FIX: temp_path used to be identical to final_path,
                    # so the write-then-rename pattern was a no-op and a
                    # half-written or corrupted part could be left behind
                    # under its final name. Write to a real .tmp file first.
                    temp_path = final_path + ".tmp"

                    with open(temp_path, 'w', encoding='utf-8') as dst_file:
                        dst_file.write(part_content)

                    if not FileValidator.is_file_corrupted(temp_path, self.config):
                        os.replace(temp_path, final_path)  # atomic on same filesystem
                        valid_parts += 1
                        if self.config.debug_mode:
                            file_size = os.path.getsize(final_path)
                            self.logger.print_success(f"分割文件: {final_path} ({self.format_file_size(file_size)})")
                    else:
                        os.remove(temp_path)
                        self.logger.print_warning(f"删除损坏的分割文件: {final_path}")
                    part_num += 1

            return valid_parts, output_dir

        except Exception as e:
            self.logger.print_error(f"文件分割失败: {str(e)}")
            if self.config.debug_mode:
                traceback.print_exc()
            return 0, ""

class EPUBConverter:
    """EPUB-to-text converter with enhanced error tolerance."""

    def __init__(self, config, logger):
        self.config = config
        self.logger = logger

    def convert_to_text(self, epub_path: str, txt_path: str) -> bool:
        """Convert an EPUB file to a plain-text file.

        Validates the container, reads the book with retries, extracts the
        text and writes it atomically to *txt_path*. Returns True on success.
        """
        try:
            if not self._validate_epub_structure(epub_path):
                self.logger.print_error("EPUB文件结构不完整")
                return False

            book = self._read_epub_with_retry(epub_path)
            if book is None:
                return False

            texts = self._extract_epub_text(book)
            if not texts:
                self.logger.print_error("未提取到有效文本内容")
                return False

            cleaned_text = TextProcessor.clean_text_content('\n\n'.join(texts), self.config)
            self._save_text(txt_path, cleaned_text)

            return True

        except Exception as e:
            self.logger.print_error(f"EPUB转换失败: {str(e)}")
            if self.config.debug_mode:
                traceback.print_exc()
            return False

    def _validate_epub_structure(self, epub_path: str) -> bool:
        """Check the basic OCF container structure of an EPUB file."""
        try:
            with zipfile.ZipFile(epub_path, 'r') as zf:
                required = {'mimetype', 'META-INF/container.xml'}
                names = set(zf.namelist())
                if not required.issubset(names):
                    missing = required - names
                    self.logger.print_warning(f"缺失关键文件: {missing}")
                    return False

                # Validate the mimetype content.
                with zf.open('mimetype') as f:
                    # BUG FIX: strip trailing whitespace/newline — many
                    # otherwise-valid EPUBs have a newline after the media
                    # type, and read(30) captured it, failing the comparison.
                    mimetype_content = f.read(30).decode('ascii').strip().lower()
                    if mimetype_content != 'application/epub+zip':
                        self.logger.print_warning(f"无效的mimetype内容: {mimetype_content}")
                        return False

                # Per the OCF spec, mimetype must be stored uncompressed.
                mimetype_info = zf.getinfo('mimetype')
                if mimetype_info.compress_type != zipfile.ZIP_STORED:
                    self.logger.print_warning("mimetype文件不应被压缩")
                    return False

            return True
        except zipfile.BadZipFile:
            self.logger.print_error("无效的ZIP文件格式")
            return False
        except Exception as e:
            self.logger.print_error(f"EPUB结构验证失败: {str(e)}")
            if self.config.debug_mode:
                traceback.print_exc()
            return False

    def _read_epub_with_retry(self, epub_path: str) -> "Optional[epub.EpubBook]":
        """Read an EPUB with retries and exponential-ish backoff.

        Returns the parsed book, or None after all attempts fail.
        """
        retry_count = 0
        last_error = None

        while retry_count < self.config.epub_max_retries:
            try:
                # Read with lenient options to tolerate broken books.
                book = epub.read_epub(
                    epub_path,
                    options={
                        'ignore_ncx': True,  # ignore NCX errors
                        'ignore_missing': self.config.epub_ignore_missing,  # ignore missing files
                        'ignore_errors': True,  # ignore generic errors
                        'load_chunks': 4096  # chunked reading to limit memory
                    }
                )
                if self.config.debug_mode:
                    self.logger.print_success(f"成功读取EPUB文件(尝试 {retry_count + 1}次)")
                return book

            except KeyError as e:
                # A key internal file reference is missing; back off and retry.
                last_error = e
                wait_time = 0.5 * (retry_count + 1)
                self.logger.print_warning(
                    f"EPUB文件关键引用缺失 - {str(e)} "
                    f"(尝试 {retry_count + 1}/{self.config.epub_max_retries}, "
                    f"{wait_time:.1f}秒后重试...)"
                )
                time.sleep(wait_time)
                retry_count += 1

            except Exception as e:
                # Any other error: retry unless this was the last attempt.
                last_error = e
                if retry_count < self.config.epub_max_retries - 1:
                    wait_time = 1.0 * (retry_count + 1)
                    self.logger.print_warning(
                        f"EPUB读取错误: {str(e)} "
                        f"(尝试 {retry_count + 1}/{self.config.epub_max_retries}, "
                        f"{wait_time:.1f}秒后重试...)"
                    )
                    time.sleep(wait_time)
                    retry_count += 1
                else:
                    break  # final attempt failed; leave the loop

        # All attempts failed.
        error_msg = f"无法读取EPUB文件(尝试 {retry_count}次后失败)"
        if last_error:
            error_msg += f": {str(last_error)}"

        self.logger.print_error(error_msg)

        if self.config.debug_mode and last_error:
            # BUG FIX: traceback.print_exc() here ran outside any except
            # block, where sys.exc_info() is already cleared, printing
            # "NoneType: None". Print the captured exception instead.
            traceback.print_exception(type(last_error), last_error, last_error.__traceback__)

        return None

    def _extract_epub_text(self, book: "epub.EpubBook") -> List[str]:
        """Extract readable text from each HTML document in *book*."""
        texts = []
        strainer = SoupStrainer(['p', 'div', 'h1', 'h2', 'h3', 'li', 'article', 'section'])

        for item in book.get_items():
            if isinstance(item, epub.EpubHtml):
                try:
                    content = item.get_content()
                    encoding, decoded = EncodingUtils.try_multiple_encodings(content, self.config.encoding_priority)

                    # Lenient parse restricted to text-bearing elements.
                    soup = BeautifulSoup(decoded, 'html.parser', parse_only=strainer)

                    # Extract text while preserving basic line structure.
                    text = soup.get_text('\n', strip=True)

                    # Collapse runs of blank lines.
                    text = re.sub(r'\n{3,}', '\n\n', text)
                    text = text.strip()

                    if text and len(text) > 10:  # skip trivially short fragments
                        texts.append(text)

                except Exception as e:
                    if self.config.debug_mode:
                        self.logger.print_warning(f"内容解析跳过: {item.get_name()} - {str(e)}")
                    continue

        return texts

    def _save_text(self, txt_path: str, content: str):
        """Atomically write *content* to *txt_path* (temp file + rename)."""
        temp_path = None
        try:
            # Make sure the target directory exists.
            os.makedirs(os.path.dirname(txt_path), exist_ok=True)

            # Write to a temp file and rename so readers never see a partial file.
            temp_path = f"{txt_path}.tmp"
            with open(temp_path, 'w', encoding='utf-8', errors='replace') as f:
                f.write(content)

            # Replace any existing target.
            if os.path.exists(txt_path):
                os.remove(txt_path)
            os.rename(temp_path, txt_path)

        except Exception as e:
            # Clean up the temp file before propagating.
            if temp_path and os.path.exists(temp_path):
                try:
                    os.remove(temp_path)
                except OSError:
                    pass
            raise e

class TXTConverter:
    """Re-encodes and cleans plain-text files."""

    def __init__(self, config, logger):
        self.config = config
        self.logger = logger

    def process_txt(self, txt_path: str, output_path: str) -> bool:
        """Read *txt_path* with the best-guess encoding, clean its content
        and write the result to *output_path* as UTF-8.

        Returns True on success, False on any failure.
        """
        try:
            with open(txt_path, 'rb') as src:
                raw_bytes = src.read()

            # Let the encoding helper pick whichever candidate decodes best.
            best_encoding, decoded = EncodingUtils.try_multiple_encodings(
                raw_bytes, self.config.encoding_priority)

            if self.config.debug_mode:
                self.logger.print_success(f"检测到最佳编码: {best_encoding}")

            # Normalize line endings / whitespace per the configuration.
            normalized = TextProcessor.clean_text_content(decoded, self.config)

            # Always emit UTF-8, replacing anything unencodable.
            with open(output_path, 'w', encoding='utf-8', errors='replace') as dst:
                dst.write(normalized)

            return True
        except Exception as e:
            self.logger.print_error(f"TXT处理失败: {str(e)}")
            if self.config.debug_mode:
                traceback.print_exc()
            return False

class FileProcessor:
    """Top-level orchestrator: discovers files, converts them to TXT and
    splits oversized files into parts."""

    def __init__(self, config):
        self.config = config
        self.logger = SimpleLogger(config.debug_mode)
        self.splitter = FileSplitter(config, self.logger)
        self.epub_converter = EPUBConverter(config, self.logger)
        self.txt_converter = TXTConverter(config, self.logger)
        self.progress_lock = threading.Lock()
        self.file_count = 0
        self.processed_count = 0

    def initialize(self):
        """Console setup: window title and UTF-8 stdout."""
        os.system('title ' + cxname)  # Windows console title; harmless elsewhere
        try:
            sys.stdout.reconfigure(encoding='utf-8')
        except Exception:
            # reconfigure() may be unavailable on redirected/legacy streams
            pass

    def process_directory(self, directory: str):
        """Process every supported file directly inside *directory*."""
        try:
            # Collect candidate files, skipping our own helper files.
            files = [
                f for f in os.listdir(directory)
                if os.path.splitext(f)[1].lower() in self.config.supported_extensions
                and f.lower() not in ("教程.txt", "config.py")
                and os.path.isfile(os.path.join(directory, f))
            ]

            self.file_count = len(files)
            if not files:
                self.logger.print_error("未找到可处理文件")
                return

            self.logger.print_file_count(self.file_count)

            if self.config.use_multithreading and self.file_count > 1:
                # One worker thread per file (the workload is I/O-bound).
                self.logger.print_warning("使用多线程处理...")
                threads = []
                for file in files:
                    t = threading.Thread(
                        target=self.process_single_file,
                        args=(os.path.join(directory, file),))
                    threads.append(t)
                    t.start()

                for t in threads:
                    t.join()
            else:
                for file in files:
                    self.process_single_file(os.path.join(directory, file))

            # Optionally re-scan the output tree for unfinished work.
            if getattr(self.config, 'auto_continue', False):
                self._auto_continue_processing(directory)

        except Exception as e:
            self.logger.print_error(f"目录处理失败: {str(e)}")
            if self.config.debug_mode:
                traceback.print_exc()

    def _auto_continue_processing(self, directory: str):
        """Re-process output subdirectories that still lack a TXT result."""
        output_dir = Path(directory) / self.config.output_dir
        if not output_dir.exists():
            return

        # Look for per-file output directories with no converted TXT yet.
        unprocessed = []
        for item in output_dir.glob("*"):
            if item.is_dir():
                txt_files = list(item.glob("*.txt"))
                if not txt_files and self.config.convert_to_txt:
                    # Any remaining source files in the directory?
                    source_files = [
                        f for f in item.glob("*")
                        if f.suffix.lower() in self.config.supported_extensions
                    ]
                    if source_files:
                        unprocessed.extend(source_files)

        if unprocessed:
            self.logger.print_warning(f"发现 {len(unprocessed)} 个可能未完成处理的文件，自动重新处理...")
            for file in unprocessed:
                self.process_single_file(str(file))

    def process_single_file(self, file_path: str):
        """Convert and split a single file.

        The original is copied (or moved, per ``keep_original``) into its own
        output directory first; conversion then reads that copy so the source
        is still available even when it was moved.
        """
        try:
            file_name = os.path.basename(file_path)
            self.logger.print_processing(file_name)

            # Per-file output directory named after the cleaned base name.
            clean_name = TextProcessor.sanitize_filename(os.path.splitext(file_name)[0])
            output_dir = Path(os.path.dirname(file_path)) / self.config.output_dir / clean_name
            output_dir.mkdir(parents=True, exist_ok=True)

            if self.config.debug_mode:
                self.logger.print_success(f"原始文件: {file_path}")
                self.logger.print_success(f"输出目录: {output_dir}")

            # Copy or move the original into the output directory.
            source_copy = output_dir / file_name
            if self.config.keep_original:
                shutil.copy2(file_path, source_copy)
                if self.config.debug_mode:
                    self.logger.print_success(f"保留原始文件副本: {source_copy}")
            else:
                shutil.move(file_path, source_copy)
                if self.config.debug_mode:
                    self.logger.print_warning(f"移动原始文件到: {source_copy}")

            ext = Path(file_path).suffix.lower()
            txt_file = None

            if self.config.convert_to_txt:
                txt_path = output_dir / f"{clean_name}.txt"

                # BUG FIX: convert from source_copy, not file_path — when
                # keep_original is False the original has already been moved
                # away, so reading file_path would always fail.
                if ext == '.epub':
                    if not self.epub_converter.convert_to_text(str(source_copy), str(txt_path)):
                        self.logger.print_error(f"EPUB转换失败: {file_name}")
                        return
                elif ext == '.txt':
                    if not self.txt_converter.process_txt(str(source_copy), str(txt_path)):
                        self.logger.print_error(f"TXT处理失败: {file_name}")
                        return
                elif ext in ('.fb2', '.mobi', '.rtf'):
                    self.logger.print_warning(f"暂不支持 {ext} 格式的自动转换")
                    return
                else:
                    self.logger.print_warning(f"跳过不支持转换的文件类型: {ext}")
                    return

                txt_file = txt_path
                self.logger.print_success(f"转换成功: {txt_path}")

                # Sanity-check the converted file.
                if FileValidator.is_file_corrupted(str(txt_path), self.config):
                    self.logger.print_error("转换后的文件可能已损坏")
                    if self.config.remove_corrupted:
                        os.remove(txt_path)
                        self.logger.print_warning("已删除损坏的转换文件")
                        return

            # Split any file that exceeds the configured size limit.
            def process_split(input_file, part_type):
                # part_type labels both the subdirectory and the log message.
                if os.path.getsize(input_file) > self.config.max_split_size:
                    parts, parts_dir = self.splitter.split_large_file(
                        str(input_file),
                        str(output_dir / part_type)
                    )
                    if parts > 0:
                        self.logger.print_success(f"分割完成: {parts}个{part_type}文件 ({parts_dir})")

            # Split the stored source and, if present, the converted text.
            process_split(source_copy, "源文件")
            if txt_file and os.path.exists(txt_file):
                process_split(txt_file, "转换文件")

        except Exception as e:
            self.logger.print_error(f"处理失败: {os.path.basename(file_path)} - {str(e)}")
            if self.config.debug_mode:
                traceback.print_exc()

def main():
    """Program entry point: load the config, process the target directory,
    print a summary, then pause unless auto-continue is enabled."""
    # BUG FIX: bind config before the try block. Previously a
    # KeyboardInterrupt before ConfigManager.initialize() returned left
    # `config` unbound, and the final getattr() raised NameError.
    config = None
    try:
        config = ConfigManager.initialize()
        processor = FileProcessor(config)
        processor.initialize()
        processor.logger.print_header()

        parser = argparse.ArgumentParser(description=f"{cxname}")
        parser.add_argument("path", nargs="?", default=".", help="输入目录路径")
        args = parser.parse_args()

        start_time = time.time()
        processor.process_directory(args.path)

        processor.logger.print_summary(time.time() - start_time)

    except KeyboardInterrupt:
        print(f"\n{Fore.RED}用户中断操作{Style.RESET_ALL}")
    except Exception as e:
        print(f"\n{Fore.RED}严重错误: {str(e)}{Style.RESET_ALL}")
        traceback.print_exc()
        sys.exit(1)

    # Pause before exit so the console window stays readable.
    if not getattr(config, 'auto_continue', False):
        input("\n按Enter键退出...")

if __name__ == "__main__":
    main()