import os
import zipfile
import re
import pandas as pd
from collections import Counter, OrderedDict
from bs4 import BeautifulSoup
import argparse
from typing import List, Dict, Tuple, Set
import logging
import json
import time

# Configure module-wide logging: "timestamp - level - message" on stdout.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

class EPUBWordCounter:
    """Count English word frequencies across EPUB files and identify new words.

    Builds a frequency model from a corpus of EPUB books; words whose corpus
    frequency (as a percentage of all tokens) meets a threshold are "known",
    and words in a new book outside that set are reported as new words.
    """

    def __init__(self):
        self.word_counter = Counter()  # lower-cased word -> occurrence count across the corpus
        self.total_words = 0  # total tokens counted over all processed files
        self.processed_files = 0  # number of EPUB files processed so far
        self.known_words: Set[str] = set()  # lower-cased words at/above the frequency threshold
        self.word_frequencies: Dict[str, float] = {}  # word -> frequency as a percentage of total_words

    def extract_text_from_epub(self, epub_path: str) -> str:
        """Extract plain text from every HTML document in an EPUB (case preserved).

        Returns the text of all HTML/XHTML/HTM entries joined by newlines, or
        an empty string when the archive cannot be opened. Entries that fail
        to parse are skipped with a warning.
        """
        chunks = []

        try:
            with zipfile.ZipFile(epub_path, 'r') as epub:
                # Only HTML-like entries carry readable book text.
                for file in epub.namelist():
                    if not file.endswith(('.html', '.xhtml', '.htm')):
                        continue
                    try:
                        with epub.open(file) as handle:
                            soup = BeautifulSoup(handle.read(), 'html.parser')
                            # Strip non-content markup before extracting text.
                            for tag in soup(["script", "style"]):
                                tag.decompose()
                            chunks.append(soup.get_text())
                    except Exception as e:
                        logger.warning(f"处理文件 {file} 时出错: {e}")
        except Exception as e:
            logger.error(f"无法打开EPUB文件 {epub_path}: {e}")
            return ""

        return "\n".join(chunks)

    def extract_text_with_file_info(self, epub_path: str, target_files: List[str] = None) -> List[Tuple[str, str]]:
        """Extract (text, archive_filename) pairs from an EPUB's HTML documents.

        When *target_files* is given, only those files are processed, in the
        order the caller listed them; each name may be a bare basename, a full
        archive path, or a case-insensitive substring of a basename. Without
        *target_files*, all HTML files are processed in sorted name order.
        Returns [] when the archive cannot be opened.
        """
        text_with_files = []

        try:
            with zipfile.ZipFile(epub_path, 'r') as epub:
                html_files = [f for f in epub.namelist()
                              if f.endswith('.html') or f.endswith('.xhtml') or f.endswith('.htm')]

                # When target files are specified, restrict processing to them.
                if target_files:
                    matched_files = []
                    unmatched_files = []
                    file_mapping = {}  # lookup key (basename or full path) -> archive path

                    # Map both the basename and the full archive path to the path.
                    for file in html_files:
                        file_basename = os.path.basename(file)
                        file_mapping[file_basename] = file
                        file_mapping[file] = file  # full path maps to itself

                    # Resolve targets in the caller-specified order.
                    for target_file in target_files:
                        target_file = target_file.strip()
                        found = False

                        # Exact match first (basename or full path).
                        if target_file in file_mapping:
                            matched_files.append(file_mapping[target_file])
                            found = True
                        else:
                            # Fall back to a case-insensitive substring match
                            # against basenames; the first hit wins.
                            for file_path, file_basename in [(f, os.path.basename(f)) for f in html_files]:
                                if target_file.lower() in file_basename.lower():
                                    matched_files.append(file_path)
                                    found = True
                                    break

                        if not found:
                            unmatched_files.append(target_file)

                    if matched_files:
                        # Keep the caller's order but drop duplicate matches.
                        unique_files = []
                        seen = set()
                        for file in matched_files:
                            if file not in seen:
                                seen.add(file)
                                unique_files.append(file)
                        html_files = unique_files
                        logger.info(f"找到匹配文件: {html_files}")
                        if unmatched_files:
                            logger.warning(f"未找到文件: {unmatched_files}")
                    else:
                        # NOTE(review): when nothing matches, ALL files are
                        # processed, and in raw archive order (not sorted like
                        # the no-target branch) — confirm this fallback is intended.
                        logger.warning(f"未找到任何匹配文件，将处理所有文件")
                        logger.info(f"可用文件: {html_files}")
                else:
                    # No targets: process every HTML file in name order.
                    html_files.sort()

                # Parse each selected file, logging progress and timing.
                total_files = len(html_files)
                for i, file in enumerate(html_files, 1):
                    try:
                        logger.info(f"正在处理文件 [{i}/{total_files}]: {file}")
                        start_time = time.time()

                        with epub.open(file) as f:
                            content = f.read()
                            soup = BeautifulSoup(content, 'html.parser')
                            for script in soup(["script", "style"]):
                                script.decompose()

                            text = soup.get_text()
                            # Keep the extracted text paired with its source filename.
                            text_with_files.append((text, file))

                        elapsed = time.time() - start_time
                        logger.info(f"文件处理完成: {file} (耗时: {elapsed:.2f}秒)")
                    except Exception as e:
                        logger.warning(f"处理文件 {file} 时出错: {e}")
                        continue

        except Exception as e:
            logger.error(f"无法打开EPUB文件 {epub_path}: {e}")
            return []

        return text_with_files

    def clean_and_tokenize(self, text: str) -> List[str]:
        """Tokenize *text* into lower-cased English words for frequency counting.

        Hyphenated words are kept as single tokens; single-letter tokens are
        discarded except for 'a'/'i' (any case).
        """
        return [
            token.lower()
            for token in re.findall(r'\b[A-Za-z][A-Za-z\-]*[A-Za-z]?\b', text)
            if len(token) > 1 or token.lower() in ('a', 'i')
        ]

    def extract_original_words_with_file_positions(self, text: str) -> List[Tuple[str, int]]:
        """Return (word, position) pairs in document order, preserving case.

        Positions are 1-based and count only accepted tokens (single letters
        other than a/i, in either case, are dropped).
        """
        tokens = (m.group() for m in re.finditer(r'\b[A-Za-z][A-Za-z\-]*[A-Za-z]?\b', text))
        kept = [t for t in tokens if len(t) > 1 or t.lower() in ('a', 'i')]
        return [(t, idx) for idx, t in enumerate(kept, start=1)]

    def process_epub_file(self, epub_path: str):
        """Tokenize one EPUB and fold its words into the running counters."""
        logger.info(f"正在处理: {epub_path}")

        text = self.extract_text_from_epub(epub_path)
        if not text:
            # Nothing extracted: leave all counters untouched.
            logger.warning(f"无法从 {epub_path} 提取文本")
            return

        tokens = self.clean_and_tokenize(text)
        self.word_counter.update(tokens)
        self.total_words += len(tokens)
        self.processed_files += 1
        logger.info(f"已处理 {len(tokens)} 个单词来自 {epub_path}")

    def process_directory(self, directory_path: str, frequency_threshold: float = 0.01):
        """Process every EPUB in *directory_path* and build the known-word model.

        A word is "known" when its frequency (percentage of all corpus tokens)
        is at least *frequency_threshold*.

        Returns True on success, False when the directory is missing or
        contains no EPUB files.
        """
        if not os.path.exists(directory_path):
            logger.error(f"目录不存在: {directory_path}")
            return False

        # Sort for a deterministic processing order — os.listdir order is
        # platform-dependent; the resulting counts are order-independent.
        epub_files = sorted(f for f in os.listdir(directory_path)
                            if f.lower().endswith('.epub'))

        if not epub_files:
            logger.error("在目录中未找到EPUB文件")
            return False

        logger.info(f"找到 {len(epub_files)} 个EPUB文件")

        total_epub_files = len(epub_files)
        for i, epub_file in enumerate(epub_files, 1):
            logger.info(f"处理EPUB文件 [{i}/{total_epub_files}]: {epub_file}")
            self.process_epub_file(os.path.join(directory_path, epub_file))

        # Frequency of each word as a percentage of all corpus tokens.
        if self.total_words > 0:
            self.word_frequencies = {
                word: (count / self.total_words) * 100
                for word, count in self.word_counter.items()
            }

        # Known words: lower-cased words whose frequency meets the threshold.
        self.known_words = {word for word, freq in self.word_frequencies.items() if freq >= frequency_threshold}
        logger.info(f"已知单词数量（频率≥{frequency_threshold}%）: {len(self.known_words)}")

        # Raw per-word counts, kept for downstream inspection.
        self.word_frequency_details = dict(self.word_counter)

        return True

    def find_intelligent_example(self, text: str, target_word: str, target_position: int, context_words: int = 10) -> str:
        """Find an example snippet containing *target_word*, preferring full sentences.

        Returns up to *context_words* words on each side of the match within
        the first sentence that contains the word, with ellipses marking
        truncation. Falls back to find_basic_example() when no sentence-level
        match is found. *target_position* is kept for interface compatibility
        but is not used.
        """
        # Whole-word, case-insensitive match.
        pattern = re.compile(r'\b' + re.escape(target_word) + r'\b', re.IGNORECASE)

        for sentence in self.split_into_sentences(text):
            if not pattern.search(sentence):
                continue

            words_in_sentence = sentence.split()
            for i, word in enumerate(words_in_sentence):
                # BUGFIX: tokens from split() carry adjacent punctuation
                # (e.g. "word,"), so fullmatch() never matched them and the
                # sentence path always fell through. search() still enforces
                # the \b word boundaries while tolerating punctuation.
                if pattern.search(word):
                    # Window of context_words tokens on each side of the hit.
                    start = max(0, i - context_words)
                    end = min(len(words_in_sentence), i + context_words + 1)

                    # Ellipses indicate truncation at either end.
                    prefix = "..." if start > 0 else ""
                    suffix = "..." if end < len(words_in_sentence) else ""

                    context = ' '.join(words_in_sentence[start:end])
                    return f"{prefix}{context}{suffix}"

        # No sentence-level match: fall back to a plain windowed search.
        return self.find_basic_example(text, target_word, context_words)

    def split_into_sentences(self, text: str) -> List[str]:
        """Split *text* into sentences, keeping terminal punctuation.

        Works line by line; each line is split on sentence-ending punctuation
        (. ! ?, optionally followed by a quote). Fragments of 10 characters or
        fewer, or without any letters, are dropped.
        """
        sentences = []

        for line in text.split('\n'):
            line = line.strip()
            if not line:
                continue

            # The capturing group keeps each delimiter so it can be
            # re-attached to the sentence that precedes it.
            parts = re.split(r'([.!?]+["\']?\s+)', line)

            # parts alternates [sentence, delimiter, ..., tail]; rejoin each
            # sentence with its delimiter.
            for i in range(0, len(parts) - 1, 2):
                sentences.append((parts[i] + parts[i + 1]).strip())

            # BUGFIX: the trailing fragment after the last delimiter — and any
            # line with no sentence-ending punctuation at all — was previously
            # dropped entirely.
            if len(parts) % 2 == 1 and parts[-1].strip():
                sentences.append(parts[-1].strip())

        # Discard too-short fragments and those without letters.
        return [s for s in sentences if len(s) > 10 and any(c.isalpha() for c in s)]

    def find_basic_example(self, text: str, target_word: str, context_words: int) -> str:
        """Return a window of words around the first occurrence of *target_word*.

        Ellipses mark truncation at either end; returns "" when the word does
        not occur in *text*.
        """
        # Whole-word, case-insensitive match.
        pattern = re.compile(r'\b' + re.escape(target_word) + r'\b', re.IGNORECASE)

        words = text.split()
        for i, word in enumerate(words):
            # BUGFIX: fullmatch() failed whenever the token carried adjacent
            # punctuation ("fox," never equals "fox"), so matches near
            # punctuation were silently missed. search() still respects the
            # \b word boundaries.
            if pattern.search(word):
                start = max(0, i - context_words)
                end = min(len(words), i + context_words + 1)

                # Ellipses indicate truncation at either end.
                prefix = "..." if start > 0 else ""
                suffix = "..." if end < len(words) else ""

                context = ' '.join(words[start:end])
                return f"{prefix}{context}{suffix}"

        return ""

    def find_new_words_with_examples(self, new_epub_path: str, context_words: int = 12,
                                     enable_cross_file_deduplication: bool = True,
                                     target_files: List[str] = None) -> Dict[str, List[Dict]]:
        """Scan a new book for words outside the known-word model, grouped by file.

        Returns an OrderedDict mapping each internal filename to its list of
        new-word records (word, example sentence, position metadata), in the
        order the files were processed. Requires process_directory() to have
        built self.known_words first.
        """
        if not self.known_words:
            logger.warning("没有已知单词数据，请先处理目录中的EPUB文件")
            return OrderedDict()

        logger.info(f"开始分析新书: {new_epub_path}")
        if target_files:
            logger.info(f"只解析指定文件: {target_files}")
        if enable_cross_file_deduplication:
            logger.info("启用跨文件去重：前面文件出现过的生词后面不再出现")
        else:
            logger.info("禁用跨文件去重：每个文件独立统计生词")

        text_with_files = self.extract_text_with_file_info(new_epub_path, target_files)
        if not text_with_files:
            logger.error(f"无法从新书 {new_epub_path} 提取文本")
            return OrderedDict()

        # OrderedDict preserves the (user-specified or sorted) file order.
        file_new_words = OrderedDict()
        # Lower-cased words already reported, used for cross-file deduplication.
        all_found_new_words = set() if enable_cross_file_deduplication else None

        # Pre-create entries so file order is fixed up front.
        for text, filename in text_with_files:
            file_new_words[filename] = []

        total_files = len(text_with_files)
        for file_idx, (text, filename) in enumerate(text_with_files, 1):
            # BUGFIX: these progress logs printed the literal "(unknown)"
            # instead of the file actually being analysed.
            logger.info(f"分析文件 [{file_idx}/{total_files}]: {filename}")
            start_time = time.time()

            # All tokens in this file with their 1-based positions.
            file_words_with_positions = self.extract_original_words_with_file_positions(text)
            total_words_in_file = len(file_words_with_positions)

            if total_words_in_file == 0:
                logger.info(f"文件 {filename} 中没有找到单词，跳过")
                continue

            found_words_in_file = set()
            new_words_in_file = 0

            for word, word_position in file_words_with_positions:
                word_lower = word.lower()

                # Known = at/above the corpus frequency threshold.
                is_known_word = word_lower in self.known_words

                # Already reported earlier in this same file?
                already_in_file = word_lower in found_words_in_file

                # Already reported in an earlier file (when deduplicating)?
                already_in_previous_files = False
                if enable_cross_file_deduplication and all_found_new_words:
                    already_in_previous_files = word_lower in all_found_new_words

                # A "new" word is unknown, not yet reported, at least three
                # characters long, and purely alphabetic (no hyphens).
                is_new_word = (not is_known_word and
                               not already_in_file and
                               (not enable_cross_file_deduplication or not already_in_previous_files) and
                               len(word) > 2 and
                               word_lower.isalpha())

                if is_new_word:
                    # Prefer a full-sentence example around this occurrence.
                    example_sentence = self.find_intelligent_example(text, word, word_position, context_words)

                    if example_sentence and len(example_sentence.strip()) > 20:  # require a usable example
                        # Frequency of this word in the known corpus (0 if absent).
                        known_frequency = self.word_frequencies.get(word_lower, 0)

                        new_word_data = {
                            'word': word,  # original casing
                            'word_lower': word_lower,  # lower-cased, used for dedup
                            'example_sentence': example_sentence.strip(),
                            'source_file': os.path.basename(filename),
                            'word_position': word_position,  # 1-based position in the file
                            'total_words_in_file': total_words_in_file,
                            'position_percentage': round((word_position / total_words_in_file) * 100, 2),
                            'known_frequency': round(known_frequency, 4),  # % frequency in the known corpus
                            'is_known_word': is_known_word  # always False here by construction
                        }

                        file_new_words[filename].append(new_word_data)
                        found_words_in_file.add(word_lower)
                        new_words_in_file += 1

                        # Record globally when cross-file dedup is on.
                        if enable_cross_file_deduplication and all_found_new_words is not None:
                            all_found_new_words.add(word_lower)

            elapsed = time.time() - start_time
            logger.info(f"文件分析完成: {filename} (发现 {new_words_in_file} 个生词, 耗时: {elapsed:.2f}秒)")

        # Sort each file's words by position and drop files with none.
        result = OrderedDict()
        for filename, words_list in file_new_words.items():
            if words_list:
                words_list.sort(key=lambda x: x['word_position'])
                result[filename] = words_list

        # Per-file and overall tallies.
        for filename, words_list in result.items():
            logger.info(f"文件 '{os.path.basename(filename)}' 中发现 {len(words_list)} 个生词")

        total_new_words = sum(len(words) for words in result.values())
        logger.info(f"在新书中共发现 {total_new_words} 个生词")
        return result

    def get_word_frequency_with_case(self, top_n: int = None) -> List[Dict]:
        """获取单词频率统计（包含大小写信息）"""
        if not self.word_counter:
            return []

        total_count = sum(self.word_counter.values())
        word_freq = []

        for word, count in self.word_counter.most_common(top_n):
            frequency = (count / total_count) * 100 if total_count > 0 else 0
            word_freq.append({
                'word': word,  # 显示小写形式
                'count': count,
                'frequency_percent': round(frequency, 4),
                'cumulative_percent': 0,
                'is_known': word in self.known_words
            })

        # 计算累计百分比
        cumulative = 0
        for item in word_freq:
            cumulative += item['frequency_percent']
            item['cumulative_percent'] = round(cumulative, 4)

        return word_freq

    def save_to_excel(self, output_path: str, top_n: int = 1000, frequency_threshold: float = 0.01):
        """Write the frequency table plus a summary sheet to an Excel workbook.

        Returns True on success, False when there is no data or writing fails.
        """
        if not self.word_counter:
            logger.error("没有数据可保存")
            return False

        word_freq = self.get_word_frequency_with_case(top_n)
        freq_df = pd.DataFrame(word_freq)

        try:
            with pd.ExcelWriter(output_path, engine='openpyxl') as writer:
                # Main per-word frequency table.
                freq_df.to_excel(writer, sheet_name='Word Frequency', index=False)

                # Run-level summary sheet.
                summary = pd.DataFrame({
                    '统计项目': ['处理的文件数', '总单词数', '唯一单词数',
                             '输出单词数', '已知单词数', '已知词频率阈值(%)'],
                    '数值': [self.processed_files, self.total_words,
                           len(self.word_counter), len(word_freq), len(self.known_words),
                           frequency_threshold]
                })
                summary.to_excel(writer, sheet_name='Summary', index=False)

            logger.info(f"结果已保存到: {output_path}")
            return True

        except Exception as e:
            logger.error(f"保存Excel文件时出错: {e}")
            return False

    def save_new_words_to_excel_by_file(self, file_words: Dict[str, List[Dict]], output_path: str, target_files: List[str] = None):
        """Write per-file new-word sheets plus a summary and a combined sheet.

        One sheet per source file (in file order), a '摘要' summary sheet, and
        an '所有生词' sheet with every word sorted by file then position.
        *target_files* is accepted for interface compatibility but unused.
        Returns True on success, False when there is nothing to save or
        writing fails.
        """
        if not file_words:
            logger.warning("没有生词数据可保存")
            return False

        # Shared column order and header translations for every word sheet.
        columns = ['word', 'known_frequency', 'is_known_word', 'word_position',
                   'total_words_in_file', 'position_percentage', 'example_sentence', 'source_file']
        headers = {
            'word': '单词',
            'known_frequency': '已知语料频率(%)',
            'is_known_word': '是否已知单词',
            'word_position': '单词位置',
            'total_words_in_file': '文件总单词数',
            'position_percentage': '位置百分比(%)',
            'example_sentence': '例句',
            'source_file': '源文件'
        }

        try:
            with pd.ExcelWriter(output_path, engine='openpyxl') as writer:
                # BUGFIX: '占比(%)' previously divided by a running total that
                # was incremented per row, so the first file always showed
                # 100% and later files were inflated. Compute the grand total
                # up front and divide by it.
                total_new_words = sum(len(words) for words in file_words.values())

                summary_data = []
                cumulative_words = 0

                # Summary rows, in file order.
                for filename, words_list in file_words.items():
                    file_basename = os.path.basename(filename)
                    file_word_count = words_list[0]['total_words_in_file'] if words_list else 0
                    new_words_count = len(words_list)
                    cumulative_words += new_words_count

                    summary_data.append({
                        '文件名': file_basename,
                        '生词数量': new_words_count,
                        '文件总单词数': file_word_count,
                        '生词密度(%)': round((new_words_count / file_word_count) * 100, 2) if file_word_count > 0 else 0,
                        '累计生词数': cumulative_words,
                        '占比(%)': round((new_words_count / total_new_words) * 100, 2) if total_new_words > 0 else 0
                    })

                pd.DataFrame(summary_data).to_excel(writer, sheet_name='摘要', index=False)

                # One sheet per file, in file order.
                for filename, words_list in file_words.items():
                    if words_list:
                        df = pd.DataFrame(words_list)[columns].rename(columns=headers)
                        sheet_name = self.clean_sheet_name(os.path.basename(filename))
                        df.to_excel(writer, sheet_name=sheet_name, index=False)

                # Combined sheet with every new word.
                all_words = [w for words_list in file_words.values() for w in words_list]
                if all_words:
                    all_df = pd.DataFrame(all_words)[columns].rename(columns=headers)
                    # Sort by source file, then by position within the file.
                    all_df = all_df.sort_values(by=['源文件', '单词位置'])
                    all_df.to_excel(writer, sheet_name='所有生词', index=False)

            logger.info(f"生词表已按文件顺序保存到: {output_path}")
            return True

        except Exception as e:
            logger.error(f"保存生词表时出错: {e}")
            return False

    def clean_sheet_name(self, name: str, max_length: int = 31) -> str:
        """Sanitize *name* for use as an Excel sheet title.

        Strips characters Excel forbids in sheet names and truncates to
        *max_length* (31 is Excel's hard limit), marking truncation with '...'.
        """
        sanitized = re.sub(r'[\\/*?:[\]]', '', name)
        if len(sanitized) <= max_length:
            return sanitized
        return sanitized[:max_length - 3] + '...'

    def save_known_words(self, output_path: str, frequency_threshold: float = 0.01):
        """Dump known words (lower-cased) and their frequencies to a JSON file.

        Only words at/above *frequency_threshold* are written; errors are
        logged rather than raised.
        """
        filtered = {
            word: freq for word, freq in self.word_frequencies.items()
            if freq >= frequency_threshold
        }
        try:
            with open(output_path, 'w', encoding='utf-8') as fh:
                json.dump(filtered, fh, ensure_ascii=False, indent=2)
            logger.info(f"已知单词列表已保存到: {output_path}")
        except Exception as e:
            logger.error(f"保存已知单词列表时出错: {e}")

def get_output_filename(epub_path: str, target_files: List[str] = None, suffix: str = "_new_words.xlsx") -> str:
    """Derive an output filename from the EPUB name (and any target files).

    When target files are given, the stems of up to two of them are embedded
    in the name, with a "_and_N_more" marker for any remainder.
    """
    base_name = os.path.splitext(os.path.basename(epub_path))[0]
    if not target_files:
        return base_name + suffix

    # Use at most the first two target stems as the identifier.
    stems = [os.path.splitext(os.path.basename(t))[0] for t in target_files[:2]]
    tag = "_".join(stems)
    extra = len(target_files) - 2
    if extra > 0:
        tag += f"_and_{extra}_more"
    return f"{base_name}_{tag}{suffix}"

def parse_target_files(target_files_str: str) -> List[str]:
    """Split a comma-separated file list into trimmed names; None when empty."""
    if not target_files_str:
        return None
    pieces = (piece.strip() for piece in target_files_str.split(','))
    return [piece for piece in pieces if piece]

def main():
    """CLI entry point: build the frequency model from a corpus directory,
    save the results, and optionally scan a new book for unknown words."""
    parser = argparse.ArgumentParser(description='统计EPUB文件中英文单词频率并识别生词（使用频率百分比版本）')
    parser.add_argument('directory', help='包含EPUB文件的目录路径')
    parser.add_argument('-n', '--new-book', help='新EPUB文件路径（用于识别生词）')
    parser.add_argument('-f', '--target-files', help='指定只解析新书中的特定文件列表，用英文逗号分隔（如: Chapter_Five.xhtml,Section0023.xhtml）')
    parser.add_argument('-o', '--output', default='word_frequency.xlsx',
                        help='输出Excel文件路径 (默认: word_frequency.xlsx)')
    parser.add_argument('--new-words-output',
                        help='生词输出文件路径 (默认: 使用新书文件名)')
    parser.add_argument('--known-words-output', default='known_words.json',
                        help='已知单词列表输出文件路径 (默认: known_words.json)')
    parser.add_argument('-t', '--top-n', type=int, default=1000,
                        help='输出前N个最常出现的单词 (默认: 1000)')
    parser.add_argument('-c', '--context-words', type=int, default=12,
                        help='例句上下文单词数量 (默认: 12)')
    parser.add_argument('--frequency-threshold', type=float, default=0.01,
                        help='已知单词频率百分比阈值 (默认: 0.01%%)')
    parser.add_argument('--no-cross-file-deduplication', action='store_true',
                        help='禁用跨文件去重（默认启用）')

    args = parser.parse_args()

    # Parse the optional comma-separated list of target files (None when unset).
    target_files = parse_target_files(args.target_files)

    # Counter instance holding the frequency model and known-word set.
    counter = EPUBWordCounter()

    # Build the model from the corpus directory; abort on failure.
    if not counter.process_directory(args.directory, args.frequency_threshold):
        return

    # Persist the known-word list as JSON.
    counter.save_known_words(args.known_words_output, args.frequency_threshold)

    # Write the main frequency workbook; abort if saving fails.
    if not counter.save_to_excel(args.output, args.top_n, args.frequency_threshold):
        return

    # Optionally scan a new book for words outside the known set.
    if args.new_book:
        if not os.path.exists(args.new_book):
            logger.error(f"新书文件不存在: {args.new_book}")
            return

        # Derive the output filename from the book (and targets) when unset.
        if not args.new_words_output:
            args.new_words_output = get_output_filename(args.new_book, target_files)

        logger.info("开始分析新书中的生词...")
        start_time = time.time()

        file_words = counter.find_new_words_with_examples(
            args.new_book,
            args.context_words,
            not args.no_cross_file_deduplication,  # deduplicate unless explicitly disabled
            target_files  # restrict analysis to these files, if any
        )

        total_time = time.time() - start_time
        logger.info(f"新书分析完成，总耗时: {total_time:.2f}秒")

        if file_words:
            counter.save_new_words_to_excel_by_file(file_words, args.new_words_output, target_files)
        else:
            logger.info("在新书中未发现生词")

    # Final run statistics.
    logger.info(f"处理完成! 共处理 {counter.processed_files} 个文件")
    logger.info(f"总单词数: {counter.total_words}")
    logger.info(f"唯一单词数: {len(counter.word_counter)}")
    logger.info(f"已知单词数（频率≥{args.frequency_threshold}%）: {len(counter.known_words)}")
    logger.info(f"主要结果已保存到: {args.output}")

    if args.new_book:
        logger.info(f"生词结果已保存到: {args.new_words_output}")

if __name__ == "__main__":
    main()