import os
import zipfile
import re
from collections import Counter
from bs4 import BeautifulSoup
import argparse
from typing import List, Dict
import logging
import json
import PyPDF2  # Added for PDF support

# Configure logging: timestamped, level-tagged messages at INFO and above.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
# Module-level logger used by FrequencyGenerator and main().
logger = logging.getLogger(__name__)


class FrequencyGenerator:
    """Word-frequency generator supporting EPUB, PDF and TXT files.

    Aggregates word counts across all processed files, then derives a
    frequency percentage per word: (count / total_words) * 100.
    """

    # Compiled once at class load instead of per call: a word starts with a
    # letter and may contain internal hyphens (e.g. "x-ray", "well-known").
    _WORD_RE = re.compile(r'\b[A-Za-z][A-Za-z\-]*[A-Za-z]?\b')
    # The only single-letter tokens that are real English words and kept.
    _SINGLE_LETTER_WORDS = frozenset({'a', 'i'})
    # Archive entries treated as HTML documents inside an EPUB.
    _HTML_SUFFIXES = ('.html', '.xhtml', '.htm')

    def __init__(self):
        self._reset()

    def _reset(self) -> None:
        """Reset all accumulated statistics to their initial state."""
        self.word_counter = Counter()   # lowercased word -> raw count
        self.total_words = 0            # total tokens seen across all files
        self.processed_files = 0        # files that yielded at least some text
        self.word_frequencies: Dict[str, float] = {}  # word -> frequency (%)

    def extract_text_from_epub(self, epub_path: str) -> str:
        """Extract plain text from every HTML document inside an EPUB archive.

        Unreadable individual entries are skipped with a warning; returns ""
        if the archive itself cannot be opened.
        """
        text_content: List[str] = []

        try:
            with zipfile.ZipFile(epub_path, 'r') as epub:
                html_files = [name for name in epub.namelist()
                              if name.endswith(self._HTML_SUFFIXES)]

                for name in html_files:
                    try:
                        with epub.open(name) as handle:
                            soup = BeautifulSoup(handle.read(), 'html.parser')
                            # Remove non-content markup before text extraction.
                            for tag in soup(["script", "style"]):
                                tag.decompose()
                            text_content.append(soup.get_text())
                    except Exception as e:
                        logger.warning(f"处理文件 {name} 时出错: {e}")
                        continue

        except Exception as e:
            logger.error(f"无法打开EPUB文件 {epub_path}: {e}")
            return ""

        return "\n".join(text_content)

    def extract_text_from_pdf(self, pdf_path: str) -> str:
        """Extract text from every page of a PDF; returns "" on failure."""
        text_content: List[str] = []

        try:
            with open(pdf_path, 'rb') as f:
                pdf_reader = PyPDF2.PdfReader(f)
                for page in pdf_reader.pages:
                    text = page.extract_text()
                    # Pages without a text layer yield None/"" — skip them.
                    if text:
                        text_content.append(text)
        except Exception as e:
            logger.error(f"无法打开PDF文件 {pdf_path}: {e}")
            return ""

        return "\n".join(text_content)

    def extract_text_from_txt(self, txt_path: str) -> str:
        """Read a UTF-8 text file in full; returns "" on failure."""
        try:
            with open(txt_path, 'r', encoding='utf-8') as f:
                return f.read()
        except Exception as e:
            logger.error(f"无法打开TXT文件 {txt_path}: {e}")
            return ""

    def clean_and_tokenize(self, text: str) -> List[str]:
        """Tokenize *text* into lowercased English words.

        Single-letter tokens are discarded unless they are the words
        "a" or "i" (case-insensitive).
        """
        return [word.lower() for word in self._WORD_RE.findall(text)
                if len(word) > 1 or word.lower() in self._SINGLE_LETTER_WORDS]

    def process_file(self, file_path: str):
        """Extract, tokenize and count the words of a single file.

        Dispatches on the file extension; files that yield no text are
        logged and skipped without affecting the counters.
        """
        logger.info(f"正在处理: {file_path}")

        ext = os.path.splitext(file_path)[1].lower()
        if ext == '.epub':
            text = self.extract_text_from_epub(file_path)
        elif ext == '.pdf':
            text = self.extract_text_from_pdf(file_path)
        elif ext == '.txt':
            text = self.extract_text_from_txt(file_path)
        else:
            text = ""

        if not text:
            logger.warning(f"无法从 {file_path} 提取文本")
            return

        words = self.clean_and_tokenize(text)
        self.word_counter.update(words)
        self.total_words += len(words)
        self.processed_files += 1

        logger.info(f"已处理 {len(words)} 个单词来自 {file_path}")

    def process_directory(self, directory_path: str):
        """Process every supported file in *directory_path* (non-recursive).

        Resets all counters first, then fills ``word_frequencies`` with the
        percentage frequency of each word. Returns True on success, False if
        the directory is missing or contains no supported files.
        """
        if not os.path.exists(directory_path):
            logger.error(f"目录不存在: {directory_path}")
            return False

        supported_files = [name for name in os.listdir(directory_path)
                           if name.lower().endswith(('.epub', '.pdf', '.txt'))]

        if not supported_files:
            logger.error("在目录中未找到支持的文件 (EPUB, PDF, TXT)")
            return False

        logger.info(f"找到 {len(supported_files)} 个支持的文件")

        # Start from a clean slate so repeated calls don't accumulate.
        self._reset()

        total_files = len(supported_files)
        for i, file_name in enumerate(supported_files, 1):
            logger.info(f"处理文件 [{i}/{total_files}]: {file_name}")
            self.process_file(os.path.join(directory_path, file_name))

        # Convert raw counts into percentage frequencies.
        if self.total_words > 0:
            self.word_frequencies = {
                word: (count / self.total_words) * 100
                for word, count in self.word_counter.items()
            }

        logger.info(f"处理完成，共统计到 {len(self.word_frequencies)} 个唯一单词")

        return True

    def save_word_frequencies(self, output_path: str):
        """Write the word frequencies as JSON, sorted by descending frequency."""
        try:
            # dicts preserve insertion order, so the JSON keeps the sort.
            word_frequencies_sorted = dict(sorted(
                self.word_frequencies.items(),
                key=lambda item: item[1],
                reverse=True,
            ))

            # Ensure the output directory exists ('.' for bare filenames).
            os.makedirs(os.path.dirname(output_path) or '.', exist_ok=True)

            with open(output_path, 'w', encoding='utf-8') as f:
                json.dump(word_frequencies_sorted, f, ensure_ascii=False, indent=2)
            logger.info(f"单词频率数据已保存到: {output_path}")
        except Exception as e:
            logger.error(f"保存单词频率数据时出错: {e}")


def main():
    """Command-line entry point: count word frequencies for one directory."""
    parser = argparse.ArgumentParser(
        description='生成文件的单词频率数据，支持EPUB, PDF和TXT格式')
    parser.add_argument('directory', help='包含文件的目录路径')
    parser.add_argument(
        '-o', '--output',
        default='word_frequencies.json',
        help='单词频率输出文件路径 (默认: word_frequencies.json)',
    )
    args = parser.parse_args()

    generator = FrequencyGenerator()

    # Bail out early if the directory is missing or has no supported files.
    if not generator.process_directory(args.directory):
        return

    generator.save_word_frequencies(args.output)

    # Final run summary.
    logger.info(f"处理完成! 共处理 {generator.processed_files} 个文件")
    logger.info(f"总单词数: {generator.total_words}")
    logger.info(f"唯一单词数: {len(generator.word_counter)}")
    logger.info(f"单词频率数据已保存到: {args.output}")


# Run the CLI only when executed as a script, not when imported.
if __name__ == "__main__":
    main()